diff --git a/Cargo.lock b/Cargo.lock index 897a0701f9d6f..8ff2f8ea9cf50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1479,6 +1479,15 @@ dependencies = [ "proc-macro-hack 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "historical-data" +version = "2.0.0" +dependencies = [ + "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-std 2.0.0", +] + [[package]] name = "hmac" version = "0.4.2" @@ -5259,6 +5268,7 @@ version = "2.0.0" dependencies = [ "env_logger 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "hash-db 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "historical-data 2.0.0", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)", "kvdb-rocksdb 0.1.4 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)", @@ -5982,6 +5992,7 @@ name = "substrate-state-db" version = "2.0.0" dependencies = [ "env_logger 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "historical-data 2.0.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index 5e1238866b876..cf2682be4d379 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ members = [ "core/transaction-pool/graph", "core/trie", "core/utils/fork-tree", + "core/utils/historical-data", "core/utils/wasm-builder", "core/utils/wasm-builder-runner", "core/wasm-interface", diff --git a/core/client/db/Cargo.toml b/core/client/db/Cargo.toml index 7d88c39d7fd7e..79391e0bbb760 100644 --- a/core/client/db/Cargo.toml 
+++ b/core/client/db/Cargo.toml @@ -23,6 +23,7 @@ state_db = { package = "substrate-state-db", path = "../../state-db" } trie = { package = "substrate-trie", path = "../../trie" } consensus_common = { package = "substrate-consensus-common", path = "../../consensus/common" } header_metadata = { package = "substrate-header-metadata", path = "../header-metadata" } +historical-data = { path = "../../utils/historical-data" } [dev-dependencies] substrate-keyring = { path = "../../keyring" } diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 8bd0001981611..81805c106e646 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -34,6 +34,7 @@ mod storage_cache; mod utils; use std::sync::Arc; +use std::ops::Deref; use std::path::PathBuf; use std::io; use std::collections::{HashMap, HashSet}; @@ -41,7 +42,7 @@ use std::collections::{HashMap, HashSet}; use client::backend::NewBlockState; use client::blockchain::{well_known_cache_keys, HeaderBackend}; use client::{ForkBlocks, ExecutionStrategies}; -use client::backend::{StorageCollection, ChildStorageCollection}; +use client::backend::{StorageCollection, ChildStorageCollection, FullStorageCollection}; use client::error::{Result as ClientResult, Error as ClientError}; use codec::{Decode, Encode}; use hash_db::{Hasher, Prefix}; @@ -60,19 +61,27 @@ use sr_primitives::traits::{ use executor::RuntimeInfo; use state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, ChangesTrieBuildCache, - backend::Backend as StateBackend, + backend::Backend as StateBackend, InMemoryKvBackend, }; use crate::utils::{Meta, db_err, meta_keys, read_db, read_meta}; use client::leaves::{LeafSet, FinalizationDisplaced}; use client::children; use state_db::StateDb; +use state_db::BranchRanges; use header_metadata::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; use log::{trace, debug, warn}; pub use 
state_db::PruningMode; +use historical_data::tree::Serialized; +use historical_data::PruneResult; +use historical_data::linear::DefaultVersion; + +type Ser<'a> = Serialized<'a, DefaultVersion>; #[cfg(feature = "test-helpers")] use client::in_mem::Backend as InMemoryBackend; +#[cfg(feature = "test-helpers")] +use state_machine::backend::InMemoryTransaction; const CANONICALIZATION_DELAY: u64 = 4096; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; @@ -81,7 +90,12 @@ const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = state_machine::TrieBackend>, Blake2Hasher>; +/// A simple key value backend is also accessible for direct key value storage. +pub type DbState = state_machine::TrieBackend< + Arc>, + Blake2Hasher, + Arc, +>; /// Re-export the KVDB trait so that one can pass an implementation of it. pub use kvdb; @@ -124,6 +138,7 @@ impl StateBackend for RefTrackingState { type Error = >::Error; type Transaction = >::Transaction; type TrieBackendStorage = >::TrieBackendStorage; + type KvBackend = >::KvBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.state.storage(key) @@ -137,6 +152,10 @@ impl StateBackend for RefTrackingState { self.state.child_storage(storage_key, key) } + fn kv_storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.kv_storage(key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } @@ -175,10 +194,29 @@ impl StateBackend for RefTrackingState { self.state.child_storage_root(storage_key, delta) } + fn kv_transaction(&self, delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)> + { + self.state.kv_transaction(delta) + } + fn pairs(&self) -> Vec<(Vec, Vec)> { self.state.pairs() } + fn children_storage_keys(&self) -> Vec> { + self.state.children_storage_keys() + } + + fn child_pairs(&self, 
child_storage_key: &[u8]) -> Vec<(Vec, Vec)> { + self.state.child_pairs(child_storage_key) + } + + fn kv_in_memory(&self) -> InMemoryKvBackend { + self.state.kv_in_memory() + } + fn keys(&self, prefix: &[u8]) -> Vec> { self.state.keys(prefix) } @@ -187,7 +225,9 @@ impl StateBackend for RefTrackingState { self.state.child_keys(child_key, prefix) } - fn as_trie_backend(&mut self) -> Option<&state_machine::TrieBackend> { + fn as_trie_backend(&mut self) -> Option< + &state_machine::TrieBackend + > { self.state.as_trie_backend() } } @@ -263,6 +303,10 @@ pub(crate) mod columns { pub const AUX: Option = Some(8); /// Offchain workers local storage pub const OFFCHAIN: Option = Some(9); + /// Kv storage main collection. + /// Content here should be organized by prefixing + /// keys to avoid conflicts. + pub const KV: Option = Some(10); } struct PendingBlock { @@ -448,7 +492,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: PrefixedMemoryDB, + db_updates: (PrefixedMemoryDB, InMemoryKvBackend), storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB, @@ -500,7 +544,10 @@ impl client::backend::BlockImportOperation // Currently cache isn't implemented on full nodes. 
} - fn update_db_storage(&mut self, update: PrefixedMemoryDB) -> ClientResult<()> { + fn update_db_storage( + &mut self, + update: (PrefixedMemoryDB, InMemoryKvBackend), + ) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -527,7 +574,8 @@ impl client::backend::BlockImportOperation let (root, transaction) = self.old_state.full_storage_root( top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta + child_delta, + None, ); self.db_updates = transaction; @@ -552,11 +600,10 @@ impl client::backend::BlockImportOperation fn update_storage( &mut self, - update: StorageCollection, - child_update: ChildStorageCollection, + update: FullStorageCollection, ) -> ClientResult<()> { - self.storage_updates = update; - self.child_storage_updates = child_update; + self.storage_updates = update.top; + self.child_storage_updates = update.children; Ok(()) } @@ -577,6 +624,11 @@ struct StorageDb { pub state_db: StateDb>, } +struct StorageDbAt { + pub storage_db: Arc>, + pub state: State, +} + impl state_machine::Storage for StorageDb { fn get(&self, key: &H256, prefix: Prefix) -> Result, String> { let key = prefixed_key::(key, prefix); @@ -594,6 +646,44 @@ impl state_db::NodeDb for StorageDb { } } +impl state_db::KvDb for StorageDb { + + type Error = io::Error; + + fn get_kv(&self, key: &[u8], state: &u64) -> Result>, Self::Error> { + Ok(self.db.get(columns::KV, key)? 
+ .as_ref() + .map(|s| Ser::from_slice(&s[..])) + .and_then(|s| s.get(*state) + .unwrap_or(None) // flatten + .map(Into::into) + )) + } + + fn get_kv_pairs(&self, state: &u64) -> Vec<(Vec, Vec)> { + self.db.iter(columns::KV).filter_map(|(k, v)| + Ser::from_slice(&v[..]).get(*state) + .unwrap_or(None) // flatten + .map(Into::into) + .map(|v| (k.to_vec(), v)) + ).collect() + } +} + +impl state_machine::KvBackend for StorageDbAt { + + fn get(&self, key: &[u8]) -> Result>, String> { + self.storage_db.state_db.get_kv(key, &self.state, self.storage_db.deref()) + .map_err(|e| format!("Database backend error: {:?}", e)) + } + + fn in_memory(&self) -> InMemoryKvBackend { + self.storage_db.state_db.get_kv_pairs(&self.state, self.storage_db.deref()) + // No deletion on storage db. + .into_iter().map(|(k, v)| (k, Some(v))).collect() + } +} + struct DbGenesisStorage(pub H256); impl DbGenesisStorage { @@ -873,7 +963,13 @@ impl> Backend { let id = BlockId::Hash(hash); let justification = self.blockchain.justification(id).unwrap(); let body = self.blockchain.body(id).unwrap(); - let state = self.state_at(id).unwrap().pairs(); + let state = self.state_at(id).unwrap(); + let mut storage: Vec<_> = state.pairs().into_iter() + .map(|(k, v)| (None, k, Some(v))).collect(); + for child_key in state.children_storage_keys() { + storage.extend(state.child_pairs(child_key.as_slice()) + .into_iter().map(|(k, v)| (Some(child_key.clone()), k, Some(v)))); + } let new_block_state = if number.is_zero() { NewBlockState::Final @@ -884,7 +980,10 @@ impl> Backend { }; let mut op = inmem.begin_operation().unwrap(); op.set_block_data(header, body, justification, new_block_state).unwrap(); - op.update_db_storage(state.into_iter().map(|(k, v)| (None, k, Some(v))).collect()).unwrap(); + op.update_db_storage(InMemoryTransaction { + storage, + kv: state.kv_in_memory().clone(), + }).unwrap(); inmem.commit_operation(op).unwrap(); } @@ -1055,7 +1154,11 @@ impl> Backend { trace!(target: "db", "Canonicalize block 
#{} ({:?})", new_canonical, hash); let commit = self.storage.state_db.canonicalize_block(&hash) .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(transaction, commit); + apply_state_commit_canonical(transaction, commit.0, &self.storage.db, commit.1).map_err(|err| + client::error::Error::Backend( + format!("Error building commit transaction : {}", err) + ) + )?; }; Ok(()) @@ -1128,17 +1231,31 @@ impl> Backend { } let mut changeset: state_db::ChangeSet> = state_db::ChangeSet::default(); - for (key, (val, rc)) in operation.db_updates.drain() { + for (key, (val, rc)) in operation.db_updates.0.drain() { if rc > 0 { changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { changeset.deleted.push(key); } } + let kv_changeset: state_db::KvChangeSet> = operation.db_updates.1 + // switch to vec for size + .into_iter().collect(); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) - .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(&mut transaction, commit); + let commit = self.storage.state_db.insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + changeset, + kv_changeset, + ).map_err(|e: state_db::Error| + client::error::Error::from(format!("State database error: {:?}", e)) + )?; + apply_state_commit(&mut transaction, commit, &self.storage.db, number_u64).map_err(|err| + client::error::Error::Backend( + format!("Error building commit transaction : {}", err) + ) + )?; // Check if need to finalize. Genesis is always finalized instantly. 
let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); @@ -1174,9 +1291,20 @@ impl> Backend { displaced_leaf }; - let mut children = children::read_children(&*self.storage.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)?; + let mut children = children::read_children( + &*self.storage.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + )?; children.push(hash); - children::write_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash, children); + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); @@ -1262,15 +1390,22 @@ impl> Backend { { let f_num = f_header.number().clone(); - if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { + if self.storage.state_db.best_canonical() + .map(|c| f_num.saturated_into::() > c) + .unwrap_or(true) { let parent_hash = f_header.parent_hash().clone(); let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(transaction, commit); + .map_err(|e: state_db::Error| + client::error::Error::from(format!("State database error: {:?}", e)) + )?; + apply_state_commit_canonical(transaction, commit.0, &self.storage.db, commit.1) + .map_err(|err| client::error::Error::Backend( + format!("Error building commit transaction : {}", err) + ))?; let changes_trie_config = self.changes_trie_config(parent_hash)?; if let Some(changes_trie_config) = changes_trie_config { @@ -1288,7 +1423,75 @@ impl> Backend { } } -fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet>) { +fn 
apply_state_commit( + transaction: &mut DBTransaction, + commit: state_db::CommitSet>, + db: &Arc, + last_block: u64, +) -> Result<(), io::Error> { + apply_state_commit_inner(transaction, commit, db, last_block, None) +} + +fn apply_state_commit_inner( + transaction: &mut DBTransaction, + commit: state_db::CommitSet>, + db: &Arc, + last_block: u64, + mut kv_prune_key: state_db::KvChangeSetPrune, +) -> Result<(), io::Error> { + + for (key, change) in commit.kv.iter() { + let (mut ser, new) = if let Some(stored) = db.get(columns::KV, key)? { + (Ser::from_vec(stored.to_vec()), false) + } else { + if change.is_some() { + (Ser::default(), true) + } else { + break; + } + }; + ser.push(last_block, change.as_ref().map(|v| v.as_slice())); + if let Some((block_prune, kv_prune_keys)) = kv_prune_key.as_mut() { + if !new && kv_prune_keys.remove(key) { + match ser.prune(*block_prune) { + PruneResult::Cleared => transaction.delete(columns::KV, &key), + PruneResult::Changed + | PruneResult::Unchanged => transaction.put(columns::KV, &key[..], &ser.into_vec()), + } + } else { + transaction.put(columns::KV, &key[..], &ser.into_vec()) + } + } else { + transaction.put(columns::KV, &key[..], &ser.into_vec()) + } + } + + if let Some((block_prune, kv_prune_key)) = kv_prune_key { + // no need to into_iter + for key in kv_prune_key.iter() { + let mut ser = if let Some(stored) = db.get(columns::KV, key)? 
{ + Ser::from_vec(stored.to_vec()) + } else { + break; + }; + + match ser.prune(block_prune) { + PruneResult::Cleared => transaction.delete(columns::KV, &key), + PruneResult::Changed => transaction.put(columns::KV, &key[..], &ser.into_vec()), + PruneResult::Unchanged => (), + } + } + + } + apply_state_commit_no_kv(transaction, commit); + Ok(()) +} + +fn apply_state_commit_no_kv( + transaction: &mut DBTransaction, + commit: state_db::CommitSet>, + ) { + for (key, val) in commit.data.inserted.into_iter() { transaction.put(columns::STATE, &key[..], &val); } @@ -1301,6 +1504,17 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitS for key in commit.meta.deleted.into_iter() { transaction.delete(columns::STATE_META, &key[..]); } + +} + + +fn apply_state_commit_canonical( + transaction: &mut DBTransaction, + commit: state_db::CommitSetCanonical>, + db: &Arc, + last_block: u64, +) -> Result<(), io::Error> { + apply_state_commit_inner(transaction, commit.0, db, last_block, commit.1) } impl client::backend::AuxStore for Backend where Block: BlockT { @@ -1339,7 +1553,7 @@ impl client::backend::Backend for Backend whe Ok(BlockImportOperation { pending_block: None, old_state, - db_updates: PrefixedMemoryDB::default(), + db_updates: Default::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), changes_trie_updates: MemoryDB::default(), @@ -1428,7 +1642,8 @@ impl client::backend::Backend for Backend whe let mut transaction = DBTransaction::new(); match self.storage.state_db.revert_one() { Some(commit) => { - apply_state_commit(&mut transaction, commit); + debug_assert!(commit.kv.is_empty(), "revert do not change key value store"); + apply_state_commit_no_kv(&mut transaction, commit); let removed = self.blockchain.header(BlockId::Number(best))?.ok_or_else( || client::error::Error::UnknownBlock( format!("Error reverting to {}. 
Block hash not found.", best)))?; @@ -1468,7 +1683,8 @@ impl client::backend::Backend for Backend whe BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::new(Arc::new(genesis_storage), root); + let genesis_kv = InMemoryKvBackend::default(); + let db_state = DbState::new(Arc::new(genesis_storage), root, Arc::new(genesis_kv)); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1479,8 +1695,14 @@ impl client::backend::Backend for Backend whe Ok(Some(ref hdr)) => { let hash = hdr.hash(); if let Ok(()) = self.storage.state_db.pin(&hash) { + let block_number = hdr.number().clone().saturated_into::(); + let range = self.storage.state_db.get_branch_range(&hash, block_number); let root = H256::from_slice(hdr.state_root().as_ref()); - let db_state = DbState::new(self.storage.clone(), root); + let kv = StorageDbAt { + storage_db: self.storage.clone(), + state: (range.unwrap_or_else(Default::default), block_number), + }; + let db_state = DbState::new(self.storage.clone(), root, Arc::new(kv)); let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash.clone())); Ok(CachingState::new(state, self.shared_cache.clone(), Some(hash))) } else { @@ -1588,6 +1810,69 @@ mod tests { header_hash } + fn insert_kvs_and_finalize( + backend: &Backend, + number: u64, + parent_hash: H256, + kvs: Vec<(Vec, Option>)>, + extrinsics_root: H256, + finalize: Option>, + ) -> H256 { + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest: Default::default(), + extrinsics_root, + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, 
block_id).unwrap(); + if let Some(finalize) = finalize { + for finalize in finalize { + op.mark_finalized(BlockId::hash(finalize), None).unwrap(); + } + } + let kv_transaction = kvs.into_iter().collect(); + op.update_db_storage((Default::default(), kv_transaction)).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); + backend.commit_operation(op).unwrap(); + header_hash + } + + + fn insert_kvs( + backend: &Backend, + number: u64, + parent_hash: H256, + kvs: Vec<(Vec, Option>)>, + extrinsics_root: H256, + ) -> H256 { + insert_kvs_and_finalize( + backend, + number, + parent_hash, + kvs, + extrinsics_root, + None, + ) + } + + fn check_kv (backend: &Backend, block: H256, val: &Vec<(Vec, Option>)>) { + let block = BlockId::Hash(block); + let state = backend.state_at(block).unwrap(); + for (k, v) in val { + let content = state.kv_storage(k).unwrap(); + assert_eq!(v, &content); + } + } + #[test] fn block_hash_inserted_correctly() { let backing = { @@ -1703,7 +1988,12 @@ mod tests { (vec![5, 5, 5], Some(vec![4, 5, 6])), ]; - let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); + let child: Option<(_, Option<_>)> = None; + let (root, overlay) = op.old_state.full_storage_root( + storage.iter().cloned(), + child, + storage.iter().cloned(), + ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); @@ -1721,6 +2011,13 @@ mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); + // TODO kv check disabled, they require state db implementation + /*assert_eq!(state.kv_in_memory(), vec![ + (vec![5, 5, 5], Some(vec![4, 5, 6])) + ].into_iter().collect()); + let state = db.state_at(BlockId::Number(0)).unwrap(); + assert_eq!(state.kv_in_memory(), vec![].into_iter().collect());*/ + } } @@ -1752,7 +2049,7 @@ mod tests { op.reset_storage(storage.iter().cloned().collect(), 
Default::default()).unwrap(); - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); + key = op.db_updates.0.insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -1788,8 +2085,8 @@ mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates.0.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.0.remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -1825,7 +2122,7 @@ mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates.0.remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -2103,6 +2400,150 @@ mod tests { assert!(backend.changes_tries_storage.get(&root3, EMPTY_PREFIX).unwrap().is_some()); } + #[allow(dead_code)] + //#[test] TODO test disable, it requires state db implementation + fn kv_storage_works() { + let backend = Backend::::new_test(1000, 100); + + let changes0 = vec![(b"key_at_0".to_vec(), Some(b"val_at_0".to_vec()))]; + let changes1 = vec![ + (b"key_at_1".to_vec(), Some(b"val_at_1".to_vec())), + (b"another_key_at_1".to_vec(), Some(b"another_val_at_1".to_vec())), + ]; + let changes2 = vec![(b"key_at_2".to_vec(), Some(b"val_at_2".to_vec()))]; + let changes3 = vec![(b"another_key_at_1".to_vec(), None)]; + + let block0 = insert_kvs(&backend, 0, Default::default(), changes0.clone(), Default::default()); + let block1 = insert_kvs(&backend, 1, block0, changes1.clone(), Default::default()); + let block2 = insert_kvs(&backend, 2, block1, changes2.clone(), Default::default()); + let block3 = insert_kvs(&backend, 3, block2, changes3.clone(), Default::default()); + + check_kv(&backend, block0, &changes0); + check_kv(&backend, block1, &changes0); + check_kv(&backend, block1, &changes1); + check_kv(&backend, block2, &changes1); + check_kv(&backend, block2, &changes2); + check_kv(&backend, block3, &changes3); + } + + #[allow(dead_code)] + //#[test] TODO test 
disable, it requires state db implementation + fn kv_storage_works_with_forks() { + let backend = Backend::::new_test(4, 4); + + let changes0 = vec![(b"k0".to_vec(), Some(b"v0".to_vec()))]; + let changes1 = vec![ + (b"k1".to_vec(), Some(b"v1".to_vec())), + (b"k0".to_vec(), None), + ]; + let changes2 = vec![(b"k2".to_vec(), Some(b"v2".to_vec()))]; + let block0 = insert_kvs(&backend, 0, Default::default(), changes0.clone(), Default::default()); + let block1 = insert_kvs(&backend, 1, block0, changes1.clone(), Default::default()); + let block2 = insert_kvs(&backend, 2, block1, changes2.clone(), Default::default()); + + let changes2_1_0 = vec![(b"k3".to_vec(), Some(b"v3".to_vec()))]; + let changes2_1_1 = vec![(b"k4".to_vec(), Some(b"v4".to_vec()))]; + let block2_1_0 = insert_kvs(&backend, 3, block2, changes2_1_0.clone(), Default::default()); + let block2_1_1 = insert_kvs(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default()); + + let changes2_2_0 = vec![(b"k5".to_vec(), Some(b"v5".to_vec()))]; + let changes2_2_1 = vec![(b"k6".to_vec(), Some(b"v6".to_vec()))]; + // use different extrinsic root to have different hash than 2_1_0 + let block2_2_0 = insert_kvs(&backend, 3, block2, changes2_2_0.clone(), [1u8; 32].into()); + let block2_2_1 = insert_kvs(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default()); + + // branch1: when asking for finalized block hash + check_kv(&backend, block0, &changes0); + check_kv(&backend, block1, &vec![]); + check_kv(&backend, block1, &changes1); + check_kv(&backend, block2, &changes2); + check_kv(&backend, block2_1_0, &changes2_1_0); + check_kv(&backend, block2_1_1, &changes2_1_1); + check_kv(&backend, block2_2_0, &changes2_2_0); + check_kv(&backend, block2_2_1, &changes2_2_1); + + // after canonicalize range on 2_2_0 side + let mut block_prev = block2_1_1; + let mut nb_prev = 4; + let mut next = || { + block_prev = insert_kvs(&backend, nb_prev, block_prev, vec![], Default::default()); + nb_prev += 1; + }; + + next(); + 
next(); + next(); + check_kv(&backend, block0, &changes0); + check_kv(&backend, block1, &changes1); + check_kv(&backend, block2, &changes2); + check_kv(&backend, block2_1_0, &changes2_1_0); + check_kv(&backend, block2_2_0, &changes2_2_0); + let state = backend.state_at(BlockId::Hash(block2_2_0)).unwrap(); + assert!(state.kv_storage(&b"k5"[..]).unwrap().is_some()); + next(); + check_kv(&backend, block2_1_0, &changes2_1_0); + assert!(backend.state_at(BlockId::Hash(block2_2_0)).is_err()); + // pinned state is not garbage collected + assert!(state.kv_storage(&b"k5"[..]).unwrap().is_some()); + + // check pruning is called on storage + check_kv(&backend, block0, &changes0); + assert!(backend.storage.db.get(crate::columns::KV, &b"k0"[..]).unwrap().is_some()); + next(); + assert!(backend.state_at(BlockId::Hash(block0)).is_err()); + assert!(backend.storage.db.get(crate::columns::KV, &b"k0"[..]).unwrap().is_none()); + } + + #[allow(dead_code)] + //#[test] TODO test disable, it requires state db implementation + fn kv_storage_works_with_finalize() { + let backend = Backend::::new_test(1000, 100); + + let changes0 = vec![(b"k0".to_vec(), Some(b"v0".to_vec()))]; + let changes1 = vec![(b"k1".to_vec(), Some(b"v1".to_vec()))]; + let changes2 = vec![(b"k2".to_vec(), Some(b"v2".to_vec()))]; + let block0 = insert_kvs(&backend, 0, Default::default(), changes0.clone(), Default::default()); + let block1 = insert_kvs(&backend, 1, block0, changes1.clone(), Default::default()); + let block2 = insert_kvs(&backend, 2, block1, changes2.clone(), Default::default()); + + let changes2_1_0 = vec![(b"k3".to_vec(), Some(b"v3".to_vec()))]; + let changes2_1_1 = vec![(b"k4".to_vec(), Some(b"v4".to_vec()))]; + let block2_1_0 = insert_kvs(&backend, 3, block2, changes2_1_0.clone(), Default::default()); + let _block2_1_1 = insert_kvs(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default()); + + let changes2_2_0 = vec![(b"k5".to_vec(), Some(b"v5".to_vec()))]; + let changes2_2_1 = 
vec![(b"k6".to_vec(), Some(b"v6".to_vec()))]; + // use different extrinsic root to have different hash than 2_1_0 + let block2_2_0 = insert_kvs(&backend, 3, block2, changes2_2_0.clone(), [1u8; 32].into()); + let state = backend.state_at(BlockId::Hash(block2_1_0)).unwrap(); + + let block2_2_1 = insert_kvs_and_finalize( + &backend, + 4, + block2_2_0, + changes2_2_1.clone(), + Default::default(), + Some(vec![ + block1, + block2, + block2_2_0, + ]), + ); + + check_kv(&backend, block0, &changes0); + check_kv(&backend, block1, &changes0); + check_kv(&backend, block1, &changes1); + check_kv(&backend, block2, &changes2); + check_kv(&backend, block2_2_0, &changes2_2_0); + check_kv(&backend, block2_2_1, &changes2_2_1); + + // still accessible due to pinned state + assert!(state.kv_storage(&b"k3"[..]).unwrap().is_some()); + assert!(backend.state_at(BlockId::Hash(block2_1_0)).is_err()); + } + + + #[test] fn tree_route_works() { let backend = Backend::::new_test(1000, 100); diff --git a/core/client/db/src/storage_cache.rs b/core/client/db/src/storage_cache.rs index 8c81e44ba6bf2..150ada3dcd062 100644 --- a/core/client/db/src/storage_cache.rs +++ b/core/client/db/src/storage_cache.rs @@ -23,7 +23,7 @@ use linked_hash_map::{LinkedHashMap, Entry}; use hash_db::Hasher; use sr_primitives::traits::{Block as BlockT, Header}; use primitives::hexdisplay::HexDisplay; -use state_machine::{backend::Backend as StateBackend, TrieBackend}; +use state_machine::{backend::Backend as StateBackend, TrieBackend, InMemoryKvBackend}; use log::trace; use super::{StorageCollection, ChildStorageCollection}; use std::hash::Hash as StdHash; @@ -466,6 +466,7 @@ impl, B: BlockT> StateBackend for CachingState< type Error = S::Error; type Transaction = S::Transaction; type TrieBackendStorage = S::TrieBackendStorage; + type KvBackend = S::KvBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { let local_cache = self.cache.local_cache.upgradable_read(); @@ -526,6 +527,10 @@ impl, B: BlockT> 
StateBackend for CachingState< Ok(value) } + fn kv_storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.kv_storage(key) + } + fn exists_storage(&self, key: &[u8]) -> Result { Ok(self.storage(key)?.is_some()) } @@ -566,10 +571,29 @@ impl, B: BlockT> StateBackend for CachingState< self.state.child_storage_root(storage_key, delta) } + fn kv_transaction(&self, delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)>, + { + self.state.kv_transaction(delta) + } + fn pairs(&self) -> Vec<(Vec, Vec)> { self.state.pairs() } + fn children_storage_keys(&self) -> Vec> { + self.state.children_storage_keys() + } + + fn child_pairs(&self, storage_key: &[u8]) -> Vec<(Vec, Vec)> { + self.state.child_pairs(storage_key) + } + + fn kv_in_memory(&self) -> InMemoryKvBackend { + self.state.kv_in_memory() + } + fn keys(&self, prefix: &[u8]) -> Vec> { self.state.keys(prefix) } @@ -578,7 +602,9 @@ impl, B: BlockT> StateBackend for CachingState< self.state.child_keys(child_key, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&mut self) -> Option< + &TrieBackend + > { self.state.as_trie_backend() } } diff --git a/core/client/db/src/utils.rs b/core/client/db/src/utils.rs index 0a6112abe7a6e..bc2cee293952f 100644 --- a/core/client/db/src/utils.rs +++ b/core/client/db/src/utils.rs @@ -37,7 +37,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -pub const NUM_COLUMNS: u32 = 10; +pub const NUM_COLUMNS: u32 = 11; /// Meta column. The set of keys in the column is shared by full && light storages. 
pub const COLUMN_META: Option = Some(0); diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 9b6d9ce58fbfe..eef01c49ac14a 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -36,12 +36,24 @@ pub type StorageCollection = Vec<(Vec, Option>)>; /// In memory arrays of storage values for multiple child tries. pub type ChildStorageCollection = Vec<(Vec, StorageCollection)>; +#[derive(Clone)] +/// Collection of all storage element, can manage a single state delta +/// (deletion are included). +pub struct FullStorageCollection { + /// Parent trie changes. + pub top: StorageCollection, + /// Children trie changes. + pub children: ChildStorageCollection, + /// Key value not in trie changes. + pub kv: StorageCollection, +} + pub(crate) struct ImportSummary { pub(crate) hash: Block::Hash, pub(crate) origin: BlockOrigin, pub(crate) header: Block::Header, pub(crate) is_new_best: bool, - pub(crate) storage_changes: Option<(StorageCollection, ChildStorageCollection)>, + pub(crate) storage_changes: Option, pub(crate) retracted: Vec, } @@ -113,14 +125,15 @@ pub trait BlockImportOperation where /// Set storage changes. fn update_storage( &mut self, - update: StorageCollection, - child_update: ChildStorageCollection, + update: FullStorageCollection, ) -> error::Result<()>; + /// Inject changes trie data into the database. fn update_changes_trie(&mut self, update: ChangesTrieTransaction>) -> error::Result<()>; /// Insert auxiliary keys. Values are `None` if should be deleted. fn insert_aux(&mut self, ops: I) -> error::Result<()> where I: IntoIterator, Option>)>; + /// Mark a block as finalized. fn mark_finalized(&mut self, id: BlockId, justification: Option) -> error::Result<()>; /// Mark a block as new head. If both block import and set head are specified, set head overrides block import's best block rule. 
diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index e634fcf8faa9f..59dc625e1c8de 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -139,9 +139,12 @@ where /// Execute a call to a contract on top of given trie state, gathering execution proof. /// /// No changes are made. - fn prove_at_trie_state>( + fn prove_at_trie_state< + S: state_machine::TrieBackendStorage, + K: state_machine::KvBackend, + >( &self, - trie_state: &state_machine::TrieBackend, + trie_state: &state_machine::TrieBackend, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] @@ -371,9 +374,12 @@ where .map_err(Into::into) } - fn prove_at_trie_state>( + fn prove_at_trie_state< + S: state_machine::TrieBackendStorage, + K: state_machine::KvBackend, + >( &self, - trie_state: &state_machine::TrieBackend, + trie_state: &state_machine::TrieBackend, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] diff --git a/core/client/src/cht.rs b/core/client/src/cht.rs index aff875032d357..71d166cfd1c52 100644 --- a/core/client/src/cht.rs +++ b/core/client/src/cht.rs @@ -30,8 +30,10 @@ use trie; use primitives::{H256, convert_hash}; use sr_primitives::traits::{Header as HeaderT, SimpleArithmetic, Zero, One}; use state_machine::backend::InMemory as InMemoryState; +use state_machine::backend::InMemoryTransaction; use state_machine::{MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend}; +use state_machine::InMemoryKvBackend; use crate::error::{Error as ClientError, Result as ClientResult}; @@ -100,7 +102,10 @@ pub fn build_proof( .into_iter() .map(|(k, v)| (None, k, Some(v))) .collect::>(); - let mut storage = InMemoryState::::default().update(transaction); + let mut storage = InMemoryState::::default().update(InMemoryTransaction { + storage: transaction, + kv: Default::default(), + }); let trie_storage = storage.as_trie_backend() 
.expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( @@ -143,7 +148,7 @@ pub fn check_proof_on_proving_backend( local_root: Header::Hash, local_number: Header::Number, remote_hash: Header::Hash, - proving_backend: &TrieBackend, Hasher>, + proving_backend: &TrieBackend, Hasher, InMemoryKvBackend>, ) -> ClientResult<()> where Header: HeaderT, diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 71d6e4f01d637..0d15369ce31c7 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -60,7 +60,7 @@ use crate::{ }, backend::{ self, BlockImportOperation, PrunableStateChangesTrieStorage, - ClientImportOperation, Finalizer, ImportSummary, + ClientImportOperation, Finalizer, ImportSummary, FullStorageCollection, }, blockchain::{ self, Info as ChainInfo, Backend as ChainBackend, @@ -982,7 +982,7 @@ impl Client where operation.op.update_db_storage(storage_update)?; } if let Some(storage_changes) = storage_changes.clone() { - operation.op.update_storage(storage_changes.0, storage_changes.1)?; + operation.op.update_storage(storage_changes)?; } if let Some(Some(changes_update)) = changes_update { operation.op.update_changes_trie(changes_update)?; @@ -1018,10 +1018,7 @@ impl Client where ) -> error::Result<( Option>, Option>>, - Option<( - Vec<(Vec, Option>)>, - Vec<(Vec, Vec<(Vec, Option>)>)> - )> + Option, )> where E: CallExecutor + Send + Sync + Clone, @@ -1073,13 +1070,21 @@ impl Client where overlay.commit_prospective(); - let (top, children) = overlay.into_committed(); + let (top, children, kv) = overlay.into_committed(); let children = children.map(|(sk, it)| (sk, it.collect())).collect(); if import_headers.post().state_root() != &storage_update.1 { return Err(error::Error::InvalidStateRoot); } - Ok((Some(storage_update.0), Some(changes_update), Some((top.collect(), children)))) + Ok(( + Some(storage_update.0), + Some(changes_update), + Some(FullStorageCollection { + top: top.collect(), + 
children, + kv: kv.collect(), + }), + )) }, None => Ok((None, None, None)) } @@ -1179,8 +1184,8 @@ impl Client where self.storage_notifications.lock() .trigger( ¬ify_import.hash, - storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + storage_changes.top.into_iter(), + storage_changes.children.into_iter().map(|(sk, v)| (sk, v.into_iter())), ); } diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index 5c35400d7743c..92ec39bf89f10 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -30,7 +30,7 @@ use trie::MemoryDB; use header_metadata::{CachedHeaderMetadata, HeaderMetadata}; use crate::error; -use crate::backend::{self, NewBlockState, StorageCollection, ChildStorageCollection}; +use crate::backend::{self, NewBlockState, FullStorageCollection}; use crate::light; use crate::leaves::LeafSet; use crate::blockchain::{ @@ -511,7 +511,8 @@ where let (root, transaction) = self.old_state.full_storage_root( top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta + child_delta, + None, ); self.new_state = Some(InMemory::from(transaction)); @@ -527,8 +528,7 @@ where fn update_storage( &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, + _update: FullStorageCollection, ) -> error::Result<()> { Ok(()) } diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index 300d140630d85..2fbe5cb5b28c4 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -22,12 +22,15 @@ use std::sync::Arc; use parking_lot::{RwLock, Mutex}; use sr_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; -use state_machine::{Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState, ChangesTrieTransaction}; +use state_machine::{ + Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState, + ChangesTrieTransaction, InMemoryKvBackend, +}; use sr_primitives::traits::{Block as 
BlockT, NumberFor, Zero, Header}; use crate::in_mem::{self, check_genesis_storage}; use crate::backend::{ AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, - StorageCollection, ChildStorageCollection, + FullStorageCollection, }; use crate::blockchain::{HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys}; use crate::error::{Error as ClientError, Result as ClientResult}; @@ -291,7 +294,11 @@ where } let storage_update: InMemoryState = storage.into(); - let (storage_root, _) = storage_update.full_storage_root(::std::iter::empty(), child_delta); + let (storage_root, _) = storage_update.full_storage_root( + ::std::iter::empty(), + child_delta, + ::std::iter::empty(), + ); self.storage_update = Some(storage_update); Ok(storage_root) @@ -306,8 +313,7 @@ where fn update_storage( &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, + _update: FullStorageCollection, ) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) @@ -340,6 +346,7 @@ impl StateBackend for GenesisOrUnavailableState type Error = ClientError; type Transaction = (); type TrieBackendStorage = MemoryDB; + type KvBackend = state_machine::InMemoryKvBackend; fn storage(&self, key: &[u8]) -> ClientResult>> { match *self { @@ -357,6 +364,14 @@ impl StateBackend for GenesisOrUnavailableState } } + fn kv_storage(&self, key: &[u8]) -> ClientResult>> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => + Ok(state.kv_storage(key).expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { match *self { GenesisOrUnavailableState::Genesis(ref state) => state.for_keys_with_prefix(prefix, action), @@ -416,6 +431,13 @@ impl StateBackend for GenesisOrUnavailableState } } + fn kv_transaction(&self, _delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)> + { 
+ () + } + fn pairs(&self) -> Vec<(Vec, Vec)> { match *self { GenesisOrUnavailableState::Genesis(ref state) => state.pairs(), @@ -423,6 +445,27 @@ impl StateBackend for GenesisOrUnavailableState } } + fn children_storage_keys(&self) -> Vec> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.children_storage_keys(), + GenesisOrUnavailableState::Unavailable => Vec::new(), + } + } + + fn child_pairs(&self, child_storage_key: &[u8]) -> Vec<(Vec, Vec)> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.child_pairs(child_storage_key), + GenesisOrUnavailableState::Unavailable => Vec::new(), + } + } + + fn kv_in_memory(&self) -> InMemoryKvBackend { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.kv_in_memory(), + GenesisOrUnavailableState::Unavailable => Default::default(), + } + } + fn keys(&self, prefix: &[u8]) -> Vec> { match *self { GenesisOrUnavailableState::Genesis(ref state) => state.keys(prefix), @@ -430,7 +473,9 @@ impl StateBackend for GenesisOrUnavailableState } } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&mut self) -> Option< + &TrieBackend + > { match self { GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), GenesisOrUnavailableState::Unavailable => None, diff --git a/core/client/src/light/call_executor.rs b/core/client/src/light/call_executor.rs index 7f54004ae6722..8e503d09dfa0e 100644 --- a/core/client/src/light/call_executor.rs +++ b/core/client/src/light/call_executor.rs @@ -171,9 +171,12 @@ impl CallExecutor for Err(ClientError::NotAvailableOnLightClient) } - fn prove_at_trie_state>( + fn prove_at_trie_state< + S: state_machine::TrieBackendStorage, + K: state_machine::KvBackend, + >( &self, - _state: &state_machine::TrieBackend, + _state: &state_machine::TrieBackend, _changes: &mut OverlayedChanges, _method: &str, _call_data: &[u8] @@ -370,9 +373,12 @@ mod tests { unreachable!() } - fn prove_at_trie_state>( + fn 
prove_at_trie_state< + S: state_machine::TrieBackendStorage, + K: state_machine::KvBackend, + >( &self, - _trie_state: &state_machine::TrieBackend, + _trie_state: &state_machine::TrieBackend, _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8] diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index 6ae28b748c527..438f757fac390 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -350,8 +350,13 @@ impl> LightDataChecker { return Err(ClientError::InvalidCHTProof.into()); } + // using empty kv as light do not use kv information + // (things being fetch proved and proof currently do not rely on + // kv). + let kv = state_machine::InMemoryKvBackend::default(); + // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, cht_root); + let proving_backend = TrieBackend::new(storage, cht_root, kv); let remote_changes_trie_root = remote_roots[&block]; cht::check_proof_on_proving_backend::( local_cht_root, diff --git a/core/state-db/Cargo.toml b/core/state-db/Cargo.toml index d271a0e179d6d..8625ac4b74e79 100644 --- a/core/state-db/Cargo.toml +++ b/core/state-db/Cargo.toml @@ -9,6 +9,8 @@ parking_lot = "0.9.0" log = "0.4.8" primitives = { package = "substrate-primitives", path = "../../core/primitives" } codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +historical-data = { path = "../../core/utils/historical-data" } [dev-dependencies] env_logger = "0.7.0" +historical-data = { path = "../../core/utils/historical-data", features = ["test"] } diff --git a/core/state-db/src/lib.rs b/core/state-db/src/lib.rs index e561d9ce9617c..ed5e0f7aa9dd3 100644 --- a/core/state-db/src/lib.rs +++ b/core/state-db/src/lib.rs @@ -36,10 +36,13 @@ mod pruning; use std::fmt; use parking_lot::RwLock; use codec::Codec; -use std::collections::{HashMap, hash_map::Entry}; +use std::collections::{HashSet, HashMap, hash_map::Entry}; use 
noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; +// TODO this is a stub type, representing a query state +// among multiple branch (a fork path) +pub type BranchRanges = (); const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -49,6 +52,9 @@ const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained"; /// Database value type. pub type DBValue = Vec; +/// Kv storage key definition. +pub type KvKey = Vec; + /// Basic set of requirements for the Block hash and node key types. pub trait Hash: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Codec + std::hash::Hash + 'static {} impl Hash for T {} @@ -70,6 +76,22 @@ pub trait NodeDb { fn get(&self, key: &Self::Key) -> Result, Self::Error>; } +/// Backend database trait. Read-only. +/// +/// All query uses a state parameter which indicates +/// where to query kv storage. +/// It any additional information that is needed to resolve +/// a chain state (depending on the implementation). +pub trait KvDb { + type Error: fmt::Debug; + + /// Get state trie node. + fn get_kv(&self, key: &[u8], state: &State) -> Result, Self::Error>; + + /// Get all pairs of key values at current state. + fn get_kv_pairs(&self, state: &State) -> Vec<(KvKey, DBValue)>; +} + /// Error type. pub enum Error { /// Database backend error. @@ -120,6 +142,20 @@ pub struct ChangeSet { pub deleted: Vec, } +/// A set of key values state changes. +/// +/// This assumes that we only commit block per block (otherwhise +/// we will need to include a block number value). +pub type KvChangeSet = Vec<(H, Option)>; + +/// Info for pruning key values. +/// This is a last prune index (pruning will be done up to this index), +/// and a set keys to prune. +/// Is set to none when not initialized. +pub type KvChangeSetPrune = Option<(u64, HashSet)>; + +/// Commit set on block canonicalization operation. 
+pub type CommitSetCanonical = (CommitSet, KvChangeSetPrune); /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] @@ -128,6 +164,8 @@ pub struct CommitSet { pub data: ChangeSet, /// Metadata changes. pub meta: ChangeSet>, + /// Key values data changes. + pub kv: KvChangeSet, } /// Pruning constraints. If none are specified pruning is @@ -234,7 +272,14 @@ impl StateDbSync { } } - pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> Result, Error> { + pub fn insert_block( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + mut changeset: ChangeSet, + _kv_changeset: KvChangeSet, + ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. @@ -248,6 +293,7 @@ impl StateDbSync { Ok(CommitSet { data: changeset, meta: meta, + kv: Default::default(), }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { @@ -260,24 +306,28 @@ impl StateDbSync { } } - pub fn canonicalize_block(&mut self, hash: &BlockHash) -> Result, Error> { - let mut commit = CommitSet::default(); + pub fn canonicalize_block( + &mut self, + hash: &BlockHash, + ) -> Result<(CommitSetCanonical, u64), Error> { + let mut commit = (CommitSet::default(), None); if self.mode == PruningMode::ArchiveAll { - return Ok(commit) + return Ok((commit, 0)) } - match self.non_canonical.canonicalize(&hash, &mut commit) { - Ok(()) => { + let block_number = match self.non_canonical.canonicalize(&hash, &mut commit.0) { + Ok(block_number) => { if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); + commit.0.data.deleted.clear(); } - } + block_number + }, Err(e) => return Err(e), }; if let Some(ref mut pruning) = self.pruning { - pruning.note_canonical(&hash, &mut commit); + pruning.note_canonical(&hash, &mut commit.0); } self.prune(&mut commit); - Ok(commit) + Ok((commit, block_number)) } pub fn 
best_canonical(&self) -> Option { @@ -297,7 +347,7 @@ impl StateDbSync { } } - fn prune(&mut self, commit: &mut CommitSet) { + fn prune(&mut self, commit: &mut CommitSetCanonical) { if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { loop { if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { @@ -331,6 +381,13 @@ impl StateDbSync { } } + /// For a a given block return its path in the block tree. + /// Using a block hash and its number. + pub fn get_branch_range(&self, _hash: &BlockHash, _number: u64) -> Option { + // TODO implement kv for state-db + None + } + pub fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> { match self.mode { PruningMode::ArchiveAll => Ok(()), @@ -377,6 +434,31 @@ impl StateDbSync { db.get(key.as_ref()).map_err(|e| Error::Db(e)) } + /// Get a value from non-canonical/pruning overlay or the backing DB. + /// + /// State is both a branch ranges for non canonical storage + /// and a block number for cannonical storage. + pub fn get_kv>( + &self, + _key: &[u8], + _state: &(BranchRanges, u64), + _db: &D, + ) -> Result, Error> { + // TODO state db kv implementation + Ok(None) + } + + /// Access current full state for both backend and non cannoical. + /// Very inefficient and costly. + pub fn get_kv_pairs>( + &self, + _state: &(BranchRanges, u64), + _db: &D, + ) -> Vec<(KvKey, DBValue)> { + // TODO state db kv implementation + Default::default() + } + pub fn apply_pending(&mut self) { self.non_canonical.apply_pending(); if let Some(pruning) = &mut self.pruning { @@ -414,15 +496,31 @@ impl StateDb { } /// Add a new non-canonical block. 
- pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changeset) + pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + kv_changeset: KvChangeSet, + ) -> Result, Error> { + self.db.write().insert_block(hash, number, parent_hash, changeset, kv_changeset) } /// Finalize a previously inserted block. - pub fn canonicalize_block(&self, hash: &BlockHash) -> Result, Error> { + pub fn canonicalize_block( + &self, + hash: &BlockHash, + ) -> Result<(CommitSetCanonical, u64), Error> { self.db.write().canonicalize_block(hash) } + /// For a a given block return its path in the block tree. + /// Note that using `number` is use to skip a query to block number for hash. + pub fn get_branch_range(&self, hash: &BlockHash, number: u64) -> Option { + self.db.read().get_branch_range(hash, number) + } + /// Prevents pruning of specified block and its descendants. pub fn pin(&self, hash: &BlockHash) -> Result<(), PinError> { self.db.write().pin(hash) @@ -440,6 +538,29 @@ impl StateDb { self.db.read().get(key, db) } + /// Get a value from non-canonical/pruning overlay or the backing DB. + /// + /// State is both a branch ranges for non canonical storage + /// and a block number for cannonical storage. + pub fn get_kv>( + &self, + key: &[u8], + state: &(BranchRanges, u64), + db: &D, + ) -> Result, Error> { + self.db.read().get_kv(key, state, db) + } + + /// Access current full state for both backend and non cannoical. + /// Very inefficient and costly. + pub fn get_kv_pairs>( + &self, + state: &(BranchRanges, u64), + db: &D, + ) -> Vec<(KvKey, DBValue)> { + self.db.read().get_kv_pairs(state, db) + } + /// Revert all non-canonical blocks with the best block number. /// Returns a database commit or `None` if not possible. /// For archive an empty commit set is returned. 
@@ -486,6 +607,7 @@ mod tests { 1, &H256::from_low_u64_be(0), make_changeset(&[1], &[91]), + Default::default(), ) .unwrap(), ); @@ -496,6 +618,7 @@ mod tests { 2, &H256::from_low_u64_be(1), make_changeset(&[21], &[921, 1]), + Default::default(), ) .unwrap(), ); @@ -506,6 +629,7 @@ mod tests { 2, &H256::from_low_u64_be(1), make_changeset(&[22], &[922]), + Default::default(), ) .unwrap(), ); @@ -516,11 +640,12 @@ mod tests { 3, &H256::from_low_u64_be(21), make_changeset(&[3], &[93]), + Default::default(), ) .unwrap(), ); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap()); + db.commit(&(state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap().0).0); state_db.apply_pending(); db.commit( &state_db @@ -529,13 +654,14 @@ mod tests { 4, &H256::from_low_u64_be(3), make_changeset(&[4], &[94]), + Default::default(), ) .unwrap(), ); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap()); + db.commit(&(state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap().0).0); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap()); + db.commit(&(state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap().0).0); state_db.apply_pending(); (db, state_db) @@ -600,6 +726,7 @@ mod tests { 0, &H256::from_low_u64_be(0), make_changeset(&[], &[]), + Default::default(), ) .unwrap(), ); diff --git a/core/state-db/src/noncanonical.rs b/core/state-db/src/noncanonical.rs index 58715715ccdd2..d8f6775410720 100644 --- a/core/state-db/src/noncanonical.rs +++ b/core/state-db/src/noncanonical.rs @@ -289,7 +289,7 @@ impl NonCanonicalOverlay { &mut self, hash: &BlockHash, commit: &mut CommitSet, - ) -> Result<(), Error> { + ) -> Result> { trace!(target: "state-db", "Canonicalizing {:?}", hash); let level = self.levels.get(self.pending_canonicalizations.len()).ok_or_else(|| Error::InvalidBlock)?; let index = level @@ -317,13 
+317,14 @@ impl NonCanonicalOverlay { commit.data.inserted.extend(overlay.inserted.iter() .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); commit.data.deleted.extend(overlay.deleted.clone()); + let block_number = self.front_block_number() + self.pending_canonicalizations.len() as u64; commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); + let canonicalized = (hash.clone(), block_number); commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); self.pending_canonicalizations.push(hash.clone()); - Ok(()) + Ok(block_number) } fn apply_canonicalizations(&mut self) { diff --git a/core/state-db/src/pruning.rs b/core/state-db/src/pruning.rs index 21f472fe69da9..ea0cbded66f2c 100644 --- a/core/state-db/src/pruning.rs +++ b/core/state-db/src/pruning.rs @@ -24,7 +24,7 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; -use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; +use crate::{CommitSet, CommitSetCanonical, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; const LAST_PRUNED: &[u8] = b"last_pruned"; @@ -139,8 +139,15 @@ impl RefWindow { self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash) } - /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. - pub fn prune_one(&mut self, commit: &mut CommitSet) { + /// Prune next block. Expects at least one block in the window. + /// Adds changes to `commit`. + /// `kv_prune` to None indicates archive mode. 
+ pub fn prune_one( + &mut self, + commit: &mut CommitSetCanonical, + ) { + // TODO handle kv_prune + let (commit, _kv_prune) = commit; if let Some(pruned) = self.death_rows.get(self.pending_prunings) { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; @@ -202,8 +209,8 @@ impl RefWindow { mod tests { use super::RefWindow; use primitives::H256; - use crate::CommitSet; - use crate::test::{make_db, make_commit, TestDb}; + use crate::CommitSetCanonical; + use crate::test::{make_db, TestDb, make_commit}; fn check_journal(pruning: &RefWindow, db: &TestDb) { let restored: RefWindow = RefWindow::new(db).unwrap(); @@ -225,7 +232,7 @@ mod tests { fn prune_empty() { let db = make_db(&[]); let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); assert_eq!(pruning.pending_number, 0); assert!(pruning.death_rows.is_empty()); @@ -251,10 +258,10 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); assert!(!pruning.have_block(&h)); - db.commit(&commit); + db.commit(&commit.0); pruning.apply_pending(); assert!(!pruning.have_block(&h)); assert!(db.data_eq(&make_db(&[2, 4, 5]))); @@ -278,14 +285,14 @@ mod tests { check_journal(&pruning, &db); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); pruning.apply_pending(); 
assert!(db.data_eq(&make_db(&[3, 4, 5]))); assert_eq!(pruning.pending_number, 2); @@ -302,13 +309,13 @@ mod tests { pruning.note_canonical(&H256::random(), &mut commit); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); assert_eq!(pruning.pending_number, 2); @@ -332,16 +339,16 @@ mod tests { check_journal(&pruning, &db); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); assert!(db.data_eq(&make_db(&[1, 2, 3]))); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); assert!(db.data_eq(&make_db(&[1, 3]))); pruning.apply_pending(); assert_eq!(pruning.pending_number, 3); @@ -362,16 +369,16 @@ mod tests { db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); + let mut commit = CommitSetCanonical::default(); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); assert!(db.data_eq(&make_db(&[1, 2, 3]))); pruning.prune_one(&mut commit); - db.commit(&commit); + db.commit(&commit.0); 
assert!(db.data_eq(&make_db(&[1, 3]))); pruning.apply_pending(); assert_eq!(pruning.pending_number, 3); diff --git a/core/state-db/src/test.rs b/core/state-db/src/test.rs index d90c36990612e..86cbc078943cb 100644 --- a/core/state-db/src/test.rs +++ b/core/state-db/src/test.rs @@ -77,6 +77,7 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { CommitSet { data: make_changeset(inserted, deleted), meta: ChangeSet::default(), + kv: Default::default(), } } diff --git a/core/state-machine/src/backend.rs b/core/state-machine/src/backend.rs index e2f398ef7ccae..a4fb5518f83fa 100644 --- a/core/state-machine/src/backend.rs +++ b/core/state-machine/src/backend.rs @@ -21,6 +21,7 @@ use log::warn; use hash_db::Hasher; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::TrieBackendStorage; +use crate::kv_backend::{KvBackend, InMemory as InMemoryKvBackend}; use trie::{ TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::{TrieDBMut, Layout}, @@ -40,9 +41,15 @@ pub trait Backend: std::fmt::Debug { /// Type of trie backend storage. type TrieBackendStorage: TrieBackendStorage; + /// Type of trie backend storage. + type KvBackend: KvBackend; + /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result>, Self::Error>; + /// Access a value in the key value storage. + fn kv_storage(&self, key: &[u8]) -> Result>, Self::Error>; + /// Get keyed storage value hash or None if there is nothing associated. fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { self.storage(key).map(|v| v.map(|v| H::hash(&v))) @@ -100,9 +107,24 @@ pub trait Backend: std::fmt::Debug { I: IntoIterator, Option>)>, H::Out: Ord; + /// Produce transaction for a given kv information deltas. + fn kv_transaction(&self, delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)>; + /// Get all key/value pairs into a Vec. 
fn pairs(&self) -> Vec<(Vec, Vec)>; + /// Get all children storage keys + fn children_storage_keys(&self) -> Vec>; + + /// Get all key/value pairs into a Vec for a child storage. + fn child_pairs(&self, child_storage_key: &[u8]) -> Vec<(Vec, Vec)>; + + /// Get all key/value pairs of kv storage, and pending deletion + /// if allowed. + fn kv_in_memory(&self) -> crate::InMemoryKvBackend; + /// Get all keys with given prefix fn keys(&self, prefix: &[u8]) -> Vec> { let mut all = Vec::new(); @@ -118,25 +140,30 @@ pub trait Backend: std::fmt::Debug { } /// Try convert into trie backend. - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&mut self) -> Option< + &TrieBackend + > { None } /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. - fn full_storage_root( + fn full_storage_root( &self, delta: I1, - child_deltas: I2) - -> (H::Out, Self::Transaction) + child_deltas: I2, + kv_deltas: I3, + ) -> (H::Out, Self::Transaction) where I1: IntoIterator, Option>)>, I2i: IntoIterator, Option>)>, I2: IntoIterator, I2i)>, + I3: IntoIterator, Option>)>, ::Out: Ord, { - let mut txs: Self::Transaction = Default::default(); + let mut txs: Self::Transaction = self.kv_transaction(kv_deltas); + let mut child_roots: Vec<_> = Default::default(); // child first for (storage_key, child_delta) in child_deltas { @@ -161,6 +188,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { type Error = T::Error; type Transaction = T::Transaction; type TrieBackendStorage = T::TrieBackendStorage; + type KvBackend = T::KvBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { (*self).storage(key) @@ -170,6 +198,10 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).child_storage(storage_key, key) } + fn kv_storage(&self, key: &[u8]) -> Result>, Self::Error> { + (*self).kv_storage(key) + } + fn 
for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { (*self).for_keys_in_child_storage(storage_key, f) } @@ -198,10 +230,29 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).child_storage_root(storage_key, delta) } + fn kv_transaction(&self, delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)> + { + (*self).kv_transaction(delta) + } + fn pairs(&self) -> Vec<(Vec, Vec)> { (*self).pairs() } + fn children_storage_keys(&self) -> Vec> { + (*self).children_storage_keys() + } + + fn child_pairs(&self, child_storage_key: &[u8]) -> Vec<(Vec, Vec)> { + (*self).child_pairs(child_storage_key) + } + + fn kv_in_memory(&self) -> crate::InMemoryKvBackend { + (*self).kv_in_memory() + } + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { (*self).for_key_values_with_prefix(prefix, f); } @@ -219,12 +270,32 @@ impl Consolidate for () { } } -impl Consolidate for Vec<(Option>, Vec, Option>)> { +impl Consolidate for Vec { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } } +impl Consolidate for InMemoryTransaction { + fn consolidate(&mut self, other: Self) { + self.storage.consolidate(other.storage); + self.kv.consolidate(other.kv); + } +} + +impl Consolidate for HashMap { + fn consolidate(&mut self, other: Self) { + self.extend(other); + } +} + +impl Consolidate for (U, V) { + fn consolidate(&mut self, other: Self) { + self.0.consolidate(other.0); + self.1.consolidate(other.1); + } +} + impl> Consolidate for trie::GenericMemoryDB { fn consolidate(&mut self, other: Self) { trie::GenericMemoryDB::consolidate(self, other) @@ -250,7 +321,7 @@ impl error::Error for Void { /// tests. 
pub struct InMemory { inner: HashMap>, HashMap, Vec>>, - trie: Option, H>>, + trie: Option, H, InMemoryKvBackend>>, _hasher: PhantomData, } @@ -290,14 +361,15 @@ impl InMemory { /// Copy the state, with applied updates pub fn update(&self, changes: >::Transaction) -> Self { let mut inner: HashMap<_, _> = self.inner.clone(); - for (storage_key, key, val) in changes { + for (storage_key, key, val) in changes.storage { match val { Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, None => { inner.entry(storage_key).or_default().remove(&key); }, } } + // TODO implement kv - inner.into() + InMemory { inner, trie: None, _hasher: PhantomData } } } @@ -354,6 +426,19 @@ impl From>, Vec, Option>)>> for InMem } } +impl From for InMemory { + fn from(inner: InMemoryTransaction) -> Self { + let mut expanded: HashMap>, HashMap, Vec>> = HashMap::new(); + for (child_key, key, value) in inner.storage { + if let Some(value) = value { + expanded.entry(child_key).or_default().insert(key, value); + } + } + // TODO manage kv + expanded.into() + } +} + impl InMemory { /// child storage key iterator pub fn child_storage_keys(&self) -> impl Iterator { @@ -361,10 +446,21 @@ impl InMemory { } } +#[derive(Default)] +/// Transaction produced by the state machine execution for +/// in memory storage. +pub struct InMemoryTransaction { + /// State trie key values changes (both top and child trie). + pub storage: Vec<(Option>, Vec, Option>)>, + /// Changes to non trie key value datas. 
+ pub kv: HashMap, Option>>, +} + impl Backend for InMemory { type Error = Void; - type Transaction = Vec<(Option>, Vec, Option>)>; + type Transaction = InMemoryTransaction; type TrieBackendStorage = MemoryDB; + type KvBackend = InMemoryKvBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) @@ -374,6 +470,11 @@ impl Backend for InMemory { Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) } + fn kv_storage(&self, _key: &[u8]) -> Result>, Self::Error> { + // TODO implement this + Ok(None) + } + fn exists_storage(&self, key: &[u8]) -> Result { Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) } @@ -414,7 +515,7 @@ impl Backend for InMemory { let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); - (root, full_transaction) + (root, InMemoryTransaction { storage: full_transaction, kv: Default::default() }) } fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) @@ -441,7 +542,19 @@ impl Backend for InMemory { let is_default = root == default_child_trie_root::>(&storage_key); - (root, is_default, full_transaction) + ( + root, + is_default, + InMemoryTransaction { storage: full_transaction, kv: Default::default() }, + ) + } + + fn kv_transaction(&self, _delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)> + { + // TODO implement this + Default::default() } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -451,6 +564,22 @@ impl Backend for InMemory { .collect() } + fn children_storage_keys(&self) -> Vec> { + self.inner.iter().filter_map(|(child, _)| child.clone()).collect() + } + + fn child_pairs(&self, storage_key: &[u8]) -> Vec<(Vec, Vec)> { + self.inner.get(&Some(storage_key.to_vec())) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) + .collect() + } + + fn kv_in_memory(&self) -> crate::InMemoryKvBackend { + 
// TODO implement this + Default::default() + } + fn keys(&self, prefix: &[u8]) -> Vec> { self.inner.get(&None) .into_iter() @@ -465,7 +594,9 @@ impl Backend for InMemory { .collect() } - fn as_trie_backend(&mut self)-> Option<&TrieBackend> { + fn as_trie_backend(&mut self)-> Option< + &TrieBackend + > { let mut mdb = MemoryDB::default(); let mut root = None; let mut new_child_roots = Vec::new(); @@ -489,7 +620,8 @@ impl Backend for InMemory { Some(root) => root, None => insert_into_memory_db::(&mut mdb, ::std::iter::empty())?, }; - self.trie = Some(TrieBackend::new(mdb, root)); + // TODO get kv from self + self.trie = Some(TrieBackend::new(mdb, root, Default::default())); self.trie.as_ref() } } diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index 0e93302a95a54..2a0c4e729fd1b 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -441,7 +441,9 @@ where let delta = self.overlay.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) .chain(self.overlay.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - let (root, transaction) = self.backend.full_storage_root(delta, child_delta_iter); + // TODO this requires state machine implementation + let kv_delta = None.into_iter(); + let (root, transaction) = self.backend.full_storage_root(delta, child_delta_iter, kv_delta); self.storage_transaction = Some((transaction, root)); trace!(target: "state-trace", "{:04x}: Root {}", self.id, diff --git a/core/state-machine/src/kv_backend.rs b/core/state-machine/src/kv_backend.rs new file mode 100644 index 0000000000000..c1a1a4d3044ce --- /dev/null +++ b/core/state-machine/src/kv_backend.rs @@ -0,0 +1,62 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Backend for storing data without a state. + +use std::sync::Arc; +use std::collections::HashMap; +use std::ops::Deref; + +/// This covers kv values access. +/// It targets a single history state (state machine +/// only runs for a single history state). +pub trait KvBackend: Send + Sync { + /// Retrieve a value from storage under given key. + fn get(&self, key: &[u8]) -> Result>, String>; + + /// Return all values (in memory) for this backend, mainly for + /// tests. This method should only be used for testing or + /// for small kv. + /// When used for a storage that implements this trait, + /// it should return pending deletion as a `None` value. + /// This contradicts a bit the backend aspect of this + /// trait but is practical in some cases. + fn in_memory(&self) -> InMemory; +} + +/// In memory storage of content. It is a storage so it +/// can contain deletion of value as a `None` variant.
+pub type InMemory = HashMap, Option>>; + +impl KvBackend for InMemory { + fn get(&self, key: &[u8]) -> Result>, String> { + Ok(self.get(key).map(Clone::clone).unwrap_or(None)) + } + + fn in_memory(&self) -> InMemory { + self.clone() + } +} + +impl KvBackend for Arc { + fn get(&self, key: &[u8]) -> Result>, String> { + KvBackend::get(self.deref(), key) + } + + fn in_memory(&self) -> InMemory { + KvBackend::in_memory(self.deref()) + } +} diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index 1da9cfb4e7dbe..09c02d54ff7e9 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -29,6 +29,8 @@ use primitives::{ use overlayed_changes::OverlayedChangeSet; use externalities::Extensions; +pub use crate::kv_backend::{KvBackend, InMemory as InMemoryKvBackend}; + pub mod backend; mod changes_trie; mod error; @@ -39,6 +41,7 @@ mod overlayed_changes; mod proving_backend; mod trie_backend; mod trie_backend_essence; +mod kv_backend; pub use trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB}; pub use testing::TestExternalities; @@ -483,8 +486,8 @@ where /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. -pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_execution_on_trie_backend( + trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, method: &str, @@ -495,6 +498,8 @@ where S: trie_backend_essence::TrieBackendStorage, H: Hasher, Exec: CodeExecutor, + H::Out: Ord + 'static, + K: KvBackend, { let proving_backend = proving_backend::ProvingBackend::new(trie_backend); let mut sm = StateMachine::<_, H, _, InMemoryChangesTrieStorage, Exec>::new( @@ -531,7 +536,7 @@ where /// Check execution proof on proving backend, generated by `prove_execution` call. 
pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, + trie_backend: &TrieBackend, H, InMemoryKvBackend>, overlay: &mut OverlayedChanges, exec: &Exec, method: &str, @@ -591,18 +596,19 @@ where } /// Generate storage read proof on pre-created trie backend. -pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_read_on_trie_backend( + trie_backend: &TrieBackend, keys: I, ) -> Result> where S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord, + K: KvBackend, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = proving_backend::ProvingBackend::<_, H, _>::new(trie_backend); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) @@ -612,8 +618,8 @@ where } /// Generate storage read proof on pre-created trie backend. -pub fn prove_child_read_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_child_read_on_trie_backend( + trie_backend: &TrieBackend, storage_key: &[u8], keys: I, ) -> Result> @@ -621,10 +627,11 @@ where S: trie_backend_essence::TrieBackendStorage, H: Hasher, H::Out: Ord, + K: KvBackend, I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = proving_backend::ProvingBackend::<_, H, _>::new(trie_backend); for key in keys.into_iter() { proving_backend .child_storage(storage_key, key.as_ref()) @@ -682,7 +689,7 @@ where /// Check storage read proof on pre-created proving backend. pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend, H, InMemoryKvBackend>, key: &[u8], ) -> Result>, Box> where @@ -694,7 +701,7 @@ where /// Check child storage read proof on pre-created proving backend. 
pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend, H, InMemoryKvBackend>, storage_key: &[u8], key: &[u8], ) -> Result>, Box> diff --git a/core/state-machine/src/overlayed_changes.rs b/core/state-machine/src/overlayed_changes.rs index 53a66dc49ee05..dd776b411b79b 100644 --- a/core/state-machine/src/overlayed_changes.rs +++ b/core/state-machine/src/overlayed_changes.rs @@ -132,6 +132,14 @@ impl OverlayedChanges { None } + /// Returns a double-Option: None if the key is unknown (i.e. and the query should be refered + /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose + /// value has been set. + pub fn kv_storage(&self, _key: &[u8]) -> Option> { + // TODO implement kv for state_machine + None + } + /// Inserts the given key-value pair into the prospective change set. /// /// `None` can be used to delete a value specified by the given key. @@ -307,11 +315,14 @@ impl OverlayedChanges { pub fn into_committed(self) -> ( impl Iterator, Option>)>, impl Iterator, impl Iterator, Option>)>)>, + impl Iterator, Option>)>, ){ assert!(self.prospective.is_empty()); (self.committed.top.into_iter().map(|(k, v)| (k, v.value)), self.committed.children.into_iter() - .map(|(sk, v)| (sk, v.into_iter().map(|(k, v)| (k, v.value))))) + .map(|(sk, v)| (sk, v.into_iter().map(|(k, v)| (k, v.value)))), + // TODO state machine kv implementation + None.into_iter()) } /// Inserts storage entry responsible for current extrinsic index. 
diff --git a/core/state-machine/src/proving_backend.rs b/core/state-machine/src/proving_backend.rs index 14f17a3a48c47..0a290fd70af6b 100644 --- a/core/state-machine/src/proving_backend.rs +++ b/core/state-machine/src/proving_backend.rs @@ -24,11 +24,13 @@ use trie::{ MemoryDB, PrefixedMemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys }; +use std::collections::HashMap; pub use trie::Recorder; pub use trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, Backend}; +use crate::kv_backend::{KvBackend, InMemory as InMemoryKvBackend}; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -180,14 +182,22 @@ impl<'a, S, H> ProvingBackendEssence<'a, S, H> /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a TrieBackend, +pub struct ProvingBackend<'a, + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + K: 'a + KvBackend, +> { + backend: &'a TrieBackend, proof_recorder: Rc>>, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> { +impl<'a, + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + K: 'a + KvBackend, +> ProvingBackend<'a, S, H, K> { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { + pub fn new(backend: &'a TrieBackend) -> Self { ProvingBackend { backend, proof_recorder: Rc::new(RefCell::new(Recorder::new())), @@ -196,7 +206,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Create new proving backend with the given recorder. 
pub fn new_with_recorder( - backend: &'a TrieBackend, + backend: &'a TrieBackend, proof_recorder: Rc>>, ) -> Self { ProvingBackend { @@ -217,21 +227,27 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug for ProvingBackend<'a, S, H> { +impl<'a, + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + K: 'a + KvBackend, +> std::fmt::Debug for ProvingBackend<'a, S, H, K> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ProvingBackend") } } -impl<'a, S, H> Backend for ProvingBackend<'a, S, H> +impl<'a, S, H, K> Backend for ProvingBackend<'a, S, H, K> where S: 'a + TrieBackendStorage, H: 'a + Hasher, H::Out: Ord, + K: 'a + KvBackend, { type Error = String; - type Transaction = S::Overlay; + type Transaction = (S::Overlay, HashMap, Option>>); type TrieBackendStorage = PrefixedMemoryDB; + type KvBackend = K; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { ProvingBackendEssence { @@ -249,6 +265,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> }.child_storage(storage_key, key) } + fn kv_storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.backend.kv_storage(key) + } + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { self.backend.for_keys_in_child_storage(storage_key, f) } @@ -269,6 +289,18 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.backend.pairs() } + fn children_storage_keys(&self) -> Vec> { + self.backend.children_storage_keys() + } + + fn child_pairs(&self, storage_key: &[u8]) -> Vec<(Vec, Vec)> { + self.backend.child_pairs(storage_key) + } + + fn kv_in_memory(&self) -> HashMap, Option>> { + self.backend.kv_in_memory() + } + fn keys(&self, prefix: &[u8]) -> Vec> { self.backend.keys(prefix) } @@ -290,20 +322,31 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> { self.backend.child_storage_root(storage_key, delta) } + + fn kv_transaction(&self, delta: I) -> Self::Transaction + where 
+ I: IntoIterator, Option>)> + { + self.backend.kv_transaction(delta) + } + } /// Create proof check backend. pub fn create_proof_check_backend( root: H::Out, proof: StorageProof, -) -> Result, H>, Box> +) -> Result, H, InMemoryKvBackend>, Box> where H: Hasher, { let db = create_proof_check_backend_storage(proof); + // run on empty kv (current proof does not require + // kv). + let kv = InMemoryKvBackend::default(); if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new(db, root)) + Ok(TrieBackend::new(db, root, kv)) } else { Err(Box::new(ExecutionError::InvalidProof)) } @@ -325,14 +368,16 @@ where #[cfg(test)] mod tests { - use crate::backend::{InMemory}; + use crate::backend::{InMemory, InMemoryTransaction}; use crate::trie_backend::tests::test_trie; use super::*; use primitives::{Blake2Hasher, storage::ChildStorageKey}; + type KvBackend = InMemoryKvBackend; + fn test_proving<'a>( - trie_backend: &'a TrieBackend,Blake2Hasher>, - ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { + trie_backend: &'a TrieBackend, Blake2Hasher, KvBackend>, + ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher, KvBackend> { ProvingBackend::new(trie_backend) } @@ -370,14 +415,19 @@ mod tests { let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + assert_eq!(trie_mdb.0.drain(), proving_mdb.0.drain()); + assert_eq!(trie_mdb.1, proving_mdb.1); } #[test] fn proof_recorded_and_checked() { let contents = (0..64).map(|i| (None, vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemory::::default(); - let mut in_memory = in_memory.update(contents); + let mut in_memory = in_memory.update(InMemoryTransaction { + storage: contents, + kv: Default::default(), + }); + let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| 
assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -406,10 +456,14 @@ mod tests { .chain((10..15).map(|i| (Some(own2.clone()), vec![i], Some(vec![i])))) .collect::>(); let in_memory = InMemory::::default(); - let mut in_memory = in_memory.update(contents); - let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( + let mut in_memory = in_memory.update(InMemoryTransaction { + storage: contents, + kv: Default::default(), + }); + let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _, _>( + ::std::iter::empty(), + in_memory.child_storage_keys().map(|k|(k.to_vec(), Vec::new())), ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.to_vec(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index 16ff62020b594..a0767baae65fe 100644 --- a/core/state-machine/src/testing.rs +++ b/core/state-machine/src/testing.rs @@ -19,7 +19,7 @@ use std::{collections::HashMap, any::{Any, TypeId}}; use hash_db::Hasher; use crate::{ - backend::{InMemory, Backend}, OverlayedChanges, + backend::{InMemory, InMemoryTransaction, Backend}, OverlayedChanges, changes_trie::{ InMemoryStorage as ChangesTrieInMemoryStorage, BlockNumber as ChangesTrieBlockNumber, @@ -93,7 +93,10 @@ impl, N: ChangesTrieBlockNumber> TestExternalities { /// Insert key/value into backend pub fn insert(&mut self, k: Vec, v: Vec) { - self.backend = self.backend.update(vec![(None, k, Some(v))]); + self.backend = self.backend.update(InMemoryTransaction { + storage: vec![(None, k, Some(v))], + kv: Default::default(), + }); } /// Registers the given extension for this instance. 
@@ -120,7 +123,13 @@ impl, N: ChangesTrieBlockNumber> TestExternalities { .collect::>() }); - self.backend.update(top.chain(children).collect()) + // TODO state machine kv implementation + let kv = Default::default(); + + self.backend.update(InMemoryTransaction { + storage: top.chain(children).collect(), + kv, + }) } /// Execute the given closure while `self` is set as externalities. diff --git a/core/state-machine/src/trie_backend.rs b/core/state-machine/src/trie_backend.rs index 432ccf3e75f0e..b93a33e12fc59 100644 --- a/core/state-machine/src/trie_backend.rs +++ b/core/state-machine/src/trie_backend.rs @@ -22,17 +22,23 @@ use trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root use trie::trie_types::{TrieDB, TrieError, Layout}; use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}; use crate::Backend; +use crate::kv_backend::KvBackend; +use primitives::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; +use std::collections::HashMap; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher> { +/// A simple key value backend is also accessible for direct key value storage. +pub struct TrieBackend, H: Hasher, K: KvBackend> { essence: TrieBackendEssence, + kv_storage: K, } -impl, H: Hasher> TrieBackend { +impl, K: KvBackend, H: Hasher> TrieBackend { /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { + pub fn new(storage: S, root: H::Out, kv_storage: K) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), + kv_storage, } } @@ -46,6 +52,16 @@ impl, H: Hasher> TrieBackend { self.essence.backend_storage() } + /// Get key value storage backend reference. + pub fn kv_backend(&self) -> &K { + &self.kv_storage + } + + /// Get key value storage backend mutable reference. + pub fn kv_backend_mut(&mut self) -> &mut K { + &mut self.kv_storage + } + /// Get trie root. 
pub fn root(&self) -> &H::Out { self.essence.root() @@ -55,20 +71,30 @@ impl, H: Hasher> TrieBackend { pub fn into_storage(self) -> S { self.essence.into_storage() } + } -impl, H: Hasher> std::fmt::Debug for TrieBackend { +impl< + S: TrieBackendStorage, + H: Hasher, + K: KvBackend, +> std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher> Backend for TrieBackend where +impl< + S: TrieBackendStorage, + H: Hasher, + K: KvBackend, +> Backend for TrieBackend where H::Out: Ord, { type Error = String; - type Transaction = S::Overlay; + type Transaction = (S::Overlay, HashMap, Option>>); type TrieBackendStorage = S; + type KvBackend = K; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.essence.storage(key) @@ -78,6 +104,10 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.child_storage(storage_key, key) } + fn kv_storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.kv_storage.get(key) + } + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { self.essence.for_keys_with_prefix(prefix, f) } @@ -118,6 +148,47 @@ impl, H: Hasher> Backend for TrieBackend where } } + fn children_storage_keys(&self) -> Vec> { + let mut result = Vec::new(); + self.for_keys_with_prefix(CHILD_STORAGE_KEY_PREFIX, |k| result.push(k.to_vec())); + result + } + + fn child_pairs(&self, storage_key: &[u8]) -> Vec<(Vec, Vec)> { + + let root_slice = self.essence.storage(storage_key) + .unwrap_or(None) + .unwrap_or(default_child_trie_root::>(storage_key)); + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(&root_slice[..]); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + + let collect_all = || -> Result<_, Box>> { + let trie = TrieDB::::new(&eph, &root)?; + let mut v = Vec::new(); + for x in trie.iter()? 
{ + let (key, value) = x?; + v.push((key.to_vec(), value.to_vec())); + } + + Ok(v) + }; + + match collect_all() { + Ok(v) => v, + Err(e) => { + debug!(target: "trie", "Error extracting child trie values: {}", e); + Vec::new() + } + } + } + + fn kv_in_memory(&self) -> HashMap, Option>> { + self.kv_storage.in_memory() + } + fn keys(&self, prefix: &[u8]) -> Vec> { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); @@ -138,7 +209,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)> { let mut write_overlay = S::Overlay::default(); @@ -156,13 +227,13 @@ impl, H: Hasher> Backend for TrieBackend where } } - (root, write_overlay) + (root, (write_overlay, Default::default())) } fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord + where + I: IntoIterator, Option>)>, + H::Out: Ord { let default_root = default_child_trie_root::>(storage_key); @@ -194,10 +265,21 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - (root, is_default, write_overlay) + (root, is_default, (write_overlay, Default::default())) + } + + fn kv_transaction(&self, delta: I) -> Self::Transaction + where + I: IntoIterator, Option>)> + { + let mut result = self.kv_storage.in_memory(); + result.extend(delta.into_iter()); + (Default::default(), result) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&mut self) -> Option< + &TrieBackend + > { Some(self) } } @@ -210,7 +292,9 @@ pub mod tests { use trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; - fn test_db() -> (PrefixedMemoryDB, H256) { + type 
KvBackend = crate::kv_backend::InMemory; + + fn test_db() -> (PrefixedMemoryDB, H256, KvBackend) { let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { @@ -232,12 +316,17 @@ pub mod tests { trie.insert(&[i], &[i]).unwrap(); } } - (mdb, root) + // empty history. + let mut kv = crate::kv_backend::InMemory::default(); + kv.insert(b"kv1".to_vec(), Some(b"kv_value1".to_vec())); + kv.insert(b"kv2".to_vec(), Some(b"kv_value2".to_vec())); + (mdb, root, kv) } - pub(crate) fn test_trie() -> TrieBackend, Blake2Hasher> { - let (mdb, root) = test_db(); - TrieBackend::new(mdb, root) + pub(crate) fn test_trie( + ) -> TrieBackend, Blake2Hasher, KvBackend> { + let (mdb, root, kv) = test_db(); + TrieBackend::new(mdb, root, kv) } #[test] @@ -257,8 +346,9 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, Blake2Hasher>::new( - PrefixedMemoryDB::default(), + assert!(TrieBackend::, Blake2Hasher, KvBackend>::new( + Default::default(), + Default::default(), Default::default(), ).pairs().is_empty()); } @@ -270,13 +360,15 @@ pub mod tests { #[test] fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); + let mut tx = test_trie().storage_root(::std::iter::empty()).1; + assert!(tx.0.drain().is_empty()); + assert!(tx.1.is_empty()); } #[test] fn storage_root_transaction_is_non_empty() { let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - assert!(!tx.drain().is_empty()); + assert!(!tx.0.drain().is_empty()); assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); } diff --git a/core/utils/historical-data/Cargo.toml b/core/utils/historical-data/Cargo.toml new file mode 100644 index 0000000000000..196209b8cd9b0 --- /dev/null +++ b/core/utils/historical-data/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "historical-data" +version = "2.0.0" +authors = ["Parity Technologies "] +description = "Data 
associated with its history" +edition = "2018" + +[dependencies] +rstd = { package = "sr-std", path = "../../sr-std", default-features = false } +smallvec = { version = "0.6", optional = true } +num-traits = { version = "0.2.8", default-features = false } + +[dev-dependencies] + +[features] +default = ["std"] +std = [ + "rstd/std", + "num-traits/std", + "smallvec", +] +test = [] diff --git a/core/utils/historical-data/README.md b/core/utils/historical-data/README.md new file mode 100644 index 0000000000000..7b2cfcadc1f98 --- /dev/null +++ b/core/utils/historical-data/README.md @@ -0,0 +1,19 @@ +## Historical data + +Crate with methods to manage data that stores its own history. + +This covers: +- linear history driven data, eg. transactional layers for overlay. +- long term storage with multiple branches, eg. offchain storage. + +General design is a container where queries are done depending on a local history context +and updates require a global history context. + +Internally, storage of multiple states is done independently for each value, as opposed to a trie +where a global state is used to index all content. Each key value manages its own history. + +This crate is `no_std` compatible as long as the `std` feature is not enabled. + +For more information see + +License: GPL-3.0 diff --git a/core/utils/historical-data/src/lib.rs b/core/utils/historical-data/src/lib.rs new file mode 100644 index 0000000000000..93e974fb5d5b4 --- /dev/null +++ b/core/utils/historical-data/src/lib.rs @@ -0,0 +1,53 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version.
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! History driven data storage. +//! Useful to store information with history +//! on a per item basis. + +#![cfg_attr(not(feature = "std"), no_std)] + +use rstd::convert::TryInto; + +pub mod tree; +pub mod linear; + +/// An entry at a given history index. +#[derive(Debug, Clone)] +#[cfg_attr(any(test, feature = "test"), derive(PartialEq))] +pub struct HistoricalValue { + /// The stored value. + pub value: V, + /// The moment in history when the value got set. + pub index: I, +} + +// Utility function for panicking cast (enabling casts similar to `as` cast for number). +fn saturating_into>(i: I) -> U { + match i.try_into() { + Ok(index) => index, + Err(_) => ::max_value(), + } +} + +#[cfg_attr(any(test, feature = "test"), derive(PartialEq, Debug))] +/// Prunning result to be able to proceed +/// with further update if the value needs it. +pub enum PruneResult { + Unchanged, + Changed, + Cleared, +} diff --git a/core/utils/historical-data/src/linear.rs b/core/utils/historical-data/src/linear.rs new file mode 100644 index 0000000000000..2e95ca3c3c22a --- /dev/null +++ b/core/utils/historical-data/src/linear.rs @@ -0,0 +1,392 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Linear historical data. +//! +//! Current encoding is a single encoded succession of values and +//! their state index (version 1 is used for it). +//! The frame for n elements is: +//! +//! `1 byte version ++ (u64 le encoded state index ++ byte value of element) * n +//! ++ (u64 le encoded index of element in the frame) * n - 1 ++ n encoded as le u64` +//! +//! Start index of first element and end of last element are not needed since +//! all other values are of constant size. +//! Latest values and states are pushed a the end, this is ordered +//! by state. +//! Version is optional as can be ommitted for storages of a single kind. +//! This history does not scale with number of version and would need to be split. +//! Other version can be use (a reverse linked list storage with range indexing +//! could be use). +//! Access to latest value in history should be the less costy access. + +#[cfg(not(feature = "std"))] +use rstd::{vec::Vec, vec}; +use rstd::marker::PhantomData; +use rstd::borrow::Cow; +use crate::HistoricalValue; + + +/// Arraylike buffer with in place byte data. +/// Can be written as is in underlying storage. +/// Could be extended to direct access memory too. +#[derive(Debug, Clone)] +pub struct Serialized<'a, F>(Cow<'a, [u8]>, PhantomData); + +impl<'a, 'b, F> PartialEq> for Serialized<'a, F> { + fn eq(&self, other: &Serialized<'b, F>) -> bool { + self.0.eq(&other.0) + } +} + +impl<'a, F> Eq for Serialized<'a, F> { } + +/// Serialized specific behavior. +pub trait SerializedConfig { + /// Encoded empty slice. + fn empty() -> &'static [u8]; + /// Size needed to encode version. 
+ /// Should be a static value. + fn version_len() -> usize; +} + +#[derive(Debug, Clone)] +#[cfg_attr(any(test, feature = "test"), derive(PartialEq))] +/// Serialize without versioning. +pub struct NoVersion; + +#[derive(Debug, Clone)] +#[cfg_attr(any(test, feature = "test"), derive(PartialEq))] +/// Serialize with default version. +pub struct DefaultVersion; + +impl SerializedConfig for NoVersion { + fn empty() -> &'static [u8] { + &NO_VERSION_EMPTY_SERIALIZED + } + fn version_len() -> usize { + 0 + } +} + +impl SerializedConfig for DefaultVersion { + fn empty() -> &'static [u8] { + &DEFAULT_VERSION_EMPTY_SERIALIZED + } + fn version_len() -> usize { + 1 + } +} + +// Length in number of bytes for an encoded size. +// Current value is one of a u64. +// Used both for number of element in history and +// length of elements. +const SIZE_BYTE_LEN: usize = 8; + +// Basis implementation must be on par with InMemory. +// Those method could be move to a 'VecLike' trait. +// +// Those function requires prior index checking. 
+impl<'a, F: SerializedConfig> Serialized<'a, F> { + + pub fn into_vec(self) -> Vec { + self.0.into_owned() + } + + pub(crate) fn len(&self) -> usize { + let len = self.0.len(); + self.read_le_usize(len - SIZE_BYTE_LEN) as usize + } + + pub(crate) fn clear(&mut self) { + self.write_le_usize(F::version_len(), 0); + self.0.to_mut().truncate(F::version_len() + SIZE_BYTE_LEN); + } + + #[cfg(test)] + fn truncate(&mut self, index: usize) { + // This could be implemented more efficiently + // (useless for test) + self.remove_range(index, self.len()); + } + + // index stay in truncated content + pub(crate) fn truncate_until(&mut self, index: usize) { + self.remove_range(0, index); + } + + pub(crate) fn pop(&mut self) -> Option, u64>> { + let len = self.len(); + if len == 0 { + return None; + } + let start_ix = self.index_element(len - 1); + let end_ix = self.index_start(); + let state = self.read_le_u64(start_ix); + let value = self.0[start_ix + SIZE_BYTE_LEN..end_ix].to_vec(); + if len - 1 == 0 { + self.clear(); + return Some(HistoricalValue { value, index: state }) + } else { + self.write_le_usize(self.0.len() - (SIZE_BYTE_LEN * 2), len - 1); + }; + let ix_size = (len * SIZE_BYTE_LEN) - SIZE_BYTE_LEN; + self.slice_copy(end_ix, start_ix, ix_size); + self.0.to_mut().truncate(start_ix + ix_size); + Some(HistoricalValue { value, index: state }) + } + + pub(crate) fn push(&mut self, val: HistoricalValue<&[u8], u64>) { + self.push_extra(val, &[]) + } + + /// variant of push where part of the value is in a second slice. + pub(crate) fn push_extra(&mut self, val: HistoricalValue<&[u8], u64>, extra: &[u8]) { + let len = self.len(); + let start_ix = self.index_start(); + let end_ix = self.0.len(); + let new_len = self.0.len() + SIZE_BYTE_LEN + val.value.len() + extra.len(); + self.0.to_mut().resize(new_len, 0); + self.0.to_mut().copy_within( + start_ix .. 
end_ix, + start_ix + SIZE_BYTE_LEN + val.value.len() + extra.len() + ); + let mut position = start_ix; + self.write_le_u64(position, val.index); + position += SIZE_BYTE_LEN; + self.0.to_mut()[position .. position + val.value.len()].copy_from_slice(val.value); + position += val.value.len(); + self.0.to_mut()[position .. position + extra.len()].copy_from_slice(extra); + if len > 0 { + self.write_le_usize(self.0.len() - SIZE_BYTE_LEN, start_ix); + self.append_le_usize(len + 1); + } else { + self.write_le_usize(self.0.len() - SIZE_BYTE_LEN, 1); + } + } + + #[cfg(test)] + fn remove(&mut self, index: usize) { + self.remove_range(index, index + 1); + } + + fn remove_range(&mut self, index: usize, end: usize) { + if end == 0 { + return; + } + let len = self.len(); + if len <= end - index && index == 0 { + self.clear(); + return; + } + // eager removal is costy, running some gc impl + // can be interesting. + let elt_start = self.index_element(index); + let start_ix = self.index_start(); + let elt_end = if end == len { + start_ix + } else { + self.index_element(end) + }; + let delete_size = elt_end - elt_start; + for _ in elt_start..elt_end { + let _ = self.0.to_mut().remove(elt_start); + } + let start_ix = start_ix - delete_size; + + let len = len - (end - index); + for i in index..end { + let pos = i + (end - index); + if pos < len { + let old_value = self.read_le_usize(start_ix + pos * SIZE_BYTE_LEN); + self.write_le_usize(start_ix + i * SIZE_BYTE_LEN, old_value - delete_size); + } + } + let end_index = start_ix + len * SIZE_BYTE_LEN; + self.write_le_usize(end_index - SIZE_BYTE_LEN, len); + self.0.to_mut().truncate(end_index); + + } + + pub(crate) fn get_state(&self, index: usize) -> HistoricalValue<&[u8], u64> { + let start_ix = self.index_element(index); + let len = self.len(); + let end_ix = if index == len - 1 { + self.index_start() + } else { + self.index_element(index + 1) + }; + let state = self.read_le_u64(start_ix); + HistoricalValue { + value: &self.0[start_ix 
+ SIZE_BYTE_LEN..end_ix], + index: state, + } + } + +} + +const NO_VERSION_EMPTY_SERIALIZED: [u8; SIZE_BYTE_LEN] = [0u8; SIZE_BYTE_LEN]; +const DEFAULT_VERSION: u8 = 1; +const DEFAULT_VERSION_EMPTY_SERIALIZED: [u8; SIZE_BYTE_LEN + 1] = { + let mut buf = [0u8; SIZE_BYTE_LEN + 1]; + buf[0] = DEFAULT_VERSION; + buf +}; + +impl<'a, F: SerializedConfig> Default for Serialized<'a, F> { + fn default() -> Self { + Serialized(Cow::Borrowed(F::empty()), PhantomData) + } +} + +impl<'a, F> Into> for &'a[u8] { + fn into(self) -> Serialized<'a, F> { + Serialized(Cow::Borrowed(self), PhantomData) + } +} + +impl Into> for Vec { + fn into(self) -> Serialized<'static, F> { + Serialized(Cow::Owned(self), PhantomData) + } +} + +// Utility function for basis implementation. +impl<'a, F: SerializedConfig> Serialized<'a, F> { + + // Index at the end of the element part, start of internal index table in the buffer. + // (also followed by the encoded size, the last part in the buffer) + fn index_start(&self) -> usize { + let nb_ix = self.len(); + if nb_ix == 0 { return F::version_len(); } + let end = self.0.len(); + end - (nb_ix * SIZE_BYTE_LEN) + } + + fn index_element(&self, position: usize) -> usize { + if position == 0 { + return F::version_len(); + } + let i = self.index_start() + (position - 1) * SIZE_BYTE_LEN; + self.read_le_usize(i) + } + + fn slice_copy(&mut self, start_from: usize, start_to: usize, size: usize) { + self.0.to_mut().copy_within(start_from..start_from + size, start_to); + } + + // Usize encoded as le u64 (for historical value). + fn read_le_u64(&self, pos: usize) -> u64 { + let mut buffer = [0u8; SIZE_BYTE_LEN]; + buffer.copy_from_slice(&self.0[pos..pos + SIZE_BYTE_LEN]); + u64::from_le_bytes(buffer) + } + + // Read usize encoded as le u64 (only for internal indexing). 
+ fn read_le_usize(&self, pos: usize) -> usize { + let mut buffer = [0u8; SIZE_BYTE_LEN]; + buffer.copy_from_slice(&self.0[pos..pos + SIZE_BYTE_LEN]); + u64::from_le_bytes(buffer) as usize + } + + // Write usize encoded as le u64. + fn write_le_usize(&mut self, pos: usize, value: usize) { + let buffer = (value as u64).to_le_bytes(); + self.0.to_mut()[pos..pos + SIZE_BYTE_LEN].copy_from_slice(&buffer[..]); + } + + // Append usize encoded as le u64. + fn append_le_usize(&mut self, value: usize) { + let buffer = (value as u64).to_le_bytes(); + self.0.to_mut().extend_from_slice(&buffer[..]); + } + + // Write u64 encoded as le. + fn write_le_u64(&mut self, pos: usize, value: u64) { + let buffer = (value as u64).to_le_bytes(); + self.0.to_mut()[pos..pos + SIZE_BYTE_LEN].copy_from_slice(&buffer[..]); + } + +} + +#[cfg(test)] +mod test { + use super::*; + + impl From<(V, I)> for HistoricalValue { + fn from(input: (V, I)) -> HistoricalValue { + HistoricalValue { value: input.0, index: input.1 } + } + } + + fn test_serialized_basis(mut ser: Serialized) { + // test basis unsafe function similar to a simple vec + // without index checking. 
+ let v1 = &b"val1"[..]; + let v2 = &b"value_2"[..]; + let v3 = &b"a third value 3"[..]; + + assert_eq!(ser.len(), 0); + assert_eq!(ser.pop(), None); + ser.push((v1, 1).into()); + assert_eq!(ser.get_state(0), (v1, 1).into()); + assert_eq!(ser.pop(), Some((v1.to_vec(), 1).into())); + assert_eq!(ser.len(), 0); + ser.push((v1, 1).into()); + ser.push((v2, 2).into()); + ser.push((v3, 3).into()); + assert_eq!(ser.get_state(0), (v1, 1).into()); + assert_eq!(ser.get_state(1), (v2, 2).into()); + assert_eq!(ser.get_state(2), (v3, 3).into()); + assert_eq!(ser.pop(), Some((v3.to_vec(), 3).into())); + assert_eq!(ser.len(), 2); + ser.push((v3, 3).into()); + assert_eq!(ser.get_state(2), (v3, 3).into()); + ser.remove(0); + assert_eq!(ser.len(), 2); + assert_eq!(ser.get_state(0), (v2, 2).into()); + assert_eq!(ser.get_state(1), (v3, 3).into()); + ser.push((v1, 1).into()); + ser.remove(1); + assert_eq!(ser.len(), 2); + assert_eq!(ser.get_state(0), (v2, 2).into()); + assert_eq!(ser.get_state(1), (v1, 1).into()); + ser.push((v1, 1).into()); + ser.truncate(1); + assert_eq!(ser.len(), 1); + assert_eq!(ser.get_state(0), (v2, 2).into()); + ser.push((v1, 1).into()); + ser.push((v3, 3).into()); + ser.truncate_until(1); + assert_eq!(ser.len(), 2); + assert_eq!(ser.get_state(0), (v1, 1).into()); + assert_eq!(ser.get_state(1), (v3, 3).into()); + ser.push((v2, 2).into()); + ser.truncate_until(2); + assert_eq!(ser.len(), 1); + assert_eq!(ser.get_state(0), (v2, 2).into()); + + } + + #[test] + fn serialized_basis() { + let ser1: Serialized = Default::default(); + let ser2: Serialized = Default::default(); + test_serialized_basis(ser1); + test_serialized_basis(ser2); + } +} diff --git a/core/utils/historical-data/src/tree.rs b/core/utils/historical-data/src/tree.rs new file mode 100644 index 0000000000000..beb996341026e --- /dev/null +++ b/core/utils/historical-data/src/tree.rs @@ -0,0 +1,340 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::linear::{ + Serialized as SerializedInner, + SerializedConfig, +}; +use crate::HistoricalValue; +use crate::PruneResult; +use crate::saturating_into; +use rstd::vec::Vec; +use rstd::convert::{TryFrom, TryInto}; +use num_traits::Bounded; + +/// Trait defining a state for querying or modifying a tree. +/// This is a collection of branches index, corresponding +/// to a tree path. +pub trait BranchesStateTrait { + type Branch: BranchStateTrait; + type Iter: Iterator; + + /// Get branch state for node at a given index. + fn get_branch(self, index: I) -> Option; + + /// Get the last index for the state, inclusive. + fn last_index(self) -> I; + + /// Iterator over the branch states. + fn iter(self) -> Self::Iter; +} + +/// Trait defining a state for querying or modifying a branch. +/// This is therefore the representation of a branch state. +pub trait BranchStateTrait { + + /// Get state for node at a given index. + fn get_node(&self, i: I) -> S; + + /// Get the last index for the state, inclusive. + fn last_index(&self) -> I; +} + +/// This is a simple range, end non inclusive. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BranchRange { + pub start: u64, + pub end: u64, +} + +impl<'a> BranchStateTrait for &'a BranchRange { + + fn get_node(&self, i: u64) -> bool { + i >= self.start && i < self.end + } + + fn last_index(&self) -> u64 { + // underflow should not happen as long as branchstateref are not allowed to be empty. + self.end - 1 + } + +} + +/// u64 is use a a state target so it is implemented as +/// a upper bound. +impl<'a> BranchStateTrait for u64 { + + fn get_node(&self, i: u64) -> bool { + &i <= self + } + + fn last_index(&self) -> u64 { + *self + } + +} + +impl BranchRange { + /// Return true if the state exists, false otherwhise. + pub fn get_state(&self, index: u64) -> bool { + index < self.end && index >= self.start + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BranchState { + pub branch_index: u64, + pub range: BranchRange, +} + + +impl<'a, F: SerializedConfig> Serialized<'a, F> { + + pub fn into_vec(self) -> Vec { + self.0.into_vec() + } + + pub fn get (&self, state: S) -> Option> + where + S: BranchStateTrait, + I: Copy + Eq + TryFrom + TryInto + Bounded, + { + let mut index = self.0.len(); + if index == 0 { + return None; + } + while index > 0 { + index -= 1; + let HistoricalValue { value, index: state_index } = self.0.get_state(index); + let state_index = saturating_into(state_index); + if state.get_node(state_index) { + // Note this extra byte is note optimal, should be part of index encoding + if value.len() > 0 { + return Some(Some(&value[..value.len() - 1])); + } else { + return Some(None); + } + } + } + None + } + + /// This append the value, and can only be use in an + /// orderly fashion. 
+ pub fn push(&mut self, state: S, value: Option<&[u8]>) + where + S: BranchStateTrait, + I: Copy + Eq + TryFrom + TryInto, + { + let target_state_index = saturating_into(state.last_index()); + let index = self.0.len(); + if index > 0 { + let last = self.0.get_state(index - 1); + debug_assert!(target_state_index >= last.index); + if target_state_index == last.index { + self.0.pop(); + } + } + match value { + Some(value) => + self.0.push_extra(HistoricalValue {value, index: target_state_index}, &[0][..]), + None => + self.0.push(HistoricalValue {value: &[], index: target_state_index}), + } + } + + /// Prune value that are before the index if they are + /// not needed afterward. + pub fn prune(&mut self, index: I) -> PruneResult + where + I: Copy + Eq + TryFrom + TryInto, + { + let from = saturating_into(index); + let len = self.0.len(); + let mut last_index_with_value = None; + let mut index = 0; + while index < len { + let history = self.0.get_state(index); + if history.index == from + 1 { + // new first content + if history.value.len() != 0 { + // start value over a value drop until here + last_index_with_value = Some(index); + break; + } + } else if history.index > from { + if history.value.len() == 0 + && last_index_with_value.is_none() { + // delete on delete, continue + } else { + if last_index_with_value.is_none() { + // first value, use this index + last_index_with_value = Some(index); + } + break; + } + } + if history.value.len() > 0 { + last_index_with_value = Some(index); + } else { + last_index_with_value = None; + } + index += 1; + } + + if let Some(last_index_with_value) = last_index_with_value { + if last_index_with_value > 0 { + self.0.truncate_until(last_index_with_value); + return PruneResult::Changed; + } + } else { + self.0.clear(); + return PruneResult::Cleared; + } + + PruneResult::Unchanged + } + +} + +#[derive(Debug, Clone)] +/// Serialized implementation when transaction support is not +/// needed. 
+pub struct Serialized<'a, F>(SerializedInner<'a, F>); + +impl<'a, 'b, F> PartialEq> for Serialized<'a, F> { + fn eq(&self, other: &Serialized<'b, F>) -> bool { + self.0.eq(&other.0) + } +} + +impl<'a, F> Eq for Serialized<'a, F> { } + +impl<'a, F> Serialized<'a, F> { + pub fn from_slice(s: &'a [u8]) -> Serialized<'a, F> { + Serialized(s.into()) + } + + pub fn from_vec(s: Vec) -> Serialized<'static, F> { + Serialized(s.into()) + } +} + +impl<'a, F> Into> for &'a [u8] { + fn into(self) -> Serialized<'a, F> { + Serialized(self.into()) + } +} + +impl Into> for Vec { + fn into(self) -> Serialized<'static, F> { + Serialized(self.into()) + } +} + +impl<'a, F: SerializedConfig> Default for Serialized<'a, F> { + fn default() -> Self { + Serialized(SerializedInner::<'a, F>::default()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_prune() { + let mut item: Serialized = Default::default(); + // setting value respecting branch build order + for i in 1..6 { + item.push(i, Some(&[i as u8])); + } + + for a in 1..6 { + assert_eq!(item.get(a), Some(Some(&[a as u8][..]))); + } + item.prune(1); + assert_eq!(item.get(1), None); + for a in 2..6 { + assert_eq!(item.get(a), Some(Some(&[a as u8][..]))); + } + + item.prune(4); + for a in 1..5 { + assert_eq!(item.get(a), None); + } + for a in 5..6 { + assert_eq!(item.get(a), Some(Some(&[a as u8][..]))); + } + + item.prune(80); + for a in 1..4 { + assert_eq!(item.get(a), None); + } + // pruning preserve last valid value + for a in 5..11 { + assert_eq!(item.get(a), Some(Some(&[5 as u8][..]))); + } + + // prune skip unrelevant delete + let mut item: Serialized = Default::default(); + item.push(1, Some(&[1 as u8])); + item.push(2, None); + item.push(3, Some(&[3 as u8])); + assert_eq!(item.get(1), Some(Some(&[1][..]))); + assert_eq!(item.get(2), Some(None)); + assert_eq!(item.get(3), Some(Some(&[3][..]))); + assert_eq!(item.0.len(), 3); + item.prune(1); + assert_eq!(item.0.len(), 1); + assert_eq!(item.get(1), None); + 
assert_eq!(item.get(2), None); + assert_eq!(item.get(3), Some(Some(&[3][..]))); + + // prune skip unrelevant delete + let mut item: Serialized = Default::default(); + item.push(1, Some(&[1 as u8])); + item.push(3, None); + item.push(4, Some(&[4 as u8])); + assert_eq!(item.get(1), Some(Some(&[1][..]))); + assert_eq!(item.get(2), Some(Some(&[1][..]))); + assert_eq!(item.get(3), Some(None)); + assert_eq!(item.get(4), Some(Some(&[4][..]))); + assert_eq!(item.0.len(), 3); + // 1 needed for state two + assert_eq!(PruneResult::Unchanged, item.prune(1)); + // 3 unneeded + item.prune(2); + assert_eq!(item.0.len(), 1); + assert_eq!(item.get(1), None); + assert_eq!(item.get(2), None); + assert_eq!(item.get(3), None); + assert_eq!(item.get(4), Some(Some(&[4][..]))); + + // prune delete at block + let mut item: Serialized = Default::default(); + item.push(0, Some(&[0 as u8])); + item.push(1, None); + assert_eq!(item.get(0), Some(Some(&[0][..]))); + assert_eq!(item.get(1), Some(None)); + item.prune(0); + assert_eq!(item.get(0), None); + assert_eq!(item.get(1), None); + assert_eq!(item.0.len(), 0); + + } + +}