diff --git a/bin/node-template/node/src/rpc.rs b/bin/node-template/node/src/rpc.rs index 7edae4d81474f..981f375d0b462 100644 --- a/bin/node-template/node/src/rpc.rs +++ b/bin/node-template/node/src/rpc.rs @@ -39,14 +39,14 @@ where C::Api: BlockBuilder, P: TransactionPool + 'static, { - use pallet_transaction_payment_rpc::{TransactionPaymentApiServer, TransactionPaymentRpc}; - use substrate_frame_rpc_system::{SystemApiServer, SystemRpc}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; let mut module = RpcModule::new(()); let FullDeps { client, pool, deny_unsafe } = deps; - module.merge(SystemRpc::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; - module.merge(TransactionPaymentRpc::new(client).into_rpc())?; + module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client).into_rpc())?; // Extend this RPC with a custom API by using the following syntax. 
// `YourRpcStruct` should have a reference to a client, which is needed diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 05aa973e102b1..e5b666195e1bc 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -37,12 +37,10 @@ use jsonrpsee::RpcModule; use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; use sc_client_api::AuxStore; use sc_consensus_babe::{Config, Epoch}; -use sc_consensus_babe_rpc::BabeRpc; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, }; -use sc_finality_grandpa_rpc::GrandpaRpc; use sc_rpc::SubscriptionTaskExecutor; pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; @@ -120,15 +118,15 @@ where B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use pallet_contracts_rpc::{ContractsApiServer, ContractsRpc}; - use pallet_mmr_rpc::{MmrApiServer, MmrRpc}; - use pallet_transaction_payment_rpc::{TransactionPaymentApiServer, TransactionPaymentRpc}; - use sc_consensus_babe_rpc::BabeApiServer; - use sc_finality_grandpa_rpc::GrandpaApiServer; + use pallet_contracts_rpc::{Contracts, ContractsApiServer}; + use pallet_mmr_rpc::{Mmr, MmrApiServer}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use sc_consensus_babe_rpc::{Babe, BabeApiServer}; + use sc_finality_grandpa_rpc::{Grandpa, GrandpaApiServer}; use sc_rpc::dev::{Dev, DevApiServer}; - use sc_sync_state_rpc::{SyncStateRpc, SyncStateRpcApiServer}; - use substrate_frame_rpc_system::{SystemApiServer, SystemRpc}; - use substrate_state_trie_migration_rpc::StateMigrationApiServer; + use sc_sync_state_rpc::{SyncState, SyncStateApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; + use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; let mut io = RpcModule::new(()); let FullDeps 
{ client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa } = deps; @@ -142,15 +140,15 @@ where finality_provider, } = grandpa; - io.merge(SystemRpc::new(client.clone(), pool, deny_unsafe).into_rpc())?; + io.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. - io.merge(ContractsRpc::new(client.clone()).into_rpc())?; - io.merge(MmrRpc::new(client.clone()).into_rpc())?; - io.merge(TransactionPaymentRpc::new(client.clone()).into_rpc())?; + io.merge(Contracts::new(client.clone()).into_rpc())?; + io.merge(Mmr::new(client.clone()).into_rpc())?; + io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge( - BabeRpc::new( + Babe::new( client.clone(), shared_epoch_changes.clone(), keystore, @@ -161,7 +159,7 @@ where .into_rpc(), )?; io.merge( - GrandpaRpc::new( + Grandpa::new( subscription_executor, shared_authority_set.clone(), shared_voter_state, @@ -172,14 +170,11 @@ where )?; io.merge( - SyncStateRpc::new(chain_spec, client.clone(), shared_authority_set, shared_epoch_changes)? + SyncState::new(chain_spec, client.clone(), shared_authority_set, shared_epoch_changes)? .into_rpc(), )?; - io.merge( - substrate_state_trie_migration_rpc::MigrationRpc::new(client.clone(), backend, deny_unsafe) - .into_rpc(), - )?; + io.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?; io.merge(Dev::new(client, deny_unsafe).into_rpc())?; Ok(io) diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index ea35678a48b8f..c248d33cb6c23 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -100,17 +100,17 @@ pub trait BeefyApi { } /// Implements the BeefyApi RPC trait for interacting with BEEFY. 
-pub struct BeefyRpcHandler { +pub struct Beefy { signed_commitment_stream: BeefySignedCommitmentStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } -impl BeefyRpcHandler +impl Beefy where Block: BlockT, { - /// Creates a new BeefyRpcHandler instance. + /// Creates a new Beefy Rpc handler instance. pub fn new( signed_commitment_stream: BeefySignedCommitmentStream, best_block_stream: BeefyBestBlockStream, @@ -131,8 +131,7 @@ where } #[async_trait] -impl BeefyApiServer - for BeefyRpcHandler +impl BeefyApiServer for Beefy where Block: BlockT, { @@ -174,24 +173,20 @@ mod tests { use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) - { + fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefySignedCommitmentSender) { + ) -> (RpcModule>, BeefySignedCommitmentSender) { let (commitment_sender, commitment_stream) = BeefySignedCommitmentStream::::channel(); - let handler = BeefyRpcHandler::new( - commitment_stream, - best_block_stream, - sc_rpc::testing::test_executor(), - ) - .expect("Setting up the BEEFY RPC handler works"); + let handler = + Beefy::new(commitment_stream, best_block_stream, sc_rpc::testing::test_executor()) + .expect("Setting up the BEEFY RPC handler works"); (handler.into_rpc(), commitment_sender) } diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 6cb0de0ebd04c..b4a3885e39906 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -115,6 +115,11 @@ pub struct RunCmd { #[clap(long)] pub rpc_max_response_size: Option, + /// Set the maximum concurrent subscriptions per connection. /// Default is 1024. 
+ #[clap(long)] + pub rpc_max_subscriptions_per_connection: Option, + /// Expose Prometheus exporter on all interfaces. /// /// Default is local. @@ -459,6 +464,18 @@ impl CliConfiguration for RunCmd { Ok(self.rpc_max_payload) } + fn rpc_max_request_size(&self) -> Result> { + Ok(self.rpc_max_request_size) + } + + fn rpc_max_response_size(&self) -> Result> { + Ok(self.rpc_max_response_size) + } + + fn rpc_max_subscriptions_per_connection(&self) -> Result> { + Ok(self.rpc_max_subscriptions_per_connection) + } + fn ws_max_out_buffer_capacity(&self) -> Result> { Ok(self.ws_max_out_buffer_capacity) } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index e38d34b92c74d..5e91cf6c74dae 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -369,6 +369,11 @@ pub trait CliConfiguration: Sized { Ok(None) } + /// Get maximum number of subscriptions per connection. + fn rpc_max_subscriptions_per_connection(&self) -> Result> { + Ok(None) + } + /// Get maximum WS output buffer capacity. 
fn ws_max_out_buffer_capacity(&self) -> Result> { Ok(None) @@ -539,7 +544,7 @@ pub trait CliConfiguration: Sized { rpc_max_request_size: self.rpc_max_request_size()?, rpc_max_response_size: self.rpc_max_response_size()?, rpc_id_provider: None, - rpc_max_subs_per_conn: None, + rpc_max_subs_per_conn: self.rpc_max_subscriptions_per_connection()?, ws_max_out_buffer_capacity: self.ws_max_out_buffer_capacity()?, prometheus_config: self .prometheus_config(DCV::prometheus_listen_port(), &chain_spec)?, diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index ac5039fb89754..74c2db92c3215 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -163,7 +163,7 @@ impl NetworkParams { let port = self.port.unwrap_or(default_listen_port); let listen_addresses = if self.listen_addr.is_empty() { - if is_validator { + if is_validator || is_dev { vec![ Multiaddr::empty() .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index d5f21606c62ed..af19d410346e3 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -49,7 +49,7 @@ pub trait BabeApi { } /// Provides RPC methods for interacting with Babe. -pub struct BabeRpc { +pub struct Babe { /// shared reference to the client. client: Arc, /// shared reference to EpochChanges @@ -64,8 +64,8 @@ pub struct BabeRpc { deny_unsafe: DenyUnsafe, } -impl BabeRpc { - /// Creates a new instance of the BabeRpc handler. +impl Babe { + /// Creates a new instance of the Babe Rpc handler. 
pub fn new( client: Arc, shared_epoch_changes: SharedEpochChanges, @@ -79,7 +79,7 @@ impl BabeRpc { } #[async_trait] -impl BabeApiServer for BabeRpc +impl BabeApiServer for Babe where B: BlockT, C: ProvideRuntimeApi @@ -239,7 +239,7 @@ mod tests { fn test_babe_rpc_module( deny_unsafe: DenyUnsafe, - ) -> BabeRpc> { + ) -> Babe> { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); @@ -250,7 +250,7 @@ mod tests { let epoch_changes = link.epoch_changes().clone(); let keystore = create_temp_keystore::(Sr25519Keyring::Alice).0; - BabeRpc::new(client.clone(), epoch_changes, keystore, config, longest_chain, deny_unsafe) + Babe::new(client.clone(), epoch_changes, keystore, config, longest_chain, deny_unsafe) } #[tokio::test] diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index cb51d71b20bf4..bdb86c125e20a 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -66,7 +66,7 @@ pub trait GrandpaApi { } /// Provides RPC methods for interacting with GRANDPA. -pub struct GrandpaRpc { +pub struct Grandpa { executor: SubscriptionTaskExecutor, authority_set: AuthoritySet, voter_state: VoterState, @@ -74,9 +74,9 @@ pub struct GrandpaRpc { finality_proof_provider: Arc, } impl - GrandpaRpc + Grandpa { - /// Prepare a new [`GrandpaRpc`] + /// Prepare a new [`Grandpa`] Rpc handler. 
pub fn new( executor: SubscriptionTaskExecutor, authority_set: AuthoritySet, @@ -91,7 +91,7 @@ impl #[async_trait] impl GrandpaApiServer> - for GrandpaRpc + for Grandpa where VoterState: ReportVoterState + Send + Sync + 'static, AuthoritySet: ReportAuthoritySet + Send + Sync + 'static, @@ -243,7 +243,7 @@ mod tests { fn setup_io_handler( voter_state: VoterState, ) -> ( - RpcModule>, + RpcModule>, GrandpaJustificationSender, ) where @@ -256,7 +256,7 @@ mod tests { voter_state: VoterState, finality_proof: Option>, ) -> ( - RpcModule>, + RpcModule>, GrandpaJustificationSender, ) where @@ -266,7 +266,7 @@ mod tests { let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proof }); let executor = Arc::new(TaskExecutor::default()); - let rpc = GrandpaRpc::new( + let rpc = Grandpa::new( executor, TestAuthoritySet, voter_state, diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 4b5b74950a486..232be4edc8aab 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -167,7 +167,7 @@ pub fn new_full( executor: SubscriptionTaskExecutor, deny_unsafe: DenyUnsafe, rpc_max_payload: Option, -) -> (StateApi, ChildState) +) -> (State, ChildState) where Block: BlockT + 'static, Block::Hash: Unpin, @@ -192,17 +192,17 @@ where rpc_max_payload, )); let backend = Box::new(self::state_full::FullState::new(client, executor, rpc_max_payload)); - (StateApi { backend, deny_unsafe }, ChildState { backend: child_backend }) + (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } /// State API with subscriptions support. 
-pub struct StateApi { +pub struct State { backend: Box>, /// Whether to deny unsafe calls deny_unsafe: DenyUnsafe, } -impl StateApiServer for StateApi +impl StateApiServer for State where Block: BlockT + 'static, Client: Send + Sync + 'static, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 027b704789635..24ba670cfcd65 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -480,11 +480,18 @@ where } fn legacy_cli_parsing(config: &Configuration) -> (Option, Option, Option) { - let ws_max_response_size = config.ws_max_out_buffer_capacity.map(|max| { - eprintln!("DEPRECATED: `--ws_max_out_buffer_capacity` has been removed use `rpc-max-response-size or rpc-max-request-size` instead"); - eprintln!("Setting WS `rpc-max-response-size` to `max(ws_max_out_buffer_capacity, rpc_max_response_size)`"); - std::cmp::max(max, config.rpc_max_response_size.unwrap_or(0)) - }); + let ws_max_response_size = match ( + config.ws_max_out_buffer_capacity, + config.rpc_max_response_size, + ) { + (Some(legacy_max), max) => { + eprintln!("DEPRECATED: `--ws_max_out_buffer_capacity` has been removed; use `rpc-max-response-size or rpc-max-request-size` instead"); + eprintln!("Setting WS `rpc-max-response-size` to `max(ws_max_out_buffer_capacity, rpc_max_response_size)`"); + Some(std::cmp::max(legacy_max, max.unwrap_or(0))) + }, + (None, Some(m)) => Some(m), + (None, None) => None, + }; let max_request_size = match (config.rpc_max_payload, config.rpc_max_request_size) { (Some(legacy_max), max) => { @@ -498,7 +505,7 @@ fn legacy_cli_parsing(config: &Configuration) -> (Option, Option, (None, None) => None, }; - let http_max_response_size = match (config.rpc_max_payload, config.rpc_max_request_size) { + let http_max_response_size = match (config.rpc_max_payload, config.rpc_max_response_size) { (Some(legacy_max), max) => { eprintln!("DEPRECATED: `--rpc_max_payload` has been removed use `rpc-max-response-size or rpc-max-request-size` instead"); eprintln!( 
diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index 02a22a838b8b2..9540d94c57918 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -37,7 +37,7 @@ //! ``` //! //! If the [`LightSyncStateExtension`] is not added as an extension to the chain spec, -//! the [`SyncStateRpc`] will fail at instantiation. +//! the [`SyncState`] will fail at instantiation. #![deny(unused_crate_dependencies)] @@ -125,21 +125,21 @@ pub struct LightSyncState { /// An api for sync state RPC calls. #[rpc(client, server)] -pub trait SyncStateRpcApi { +pub trait SyncStateApi { /// Returns the JSON serialized chainspec running the node, with a sync state. #[method(name = "sync_state_genSyncSpec")] fn system_gen_sync_spec(&self, raw: bool) -> RpcResult; } /// An api for sync state RPC calls. -pub struct SyncStateRpc { +pub struct SyncState { chain_spec: Box, client: Arc, shared_authority_set: SharedAuthoritySet, shared_epoch_changes: SharedEpochChanges, } -impl SyncStateRpc +impl SyncState where Block: BlockT, Client: HeaderBackend + sc_client_api::AuxStore + 'static, @@ -180,7 +180,7 @@ where } } -impl SyncStateRpcApiServer for SyncStateRpc +impl SyncStateApiServer for SyncState where Block: BlockT, Backend: HeaderBackend + sc_client_api::AuxStore + 'static, diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index 599e80676cb19..77ae3f3ed35e3 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -173,12 +173,12 @@ where } /// Contracts RPC methods. -pub struct ContractsRpc { +pub struct Contracts { client: Arc, _marker: PhantomData, } -impl ContractsRpc { +impl Contracts { /// Create new `Contracts` with the given reference to the client. 
pub fn new(client: Arc) -> Self { Self { client, _marker: Default::default() } @@ -193,7 +193,7 @@ impl AccountId, Balance, Hash, - > for ContractsRpc + > for Contracts where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 12e4e11f88256..75032d40f492a 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -131,12 +131,12 @@ pub trait MmrApi { } /// MMR RPC methods. -pub struct MmrRpc { +pub struct Mmr { client: Arc, _marker: PhantomData, } -impl MmrRpc { +impl Mmr { /// Create new `Mmr` with the given reference to the client. pub fn new(client: Arc) -> Self { Self { client, _marker: Default::default() } @@ -144,8 +144,7 @@ impl MmrRpc { } #[async_trait] -impl MmrApiServer<::Hash> - for MmrRpc +impl MmrApiServer<::Hash> for Mmr where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index d78fd7d9ca932..899250cc3f3c3 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -848,6 +848,8 @@ mod benchmarks { // function. let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); + // Allow signed migrations. + SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); }: _(frame_system::RawOrigin::Signed(caller), null, 0, StateTrieMigration::::migration_process()) verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()) @@ -1146,14 +1148,7 @@ mod mock { } sp_tracing::try_init_simple(); - let mut ext: sp_io::TestExternalities = (custom_storage, version).into(); - - // set some genesis values for this pallet as well. 
- ext.execute_with(|| { - SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); - }); - - ext + (custom_storage, version).into() } pub(crate) fn run_to_block(n: u32) -> (H256, u64) { @@ -1292,6 +1287,9 @@ mod test { new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { assert_eq!(MigrationProcess::::get(), Default::default()); + // Allow signed migrations. + SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); + // can't submit if limit is too high. frame_support::assert_err!( StateTrieMigration::continue_migrate( diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index b0be19fdb22a9..75ec42321ef5e 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -51,14 +51,14 @@ pub trait TransactionPaymentApi { } /// Provides RPC methods to query a dispatchable's class, weight and fee. -pub struct TransactionPaymentRpc { +pub struct TransactionPayment { /// Shared reference to the client. client: Arc, _marker: std::marker::PhantomData

, } -impl TransactionPaymentRpc { - /// Creates a new instance of the TransactionPaymentRpc helper. +impl TransactionPayment { + /// Creates a new instance of the TransactionPayment Rpc helper. pub fn new(client: Arc) -> Self { Self { client, _marker: Default::default() } } @@ -84,7 +84,7 @@ impl From for i32 { #[async_trait] impl TransactionPaymentApiServer<::Hash, RuntimeDispatchInfo> - for TransactionPaymentRpc + for TransactionPayment where Block: BlockT, C: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index f1c09251faa5e..bb387e6406243 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -30,7 +30,7 @@ use sp_core::{ Blake2Hasher, }; use sp_externalities::{Extension, Extensions}; -use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration}; +use sp_trie::{empty_child_trie_root, HashKey, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, @@ -310,7 +310,7 @@ impl Externalities for BasicExternalities { ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - crate::in_memory_backend::new_in_mem::() + crate::in_memory_backend::new_in_mem::>() .child_storage_root(&child.child_info, delta, state_version) .0 } else { diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 457d89b8c59aa..6df23cdb7096e 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -23,22 +23,36 @@ use crate::{ use codec::Codec; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion, Storage}; -use sp_trie::{empty_trie_root, LayoutV1, MemoryDB}; +use sp_trie::{empty_trie_root, GenericMemoryDB, HashKey, KeyFunction, LayoutV1, MemoryDB}; 
use std::collections::{BTreeMap, HashMap}; /// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H> +/// +/// It will use [`HashKey`] to store the keys internally. +pub fn new_in_mem_hash_key() -> TrieBackend, H> where + H: Hasher, H::Out: Codec + Ord, { - let db = MemoryDB::default(); + new_in_mem::>() +} + +/// Create a new empty instance of in-memory backend. +pub fn new_in_mem() -> TrieBackend, H> +where + H: Hasher, + H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, +{ + let db = GenericMemoryDB::default(); // V1 is same as V0 for an empty trie. TrieBackend::new(db, empty_trie_root::>()) } -impl TrieBackend, H> +impl TrieBackend, H> where H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { /// Copy the state, with applied updates pub fn update, StorageCollection)>>( @@ -70,14 +84,14 @@ where } /// Merge trie nodes into this backend. - pub fn update_backend(&self, root: H::Out, changes: MemoryDB) -> Self { + pub fn update_backend(&self, root: H::Out, changes: GenericMemoryDB) -> Self { let mut clone = self.backend_storage().clone(); clone.consolidate(changes); Self::new(clone, root) } /// Apply the given transaction to this backend and set the root to the given value. 
- pub fn apply_transaction(&mut self, root: H::Out, transaction: MemoryDB) { + pub fn apply_transaction(&mut self, root: H::Out, transaction: GenericMemoryDB) { let mut storage = sp_std::mem::take(self).into_storage(); storage.consolidate(transaction); *self = TrieBackend::new(storage, root); @@ -89,28 +103,33 @@ where } } -impl Clone for TrieBackend, H> +impl Clone for TrieBackend, H> where H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { fn clone(&self) -> Self { TrieBackend::new(self.backend_storage().clone(), *self.root()) } } -impl Default for TrieBackend, H> +impl Default for TrieBackend, H> where + H: Hasher, H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { fn default() -> Self { new_in_mem() } } -impl From<(HashMap, BTreeMap>, StateVersion)> - for TrieBackend, H> +impl + From<(HashMap, BTreeMap>, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { fn from( (inner, state_version): ( @@ -129,9 +148,10 @@ where } } -impl From<(Storage, StateVersion)> for TrieBackend, H> +impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { fn from((inners, state_version): (Storage, StateVersion)) -> Self { let mut inner: HashMap, BTreeMap> = inners @@ -144,10 +164,11 @@ where } } -impl From<(BTreeMap, StateVersion)> - for TrieBackend, H> +impl From<(BTreeMap, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { fn from((inner, state_version): (BTreeMap, StateVersion)) -> Self { let mut expanded = HashMap::new(); @@ -156,10 +177,11 @@ where } } -impl From<(Vec<(Option, StorageCollection)>, StateVersion)> - for TrieBackend, H> +impl From<(Vec<(Option, StorageCollection)>, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, + KF: KeyFunction + Send + Sync, { fn from( (inner, state_version): (Vec<(Option, StorageCollection)>, StateVersion), @@ -189,7 +211,7 @@ mod tests { #[test] fn 
in_memory_with_child_trie_only() { let state_version = StateVersion::default(); - let storage = new_in_mem::(); + let storage = new_in_mem_hash_key::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let storage = storage.update( @@ -205,7 +227,7 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { let state_version = StateVersion::default(); - let mut storage = new_in_mem::(); + let mut storage = new_in_mem_hash_key::(); let child_info = ChildInfo::new_default(b"1"); storage.insert( diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 97a9ae8d88cd2..e5b521588aa79 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -143,7 +143,7 @@ mod std_reexport { pub use crate::{ basic::BasicExternalities, error::{Error, ExecutionError}, - in_memory_backend::new_in_mem, + in_memory_backend::{new_in_mem, new_in_mem_hash_key}, proving_backend::{ create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, }, @@ -1347,7 +1347,7 @@ mod execution { #[cfg(test)] mod tests { use super::{ext::Ext, *}; - use crate::execution::CallResult; + use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; use codec::{Decode, Encode}; use sp_core::{ map, @@ -1687,7 +1687,7 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let state = new_in_mem::(); + let state = new_in_mem_hash_key::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1703,7 +1703,7 @@ mod tests { fn append_storage_works() { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); - let state = new_in_mem::(); + let state = new_in_mem_hash_key::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = 
OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1740,7 +1740,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let state = new_in_mem::(); + let state = new_in_mem_hash_key::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 358cb43ab4e92..1dca617acf912 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -31,9 +31,8 @@ pub use error::Error; /// Various re-exports from the `hash-db` crate. pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; use hash_db::{Hasher, Prefix}; -pub use memory_db::prefixed_key; /// Various re-exports from the `memory-db` crate. -pub use memory_db::KeyFunction; +pub use memory_db::{prefixed_key, HashKey, KeyFunction, PrefixedKey}; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; use sp_std::{borrow::Borrow, boxed::Box, marker::PhantomData, vec::Vec}; diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index ad33f55a9f363..6de4c462c62dc 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -41,13 +41,13 @@ parameter_types! { {{/if}} /// Calculated by multiplying the *{{params.weight.weight_metric}}* with `{{params.weight.weight_mul}}` and adding `{{params.weight.weight_add}}`. 
/// - /// Stats [NS]: + /// Statistics in nanoseconds: /// Min, Max: {{underscore stats.min}}, {{underscore stats.max}} /// Average: {{underscore stats.avg}} /// Median: {{underscore stats.median}} /// Std-Dev: {{stats.stddev}} /// - /// Percentiles [NS]: + /// Percentiles in nanoseconds: /// 99th: {{underscore stats.p99}} /// 95th: {{underscore stats.p95}} /// 75th: {{underscore stats.p75}} diff --git a/utils/frame/benchmarking-cli/src/storage/weights.hbs b/utils/frame/benchmarking-cli/src/storage/weights.hbs index 63f896e1104b8..4cb06d7c36b1e 100644 --- a/utils/frame/benchmarking-cli/src/storage/weights.hbs +++ b/utils/frame/benchmarking-cli/src/storage/weights.hbs @@ -49,13 +49,13 @@ pub mod constants { /// Time to read one storage item. /// Calculated by multiplying the *{{params.weight_params.weight_metric}}* of all values with `{{params.weight_params.weight_mul}}` and adding `{{params.weight_params.weight_add}}`. /// - /// Stats [NS]: + /// Statistics in nanoseconds: /// Min, Max: {{underscore read.0.min}}, {{underscore read.0.max}} /// Average: {{underscore read.0.avg}} /// Median: {{underscore read.0.median}} /// Std-Dev: {{read.0.stddev}} /// - /// Percentiles [NS]: + /// Percentiles in nanoseconds: /// 99th: {{underscore read.0.p99}} /// 95th: {{underscore read.0.p95}} /// 75th: {{underscore read.0.p75}} @@ -64,13 +64,13 @@ pub mod constants { /// Time to write one storage item. /// Calculated by multiplying the *{{params.weight_params.weight_metric}}* of all values with `{{params.weight_params.weight_mul}}` and adding `{{params.weight_params.weight_add}}`. 
/// - /// Stats [NS]: + /// Statistics in nanoseconds: /// Min, Max: {{underscore write.0.min}}, {{underscore write.0.max}} /// Average: {{underscore write.0.avg}} /// Median: {{underscore write.0.median}} /// Std-Dev: {{write.0.stddev}} /// - /// Percentiles [NS]: + /// Percentiles in nanoseconds: /// 99th: {{underscore write.0.p99}} /// 95th: {{underscore write.0.p95}} /// 75th: {{underscore write.0.p75}} diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index 531bf463f6523..b6d403ff2fcfd 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -122,21 +122,21 @@ pub trait StateMigrationApi { } /// An implementation of state migration specific RPC methods. -pub struct MigrationRpc { +pub struct StateMigration { client: Arc, backend: Arc, deny_unsafe: DenyUnsafe, _marker: std::marker::PhantomData<(B, BA)>, } -impl MigrationRpc { +impl StateMigration { /// Create new state migration rpc for the given reference to the client. pub fn new(client: Arc, backend: Arc, deny_unsafe: DenyUnsafe) -> Self { - MigrationRpc { client, backend, deny_unsafe, _marker: Default::default() } + StateMigration { client, backend, deny_unsafe, _marker: Default::default() } } } -impl StateMigrationApiServer<::Hash> for MigrationRpc +impl StateMigrationApiServer<::Hash> for StateMigration where B: BlockT, C: Send + Sync + 'static + sc_client_api::HeaderBackend, diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index b044035c8120e..72ad99e435f72 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -70,14 +70,14 @@ impl From for i32 { } /// An implementation of System-specific RPC methods on full client. -pub struct SystemRpc { +pub struct System { client: Arc, pool: Arc

, deny_unsafe: DenyUnsafe, _marker: std::marker::PhantomData, } -impl SystemRpc { +impl System { /// Create new `FullSystem` given client and transaction pool. pub fn new(client: Arc, pool: Arc

, deny_unsafe: DenyUnsafe) -> Self { Self { client, pool, deny_unsafe, _marker: Default::default() } @@ -86,7 +86,7 @@ impl SystemRpc { #[async_trait] impl - SystemApiServer<::Hash, AccountId, Index> for SystemRpc + SystemApiServer<::Hash, AccountId, Index> for System where C: sp_api::ProvideRuntimeApi, C: HeaderBackend, @@ -251,7 +251,7 @@ mod tests { let ext1 = new_transaction(1); block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); - let accounts = SystemRpc::new(client, pool, DenyUnsafe::Yes); + let accounts = System::new(client, pool, DenyUnsafe::Yes); // when let nonce = accounts.nonce(AccountKeyring::Alice.into()).await; @@ -270,7 +270,7 @@ mod tests { let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); - let accounts = SystemRpc::new(client, pool, DenyUnsafe::Yes); + let accounts = System::new(client, pool, DenyUnsafe::Yes); // when let res = accounts.dry_run(vec![].into(), None).await; @@ -289,7 +289,7 @@ mod tests { let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); - let accounts = SystemRpc::new(client, pool, DenyUnsafe::No); + let accounts = System::new(client, pool, DenyUnsafe::No); let tx = Transfer { from: AccountKeyring::Alice.into(), @@ -317,7 +317,7 @@ mod tests { let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); - let accounts = SystemRpc::new(client, pool, DenyUnsafe::No); + let accounts = System::new(client, pool, DenyUnsafe::No); let tx = Transfer { from: AccountKeyring::Alice.into(),