From 9fda259134bbcbca09c7c0d5538f93e2dc302cc5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 8 Aug 2019 14:47:10 +0200 Subject: [PATCH 01/32] Move Service::new to a macro --- core/service/src/lib.rs | 87 +++++++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 33a42e87fe04a..08ed334742738 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -155,11 +155,8 @@ pub struct TelemetryOnConnect { pub telemetry_connection_sinks: TelemetryOnConnectNotifications, } -impl Service { - /// Creates a new service. - pub fn new( - mut config: FactoryFullConfiguration, - ) -> Result { +macro_rules! new_impl { + ($config:ident, $components:ty) => {{ let (signal, exit) = exit_future::signal(); // List of asynchronous tasks to spawn. We collect them, then spawn them all at once. @@ -167,38 +164,38 @@ impl Service { mpsc::unbounded:: + Send>>(); // Create client - let executor = NativeExecutor::new(config.default_heap_pages); + let executor = NativeExecutor::new($config.default_heap_pages); - let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + let keystore = Keystore::open($config.keystore_path.clone(), $config.keystore_password.clone())?; - let (client, on_demand) = Components::build_client(&config, executor, Some(keystore.clone()))?; - let select_chain = Components::build_select_chain(&mut config, client.clone())?; + let (client, on_demand) = <$components>::build_client(&$config, executor, Some(keystore.clone()))?; + let select_chain = <$components>::build_select_chain(&mut $config, client.clone())?; let transaction_pool = Arc::new( - Components::build_transaction_pool(config.transaction_pool.clone(), client.clone())? + <$components>::build_transaction_pool($config.transaction_pool.clone(), client.clone())? 
); let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !config.roles.is_light(), + imports_external_transactions: !$config.roles.is_light(), pool: transaction_pool.clone(), client: client.clone(), }); - let (import_queue, finality_proof_request_builder) = Components::build_import_queue( - &mut config, + let (import_queue, finality_proof_request_builder) = <$components>::build_import_queue( + &mut $config, client.clone(), select_chain.clone(), Some(transaction_pool.clone()), )?; let import_queue = Box::new(import_queue); - let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?; + let finality_proof_provider = <$components>::build_finality_proof_provider(client.clone())?; let chain_info = client.info().chain; - Components::RuntimeServices::generate_initial_session_keys( + <$components>::RuntimeServices::generate_initial_session_keys( client.clone(), - config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), + $config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), )?; - let version = config.full_version(); + let version = $config.full_version(); info!("Highest known block at #{}", chain_info.best_number); telemetry!( SUBSTRATE_INFO; @@ -207,10 +204,10 @@ impl Service { "best" => ?chain_info.best_hash ); - let network_protocol = ::build_network_protocol(&config)?; + let network_protocol = <<$components>::Factory>::build_network_protocol(&$config)?; let protocol_id = { - let protocol_id_full = match config.chain_spec.protocol_id() { + let protocol_id_full = match $config.chain_spec.protocol_id() { Some(pid) => pid, None => { warn!("Using default protocol ID {:?} because none is configured in the \ @@ -223,8 +220,8 @@ impl Service { }; let network_params = network::config::Params { - roles: config.roles, - network_config: config.network.clone(), + roles: $config.roles, + network_config: $config.network.clone(), chain: client.clone(), finality_proof_provider, finality_proof_request_builder, @@ -242,7 +239,7 @@ impl Service { #[allow(deprecated)] let offchain_storage = client.backend().offchain_storage(); - let offchain_workers = match (config.offchain_worker, offchain_storage) { + let offchain_workers = match ($config.offchain_worker, offchain_storage) { (true, Some(db)) => { Some(Arc::new(offchain::OffchainWorkers::new(client.clone(), db))) }, @@ -260,7 +257,7 @@ impl Service { let offchain = offchain_workers.as_ref().map(Arc::downgrade); let to_spawn_tx_ = to_spawn_tx.clone(); let network_state_info: Arc = network.clone(); - let is_validator = config.roles.is_authority(); + let is_validator = $config.roles.is_authority(); let events = client.import_notification_stream() .map(|v| Ok::<_, ()>(v)).compat() @@ -268,7 +265,7 @@ impl Service { let number = *notification.header.number(); if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) { - Components::RuntimeServices::maintain_transaction_pool( + <$components>::RuntimeServices::maintain_transaction_pool( &BlockId::hash(notification.hash), &*client, &*txpool, @@ -276,7 +273,7 @@ impl Service { } if let (Some(txpool), Some(offchain)) = (txpool.upgrade(), offchain.as_ref().and_then(|o| o.upgrade())) { - let future = Components::RuntimeServices::offchain_workers( + let future = <$components>::RuntimeServices::offchain_workers( &number, &offchain, &txpool, @@ -321,7 +318,7 @@ impl Service { let client_ = client.clone(); let mut sys = System::new(); let self_pid = get_current_pid().ok(); - let (netstat_tx, netstat_rx) = 
mpsc::unbounded::<(NetworkStatus>, NetworkState)>(); + let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus>, NetworkState)>(); network_status_sinks.lock().push(netstat_tx); let tel_task = netstat_rx.for_each(move |(net_status, network_state)| { let info = client_.info(); @@ -374,12 +371,12 @@ impl Service { let (system_rpc_tx, system_rpc_rx) = futures03::channel::mpsc::unbounded(); let gen_handler = || { let system_info = rpc::system::SystemInfo { - chain_name: config.chain_spec.name().into(), - impl_name: config.impl_name.into(), - impl_version: config.impl_version.into(), - properties: config.chain_spec.properties(), + chain_name: $config.chain_spec.name().into(), + impl_name: $config.impl_name.into(), + impl_version: $config.impl_version.into(), + properties: $config.chain_spec.properties(), }; - Components::RuntimeServices::start_rpc( + <$components>::RuntimeServices::start_rpc( client.clone(), system_rpc_tx.clone(), system_info.clone(), @@ -390,7 +387,7 @@ impl Service { ) }; let rpc_handlers = gen_handler(); - let rpc = start_rpc_servers(&config, gen_handler)?; + let rpc = start_rpc_servers(&$config, gen_handler)?; let _ = to_spawn_tx.unbounded_send(Box::new(build_network_future( network_mut, @@ -406,17 +403,17 @@ impl Service { let telemetry_connection_sinks: Arc>>> = Default::default(); // Telemetry - let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let is_authority = config.roles.is_authority(); + let telemetry = $config.telemetry_endpoints.clone().map(|endpoints| { + let is_authority = $config.roles.is_authority(); let network_id = network.local_peer_id().to_base58(); - let name = config.name.clone(); - let impl_name = config.impl_name.to_owned(); + let name = $config.name.clone(); + let impl_name = $config.impl_name.to_owned(); let version = version.clone(); - let chain_name = config.chain_spec.name().to_owned(); + let chain_name = $config.chain_spec.name().to_owned(); let telemetry_connection_sinks_ = telemetry_connection_sinks.clone(); let telemetry = tel::init_telemetry(tel::TelemetryConfig { endpoints, - wasm_external_transport: config.telemetry_external_transport.take(), + wasm_external_transport: $config.telemetry_external_transport.take(), }); let future = telemetry.clone() .map(|ev| Ok::<_, ()>(ev)) @@ -458,7 +455,7 @@ impl Service { to_spawn_tx, to_spawn_rx, to_poll: Vec::new(), - config, + $config, rpc_handlers, _rpc: rpc, _telemetry: telemetry, @@ -466,6 +463,18 @@ impl Service { _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), keystore, }) + }} +} + +impl Service { + /// Creates a new service. + pub fn new( + mut config: FactoryFullConfiguration, + ) -> Result { + new_impl!( + config, + Components + ) } /// Returns a reference to the config passed at initialization. From ec9c66dda17c502b00a2f8b27c8f83c82be46e90 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 8 Aug 2019 15:11:07 +0200 Subject: [PATCH 02/32] Move function calls to macros --- core/service/src/lib.rs | 47 +++++++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 08ed334742738..ba0922e7bbca6 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -156,7 +156,19 @@ pub struct TelemetryOnConnect { } macro_rules! 
new_impl { - ($config:ident, $components:ty) => {{ + ( + $config:ident, + $build_client:expr, + $build_select_chain:expr, + $build_import_queue:expr, + $build_finality_proof_provider:expr, + $generate_intial_session_keys:expr, + $build_network_protocol:expr, + $build_transaction_pool:expr, + $maintain_transaction_pool:expr, + $offchain_workers:expr, + $start_rpc:expr, + ) => {{ let (signal, exit) = exit_future::signal(); // List of asynchronous tasks to spawn. We collect them, then spawn them all at once. @@ -168,11 +180,11 @@ macro_rules! new_impl { let keystore = Keystore::open($config.keystore_path.clone(), $config.keystore_password.clone())?; - let (client, on_demand) = <$components>::build_client(&$config, executor, Some(keystore.clone()))?; - let select_chain = <$components>::build_select_chain(&mut $config, client.clone())?; + let (client, on_demand) = $build_client(&$config, executor, Some(keystore.clone()))?; + let select_chain = $build_select_chain(&mut $config, client.clone())?; let transaction_pool = Arc::new( - <$components>::build_transaction_pool($config.transaction_pool.clone(), client.clone())? + $build_transaction_pool($config.transaction_pool.clone(), client.clone())? ); let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { imports_external_transactions: !$config.roles.is_light(), @@ -180,17 +192,17 @@ macro_rules! new_impl { client: client.clone(), }); - let (import_queue, finality_proof_request_builder) = <$components>::build_import_queue( + let (import_queue, finality_proof_request_builder) = $build_import_queue( &mut $config, client.clone(), select_chain.clone(), Some(transaction_pool.clone()), )?; let import_queue = Box::new(import_queue); - let finality_proof_provider = <$components>::build_finality_proof_provider(client.clone())?; + let finality_proof_provider = $build_finality_proof_provider(client.clone())?; let chain_info = client.info().chain; - <$components>::RuntimeServices::generate_initial_session_keys( + $generate_intial_session_keys( client.clone(), $config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), )?; @@ -204,7 +216,7 @@ macro_rules! new_impl { "best" => ?chain_info.best_hash ); - let network_protocol = <<$components>::Factory>::build_network_protocol(&$config)?; + let network_protocol = $build_network_protocol(&$config)?; let protocol_id = { let protocol_id_full = match $config.chain_spec.protocol_id() { @@ -265,7 +277,7 @@ macro_rules! new_impl { let number = *notification.header.number(); if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) { - <$components>::RuntimeServices::maintain_transaction_pool( + $maintain_transaction_pool( &BlockId::hash(notification.hash), &*client, &*txpool, @@ -273,7 +285,7 @@ macro_rules! new_impl { } if let (Some(txpool), Some(offchain)) = (txpool.upgrade(), offchain.as_ref().and_then(|o| o.upgrade())) { - let future = <$components>::RuntimeServices::offchain_workers( + let future = $offchain_workers( &number, &offchain, &txpool, @@ -318,7 +330,7 @@ macro_rules! new_impl { let client_ = client.clone(); let mut sys = System::new(); let self_pid = get_current_pid().ok(); - let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus>, NetworkState)>(); + let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); network_status_sinks.lock().push(netstat_tx); let tel_task = netstat_rx.for_each(move |(net_status, network_state)| { let info = client_.info(); @@ -376,7 +388,7 @@ macro_rules! 
new_impl { impl_version: $config.impl_version.into(), properties: $config.chain_spec.properties(), }; - <$components>::RuntimeServices::start_rpc( + $start_rpc( client.clone(), system_rpc_tx.clone(), system_info.clone(), @@ -473,7 +485,16 @@ impl Service { ) -> Result { new_impl!( config, - Components + Components::build_client, + Components::build_select_chain, + Components::build_import_queue, + Components::build_finality_proof_provider, + Components::RuntimeServices::generate_initial_session_keys, + ::build_network_protocol, + Components::build_transaction_pool, + Components::RuntimeServices::maintain_transaction_pool, + Components::RuntimeServices::offchain_workers, + Components::RuntimeServices::start_rpc, ) } From 0b28c319a28cc6f0a2508dcb02f27a9a0b87c698 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 8 Aug 2019 16:22:12 +0200 Subject: [PATCH 03/32] Extract offchain_workers and start_rpc in separate function In follow-up commits, we want to be able to directly call maintain_transaction_pool, offchain_workers, and start_rpc, without having to implement the Components trait. This commit is a preliminary step: we extract the code to freestanding functions. --- core/service/src/components.rs | 92 +++++++++++++++++++++++++--------- 1 file changed, 68 insertions(+), 24 deletions(-) diff --git a/core/service/src/components.rs b/core/service/src/components.rs index a9aa2129f2498..0e9fe274dc045 100644 --- a/core/service/src/components.rs +++ b/core/service/src/components.rs @@ -30,7 +30,7 @@ use network::{ use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool}; use sr_primitives::{ - BuildStorage, traits::{Block as BlockT, Header as HeaderT, ProvideRuntimeApi}, generic::BlockId + BuildStorage, traits::{Block as BlockT, Header as HeaderT, NumberFor, ProvideRuntimeApi}, generic::BlockId }; use crate::config::Configuration; use primitives::{Blake2Hasher, H256, traits::BareCryptoStorePtr}; @@ -197,28 +197,48 @@ impl StartRpc for C where rpc_extensions: impl rpc::RpcExtension, keystore: KeyStorePtr, ) -> RpcHandler { - use rpc::{chain, state, author, system}; - let subscriptions = rpc::Subscriptions::new(task_executor.clone()); - let chain = chain::Chain::new(client.clone(), subscriptions.clone()); - let state = state::State::new(client.clone(), subscriptions.clone()); - let author = rpc::author::Author::new( - client, - transaction_pool, - subscriptions, - keystore, - ); - let system = system::System::new(rpc_system_info, system_send_back); - - rpc_servers::rpc_handler(( - state::StateApi::to_delegate(state), - chain::ChainApi::to_delegate(chain), - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions, - )) + start_rpc(client, system_send_back, rpc_system_info, task_executor, transaction_pool, rpc_extensions, keystore) } } +pub(crate) fn start_rpc( + client: Arc>, + system_send_back: mpsc::UnboundedSender>, + rpc_system_info: SystemInfo, + task_executor: TaskExecutor, + transaction_pool: Arc>, + rpc_extensions: impl rpc::RpcExtension, + keystore: KeyStorePtr, +) -> RpcHandler +where + Block: BlockT::Out>, + Backend: client::backend::Backend + 'static, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: runtime_api::Metadata + session::SessionKeys, + Api: Send + Sync + 'static, + Executor: client::CallExecutor + Send + Sync + Clone + 'static, + PoolApi: txpool::ChainApi + 'static { + use rpc::{chain, state, author, system}; + let 
subscriptions = rpc::Subscriptions::new(task_executor.clone()); + let chain = chain::Chain::new(client.clone(), subscriptions.clone()); + let state = state::State::new(client.clone(), subscriptions.clone()); + let author = rpc::author::Author::new( + client, + transaction_pool, + subscriptions, + keystore, + ); + let system = system::System::new(rpc_system_info, system_send_back); + + rpc_servers::rpc_handler(( + state::StateApi::to_delegate(state), + chain::ChainApi::to_delegate(chain), + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions, + )) +} + /// Something that can maintain transaction pool on every imported block. pub trait MaintainTransactionPool { fn maintain_transaction_pool( @@ -228,7 +248,7 @@ pub trait MaintainTransactionPool { ) -> error::Result<()>; } -fn maintain_transaction_pool( +pub(crate) fn maintain_transaction_pool( id: &BlockId, client: &Client, transaction_pool: &TransactionPool, @@ -296,12 +316,36 @@ impl OffchainWorker for C where network_state: &Arc, is_validator: bool, ) -> error::Result + Send>> { - let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) - .map(|()| Ok(())); - Ok(Box::new(Compat::new(future))) + offchain_workers(number, offchain, pool, network_state, is_validator) } } +pub(crate) fn offchain_workers( + number: &NumberFor, + offchain: &offchain::OffchainWorkers< + Client, + >::OffchainStorage, + Block + >, + pool: &Arc>, + network_state: &Arc, + is_validator: bool, +) -> error::Result + Send>> +where + Block: BlockT::Out>, + Backend: client::backend::Backend + 'static, + Api: 'static, + >::OffchainStorage: 'static, + Client: ProvideRuntimeApi + Send + Sync, + as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi, + Executor: client::CallExecutor + 'static, + PoolApi: txpool::ChainApi + 'static, +{ + let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) + .map(|()| Ok(())); + Ok(Box::new(Compat::new(future))) +} + /// The super trait that combines all required traits a `Service` needs to implement. pub trait ServiceTrait: Deref> From 7600276c7fc8c84418b54cd23d06da39186c1afe Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 8 Aug 2019 17:08:47 +0200 Subject: [PATCH 04/32] Introduce an AbstractService trait --- core/cli/src/informant.rs | 9 +- core/service/src/lib.rs | 227 +++++++++++++++++++++++++++-------- node-template/src/cli.rs | 12 +- node-template/src/service.rs | 2 +- node/cli/src/lib.rs | 12 +- node/cli/src/service.rs | 2 +- 6 files changed, 195 insertions(+), 69 deletions(-) diff --git a/core/cli/src/informant.rs b/core/cli/src/informant.rs index b5a2f03d79546..d8f0471a89f75 100644 --- a/core/cli/src/informant.rs +++ b/core/cli/src/informant.rs @@ -21,22 +21,19 @@ use futures::{Future, Stream}; use futures03::{StreamExt as _, TryStreamExt as _}; use log::{info, warn}; use sr_primitives::{generic::BlockId, traits::Header}; -use service::{Service, Components}; +use service::AbstractService; use tokio::runtime::TaskExecutor; mod display; /// Spawn informant on the event loop #[deprecated(note = "Please use informant::build instead, and then create the task manually")] -pub fn start(service: &Service, exit: ::exit_future::Exit, handle: TaskExecutor) where - C: Components, -{ +pub fn start(service: &impl AbstractService, exit: ::exit_future::Exit, handle: TaskExecutor) { handle.spawn(exit.until(build(service)).map(|_| ())); } /// Creates an informant in the form of a `Future` that must be polled regularly. 
-pub fn build(service: &Service) -> impl Future -where C: Components { +pub fn build(service: &impl AbstractService) -> impl Future { let client = service.client(); let mut display = display::InformantDisplay::new(); diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index ba0922e7bbca6..a6d80df7d62ed 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -29,6 +29,7 @@ use std::io; use std::net::SocketAddr; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; +use std::ops::DerefMut; use std::time::{Duration, Instant}; use futures::sync::mpsc; use parking_lot::Mutex; @@ -41,6 +42,7 @@ use keystore::Store as Keystore; use network::{NetworkState, NetworkStateInfo}; use log::{log, info, warn, debug, error, Level}; use codec::{Encode, Decode}; +use primitives::{Blake2Hasher, H256}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{Header, NumberFor, SaturatedConversion}; use substrate_executor::NativeExecutor; @@ -497,47 +499,122 @@ impl Service { Components::RuntimeServices::start_rpc, ) } +} + +/// Abstraction over a Substrate service. +pub trait AbstractService: 'static + Future + + Executor + Send>> + Send { + /// Type of block of this chain. + type Block: BlockT; + /// Backend storage for the client. + type Backend: 'static + client::backend::Backend; + /// How to execute calls towards the runtime. + type Executor: 'static + client::CallExecutor + Send + Sync + Clone; + /// API that the runtime provides. + type RuntimeApi: Send + Sync; + /// Configuration struct of the service. + type Config; + /// Chain selection algorithm. + type SelectChain; + /// API of the transaction pool. + type TransactionPoolApi: ChainApi; + /// Network service. + type NetworkService; + + /// Get event stream for telemetry connection established events. + fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications; + + /// Returns the configuration passed on construction. + fn config(&self) -> &Self::Config; + + /// Returns the configuration passed on construction. + fn config_mut(&mut self) -> &mut Self::Config; + + /// return a shared instance of Telemetry (if enabled) + fn telemetry(&self) -> Option; + + /// Spawns a task in the background that runs the future passed as parameter. + fn spawn_task(&self, task: impl Future + Send + 'static); + + /// Spawns a task in the background that runs the future passed as + /// parameter. The given task is considered essential, i.e. if it errors we + /// trigger a service exit. + fn spawn_essential_task(&self, task: impl Future + Send + 'static); + + /// Returns a handle for spawning tasks. + fn spawn_task_handle(&self) -> SpawnTaskHandle; - /// Returns a reference to the config passed at initialization. - pub fn config(&self) -> &FactoryFullConfiguration { + /// Returns the keystore that stores keys. + fn keystore(&self) -> keystore::KeyStorePtr; + + /// Starts an RPC query. + /// + /// The query is passed as a string and must be a JSON text similar to what an HTTP client + /// would for example send. + /// + /// Returns a `Future` that contains the optional response. + /// + /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to + /// send back spontaneous events. + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send>; + + /// Get shared client instance. + fn client(&self) -> Arc>; + + /// Get clone of select chain. + fn select_chain(&self) -> Option; + + /// Get shared network instance. 
+ fn network(&self) -> Arc; + + /// Returns a receiver that periodically receives a status of the network. + fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)>; + + /// Get shared transaction pool instance. + fn transaction_pool(&self) -> Arc>; + + /// Get a handle to a future that will resolve on exit. + fn on_exit(&self) -> ::exit_future::Exit; +} + +impl AbstractService for Service +where FactoryFullConfiguration: Send { + type Block = ComponentBlock; + type Backend = ::Backend; + type Executor = ::Executor; + type RuntimeApi = ::RuntimeApi; + type Config = FactoryFullConfiguration; + type SelectChain = ::SelectChain; + type TransactionPoolApi = Components::TransactionPoolApi; + type NetworkService = components::NetworkService; + + fn config(&self) -> &Self::Config { &self.config } - /// Returns a reference to the config passed at initialization. - /// - /// > **Note**: This method is currently necessary because we extract some elements from the - /// > configuration at the end of the service initialization. It is intended to be - /// > removed. - pub fn config_mut(&mut self) -> &mut FactoryFullConfiguration { + fn config_mut(&mut self) -> &mut Self::Config { &mut self.config } - /// Get event stream for telemetry connection established events. - pub fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { + fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { let (sink, stream) = mpsc::unbounded(); self._telemetry_on_connect_sinks.lock().push(sink); stream } - /// Return a shared instance of Telemetry (if enabled) - pub fn telemetry(&self) -> Option { + fn telemetry(&self) -> Option { self._telemetry.as_ref().map(|t| t.clone()) } - /// Returns the keystore instance. - pub fn keystore(&self) -> keystore::KeyStorePtr { + fn keystore(&self) -> keystore::KeyStorePtr { self.keystore.clone() } - /// Spawns a task in the background that runs the future passed as parameter. - pub fn spawn_task(&self, task: impl Future + Send + 'static) { + fn spawn_task(&self, task: impl Future + Send + 'static) { let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); } - /// Spawns a task in the background that runs the future passed as - /// parameter. The given task is considered essential, i.e. if it errors we - /// trigger a service exit. - pub fn spawn_essential_task(&self, task: impl Future + Send + 'static) { + fn spawn_essential_task(&self, task: impl Future + Send + 'static) { let essential_failed = self.essential_failed.clone(); let essential_task = Box::new(task.map_err(move |_| { error!("Essential task failed. Shutting down service."); @@ -547,57 +624,39 @@ impl Service { let _ = self.to_spawn_tx.unbounded_send(essential_task); } - /// Returns a handle for spawning tasks. - pub fn spawn_task_handle(&self) -> SpawnTaskHandle { + fn spawn_task_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { sender: self.to_spawn_tx.clone(), } } - /// Starts an RPC query. - /// - /// The query is passed as a string and must be a JSON text similar to what an HTTP client - /// would for example send. - /// - /// Returns a `Future` that contains the optional response. - /// - /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to - /// send back spontaneous events. 
- pub fn rpc_query(&self, mem: &RpcSession, request: &str) - -> impl Future, Error = ()> - { - self.rpc_handlers.handle_request(request, mem.metadata.clone()) + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { + Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) } - /// Get shared client instance. - pub fn client(&self) -> Arc> { + fn client(&self) -> Arc> { self.client.clone() } - /// Get clone of select chain. - pub fn select_chain(&self) -> Option<::SelectChain> { + fn select_chain(&self) -> Option { self.select_chain.clone() } - /// Get shared network instance. - pub fn network(&self) -> Arc> { + fn network(&self) -> Arc { self.network.clone() } - /// Returns a receiver that periodically receives a status of the network. - pub fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus>, NetworkState)> { + fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { let (sink, stream) = mpsc::unbounded(); self.network_status_sinks.lock().push(sink); stream } - /// Get shared transaction pool instance. - pub fn transaction_pool(&self) -> Arc> { + fn transaction_pool(&self) -> Arc> { self.transaction_pool.clone() } - /// Get a handle to a future that will resolve on exit. - pub fn on_exit(&self) -> ::exit_future::Exit { + fn on_exit(&self) -> ::exit_future::Exit { self.exit.clone() } } @@ -649,6 +708,80 @@ impl Executor + Send>> } } +impl AbstractService for T +where T: 'static + Deref + DerefMut + Future + Send + + Executor + Send>>, + T::Target: AbstractService { + type Block = <::Target as AbstractService>::Block; + type Backend = <::Target as AbstractService>::Backend; + type Executor = <::Target as AbstractService>::Executor; + type RuntimeApi = <::Target as AbstractService>::RuntimeApi; + type Config = <::Target as AbstractService>::Config; + type SelectChain = <::Target as AbstractService>::SelectChain; + type TransactionPoolApi = <::Target as AbstractService>::TransactionPoolApi; + type NetworkService = <::Target as AbstractService>::NetworkService; + + fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { + (**self).telemetry_on_connect_stream() + } + + fn config(&self) -> &Self::Config { + (**self).config() + } + + fn config_mut(&mut self) -> &mut Self::Config { + (&mut **self).config_mut() + } + + fn telemetry(&self) -> Option { + (**self).telemetry() + } + + fn spawn_task(&self, task: impl Future + Send + 'static) { + (**self).spawn_task(task) + } + + fn spawn_essential_task(&self, task: impl Future + Send + 'static) { + (**self).spawn_essential_task(task) + } + + fn spawn_task_handle(&self) -> SpawnTaskHandle { + (**self).spawn_task_handle() + } + + fn keystore(&self) -> keystore::KeyStorePtr { + (**self).keystore() + } + + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { + (**self).rpc_query(mem, request) + } + + fn client(&self) -> Arc> { + (**self).client() + } + + fn select_chain(&self) -> Option { + (**self).select_chain() + } + + fn network(&self) -> Arc { + (**self).network() + } + + fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { + (**self).network_status() + } + + fn transaction_pool(&self) -> Arc> { + (**self).transaction_pool() + } + + fn on_exit(&self) -> ::exit_future::Exit { + (**self).on_exit() + } +} + /// Builds a never-ending future that continuously polls the network. /// /// The `status_sink` contain a list of senders to send a periodic network status to. 
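The blanket impl above forwards every trait method through `(**self)`. As a rough, self-contained illustration of that forwarding idiom (the `Greet`, `Inner`, and `Wrapper` names are illustrative, not from the patch):

    use std::ops::Deref;

    trait Greet {
        fn hello(&self) -> &'static str;
    }

    struct Inner;
    impl Greet for Inner {
        fn hello(&self) -> &'static str { "hello" }
    }

    struct Wrapper(Inner);
    impl Deref for Wrapper {
        type Target = Inner;
        fn deref(&self) -> &Inner { &self.0 }
    }

    // `self` is `&Wrapper`, `*self` is `Wrapper`, and `**self` goes through
    // `Deref` to reach `Inner`; the method call then auto-borrows the result.
    // The blanket impl in the patch adds a `DerefMut` bound on top of this so
    // that `config_mut` can forward through `(&mut **self)`.
    impl Greet for Wrapper {
        fn hello(&self) -> &'static str { (**self).hello() }
    }

In the patch this is what lets any type that merely derefs to a service (plus the `Future` and `Executor` bounds) be used wherever an `AbstractService` is expected.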
diff --git a/node-template/src/cli.rs b/node-template/src/cli.rs index 4d672491c18e6..c9337bb043f78 100644 --- a/node-template/src/cli.rs +++ b/node-template/src/cli.rs @@ -4,9 +4,8 @@ use std::cell::RefCell; use tokio::runtime::Runtime; pub use substrate_cli::{VersionInfo, IntoExit, error}; use substrate_cli::{informant, parse_and_prepare, ParseAndPrepare, NoCustom}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; +use substrate_service::{AbstractService, ServiceFactory, Roles as ServiceRoles}; use crate::chain_spec; -use std::ops::Deref; use log::info; /// Parse command line arguments into service configuration. @@ -55,14 +54,13 @@ fn load_spec(id: &str) -> Result, String> { }) } -fn run_until_exit( +fn run_until_exit( mut runtime: Runtime, service: T, e: E, -) -> error::Result<()> where - T: Deref>, - T: Future + Send + 'static, - C: substrate_service::Components, +) -> error::Result<()> +where + T: AbstractService, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 2baa0c7631373..57414e36fb9ec 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -12,7 +12,7 @@ use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi, WASM use substrate_service::{ FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, + error::{Error as ServiceError}, AbstractService, }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 4e3cfa7f01092..9f789fc355137 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -27,8 +27,7 @@ mod factory_impl; use tokio::prelude::Future; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; pub use cli::{VersionInfo, IntoExit, NoCustom, SharedParams, ExecutionStrategyParam}; -use substrate_service::{ServiceFactory, Roles as ServiceRoles}; -use std::ops::Deref; +use substrate_service::{AbstractService, ServiceFactory, Roles as ServiceRoles}; use log::info; use structopt::{StructOpt, clap::App}; use cli::{AugmentClap, GetLogFilter, parse_and_prepare, ParseAndPrepare}; @@ -219,14 +218,13 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul } } -fn run_until_exit( +fn run_until_exit( mut runtime: Runtime, service: T, e: E, -) -> error::Result<()> where - T: Deref>, - T: Future + Send + 'static, - C: substrate_service::Components, +) -> error::Result<()> +where + T: AbstractService, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 7022d12d69a0f..0c231e7f2b65d 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -37,7 +37,7 @@ use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; use substrate_service::construct_service_factory; -use substrate_service::TelemetryOnConnect; +use substrate_service::{TelemetryOnConnect, AbstractService}; construct_simple_protocol! { /// Demo protocol attachment for substrate. 
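The `node-template` and `node/cli` changes above replace the old `Deref`-into-`Service` bound plus `Components` parameter with a single `T: AbstractService` bound. As a hedged sketch of what that buys downstream code (the helper below is illustrative and not part of the patch; it only uses methods and imports that appear elsewhere in this series):

    use log::info;
    use substrate_service::AbstractService;

    /// Illustrative helper: generic over any service, with no mention of `Components`.
    fn log_chain_info(service: &impl AbstractService) {
        // `client()` is a trait method, so the helper does not need to name the
        // concrete backend, executor, or runtime API types.
        let info = service.client().info();
        info!("Best block: #{}", info.chain.best_number);
    }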
From 4af1fc779c7660844e5b68a47c24a88c98a6dd68 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 9 Aug 2019 10:55:15 +0200 Subject: [PATCH 05/32] Introduce NewService as an implementation detail of Service --- core/service/src/lib.rs | 95 ++++++++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 38 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index a6d80df7d62ed..d417f5d476deb 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -26,6 +26,7 @@ pub mod chain_ops; pub mod error; use std::io; +use std::marker::PhantomData; use std::net::SocketAddr; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; @@ -77,14 +78,32 @@ const DEFAULT_PROTOCOL_ID: &str = "sup"; /// Substrate service. pub struct Service { - client: Arc>, - select_chain: Option, - network: Arc>, + inner: NewService< + FactoryFullConfiguration, + ComponentBlock, + ComponentClient, + Components::SelectChain, + NetworkStatus>, + NetworkService, + TransactionPool, + offchain::OffchainWorkers< + ComponentClient, + ComponentOffchainStorage, + ComponentBlock + >, + >, +} + +/// Substrate service. +pub struct NewService { + client: Arc, + select_chain: Option, + network: Arc, /// Sinks to propagate network status updates. network_status_sinks: Arc>, NetworkState + TNetStatus, NetworkState )>>>>, - transaction_pool: Arc>, + transaction_pool: Arc, /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. exit: exit_future::Exit, @@ -102,17 +121,14 @@ pub struct Service { /// The elements must then be polled manually. to_poll: Vec + Send>>, /// Configuration of this Service - config: FactoryFullConfiguration, - rpc_handlers: components::RpcHandler, + config: TCfg, + rpc_handlers: rpc_servers::RpcHandler, _rpc: Box, _telemetry: Option, _telemetry_on_connect_sinks: Arc>>>, - _offchain_workers: Option, - ComponentOffchainStorage, - ComponentBlock> - >>, + _offchain_workers: Option>, keystore: keystore::KeyStorePtr, + marker: PhantomData, } /// Creates bare client without any networking. @@ -457,7 +473,7 @@ macro_rules! new_impl { telemetry }); - Ok(Service { + Ok(NewService { client, network, network_status_sinks, @@ -476,6 +492,7 @@ macro_rules! 
new_impl { _offchain_workers: offchain_workers, _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), keystore, + marker: PhantomData, }) }} } @@ -485,7 +502,7 @@ impl Service { pub fn new( mut config: FactoryFullConfiguration, ) -> Result { - new_impl!( + let inner = new_impl!( config, Components::build_client, Components::build_select_chain, @@ -497,7 +514,9 @@ impl Service { Components::RuntimeServices::maintain_transaction_pool, Components::RuntimeServices::offchain_workers, Components::RuntimeServices::start_rpc, - ) + ); + + inner.map(|inner| Service { inner }) } } @@ -589,75 +608,75 @@ where FactoryFullConfiguration: Send { type NetworkService = components::NetworkService; fn config(&self) -> &Self::Config { - &self.config + &self.inner.config } fn config_mut(&mut self) -> &mut Self::Config { - &mut self.config + &mut self.inner.config } fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { let (sink, stream) = mpsc::unbounded(); - self._telemetry_on_connect_sinks.lock().push(sink); + self.inner._telemetry_on_connect_sinks.lock().push(sink); stream } fn telemetry(&self) -> Option { - self._telemetry.as_ref().map(|t| t.clone()) + self.inner._telemetry.as_ref().map(|t| t.clone()) } fn keystore(&self) -> keystore::KeyStorePtr { - self.keystore.clone() + self.inner.keystore.clone() } fn spawn_task(&self, task: impl Future + Send + 'static) { - let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); + let _ = self.inner.to_spawn_tx.unbounded_send(Box::new(task)); } fn spawn_essential_task(&self, task: impl Future + Send + 'static) { - let essential_failed = self.essential_failed.clone(); + let essential_failed = self.inner.essential_failed.clone(); let essential_task = Box::new(task.map_err(move |_| { error!("Essential task failed. 
Shutting down service."); essential_failed.store(true, Ordering::Relaxed); })); - let _ = self.to_spawn_tx.unbounded_send(essential_task); + let _ = self.inner.to_spawn_tx.unbounded_send(essential_task); } fn spawn_task_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { - sender: self.to_spawn_tx.clone(), + sender: self.inner.to_spawn_tx.clone(), } } fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { - Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) + Box::new(self.inner.rpc_handlers.handle_request(request, mem.metadata.clone())) } fn client(&self) -> Arc> { - self.client.clone() + self.inner.client.clone() } fn select_chain(&self) -> Option { - self.select_chain.clone() + self.inner.select_chain.clone() } fn network(&self) -> Arc { - self.network.clone() + self.inner.network.clone() } fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { let (sink, stream) = mpsc::unbounded(); - self.network_status_sinks.lock().push(sink); + self.inner.network_status_sinks.lock().push(sink); stream } fn transaction_pool(&self) -> Arc> { - self.transaction_pool.clone() + self.inner.transaction_pool.clone() } fn on_exit(&self) -> ::exit_future::Exit { - self.exit.clone() + self.inner.exit.clone() } } @@ -666,11 +685,11 @@ impl Future for Service where Components: components::Co type Error = Error; fn poll(&mut self) -> Poll { - if self.essential_failed.load(Ordering::Relaxed) { + if self.inner.essential_failed.load(Ordering::Relaxed) { return Err(Error::Other("Essential task failed.".into())); } - while let Ok(Async::Ready(Some(task_to_spawn))) = self.to_spawn_rx.poll() { + while let Ok(Async::Ready(Some(task_to_spawn))) = self.inner.to_spawn_rx.poll() { let executor = tokio_executor::DefaultExecutor::current(); if let Err(err) = executor.execute(task_to_spawn) { debug!( @@ -678,13 +697,13 @@ impl Future for Service where Components: components::Co "Failed to spawn background task: {:?}; falling back to manual polling", err ); - self.to_poll.push(err.into_future()); + self.inner.to_poll.push(err.into_future()); } } // Polling all the `to_poll` futures. - while let Some(pos) = self.to_poll.iter_mut().position(|t| t.poll().map(|t| t.is_ready()).unwrap_or(true)) { - self.to_poll.remove(pos); + while let Some(pos) = self.inner.to_poll.iter_mut().position(|t| t.poll().map(|t| t.is_ready()).unwrap_or(true)) { + self.inner.to_poll.remove(pos); } // The service future never ends. @@ -699,7 +718,7 @@ impl Executor + Send>> &self, future: Box + Send> ) -> Result<(), futures::future::ExecuteError + Send>>> { - if let Err(err) = self.to_spawn_tx.unbounded_send(future) { + if let Err(err) = self.inner.to_spawn_tx.unbounded_send(future) { let kind = futures::future::ExecuteErrorKind::Shutdown; Err(futures::future::ExecuteError::new(kind, err.into_inner())) } else { @@ -912,7 +931,7 @@ pub struct NetworkStatus { impl Drop for Service where Components: components::Components { fn drop(&mut self) { debug!(target: "service", "Substrate service shutdown"); - if let Some(signal) = self.signal.take() { + if let Some(signal) = self.inner.signal.take() { signal.fire(); } } From 8ef6cc39f06050d058b664cd5fd756044bc08edb Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 9 Aug 2019 11:09:24 +0200 Subject: [PATCH 06/32] Implement traits on NewService instead Instead of implementing AbstractService, Future, and Executor on Service, we implement them on NewService instead. 
The implementations of AbstractService, Future, and Executor on Service still exist, but they just wrap to the respective implementations for NewService. --- core/service/src/lib.rs | 136 +++++++++++++++++++++++++++++----------- 1 file changed, 98 insertions(+), 38 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index d417f5d476deb..5c2f8aa93eaa2 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -35,7 +35,7 @@ use std::time::{Duration, Instant}; use futures::sync::mpsc; use parking_lot::Mutex; -use client::{BlockchainEvents, backend::Backend, runtime_api::BlockT}; +use client::{BlockchainEvents, backend::Backend, runtime_api::BlockT, Client}; use exit_future::Signal; use futures::prelude::*; use futures03::stream::{StreamExt as _, TryStreamExt as _}; @@ -596,100 +596,140 @@ pub trait AbstractService: 'static + Future + fn on_exit(&self) -> ::exit_future::Exit; } -impl AbstractService for Service +impl Deref for Service where FactoryFullConfiguration: Send { - type Block = ComponentBlock; - type Backend = ::Backend; - type Executor = ::Executor; - type RuntimeApi = ::RuntimeApi; - type Config = FactoryFullConfiguration; - type SelectChain = ::SelectChain; - type TransactionPoolApi = Components::TransactionPoolApi; - type NetworkService = components::NetworkService; + type Target = NewService< + FactoryFullConfiguration, + ComponentBlock, + ComponentClient, + Components::SelectChain, + NetworkStatus>, + NetworkService, + TransactionPool, + offchain::OffchainWorkers< + ComponentClient, + ComponentOffchainStorage, + ComponentBlock + >, + >; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for Service +where FactoryFullConfiguration: Send { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl AbstractService for + NewService, TSc, NetworkStatus, TNet, TransactionPool, TOc> +where TCfg: 'static + Send, + TBl: BlockT, + TBackend: 'static + client::backend::Backend, + TExec: 'static + client::CallExecutor + Send + Sync + Clone, + TRtApi: 'static + Send + Sync, + TSc: 'static + Clone + Send, + TNet: 'static + Send + Sync, + TExPoolApi: 'static + ChainApi, + TOc: 'static + Send + Sync, +{ + type Block = TBl; + type Backend = TBackend; + type Executor = TExec; + type RuntimeApi = TRtApi; + type Config = TCfg; + type SelectChain = TSc; + type TransactionPoolApi = TExPoolApi; + type NetworkService = TNet; fn config(&self) -> &Self::Config { - &self.inner.config + &self.config } fn config_mut(&mut self) -> &mut Self::Config { - &mut self.inner.config + &mut self.config } fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { let (sink, stream) = mpsc::unbounded(); - self.inner._telemetry_on_connect_sinks.lock().push(sink); + self._telemetry_on_connect_sinks.lock().push(sink); stream } fn telemetry(&self) -> Option { - self.inner._telemetry.as_ref().map(|t| t.clone()) + self._telemetry.as_ref().map(|t| t.clone()) } fn keystore(&self) -> keystore::KeyStorePtr { - self.inner.keystore.clone() + self.keystore.clone() } fn spawn_task(&self, task: impl Future + Send + 'static) { - let _ = self.inner.to_spawn_tx.unbounded_send(Box::new(task)); + let _ = self.to_spawn_tx.unbounded_send(Box::new(task)); } fn spawn_essential_task(&self, task: impl Future + Send + 'static) { - let essential_failed = self.inner.essential_failed.clone(); + let essential_failed = self.essential_failed.clone(); let essential_task = Box::new(task.map_err(move |_| { error!("Essential task failed. 
Shutting down service."); essential_failed.store(true, Ordering::Relaxed); })); - let _ = self.inner.to_spawn_tx.unbounded_send(essential_task); + let _ = self.to_spawn_tx.unbounded_send(essential_task); } fn spawn_task_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { - sender: self.inner.to_spawn_tx.clone(), + sender: self.to_spawn_tx.clone(), } } fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { - Box::new(self.inner.rpc_handlers.handle_request(request, mem.metadata.clone())) + Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) } fn client(&self) -> Arc> { - self.inner.client.clone() + self.client.clone() } fn select_chain(&self) -> Option { - self.inner.select_chain.clone() + self.select_chain.clone() } fn network(&self) -> Arc { - self.inner.network.clone() + self.network.clone() } fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { let (sink, stream) = mpsc::unbounded(); - self.inner.network_status_sinks.lock().push(sink); + self.network_status_sinks.lock().push(sink); stream } fn transaction_pool(&self) -> Arc> { - self.inner.transaction_pool.clone() + self.transaction_pool.clone() } fn on_exit(&self) -> ::exit_future::Exit { - self.inner.exit.clone() + self.exit.clone() } } -impl Future for Service where Components: components::Components { +impl Future for +NewService { type Item = (); type Error = Error; fn poll(&mut self) -> Poll { - if self.inner.essential_failed.load(Ordering::Relaxed) { + if self.essential_failed.load(Ordering::Relaxed) { return Err(Error::Other("Essential task failed.".into())); } - while let Ok(Async::Ready(Some(task_to_spawn))) = self.inner.to_spawn_rx.poll() { + while let Ok(Async::Ready(Some(task_to_spawn))) = self.to_spawn_rx.poll() { let executor = tokio_executor::DefaultExecutor::current(); if let Err(err) = executor.execute(task_to_spawn) { debug!( @@ -697,13 +737,13 @@ impl Future for Service where Components: components::Co "Failed to spawn background task: {:?}; falling back to manual polling", err ); - self.inner.to_poll.push(err.into_future()); + self.to_poll.push(err.into_future()); } } // Polling all the `to_poll` futures. - while let Some(pos) = self.inner.to_poll.iter_mut().position(|t| t.poll().map(|t| t.is_ready()).unwrap_or(true)) { - self.inner.to_poll.remove(pos); + while let Some(pos) = self.to_poll.iter_mut().position(|t| t.poll().map(|t| t.is_ready()).unwrap_or(true)) { + self.to_poll.remove(pos); } // The service future never ends. 
@@ -711,14 +751,22 @@ impl Future for Service where Components: components::Co } } -impl Executor + Send>> - for Service where Components: components::Components -{ +impl Future for Service where Components: components::Components { + type Item = (); + type Error = Error; + + fn poll(&mut self) -> Poll { + self.inner.poll() + } +} + +impl Executor + Send>> for +NewService { fn execute( &self, future: Box + Send> ) -> Result<(), futures::future::ExecuteError + Send>>> { - if let Err(err) = self.inner.to_spawn_tx.unbounded_send(future) { + if let Err(err) = self.to_spawn_tx.unbounded_send(future) { let kind = futures::future::ExecuteErrorKind::Shutdown; Err(futures::future::ExecuteError::new(kind, err.into_inner())) } else { @@ -727,6 +775,17 @@ impl Executor + Send>> } } +impl Executor + Send>> + for Service where Components: components::Components +{ + fn execute( + &self, + future: Box + Send> + ) -> Result<(), futures::future::ExecuteError + Send>>> { + self.inner.execute(future) + } +} + impl AbstractService for T where T: 'static + Deref + DerefMut + Future + Send + Executor + Send>>, @@ -928,10 +987,11 @@ pub struct NetworkStatus { pub average_upload_per_sec: u64, } -impl Drop for Service where Components: components::Components { +impl Drop for +NewService { fn drop(&mut self) { debug!(target: "service", "Substrate service shutdown"); - if let Some(signal) = self.inner.signal.take() { + if let Some(signal) = self.signal.take() { signal.fire(); } } From d66692d1cfd93089ee170b222030b7f0f4f2c7d2 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 9 Aug 2019 11:26:50 +0200 Subject: [PATCH 07/32] Move components creation back to macro invocation Instead of having multiple $build_ parameters passed to the macro, let's group them all into one. This change is necessary for the follow-up commits, because we are going to call new_impl! only after all the components have already been built. --- core/service/src/lib.rs | 102 +++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 44 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 5c2f8aa93eaa2..d5bf798feb47d 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -176,13 +176,7 @@ pub struct TelemetryOnConnect { macro_rules! new_impl { ( $config:ident, - $build_client:expr, - $build_select_chain:expr, - $build_import_queue:expr, - $build_finality_proof_provider:expr, - $generate_intial_session_keys:expr, - $build_network_protocol:expr, - $build_transaction_pool:expr, + $build_components:expr, $maintain_transaction_pool:expr, $offchain_workers:expr, $start_rpc:expr, @@ -193,38 +187,21 @@ macro_rules! new_impl { let (to_spawn_tx, to_spawn_rx) = mpsc::unbounded:: + Send>>(); - // Create client - let executor = NativeExecutor::new($config.default_heap_pages); - - let keystore = Keystore::open($config.keystore_path.clone(), $config.keystore_password.clone())?; - - let (client, on_demand) = $build_client(&$config, executor, Some(keystore.clone()))?; - let select_chain = $build_select_chain(&mut $config, client.clone())?; - - let transaction_pool = Arc::new( - $build_transaction_pool($config.transaction_pool.clone(), client.clone())? 
- ); - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !$config.roles.is_light(), - pool: transaction_pool.clone(), - client: client.clone(), - }); - - let (import_queue, finality_proof_request_builder) = $build_import_queue( - &mut $config, - client.clone(), - select_chain.clone(), - Some(transaction_pool.clone()), - )?; + // Create all the components. + let ( + client, + on_demand, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool + ) = $build_components(&mut $config)?; let import_queue = Box::new(import_queue); - let finality_proof_provider = $build_finality_proof_provider(client.clone())?; let chain_info = client.info().chain; - $generate_intial_session_keys( - client.clone(), - $config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; - let version = $config.full_version(); info!("Highest known block at #{}", chain_info.best_number); telemetry!( @@ -234,7 +211,11 @@ macro_rules! new_impl { "best" => ?chain_info.best_hash ); - let network_protocol = $build_network_protocol(&$config)?; + let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { + imports_external_transactions: !$config.roles.is_light(), + pool: transaction_pool.clone(), + client: client.clone(), + }); let protocol_id = { let protocol_id_full = match $config.chain_spec.protocol_id() { @@ -504,13 +485,46 @@ impl Service { ) -> Result { let inner = new_impl!( config, - Components::build_client, - Components::build_select_chain, - Components::build_import_queue, - Components::build_finality_proof_provider, - Components::RuntimeServices::generate_initial_session_keys, - ::build_network_protocol, - Components::build_transaction_pool, + |mut config: &mut FactoryFullConfiguration| -> Result<_, error::Error> { + // Create client + let executor = NativeExecutor::new(config.default_heap_pages); + + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let (client, on_demand) = Components::build_client(&config, executor, Some(keystore.clone()))?; + let select_chain = Components::build_select_chain(&mut config, client.clone())?; + + let transaction_pool = Arc::new( + Components::build_transaction_pool(config.transaction_pool.clone(), client.clone())? + ); + + let (import_queue, finality_proof_request_builder) = Components::build_import_queue( + &mut config, + client.clone(), + select_chain.clone(), + Some(transaction_pool.clone()), + )?; + let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?; + + Components::RuntimeServices::generate_initial_session_keys( + client.clone(), + config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), + )?; + + let network_protocol = ::build_network_protocol(&config)?; + + Ok(( + client, + on_demand, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool + )) + }, Components::RuntimeServices::maintain_transaction_pool, Components::RuntimeServices::offchain_workers, Components::RuntimeServices::start_rpc, From e169901fb23402a8ea31d8e79085f27b1cbd0df5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 9 Aug 2019 11:46:12 +0200 Subject: [PATCH 08/32] Add a $block parameter to new_impl This makes it possible to be explicit as what the generic parameter of the NewServiceis, without relying on type inference. 
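A minimal sketch of the inference issue the commit message refers to (the `Handle` type below is illustrative): when a generic parameter appears only inside a `PhantomData` field, nothing else at the construction site determines it, so it would otherwise have to be inferred from how the value is used later. Writing the turbofish pins it explicitly, which is what `PhantomData::<$block>` now does inside the macro.

    use std::marker::PhantomData;

    struct Handle<TBl> {
        marker: PhantomData<TBl>,
    }

    fn main() {
        // With a bare `PhantomData` this would not compile: nothing else here
        // determines `TBl`. The turbofish pins the parameter at the construction
        // site instead of relying on inference from later use.
        let handle = Handle { marker: PhantomData::<u64> };
        let _ = handle;
    }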
--- core/service/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index d5bf798feb47d..f48e6c2e484b4 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -175,6 +175,7 @@ pub struct TelemetryOnConnect { macro_rules! new_impl { ( + $block:ty, $config:ident, $build_components:expr, $maintain_transaction_pool:expr, @@ -473,7 +474,7 @@ macro_rules! new_impl { _offchain_workers: offchain_workers, _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), keystore, - marker: PhantomData, + marker: PhantomData::<$block>, }) }} } @@ -484,6 +485,7 @@ impl Service { mut config: FactoryFullConfiguration, ) -> Result { let inner = new_impl!( + ComponentBlock, config, |mut config: &mut FactoryFullConfiguration| -> Result<_, error::Error> { // Create client From 9d667d8e991daf52eee907f8daa49143deb1a5ff Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 9 Aug 2019 13:24:16 +0200 Subject: [PATCH 09/32] Introduce the ServiceBuilder struct Introduces a new builder-like ServiceBuilder struct that creates a NewService. --- core/service/src/factory.rs | 494 ++++++++++++++++++++++++++++++++++++ core/service/src/lib.rs | 4 +- 2 files changed, 497 insertions(+), 1 deletion(-) create mode 100644 core/service/src/factory.rs diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs new file mode 100644 index 0000000000000..f77779d77d954 --- /dev/null +++ b/core/service/src/factory.rs @@ -0,0 +1,494 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use crate::{NewService, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID}; +use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, components::maintain_transaction_pool}; +use crate::{components, TransactionPoolAdapter}; +use crate::config::{Configuration, Roles}; +use client::{BlockchainEvents, Client, runtime_api}; +use consensus_common::import_queue::ImportQueue; +use futures::{prelude::*, sync::mpsc}; +use futures03::{StreamExt as _, TryStreamExt as _}; +use keystore::Store as Keystore; +use log::{info, warn}; +use network::{FinalityProofProvider, OnDemand, NetworkService, NetworkStateInfo}; +use network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpecialization}; +use parking_lot::{Mutex, RwLock}; +use primitives::{Blake2Hasher, H256, Hasher}; +use sr_primitives::{BuildStorage, generic::BlockId}; +use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, Header, SaturatedConversion}; +use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; +use serde::{Serialize, de::DeserializeOwned}; +use std::{marker::PhantomData, sync::Arc}; +use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; +use tel::{telemetry, SUBSTRATE_INFO}; +use transaction_pool::txpool::{ChainApi, Pool as TransactionPool}; + +/// Aggregator for the components required to build a service. +/// +/// # Usage +/// +/// Call [`ServiceBuilder::new_full`] or [`ServiceBuilder::new_light`], then call the various +/// `with_` methods to add the required components that you built yourself: +/// +/// - [`with_select_chain`](ServiceBuilder::with_select_chain) +/// - [`with_import_queue`](ServiceBuilder::with_import_queue) +/// - [`with_network_protocol`](ServiceBuilder::with_network_protocol) +/// - [`with_finality_proof_provider`](ServiceBuilder::with_finality_proof_provider) +/// - [`with_transaction_pool`](ServiceBuilder::with_transaction_pool) +/// +/// After this is done, call [`build`](ServiceBuilder::build) to construct the service. +/// +/// The order in which the `with_*` methods are called doesn't matter, as the correct binding of +/// generics is done when you call `build`. +/// +pub struct ServiceBuilder { + config: Configuration, + client: Arc, + keystore: Arc>, + fetcher: Option, + select_chain: Option, + import_queue: TImpQu, + finality_proof_request_builder: Option, + finality_proof_provider: Option, + network_protocol: TNetP, + transaction_pool: TExPool, + marker: PhantomData<(TBl, TRtApi)>, +} + +impl ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), ()> +where TGen: Serialize + DeserializeOwned + BuildStorage { + /// Start the service builder with a configuration. 
+ pub fn new_full, TRtApi, TExecDisp: NativeExecutionDispatch>( + config: Configuration + ) -> Result, + client::LocalCallExecutor, NativeExecutor>, + TBl, + TRtApi + >, + Arc>, + (), + (), + BoxFinalityProofRequestBuilder, + (), + (), + () + >, Error> { + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let db_settings = client_db::DatabaseSettings { + cache_size: None, + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + path: config.database_path.clone(), + pruning: config.pruning.clone(), + }; + + let executor = NativeExecutor::::new(config.default_heap_pages); + + let client = Arc::new(client_db::new_client( + db_settings, + executor, + &config.chain_spec, + config.execution_strategies.clone(), + Some(keystore.clone()), + )?); + + Ok(ServiceBuilder { + config, + client, + keystore, + fetcher: None, + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + network_protocol: (), + transaction_pool: (), + marker: PhantomData, + }) + } + + /// Start the service builder with a configuration. + pub fn new_light, TRtApi, TExecDisp: NativeExecutionDispatch + 'static>( + config: Configuration + ) -> Result, network::OnDemand, Blake2Hasher>, + client::light::call_executor::RemoteOrLocalCallExecutor< + TBl, + client::light::backend::Backend< + client_db::light::LightStorage, + network::OnDemand, + Blake2Hasher + >, + client::light::call_executor::RemoteCallExecutor< + client::light::blockchain::Blockchain< + client_db::light::LightStorage, + network::OnDemand + >, + network::OnDemand, + >, + client::LocalCallExecutor< + client::light::backend::Backend< + client_db::light::LightStorage, + network::OnDemand, + Blake2Hasher + >, + NativeExecutor + > + >, + TBl, + TRtApi + >, + Arc>, + (), + (), + BoxFinalityProofRequestBuilder, + (), + (), + () + >, Error> { + let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; + + let db_settings = client_db::DatabaseSettings { + cache_size: config.database_cache_size.map(|u| u as usize), + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + path: config.database_path.clone(), + pruning: config.pruning.clone(), + }; + + let executor = NativeExecutor::::new(config.default_heap_pages); + + let db_storage = client_db::light::LightStorage::new(db_settings)?; + let light_blockchain = client::light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor.clone())); + let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); + let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); + let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; + + Ok(ServiceBuilder { + config, + client: Arc::new(client), + keystore, + fetcher: Some(fetcher), + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + network_protocol: (), + transaction_pool: (), + marker: PhantomData, + }) + } +} + +impl + ServiceBuilder { + + /// Defines which head-of-chain strategy to use. 
+ pub fn with_opt_select_chain( + mut self, + select_chain_builder: impl FnOnce(&mut Configuration, Arc) -> Result, Error> + ) -> Result, Error> { + let select_chain = select_chain_builder(&mut self.config, self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + marker: self.marker, + }) + } + + /// Defines which head-of-chain strategy to use. + pub fn with_select_chain( + self, + builder: impl FnOnce(&mut Configuration, Arc) -> Result + ) -> Result, Error> { + self.with_opt_select_chain(|cfg, cl| builder(cfg, cl).map(Option::Some)) + } + + /// Defines which import queue to use. + pub fn with_import_queue( + mut self, + builder: impl FnOnce(&mut Configuration, Arc, Option) -> Result + ) -> Result, Error> + where TSc: Clone { + let import_queue = builder(&mut self.config, self.client.clone(), self.select_chain.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + marker: self.marker, + }) + } + + /// Defines which network specialization protocol to use. + pub fn with_network_protocol( + self, + network_protocol_builder: impl FnOnce(&Configuration) -> Result + ) -> Result, Error> { + let network_protocol = network_protocol_builder(&self.config)?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol, + transaction_pool: self.transaction_pool, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. + pub fn with_opt_finality_proof_provider( + self, + builder: impl FnOnce(Arc) -> Result>>, Error> + ) -> Result>, + TNetP, + TExPool + >, Error> { + let finality_proof_provider = builder(self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. + pub fn with_finality_proof_provider( + self, + build: impl FnOnce(Arc) -> Result>, Error> + ) -> Result>, + TNetP, + TExPool + >, Error> { + self.with_opt_finality_proof_provider(|client| build(client).map(Option::Some)) + } + + /// Defines which import queue to use. 
+ pub fn with_import_queue_and_opt_fprb( + mut self, + builder: impl FnOnce(&mut Configuration, Arc, Option) + -> Result<(UImpQu, Option), Error> + ) -> Result, Error> + where TSc: Clone { + let (import_queue, fprb) = builder(&mut self.config, self.client.clone(), self.select_chain.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: fprb, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + marker: self.marker, + }) + } + + /// Defines which import queue to use. + pub fn with_import_queue_and_fprb( + self, + builder: impl FnOnce(&mut Configuration, Arc, Option) -> Result<(UImpQu, UFprb), Error> + ) -> Result, Error> + where TSc: Clone { + self.with_import_queue_and_opt_fprb(|cfg, cl, sc| builder(cfg, cl, sc).map(|(q, f)| (q, Some(f)))) + } + + /// Defines which transaction pool to use. + pub fn with_transaction_pool( + self, + transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc) -> Result + ) -> Result, Error> { + let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool, + marker: self.marker, + }) + } +} + +impl +ServiceBuilder< + TBl, + TRtApi, + TCfg, + TGen, + Client, + Arc>, + TSc, + TImpQu, + BoxFinalityProofRequestBuilder, + Arc>, + TNetP, + TransactionPool +> where + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + runtime_api::Metadata + offchain::OffchainWorkerApi + runtime_api::TaggedTransactionQueue + session::SessionKeys, + TBl: BlockT::Out>, + TRtApi: 'static + Send + Sync, + TCfg: Default, + TGen: Serialize + DeserializeOwned + BuildStorage, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone, + TSc: Clone, + TImpQu: 'static + ImportQueue, + TNetP: NetworkSpecialization, + TExPoolApi: 'static + ChainApi::Hash>, +{ + /// Builds the service. 
+ pub fn build(self) -> Result, + TBl, + Client, + TSc, + NetworkStatus, + NetworkService::Hash>, + TransactionPool, + offchain::OffchainWorkers< + Client, + TBackend::OffchainStorage, + TBl + >, + >, Error> { + let mut config = self.config; + session::generate_initial_session_keys( + self.client.clone(), + config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default() + )?; + let ( + client, + fetcher, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool + ) = ( + self.client, + self.fetcher, + self.keystore, + self.select_chain, + self.import_queue, + self.finality_proof_request_builder, + self.finality_proof_provider, + self.network_protocol, + self.transaction_pool + ); + + new_impl!( + TBl, + config, + move |_| -> Result<_, Error> { + Ok(( + client, + fetcher, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + network_protocol, + transaction_pool + )) + }, + |h, c, tx| maintain_transaction_pool(h, c, tx), + |n, o, p, ns, v| components::offchain_workers(n, o, p, ns, v), + |c, ssb, si, te, tp, ks| components::start_rpc(c, ssb, si, te, tp, ks), + ) + } +} diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index f48e6c2e484b4..5b0632d76a253 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -50,7 +50,7 @@ use substrate_executor::NativeExecutor; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use tel::{telemetry, SUBSTRATE_INFO}; -pub use self::error::Error; +pub use self::{error::Error, factory::ServiceBuilder}; pub use config::{Configuration, Roles, PruningMode}; pub use chain_spec::{ChainSpec, Properties}; pub use transaction_pool::txpool::{ @@ -479,6 +479,8 @@ macro_rules! new_impl { }} } +mod factory; + impl Service { /// Creates a new service. pub fn new( From 4fc8aacafcadfc4493c35a47cb5f5655290d8975 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 9 Aug 2019 15:20:31 +0200 Subject: [PATCH 10/32] Macro-ify import_blocks, export_blocks and revert_chain Similar to the introduction of new_impl!, we extract the actual code into a macro, letting us get rid of the Components and Factory traits --- core/service/src/chain_ops.rs | 155 ++++++++++++++++++++-------------- core/service/src/factory.rs | 10 +-- 2 files changed, 95 insertions(+), 70 deletions(-) diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index c801b81186f18..df943d7597a80 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -18,42 +18,27 @@ use std::{self, io::{Read, Write, Seek}}; use futures::prelude::*; -use futures03::TryFutureExt as _; use log::{info, warn}; -use sr_primitives::generic::{SignedBlock, BlockId}; -use sr_primitives::traits::{SaturatedConversion, Zero, One, Block, Header, NumberFor}; -use consensus_common::import_queue::{ImportQueue, IncomingBlock, Link, BlockImportError, BlockImportResult}; -use network::message; +use sr_primitives::generic::BlockId; +use sr_primitives::traits::{SaturatedConversion, Zero, One, Header, NumberFor}; +use consensus_common::import_queue::ImportQueue; -use consensus_common::BlockOrigin; use crate::components::{self, Components, ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis}; use crate::new_client; use codec::{Decode, Encode, IoReader}; use crate::error; use crate::chain_spec::ChainSpec; -/// Export a range of blocks to a binary stream. 
-pub fn export_blocks( - config: FactoryFullConfiguration, - exit: E, - mut output: W, - from: FactoryBlockNumber, - to: Option>, - json: bool -) -> error::Result<()> - where - F: ServiceFactory, - E: Future + Send + 'static, - W: Write, -{ - let client = new_client::(&config)?; - let mut block = from; +#[macro_export] +macro_rules! export_blocks { +($client:ident, $exit:ident, $output:ident, $from:ident, $to:ident, $json:ident) => {{ + let mut block = $from; - let last = match to { + let last = match $to { Some(v) if v.is_zero() => One::one(), Some(v) => v, - None => client.info().chain.best_number, + None => $client.info().chain.best_number, }; if last < block { @@ -62,28 +47,28 @@ pub fn export_blocks( let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { - let _ = exit.wait(); + let _ = $exit.wait(); let _ = exit_send.send(()); }); info!("Exporting blocks from #{} to #{}", block, last); - if !json { + if !$json { let last_: u64 = last.saturated_into::(); let block_: u64 = block.saturated_into::(); let len: u64 = last_ - block_ + 1; - output.write(&len.encode())?; + $output.write(&len.encode())?; } loop { if exit_recv.try_recv().is_ok() { break; } - match client.block(&BlockId::number(block))? { + match $client.block(&BlockId::number(block))? { Some(block) => { - if json { - serde_json::to_writer(&mut output, &block) + if $json { + serde_json::to_writer(&mut $output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; } else { - output.write(&block.encode())?; + $output.write(&block.encode())?; } }, None => break, @@ -97,8 +82,37 @@ pub fn export_blocks( block += One::one(); } Ok(()) +}} +} + +/// Export a range of blocks to a binary stream. +pub fn export_blocks( + config: FactoryFullConfiguration, + exit: E, + mut output: W, + from: FactoryBlockNumber, + to: Option>, + json: bool +) -> error::Result<()> + where + F: ServiceFactory, + E: Future + Send + 'static, + W: Write, +{ + let client = new_client::(&config)?; + export_blocks!(client, exit, output, from, to, json) } +#[macro_export] +macro_rules! import_blocks { +($block:ty, $client:ident, $queue:ident, $exit:ident, $input:ident) => {{ + use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult}; + use consensus_common::BlockOrigin; + use network::message; + use sr_primitives::generic::SignedBlock; + use sr_primitives::traits::Block; + use futures03::TryFutureExt as _; + struct WaitLink { imported_blocks: u64, has_error: bool, @@ -132,31 +146,13 @@ impl Link for WaitLink { } } -/// Returns a future that import blocks from a binary stream. -pub fn import_blocks( - mut config: FactoryFullConfiguration, - exit: E, - input: R -) -> error::Result> - where F: ServiceFactory, E: Future + Send + 'static, R: Read + Seek, -{ - let client = new_client::(&config)?; - // FIXME #1134 this shouldn't need a mutable config. 
- let select_chain = components::FullComponents::::build_select_chain(&mut config, client.clone())?; - let (mut queue, _) = components::FullComponents::::build_import_queue( - &mut config, - client.clone(), - select_chain, - None, - )?; - let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { - let _ = exit.wait(); + let _ = $exit.wait(); let _ = exit_send.send(()); }); - let mut io_reader_input = IoReader(input); + let mut io_reader_input = IoReader($input); let count: u64 = Decode::decode(&mut io_reader_input) .map_err(|e| format!("Error reading file: {}", e))?; info!("Importing {} blocks", count); @@ -165,11 +161,11 @@ pub fn import_blocks( if exit_recv.try_recv().is_ok() { break; } - match SignedBlock::::decode(&mut io_reader_input) { + match SignedBlock::<$block>::decode(&mut io_reader_input) { Ok(signed) => { let (header, extrinsics) = signed.block.deconstruct(); let hash = header.hash(); - let block = message::BlockData:: { + let block = message::BlockData::<$block> { hash, justification: signed.justification, header: Some(header), @@ -178,8 +174,8 @@ pub fn import_blocks( message_queue: None }; // import queue handles verification and importing it into the client - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { + $queue.import_blocks(BlockOrigin::File, vec![ + IncomingBlock::<$block> { hash: block.hash, header: block.header, body: block.body, @@ -208,7 +204,7 @@ pub fn import_blocks( let blocks_before = link.imported_blocks; let _ = futures03::future::poll_fn(|cx| { - queue.poll_actions(cx, &mut link); + $queue.poll_actions(cx, &mut link); std::task::Poll::Pending::> }).compat().poll(); if link.has_error { @@ -226,24 +222,41 @@ pub fn import_blocks( ); } if link.imported_blocks >= count { - info!("Imported {} blocks. Best: #{}", block_count, client.info().chain.best_number); + info!("Imported {} blocks. Best: #{}", block_count, $client.info().chain.best_number); Ok(Async::Ready(())) } else { Ok(Async::NotReady) } })) +}} } -/// Revert the chain. -pub fn revert_chain( - config: FactoryFullConfiguration, - blocks: FactoryBlockNumber -) -> error::Result<()> - where F: ServiceFactory, +/// Returns a future that import blocks from a binary stream. +pub fn import_blocks( + mut config: FactoryFullConfiguration, + exit: E, + input: R +) -> error::Result> + where F: ServiceFactory, E: Future + Send + 'static, R: Read + Seek, { let client = new_client::(&config)?; - let reverted = client.revert(blocks)?; - let info = client.info().chain; + // FIXME #1134 this shouldn't need a mutable config. + let select_chain = components::FullComponents::::build_select_chain(&mut config, client.clone())?; + let (mut queue, _) = components::FullComponents::::build_import_queue( + &mut config, + client.clone(), + select_chain, + None + )?; + + import_blocks!(F::Block, client, queue, exit, input) +} + +#[macro_export] +macro_rules! revert_chain { +($client:ident, $blocks:ident) => {{ + let reverted = $client.revert($blocks)?; + let info = $client.info().chain; if reverted.is_zero() { info!("There aren't any non-finalized blocks to revert."); @@ -251,6 +264,18 @@ pub fn revert_chain( info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash); } Ok(()) +}} +} + +/// Revert the chain. 
+pub fn revert_chain( + config: FactoryFullConfiguration, + blocks: FactoryBlockNumber +) -> error::Result<()> + where F: ServiceFactory, +{ + let client = new_client::(&config)?; + revert_chain!(client, blocks) } /// Build a chain spec json diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index f77779d77d954..2a1840d490528 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -32,7 +32,7 @@ use sr_primitives::{BuildStorage, generic::BlockId}; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, Header, SaturatedConversion}; use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; use serde::{Serialize, de::DeserializeOwned}; -use std::{marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc, sync::atomic::AtomicBool}; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use tel::{telemetry, SUBSTRATE_INFO}; use transaction_pool::txpool::{ChainApi, Pool as TransactionPool}; @@ -65,7 +65,7 @@ pub struct ServiceBuilder, finality_proof_provider: Option, network_protocol: TNetP, - transaction_pool: TExPool, + transaction_pool: Arc, marker: PhantomData<(TBl, TRtApi)>, } @@ -124,7 +124,7 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { finality_proof_request_builder: None, finality_proof_provider: None, network_protocol: (), - transaction_pool: (), + transaction_pool: Arc::new(()), marker: PhantomData, }) } @@ -203,7 +203,7 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { finality_proof_request_builder: None, finality_proof_provider: None, network_protocol: (), - transaction_pool: (), + transaction_pool: Arc::new(()), marker: PhantomData, }) } @@ -393,7 +393,7 @@ impl Date: Fri, 9 Aug 2019 15:21:29 +0200 Subject: [PATCH 11/32] Add export_blocks, import_blocks and revert_chain methods on ServiceBuilder Can be used as a replacement for the chain_ops::* methods --- core/service/src/factory.rs | 107 +++++++++++++++++++++++++++++++++++- core/service/src/lib.rs | 4 +- 2 files changed, 108 insertions(+), 3 deletions(-) diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index 2a1840d490528..926124c276aad 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -19,6 +19,7 @@ use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, components use crate::{components, TransactionPoolAdapter}; use crate::config::{Configuration, Roles}; use client::{BlockchainEvents, Client, runtime_api}; +use codec::{Decode, Encode, IoReader}; use consensus_common::import_queue::ImportQueue; use futures::{prelude::*, sync::mpsc}; use futures03::{StreamExt as _, TryStreamExt as _}; @@ -29,10 +30,10 @@ use network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpe use parking_lot::{Mutex, RwLock}; use primitives::{Blake2Hasher, H256, Hasher}; use sr_primitives::{BuildStorage, generic::BlockId}; -use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, Header, SaturatedConversion}; +use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, NumberFor, One, Zero, Header, SaturatedConversion}; use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; use serde::{Serialize, de::DeserializeOwned}; -use std::{marker::PhantomData, sync::Arc, sync::atomic::AtomicBool}; +use std::{io::{Read, Write, Seek}, marker::PhantomData, sync::Arc, sync::atomic::AtomicBool}; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use tel::{telemetry, SUBSTRATE_INFO}; use transaction_pool::txpool::{ChainApi, Pool as 
TransactionPool}; @@ -399,6 +400,108 @@ impl + Send + 'static, + input: impl Read + Seek, + ) -> Result + Send>, Error>; +} + +/// Implemented on `ServiceBuilder`. Allows exporting blocks once you have given all the required +/// components to the builder. +pub trait ServiceBuilderExport { + /// Type of block of the builder. + type Block: BlockT; + + /// Performs the blocks export. + fn export_blocks( + &self, + exit: impl Future + Send + 'static, + output: impl Write, + from: NumberFor, + to: Option>, + json: bool + ) -> Result<(), Error>; +} + +/// Implemented on `ServiceBuilder`. Allows reverting the chain once you have given all the +/// required components to the builder. +pub trait ServiceBuilderRevert { + /// Type of block of the builder. + type Block: BlockT; + + /// Performs a revert of `blocks` bocks. + fn revert_chain( + &self, + blocks: NumberFor + ) -> Result<(), Error>; +} + +impl ServiceBuilderImport for + ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool> +where + TBl: BlockT::Out>, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone, + TImpQu: 'static + ImportQueue, + TRtApi: 'static + Send + Sync, +{ + fn import_blocks( + self, + exit: impl Future + Send + 'static, + input: impl Read + Seek, + ) -> Result + Send>, Error> { + let client = self.client; + let mut queue = self.import_queue; + import_blocks!(TBl, client, queue, exit, input) + .map(|f| Box::new(f) as Box<_>) + } +} + +impl ServiceBuilderExport for + ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool> +where + TBl: BlockT::Out>, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone +{ + type Block = TBl; + + fn export_blocks( + &self, + exit: impl Future + Send + 'static, + mut output: impl Write, + from: NumberFor, + to: Option>, + json: bool + ) -> Result<(), Error> { + let client = &self.client; + export_blocks!(client, exit, output, from, to, json) + } +} + +impl ServiceBuilderRevert for + ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool> +where + TBl: BlockT::Out>, + TBackend: 'static + client::backend::Backend + Send, + TExec: 'static + client::CallExecutor + Send + Sync + Clone +{ + type Block = TBl; + + fn revert_chain( + &self, + blocks: NumberFor + ) -> Result<(), Error> { + let client = &self.client; + revert_chain!(client, blocks) + } +} + impl ServiceBuilder< TBl, diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 5b0632d76a253..595023b879321 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -22,6 +22,7 @@ mod components; mod chain_spec; pub mod config; +#[macro_use] pub mod chain_ops; pub mod error; @@ -50,7 +51,8 @@ use substrate_executor::NativeExecutor; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use tel::{telemetry, SUBSTRATE_INFO}; -pub use self::{error::Error, factory::ServiceBuilder}; +pub use self::error::Error; +pub use self::factory::{ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert}; pub use config::{Configuration, Roles, PruningMode}; pub use chain_spec::{ChainSpec, Properties}; pub use transaction_pool::txpool::{ From be2a7126800e26603a50c2e5349c558600fa621b Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 12 Aug 2019 11:24:14 +0200 Subject: [PATCH 12/32] Add run_with_builder Instead of just run, adds run_with_builder to ParseAndPrepareExport/Import/Revert. 
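For context, this is roughly what the call sites look like once the node and node-template CLIs are switched over later in this series (`new_full_start!` is the builder-constructing macro introduced there, and a follow-up patch adjusts the closure to return a `Result`, hence the `Ok(...)`):

    // Match arms from the node-template `run` function, as they end up after
    // the transition to ServiceBuilder.
    ParseAndPrepare::ExportBlocks(cmd) =>
        cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit),
    ParseAndPrepare::ImportBlocks(cmd) =>
        cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit),
    ParseAndPrepare::RevertChain(cmd) =>
        cmd.run_with_builder::<(), _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec),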
This lets you run these operations with a ServiceBuilder instead of a ServiceFactory. --- core/cli/src/lib.rs | 88 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 2 deletions(-) diff --git a/core/cli/src/lib.rs b/core/cli/src/lib.rs index ef5290413166d..42f094a1dc6ee 100644 --- a/core/cli/src/lib.rs +++ b/core/cli/src/lib.rs @@ -28,8 +28,9 @@ pub mod informant; use client::ExecutionStrategies; use service::{ - config::Configuration, - ServiceFactory, FactoryFullConfiguration, RuntimeGenesis, + config::Configuration, ServiceFactory, + ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert, + FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis, PruningMode, ChainSpec, }; use network::{ @@ -342,6 +343,37 @@ impl<'a> ParseAndPrepareExport<'a> { config, exit.into_exit(), file, from.into(), to.map(Into::into), json ).map_err(Into::into) } + + /// Runs the command and exports from the chain. + pub fn run_with_builder( + self, + builder: F, + spec_factory: S, + exit: E, + ) -> error::Result<()> + where S: FnOnce(&str) -> Result>, String>, + F: FnOnce(Configuration) -> B, + B: ServiceBuilderExport, + C: Default, + G: RuntimeGenesis, + E: IntoExit + { + let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; + + info!("DB path: {}", config.database_path.display()); + let from = self.params.from.unwrap_or(1); + let to = self.params.to; + let json = self.params.json; + + let file: Box = match self.params.output { + Some(filename) => Box::new(File::create(filename)?), + None => Box::new(stdout()), + }; + + let builder = builder(config); + builder.export_blocks(exit.into_exit(), file, from.into(), to.map(Into::into), json)?; + Ok(()) + } } /// Command ready to import the chain. @@ -381,6 +413,41 @@ impl<'a> ParseAndPrepareImport<'a> { tokio::run(fut); Ok(()) } + + /// Runs the command and imports to the chain. + pub fn run_with_builder( + self, + builder: F, + spec_factory: S, + exit: E, + ) -> error::Result<()> + where S: FnOnce(&str) -> Result>, String>, + F: FnOnce(Configuration) -> B, + B: ServiceBuilderImport, + C: Default, + G: RuntimeGenesis, + E: IntoExit + { + let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; + config.execution_strategies = ExecutionStrategies { + importing: self.params.execution.into(), + other: self.params.execution.into(), + ..Default::default() + }; + + let file: Box = match self.params.input { + Some(filename) => Box::new(File::open(filename)?), + None => { + let mut buffer = Vec::new(); + stdin().read_to_end(&mut buffer)?; + Box::new(Cursor::new(buffer)) + }, + }; + + let fut = builder(config).import_blocks(exit.into_exit(), file)?; + tokio::run(fut); + Ok(()) + } } /// Command ready to purge the chain. @@ -450,6 +517,23 @@ impl<'a> ParseAndPrepareRevert<'a> { let blocks = self.params.num; Ok(service::chain_ops::revert_chain::(config, blocks.into())?) } + + /// Runs the command and reverts the chain. + pub fn run_with_builder( + self, + builder: F, + spec_factory: S + ) -> error::Result<()> + where S: FnOnce(&str) -> Result>, String>, + F: FnOnce(Configuration) -> B, + B: ServiceBuilderRevert, + C: Default, + G: RuntimeGenesis { + let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; + let blocks = self.params.num; + builder(config).revert_chain(blocks.into())?; + Ok(()) + } } /// Parse command line interface arguments and executes the desired command. 
From 7eee4755b77fc9c1a84f4dee4d1fa06d9bc0a049 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 12 Aug 2019 13:47:11 +0200 Subject: [PATCH 13/32] Transition node and node-template to ServiceBuilder --- core/cli/src/lib.rs | 13 +- core/service/src/components.rs | 4 +- core/service/src/factory.rs | 103 +++++++++++----- core/service/src/lib.rs | 10 +- node-template/src/cli.rs | 14 +-- node-template/src/main.rs | 1 + node-template/src/service.rs | 206 ++++++++++++++++++++++++++++++- node/cli/src/lib.rs | 13 +- node/cli/src/service.rs | 219 ++++++++++++++++++++++++++++++++- 9 files changed, 524 insertions(+), 59 deletions(-) diff --git a/core/cli/src/lib.rs b/core/cli/src/lib.rs index 42f094a1dc6ee..b5e5eb0c30cfe 100644 --- a/core/cli/src/lib.rs +++ b/core/cli/src/lib.rs @@ -352,7 +352,7 @@ impl<'a> ParseAndPrepareExport<'a> { exit: E, ) -> error::Result<()> where S: FnOnce(&str) -> Result>, String>, - F: FnOnce(Configuration) -> B, + F: FnOnce(Configuration) -> Result, B: ServiceBuilderExport, C: Default, G: RuntimeGenesis, @@ -370,8 +370,7 @@ impl<'a> ParseAndPrepareExport<'a> { None => Box::new(stdout()), }; - let builder = builder(config); - builder.export_blocks(exit.into_exit(), file, from.into(), to.map(Into::into), json)?; + builder(config)?.export_blocks(exit.into_exit(), file, from.into(), to.map(Into::into), json)?; Ok(()) } } @@ -422,7 +421,7 @@ impl<'a> ParseAndPrepareImport<'a> { exit: E, ) -> error::Result<()> where S: FnOnce(&str) -> Result>, String>, - F: FnOnce(Configuration) -> B, + F: FnOnce(Configuration) -> Result, B: ServiceBuilderImport, C: Default, G: RuntimeGenesis, @@ -444,7 +443,7 @@ impl<'a> ParseAndPrepareImport<'a> { }, }; - let fut = builder(config).import_blocks(exit.into_exit(), file)?; + let fut = builder(config)?.import_blocks(exit.into_exit(), file)?; tokio::run(fut); Ok(()) } @@ -525,13 +524,13 @@ impl<'a> ParseAndPrepareRevert<'a> { spec_factory: S ) -> error::Result<()> where S: FnOnce(&str) -> Result>, String>, - F: FnOnce(Configuration) -> B, + F: FnOnce(Configuration) -> Result, B: ServiceBuilderRevert, C: Default, G: RuntimeGenesis { let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; let blocks = self.params.num; - builder(config).revert_chain(blocks.into())?; + builder(config)?.revert_chain(blocks.into())?; Ok(()) } } diff --git a/core/service/src/components.rs b/core/service/src/components.rs index 0e9fe274dc045..0b25ed5989e86 100644 --- a/core/service/src/components.rs +++ b/core/service/src/components.rs @@ -388,7 +388,7 @@ pub trait ServiceFactory: 'static + Sized { /// Other configuration for service members. type Configuration: Default; /// RPC initialisation. - type RpcExtensions: rpc::RpcExtension; + type RpcExtensions: rpc::RpcExtension + Clone; /// Extended full service type. type FullService: ServiceTrait>; /// Extended light service type. @@ -488,7 +488,7 @@ pub trait Components: Sized + 'static { /// The type that can start all runtime-dependent services. type RuntimeServices: ServiceTrait; /// The type that can extend the RPC methods. - type RpcExtensions: rpc::RpcExtension; + type RpcExtensions: rpc::RpcExtension + Clone; // TODO: Traitify transaction pool and allow people to implement their own. (#1242) /// Extrinsic pool type. 
type TransactionPoolApi: 'static + txpool::ChainApi< diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index 926124c276aad..3a96c7b1377e1 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -56,7 +56,7 @@ use transaction_pool::txpool::{ChainApi, Pool as TransactionPool}; /// The order in which the `with_*` methods are called doesn't matter, as the correct binding of /// generics is done when you call `build`. /// -pub struct ServiceBuilder { +pub struct ServiceBuilder { config: Configuration, client: Arc, keystore: Arc>, @@ -67,10 +67,11 @@ pub struct ServiceBuilder, network_protocol: TNetP, transaction_pool: Arc, + rpc_extensions: TRpc, marker: PhantomData<(TBl, TRtApi)>, } -impl ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), ()> +impl ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), (), ()> where TGen: Serialize + DeserializeOwned + BuildStorage { /// Start the service builder with a configuration. pub fn new_full, TRtApi, TExecDisp: NativeExecutionDispatch>( @@ -92,6 +93,7 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { BoxFinalityProofRequestBuilder, (), (), + (), () >, Error> { let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; @@ -126,6 +128,7 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { finality_proof_provider: None, network_protocol: (), transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), marker: PhantomData, }) } @@ -172,6 +175,7 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { BoxFinalityProofRequestBuilder, (), (), + (), () >, Error> { let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; @@ -205,19 +209,20 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { finality_proof_provider: None, network_protocol: (), transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), marker: PhantomData, }) } } -impl - ServiceBuilder { +impl + ServiceBuilder { /// Defines which head-of-chain strategy to use. pub fn with_opt_select_chain( mut self, select_chain_builder: impl FnOnce(&mut Configuration, Arc) -> Result, Error> - ) -> Result, Error> { + ) -> Result, Error> { let select_chain = select_chain_builder(&mut self.config, self.client.clone())?; Ok(ServiceBuilder { @@ -231,6 +236,7 @@ impl( self, builder: impl FnOnce(&mut Configuration, Arc) -> Result - ) -> Result, Error> { + ) -> Result, Error> { self.with_opt_select_chain(|cfg, cl| builder(cfg, cl).map(Option::Some)) } /// Defines which import queue to use. 
pub fn with_import_queue( mut self, - builder: impl FnOnce(&mut Configuration, Arc, Option) -> Result - ) -> Result, Error> + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) -> Result + ) -> Result, Error> where TSc: Clone { - let import_queue = builder(&mut self.config, self.client.clone(), self.select_chain.clone())?; + let import_queue = builder(&mut self.config, self.client.clone(), self.select_chain.clone(), self.transaction_pool.clone())?; Ok(ServiceBuilder { config: self.config, @@ -262,6 +268,7 @@ impl( self, network_protocol_builder: impl FnOnce(&Configuration) -> Result - ) -> Result, Error> { + ) -> Result, Error> { let network_protocol = network_protocol_builder(&self.config)?; Ok(ServiceBuilder { @@ -284,6 +291,7 @@ impl>, TNetP, - TExPool + TExPool, + TRpc >, Error> { let finality_proof_provider = builder(self.client.clone())?; @@ -319,6 +328,7 @@ impl>, TNetP, - TExPool + TExPool, + TRpc >, Error> { self.with_opt_finality_proof_provider(|client| build(client).map(Option::Some)) } @@ -347,11 +358,11 @@ impl( mut self, - builder: impl FnOnce(&mut Configuration, Arc, Option) + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) -> Result<(UImpQu, Option), Error> - ) -> Result, Error> + ) -> Result, Error> where TSc: Clone { - let (import_queue, fprb) = builder(&mut self.config, self.client.clone(), self.select_chain.clone())?; + let (import_queue, fprb) = builder(&mut self.config, self.client.clone(), self.select_chain.clone(), self.transaction_pool.clone())?; Ok(ServiceBuilder { config: self.config, @@ -364,6 +375,7 @@ impl( self, - builder: impl FnOnce(&mut Configuration, Arc, Option) -> Result<(UImpQu, UFprb), Error> - ) -> Result, Error> + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) -> Result<(UImpQu, UFprb), Error> + ) -> Result, Error> where TSc: Clone { - self.with_import_queue_and_opt_fprb(|cfg, cl, sc| builder(cfg, cl, sc).map(|(q, f)| (q, Some(f)))) + self.with_import_queue_and_opt_fprb(|cfg, cl, sc, tx| builder(cfg, cl, sc, tx).map(|(q, f)| (q, Some(f)))) } /// Defines which transaction pool to use. 
pub fn with_transaction_pool( self, transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc) -> Result - ) -> Result, Error> { + ) -> Result, Error> { let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?; Ok(ServiceBuilder { @@ -395,6 +407,30 @@ impl( + self, + rpc_ext_builder: impl FnOnce(Arc, Arc) -> URpc + ) -> Result, Error> { + let rpc_extensions = rpc_ext_builder(self.client.clone(), self.transaction_pool.clone()); + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + network_protocol: self.network_protocol, + transaction_pool: self.transaction_pool, + rpc_extensions, marker: self.marker, }) } @@ -441,8 +477,8 @@ pub trait ServiceBuilderRevert { ) -> Result<(), Error>; } -impl ServiceBuilderImport for - ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool> +impl ServiceBuilderImport for + ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> where TBl: BlockT::Out>, TBackend: 'static + client::backend::Backend + Send, @@ -462,8 +498,8 @@ where } } -impl ServiceBuilderExport for - ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool> +impl ServiceBuilderExport for + ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> where TBl: BlockT::Out>, TBackend: 'static + client::backend::Backend + Send, @@ -484,8 +520,8 @@ where } } -impl ServiceBuilderRevert for - ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool> +impl ServiceBuilderRevert for + ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> where TBl: BlockT::Out>, TBackend: 'static + client::backend::Backend + Send, @@ -502,7 +538,7 @@ where } } -impl +impl ServiceBuilder< TBl, TRtApi, @@ -515,7 +551,8 @@ ServiceBuilder< BoxFinalityProofRequestBuilder, Arc>, TNetP, - TransactionPool + TransactionPool, + TRpc > where Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: @@ -530,6 +567,7 @@ ServiceBuilder< TImpQu: 'static + ImportQueue, TNetP: NetworkSpecialization, TExPoolApi: 'static + ChainApi::Hash>, + TRpc: rpc::RpcExtension + Clone, { /// Builds the service. 
pub fn build(self) -> Result Service { )?; let network_protocol = ::build_network_protocol(&config)?; + let rpc_extensions = Components::build_rpc_extensions(client.clone(), transaction_pool.clone()); Ok(( client, @@ -528,7 +531,8 @@ impl Service { finality_proof_request_builder, finality_proof_provider, network_protocol, - transaction_pool + transaction_pool, + rpc_extensions )) }, Components::RuntimeServices::maintain_transaction_pool, diff --git a/node-template/src/cli.rs b/node-template/src/cli.rs index c9337bb043f78..d60b59a8c5533 100644 --- a/node-template/src/cli.rs +++ b/node-template/src/cli.rs @@ -4,7 +4,7 @@ use std::cell::RefCell; use tokio::runtime::Runtime; pub use substrate_cli::{VersionInfo, IntoExit, error}; use substrate_cli::{informant, parse_and_prepare, ParseAndPrepare, NoCustom}; -use substrate_service::{AbstractService, ServiceFactory, Roles as ServiceRoles}; +use substrate_service::{AbstractService, Roles as ServiceRoles}; use crate::chain_spec; use log::info; @@ -15,7 +15,7 @@ pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> E: IntoExit, { match parse_and_prepare::(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by {}, 2017, 2018", version.author); @@ -26,21 +26,21 @@ pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> match config.roles { ServiceRoles::LIGHT => run_until_exit( runtime, - service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?, + service::new_light(config).map_err(|e| format!("{:?}", e))?, exit ), _ => run_until_exit( runtime, - service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?, + service::new_full(config).map_err(|e| format!("{:?}", e))?, exit ), }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::(load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::(load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run::(load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(_) => Ok(()) }?; diff --git a/node-template/src/main.rs b/node-template/src/main.rs index 18e9638833fd2..024efcc7db541 100644 --- a/node-template/src/main.rs +++ b/node-template/src/main.rs @@ -4,6 +4,7 @@ #![warn(unused_extern_crates)] mod chain_spec; +#[macro_use] mod service; mod cli; diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 57414e36fb9ec..039ed228d65fa 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -12,7 +12,7 @@ use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi, WASM use substrate_service::{ FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, AbstractService, + 
error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder, }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; @@ -71,6 +71,210 @@ impl Default for NodeConfig where F: ServiceFactory { } } +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = inherents::InherentDataProviders::new(); + let mut tasks_to_spawn = None; + + let builder = substrate_service::ServiceBuilder::new_full::< + node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor + >($config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(substrate_client::LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + let select_chain = select_chain.take() + .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; + let (block_import, link_half) = + grandpa::block_import::<_, _, _, node_template_runtime::RuntimeApi, _, _>( + client.clone(), client.clone(), select_chain + )?; + let justification_import = block_import.clone(); + + let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + babe::Config::get_or_compute(&*client)?, + block_import, + Some(Box::new(justification_import)), + None, + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + import_setup = Some((babe_block_import.clone(), link_half, babe_link)); + tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + + Ok(import_queue) + })?; + + (builder, import_setup, inherent_data_providers, tasks_to_spawn) + }} +} + +/// Builds a new service for a full client. +pub fn new_full(config: Configuration) +-> Result { + + let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config); + + let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .build()?; + + let (block_import, link_half, babe_link) = + import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } + } + + if service.config().roles.is_authority() { + let proposer = basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(ServiceError::SelectChainRequired)?; + + let babe_config = babe::BabeParams { + config: Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring: service.config().force_authoring, + time_source: babe_link, + }; + + let babe = start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + + // the BABE authoring task is considered infallible, i.e. if it + // fails we take down the service with it. + service.spawn_essential_task(select); + } + + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let telemetry_on_connect = TelemetryOnConnect { + telemetry_connection_sinks: service.telemetry_on_connect_stream(), + }; + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(telemetry_on_connect), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; + }, + } + + Ok(service) +} + +/// Builds a new service for a light client. +pub fn new_light(config: Configuration) +-> Result { + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) + )? 
+ .with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| { + #[allow(deprecated)] + let fetch_checker = client.backend().blockchain().fetcher() + .upgrade() + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( + client.clone(), Arc::new(fetch_checker), client.clone() + )?; + + let finality_proof_import = block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let (import_queue, ..) = import_queue( + Config::get_or_compute(&*client)?, + block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .build() +} + construct_service_factory! { struct Factory { Block = Block, diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 9f789fc355137..a33d70b46a0fe 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -21,6 +21,7 @@ pub use cli::error; pub mod chain_spec; +#[macro_use] mod service; mod factory_impl; @@ -158,7 +159,7 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul E: IntoExit, { match parse_and_prepare::(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by Parity Technologies, 2017-2019"); @@ -170,21 +171,21 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul match config.roles { ServiceRoles::LIGHT => run_until_exit( runtime, - service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?, + service::new_light(config).map_err(|e| format!("{:?}", e))?, exit ), _ => run_until_exit( runtime, - service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?, + service::new_full(config).map_err(|e| format!("{:?}", e))?, exit ), }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::(load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::(load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run::(load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(CustomSubcommands::Factory(cli_args)) => { let mut config = cli::create_config_with_db_path( load_spec, diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 0c231e7f2b65d..b0c336eb95bc4 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -31,13 +31,13 
@@ use node_runtime::{GenesisConfig, RuntimeApi}; use substrate_service::{ FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - error::{Error as ServiceError}, + config::Configuration, AbstractService, ServiceBuilder, error::{Error as ServiceError} }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; use substrate_service::construct_service_factory; -use substrate_service::{TelemetryOnConnect, AbstractService}; +use substrate_service::TelemetryOnConnect; construct_simple_protocol! { /// Demo protocol attachment for substrate. @@ -82,6 +82,221 @@ impl Default for NodeConfig where F: substrate_service::ServiceFactory { } } +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = inherents::InherentDataProviders::new(); + let mut tasks_to_spawn = None; + + let builder = substrate_service::ServiceBuilder::new_full::< + node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor + >($config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(client::LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + let select_chain = select_chain.take() + .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; + let (block_import, link_half) = + grandpa::block_import::<_, _, _, node_runtime::RuntimeApi, _, _>( + client.clone(), client.clone(), select_chain + )?; + let justification_import = block_import.clone(); + + let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + babe::Config::get_or_compute(&*client)?, + block_import, + Some(Box::new(justification_import)), + None, + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + import_setup = Some((babe_block_import.clone(), link_half, babe_link)); + tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + + Ok(import_queue) + })? + .with_rpc_extensions(|client, pool| { + use node_rpc::accounts::{Accounts, AccountsApi}; + + let mut io = jsonrpc_core::IoHandler::::default(); + io.extend_with( + AccountsApi::to_delegate(Accounts::new(client, pool)) + ); + io + })?; + + (builder, import_setup, inherent_data_providers, tasks_to_spawn) + }} +} + +/// Builds a new service for a full client. +pub fn new_full(config: Configuration) +-> Result { + + let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config); + + let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .build()?; + + let (block_import, link_half, babe_link) = import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } + } + + if service.config().roles.is_authority() { + let proposer = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(ServiceError::SelectChainRequired)?; + + let babe_config = babe::BabeParams { + config: Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring: service.config().force_authoring, + time_source: babe_link, + }; + + let babe = start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + service.spawn_task(Box::new(select)); + } + + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let telemetry_on_connect = TelemetryOnConnect { + telemetry_connection_sinks: service.telemetry_on_connect_stream(), + }; + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(telemetry_on_connect), + }; + service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?)); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; + }, + } + + Ok(service) +} + +/// Builds a new service for a light client. +pub fn new_light(config: Configuration) +-> Result { + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, client| { + #[allow(deprecated)] + Ok(LongestChain::new(client.backend().clone())) + })? + .with_transaction_pool(|config, client| + Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) + )? + .with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| { + #[allow(deprecated)] + let fetch_checker = client.backend().blockchain().fetcher() + .upgrade() + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( + client.clone(), Arc::new(fetch_checker), client.clone() + )?; + + let finality_proof_import = block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let (import_queue, ..) 
= import_queue( + Config::get_or_compute(&*client)?, + block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + client, + inherent_data_providers.clone(), + Some(transaction_pool) + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) + )? + .with_rpc_extensions(|client, pool| { + use node_rpc::accounts::{Accounts, AccountsApi}; + + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with( + AccountsApi::to_delegate(Accounts::new(client, pool)) + ); + io + })? + .build() +} + construct_service_factory! { struct Factory { Block = Block, From 8e623c75330de6aaf9e21666523fd792130a3da1 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 12 Aug 2019 16:53:23 +0200 Subject: [PATCH 14/32] Transition transaction-factory to the new service factory This is technically a breaking change, but the transaction-factory crate is only ever used from within substrate-node, which this commit updates as well. --- core/service/src/factory.rs | 10 +++ node/cli/src/lib.rs | 10 ++- .../transaction-factory/src/complex_mode.rs | 25 +++---- test-utils/transaction-factory/src/lib.rs | 66 ++++++++++--------- .../transaction-factory/src/simple_modes.rs | 25 +++---- 5 files changed, 77 insertions(+), 59 deletions(-) diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index 3a96c7b1377e1..3d02ce0b9c481 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -218,6 +218,16 @@ where TGen: Serialize + DeserializeOwned + BuildStorage { impl ServiceBuilder { + /// Returns a reference to the client that was stored in this builder. + pub fn client(&self) -> &Arc { + &self.client + } + + /// Returns a reference to the select-chain that was stored in this builder. + pub fn select_chain(&self) -> Option<&TSc> { + self.select_chain.as_ref() + } + /// Defines which head-of-chain strategy to use. 
pub fn with_opt_select_chain( mut self, diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index a33d70b46a0fe..2b12d028e5bdd 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -187,7 +187,7 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(CustomSubcommands::Factory(cli_args)) => { - let mut config = cli::create_config_with_db_path( + let mut config = cli::create_config_with_db_path::<(), _, _>( load_spec, &cli_args.shared_params, &version, @@ -209,9 +209,13 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul cli_args.num, cli_args.rounds, ); - transaction_factory::factory::>( + + let service_builder = new_full_start!(config).0; + transaction_factory::factory::, _, _, _, _, _>( factory_state, - config, + service_builder.client(), + service_builder.select_chain() + .expect("The select_chain is always initialized by new_full_start!; QED") ).map_err(|e| format!("Error in transaction factory: {}", e))?; Ok(()) diff --git a/test-utils/transaction-factory/src/complex_mode.rs b/test-utils/transaction-factory/src/complex_mode.rs index 85b12248d80ba..ed76a66b09083 100644 --- a/test-utils/transaction-factory/src/complex_mode.rs +++ b/test-utils/transaction-factory/src/complex_mode.rs @@ -41,29 +41,30 @@ use std::sync::Arc; use log::info; +use client::Client; use client::block_builder::api::BlockBuilder; use client::runtime_api::ConstructRuntimeApi; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, One, Zero}; -use substrate_service::{ - FactoryBlock, FullClient, ServiceFactory, ComponentClient, FullComponents -}; use crate::{RuntimeAdapter, create_block}; -pub fn next( +pub fn next( factory_state: &mut RA, - client: &Arc>>, + client: &Arc>, version: u32, genesis_hash: ::Hash, prior_block_hash: ::Hash, - prior_block_id: BlockId, -) -> Option<::Block> + prior_block_id: BlockId, +) -> Option where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - FullClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilder, + RtApi: ConstructRuntimeApi> + Send + Sync, RA: RuntimeAdapter, { let total = factory_state.start_number() + factory_state.num() * factory_state.rounds(); @@ -102,7 +103,7 @@ where let inherents = client.runtime_api().inherent_extrinsics(&prior_block_id, inherents) .expect("Failed to create inherent extrinsics"); - let block = create_block::(&client, transfer, inherents); + let block = create_block::(&client, transfer, inherents); info!( "Created block {} with hash {}. 
Transferring {} from {} to {}.", factory_state.block_no() + RA::Number::one(), diff --git a/test-utils/transaction-factory/src/lib.rs b/test-utils/transaction-factory/src/lib.rs index 16bb08a2b436d..5d63f906a73cf 100644 --- a/test-utils/transaction-factory/src/lib.rs +++ b/test-utils/transaction-factory/src/lib.rs @@ -26,22 +26,19 @@ use std::fmt::Display; use log::info; -use client::block_builder::api::BlockBuilder; -use client::runtime_api::ConstructRuntimeApi; +use client::{Client, block_builder::api::BlockBuilder, runtime_api::ConstructRuntimeApi}; use consensus_common::{ BlockOrigin, BlockImportParams, InherentData, ForkChoiceStrategy, SelectChain }; use consensus_common::block_import::BlockImport; use codec::{Decode, Encode}; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::generic::BlockId; use sr_primitives::traits::{ Block as BlockT, Header as HeaderT, ProvideRuntimeApi, SimpleArithmetic, One, Zero, }; -use substrate_service::{ - FactoryBlock, FactoryFullConfiguration, FullClient, new_client, - ServiceFactory, ComponentClient, FullComponents}; pub use crate::modes::Mode; pub mod modes; @@ -95,15 +92,19 @@ pub trait RuntimeAdapter { /// Manufactures transactions. The exact amount depends on /// `mode`, `num` and `rounds`. -pub fn factory( +pub fn factory( mut factory_state: RA, - mut config: FactoryFullConfiguration, + client: &Arc>, + select_chain: &Sc, ) -> cli::error::Result<()> where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - FullClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilder, + RtApi: ConstructRuntimeApi> + Send + Sync, + Sc: SelectChain, RA: RuntimeAdapter, <::Block as BlockT>::Hash: From, { @@ -112,20 +113,16 @@ where return Err(cli::error::Error::Input(msg)); } - let client = new_client::(&config)?; - - let select_chain = F::build_select_chain(&mut config, client.clone())?; - - let best_header: Result<::Header, cli::error::Error> = + let best_header: Result<::Header, cli::error::Error> = select_chain.best_chain().map_err(|e| format!("{:?}", e).into()); let mut best_hash = best_header?.hash(); - let best_block_id = BlockId::::hash(best_hash); + let best_block_id = BlockId::::hash(best_hash); let version = client.runtime_version_at(&best_block_id)?.spec_version; let genesis_hash = client.block_hash(Zero::zero())? .expect("Genesis block always exists; qed").into(); while let Some(block) = match factory_state.mode() { - Mode::MasterToNToM => complex_mode::next::( + Mode::MasterToNToM => complex_mode::next::( &mut factory_state, &client, version, @@ -133,7 +130,7 @@ where best_hash.into(), best_block_id, ), - _ => simple_modes::next::( + _ => simple_modes::next::( &mut factory_state, &client, version, @@ -143,7 +140,7 @@ where ), } { best_hash = block.header().hash(); - import_block::(&client, block); + import_block(&client, block); info!("Imported block at {}", factory_state.block_no()); } @@ -152,16 +149,18 @@ where } /// Create a baked block from a transfer extrinsic and timestamp inherent. 
-pub fn create_block( - client: &Arc>>, +pub fn create_block( + client: &Arc>, transfer: ::Extrinsic, - inherent_extrinsics: Vec<::Extrinsic>, -) -> ::Block + inherent_extrinsics: Vec<::Extrinsic>, +) -> Block where - F: ServiceFactory, - FullClient: ProvideRuntimeApi, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + RtApi: ConstructRuntimeApi> + Send + Sync, + as ProvideRuntimeApi>::Api: BlockBuilder, RA: RuntimeAdapter, { let mut block = client.new_block(Default::default()).expect("Failed to create new block"); @@ -177,10 +176,13 @@ where block.bake().expect("Failed to bake block") } -fn import_block( - client: &Arc>>, - block: ::Block -) -> () where F: ServiceFactory +fn import_block( + client: &Arc>, + block: Block +) -> () where + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, { let import = BlockImportParams { origin: BlockOrigin::File, diff --git a/test-utils/transaction-factory/src/simple_modes.rs b/test-utils/transaction-factory/src/simple_modes.rs index ec4f484fa9827..bcbb91200657f 100644 --- a/test-utils/transaction-factory/src/simple_modes.rs +++ b/test-utils/transaction-factory/src/simple_modes.rs @@ -36,29 +36,30 @@ use std::sync::Arc; use log::info; +use client::Client; use client::block_builder::api::BlockBuilder; use client::runtime_api::ConstructRuntimeApi; +use primitives::{Blake2Hasher, Hasher}; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, One}; use sr_primitives::generic::BlockId; -use substrate_service::{ - FactoryBlock, FullClient, ServiceFactory, ComponentClient, FullComponents -}; use crate::{Mode, RuntimeAdapter, create_block}; -pub fn next( +pub fn next( factory_state: &mut RA, - client: &Arc>>, + client: &Arc>, version: u32, genesis_hash: ::Hash, prior_block_hash: ::Hash, - prior_block_id: BlockId, -) -> Option<::Block> + prior_block_id: BlockId, +) -> Option where - F: ServiceFactory, - F::RuntimeApi: ConstructRuntimeApi, FullClient>, - FullClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: BlockBuilder>, + Block: BlockT::Out>, + Exec: client::CallExecutor + Send + Sync + Clone, + Backend: client::backend::Backend + Send, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: BlockBuilder, + RtApi: ConstructRuntimeApi> + Send + Sync, RA: RuntimeAdapter, { if factory_state.block_no() >= factory_state.num() { @@ -93,7 +94,7 @@ where let inherents = client.runtime_api().inherent_extrinsics(&prior_block_id, inherents) .expect("Failed to create inherent extrinsics"); - let block = create_block::(&client, transfer, inherents); + let block = create_block::(&client, transfer, inherents); factory_state.set_block_no(factory_state.block_no() + RA::Number::one()); From 3d8f9cca7de22b085e329e6bc832cbc79d875bfb Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 12 Aug 2019 17:09:06 +0200 Subject: [PATCH 15/32] Remove old service factory --- node/cli/src/lib.rs | 2 +- node/cli/src/service.rs | 256 +--------------------------------------- 2 files changed, 3 insertions(+), 255 deletions(-) diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 2b12d028e5bdd..5008061c86bc2 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -28,7 +28,7 @@ mod factory_impl; use tokio::prelude::Future; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; pub use cli::{VersionInfo, 
IntoExit, NoCustom, SharedParams, ExecutionStrategyParam}; -use substrate_service::{AbstractService, ServiceFactory, Roles as ServiceRoles}; +use substrate_service::{AbstractService, Roles as ServiceRoles}; use log::info; use structopt::{StructOpt, clap::App}; use cli::{AugmentClap, GetLogFilter, parse_and_prepare, ParseAndPrepare}; diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index b0c336eb95bc4..209acde4232f1 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use std::time::Duration; -use babe::{import_queue, start_babe, BabeImportQueue, Config}; +use babe::{import_queue, start_babe, Config}; use client::{self, LongestChain}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_executor; @@ -29,14 +29,11 @@ use futures::prelude::*; use node_primitives::Block; use node_runtime::{GenesisConfig, RuntimeApi}; use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, - config::Configuration, AbstractService, ServiceBuilder, error::{Error as ServiceError} + AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; -use substrate_service::construct_service_factory; use substrate_service::TelemetryOnConnect; construct_simple_protocol! { @@ -44,44 +41,6 @@ construct_simple_protocol! { pub struct NodeProtocol where Block = Block { } } -type BabeBlockImportForService = babe::BabeBlockImport< - FullBackend, - FullExecutor, - ::Block, - grandpa::BlockImportForService, - ::RuntimeApi, - client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, ->; - -/// Node specific configuration -pub struct NodeConfig { - /// GRANDPA and BABE connection to import block. - // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub import_setup: Option<( - BabeBlockImportForService, - grandpa::LinkHalfForService, - babe::BabeLink, - )>, - /// Tasks that were created by previous setup steps and should be spawned. - pub tasks_to_spawn: Option + Send>>>, - inherent_data_providers: InherentDataProviders, -} - -impl Default for NodeConfig where F: substrate_service::ServiceFactory { - fn default() -> NodeConfig { - NodeConfig { - import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - tasks_to_spawn: None, - } - } -} - /// Starts a `ServiceBuilder` for a full service. 
/// /// Use this macro if you don't actually need the full service, but just the builder in order to @@ -297,217 +256,6 @@ pub fn new_light(config: Configuration, FullExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - LightTransactionPoolApi = - transaction_pool::ChainApi< - client::Client, LightExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - Genesis = GenesisConfig, - Configuration = NodeConfig, - FullService = FullComponents { - |config: FactoryFullConfiguration| FullComponents::::new(config) - }, - AuthoritySetup = { - |mut service: Self::FullService| { - let (block_import, link_half, babe_link) = - service.config_mut().custom.import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); - } - } - - if service.config().roles.is_authority() { - let proposer = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: service.config() - .custom.inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - - // the BABE authoring task is considered infallible, i.e. if it - // fails we take down the service with it. - service.spawn_essential_task(select); - } - - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; - - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), - inherent_data_providers: - service.config().custom.inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. 
- service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &service.config().custom.inherent_data_providers, - service.network(), - )?; - }, - } - - Ok(service) - } - }, - LightService = LightComponents - { |config| >::new(config) }, - FullImportQueue = BabeImportQueue - { - | - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Self::SelectChain, - transaction_pool: Option>>, - | - { - let (block_import, link_half) = - grandpa::block_import::<_, _, _, RuntimeApi, FullClient, _>( - client.clone(), client.clone(), select_chain - )?; - let justification_import = block_import.clone(); - - let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue( - Config::get_or_compute(&*client)?, - block_import, - Some(Box::new(justification_import)), - None, - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - transaction_pool, - )?; - - config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]); - - Ok(import_queue) - }}, - LightImportQueue = BabeImportQueue - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - let fetch_checker = client.backend().blockchain().fetcher() - .upgrade() - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient>( - client.clone(), Arc::new(fetch_checker), client.clone() - )?; - - let finality_proof_import = block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) = import_queue::<_, _, _, _, _, _, TransactionPool>( - Config::get_or_compute(&*client)?, - block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - None, - )?; - - Ok((import_queue, finality_proof_request_builder)) - }}, - SelectChain = LongestChain, Self::Block> - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - Ok(LongestChain::new(client.backend().clone())) - } - }, - FinalityProofProvider = { |client: Arc>| { - Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)) - }}, - - RpcExtensions = jsonrpc_core::IoHandler - { |client, pool| { - use node_rpc::accounts::{Accounts, AccountsApi}; - - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with( - AccountsApi::to_delegate(Accounts::new(client, pool)) - ); - io - }}, - } -} - - #[cfg(test)] mod tests { use std::sync::Arc; From f290efe05fcf5905b3c076cb4bf8a7fe7c53cca0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 11:05:09 +0200 Subject: [PATCH 16/32] Adjust the AbstractService trait to be more usable We slightly change the trait bounds in order to make all the methods usable. 
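As an illustration of what the adjusted bounds buy (a minimal sketch, not part of the patch; the helper name is invented), code that is generic over any `AbstractService` can now call inherent methods on the concrete network handle returned by `network()` instead of going through an opaque associated type:

    // Sketch only: assumes the `AbstractService` trait exactly as adjusted by this
    // patch, where `network()` returns the concrete `network::NetworkService` handle.
    use substrate_service::AbstractService;

    fn log_local_peer_id<S: AbstractService>(service: &S) {
        // `local_peer_id()` is an inherent method of `NetworkService`; the service
        // tests later in this series rely on it in the same way.
        log::info!("Local peer id: {:?}", service.network().local_peer_id());
    }
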
--- core/service/src/lib.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 88bfda3d4ca56..97ffb93574aa1 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -41,7 +41,7 @@ use exit_future::Signal; use futures::prelude::*; use futures03::stream::{StreamExt as _, TryStreamExt as _}; use keystore::Store as Keystore; -use network::{NetworkState, NetworkStateInfo}; +use network::{NetworkService, NetworkState, NetworkStateInfo, specialization::NetworkSpecialization}; use log::{log, info, warn, debug, error, Level}; use codec::{Encode, Decode}; use primitives::{Blake2Hasher, H256}; @@ -65,7 +65,7 @@ pub use components::{ ServiceFactory, FullBackend, FullExecutor, LightBackend, LightExecutor, Components, PoolApi, ComponentClient, ComponentOffchainStorage, ComponentBlock, FullClient, LightClient, FullComponents, LightComponents, - CodeExecutor, NetworkService, FactoryChainSpec, FactoryBlock, + CodeExecutor, NetworkService as ComponentNetworkService, FactoryChainSpec, FactoryBlock, FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis, ComponentExHash, ComponentExtrinsic, FactoryExtrinsic, InitialSessionKeys, }; @@ -87,7 +87,7 @@ pub struct Service { ComponentClient, Components::SelectChain, NetworkStatus>, - NetworkService, + ComponentNetworkService, TransactionPool, offchain::OffchainWorkers< ComponentClient, @@ -560,9 +560,9 @@ pub trait AbstractService: 'static + Future + /// Chain selection algorithm. type SelectChain; /// API of the transaction pool. - type TransactionPoolApi: ChainApi; - /// Network service. - type NetworkService; + type TransactionPoolApi: ChainApi; + /// Network specialization. + type NetworkSpecialization: NetworkSpecialization; /// Get event stream for telemetry connection established events. fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications; @@ -608,7 +608,7 @@ pub trait AbstractService: 'static + Future + fn select_chain(&self) -> Option; /// Get shared network instance. - fn network(&self) -> Arc; + fn network(&self) -> Arc>; /// Returns a receiver that periodically receives a status of the network. 
fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)>; @@ -628,7 +628,7 @@ where FactoryFullConfiguration: Send { ComponentClient, Components::SelectChain, NetworkStatus>, - NetworkService, + ComponentNetworkService, TransactionPool, offchain::OffchainWorkers< ComponentClient, @@ -649,17 +649,17 @@ where FactoryFullConfiguration: Send { } } -impl AbstractService for - NewService, TSc, NetworkStatus, TNet, TransactionPool, TOc> +impl AbstractService for + NewService, TSc, NetworkStatus, NetworkService, TransactionPool, TOc> where TCfg: 'static + Send, TBl: BlockT, TBackend: 'static + client::backend::Backend, TExec: 'static + client::CallExecutor + Send + Sync + Clone, TRtApi: 'static + Send + Sync, TSc: 'static + Clone + Send, - TNet: 'static + Send + Sync, - TExPoolApi: 'static + ChainApi, + TExPoolApi: 'static + ChainApi, TOc: 'static + Send + Sync, + TNetSpec: NetworkSpecialization, { type Block = TBl; type Backend = TBackend; @@ -668,7 +668,7 @@ where TCfg: 'static + Send, type Config = TCfg; type SelectChain = TSc; type TransactionPoolApi = TExPoolApi; - type NetworkService = TNet; + type NetworkSpecialization = TNetSpec; fn config(&self) -> &Self::Config { &self.config @@ -724,7 +724,7 @@ where TCfg: 'static + Send, self.select_chain.clone() } - fn network(&self) -> Arc { + fn network(&self) -> Arc> { self.network.clone() } @@ -821,7 +821,7 @@ where T: 'static + Deref + DerefMut + Future + Send + type Config = <::Target as AbstractService>::Config; type SelectChain = <::Target as AbstractService>::SelectChain; type TransactionPoolApi = <::Target as AbstractService>::TransactionPoolApi; - type NetworkService = <::Target as AbstractService>::NetworkService; + type NetworkSpecialization = <::Target as AbstractService>::NetworkSpecialization; fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { (**self).telemetry_on_connect_stream() @@ -867,7 +867,7 @@ where T: 'static + Deref + DerefMut + Future + Send + (**self).select_chain() } - fn network(&self) -> Arc { + fn network(&self) -> Arc> { (**self).network() } From 8107b285c2e7b75d1558dc1b0243cd1b0c662a86 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 11:05:39 +0200 Subject: [PATCH 17/32] Make substrate-service-test compile --- core/service/test/src/lib.rs | 186 +++++++++++++++++++---------------- 1 file changed, 103 insertions(+), 83 deletions(-) diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index c2895c5329496..84576908d3387 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -27,32 +27,31 @@ use tempdir::TempDir; use tokio::{runtime::Runtime, prelude::FutureExt}; use tokio::timer::Interval; use service::{ - ServiceFactory, + AbstractService, + ChainSpec, Configuration, - FactoryFullConfiguration, - FactoryChainSpec, Roles, - FactoryExtrinsic, + Error, }; use network::{multiaddr, Multiaddr}; use network::config::{NetworkConfiguration, TransportConfig, NodeKeyConfig, Secret, NonReservedPeerMode}; -use sr_primitives::generic::BlockId; +use sr_primitives::{generic::BlockId, traits::Block as BlockT}; use consensus::{BlockImportParams, BlockImport}; /// Maximum duration of single wait call. 
const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet { +struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, SyncService, Multiaddr)>, - full_nodes: Vec<(usize, SyncService, Multiaddr)>, - light_nodes: Vec<(usize, SyncService, Multiaddr)>, - chain_spec: FactoryChainSpec, + authority_nodes: Vec<(usize, SyncService, Multiaddr)>, + full_nodes: Vec<(usize, SyncService, Multiaddr)>, + light_nodes: Vec<(usize, SyncService, Multiaddr)>, + chain_spec: ChainSpec, base_port: u16, nodes: usize, } -/// Wraps around an `Arc>` and implements `Future`. +/// Wraps around an `Arc` and implements `Future`. pub struct SyncService(Arc>); impl SyncService { @@ -82,15 +81,17 @@ impl> Future for SyncService { } } -impl TestNet { +impl TestNet +where F: Send + 'static, L: Send +'static +{ pub fn run_until_all_full( &mut self, full_predicate: FP, light_predicate: LP, ) where - FP: Send + Fn(usize, &SyncService) -> bool + 'static, - LP: Send + Fn(usize, &SyncService) -> bool + 'static, + FP: Send + Fn(usize, &SyncService) -> bool + 'static, + LP: Send + Fn(usize, &SyncService) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); @@ -125,14 +126,14 @@ impl TestNet { } } -fn node_config ( +fn node_config ( index: usize, - spec: &FactoryChainSpec, + spec: &ChainSpec, role: Roles, key_seed: Option, base_port: u16, root: &TempDir, -) -> FactoryFullConfiguration +) -> Configuration<(), G> { let root = root.path().join(format!("node-{}", index)); @@ -194,18 +195,18 @@ fn node_config ( } } -impl TestNet where - F::FullService: Future, - F::LightService: Future, +impl TestNet where + F: AbstractService, + L: AbstractService, { fn new( temp: &TempDir, - spec: FactoryChainSpec, - full: usize, - light: usize, - authorities: Vec, + spec: ChainSpec, + full: impl Iterator) -> Result>, + light: impl Iterator) -> Result>, + authorities: impl Iterator) -> Result)>, base_port: u16 - ) -> TestNet { + ) -> TestNet { let _ = env_logger::try_init(); fdlimit::raise_fd_limit(); let runtime = Runtime::new().expect("Error creating tokio runtime"); @@ -222,70 +223,76 @@ impl TestNet where net } - fn insert_nodes(&mut self, temp: &TempDir, full: usize, light: usize, authorities: Vec) { - let mut nodes = self.nodes; - let base_port = self.base_port; - let spec = &self.chain_spec; + fn insert_nodes( + &mut self, + temp: &TempDir, + full: impl Iterator) -> Result>, + light: impl Iterator) -> Result>, + authorities: impl Iterator) -> Result)> + ) { let executor = self.runtime.executor(); - self.authority_nodes.extend(authorities.iter().enumerate().map(|(index, key)| { - let node_config = node_config::( - index, - &spec, + + for (key, authority) in authorities { + let node_config = node_config( + self.nodes, + &self.chain_spec, Roles::AUTHORITY, - Some(key.clone()), - base_port, + Some(key), + self.base_port, &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_full(node_config).expect("Error creating test node service")); + let service = SyncService::from(authority(node_config).expect("Error creating test node service")); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - ((index + nodes), service, addr) - })); - nodes += authorities.len(); + self.full_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } - self.full_nodes.extend((nodes..nodes + full).map(|index| { - let 
node_config = node_config::(index, &spec, Roles::FULL, None, base_port, &temp); + for full in full { + let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_full(node_config).expect("Error creating test node service")); + let service = SyncService::from(full(node_config).expect("Error creating test node service")); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - (index, service, addr) - })); - nodes += full; + self.full_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } - self.light_nodes.extend((nodes..nodes + light).map(|index| { - let node_config = node_config::(index, &spec, Roles::LIGHT, None, base_port, &temp); + for light in light { + let node_config = node_config(self.nodes, &self.chain_spec, Roles::LIGHT, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(F::new_light(node_config).expect("Error creating test node service")); + let service = SyncService::from(light(node_config).expect("Error creating test node service")); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - (index, service, addr) - })); - nodes += light; - - self.nodes = nodes; + self.light_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } } } -pub fn connectivity(spec: FactoryChainSpec) where - F::FullService: Future, - F::LightService: Future, +pub fn connectivity(spec: ChainSpec, full_builder: Fb, light_builder: Lb) where + Fb: Fn(Configuration<(), G>) -> Result, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result, + L: AbstractService, { const NUM_FULL_NODES: usize = 5; const NUM_LIGHT_NODES: usize = 5; { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); let runtime = { - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), 30400, ); info!("Checking star topology"); @@ -311,12 +318,14 @@ pub fn connectivity(spec: FactoryChainSpec) where { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); { - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec, - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), 30400, ); info!("Checking linked topology"); @@ -345,24 +354,27 @@ pub fn connectivity(spec: FactoryChainSpec) where } } -pub fn sync(spec: FactoryChainSpec, mut block_factory: B, mut extrinsic_factory: E) where - F: ServiceFactory, - F::FullService: Future, - F::LightService: Future, - B: FnMut(&SyncService) -> BlockImportParams, - E: FnMut(&SyncService) -> FactoryExtrinsic, +pub fn sync(spec: ChainSpec, full_builder: Fb, light_builder: Lb, mut block_factory: B, mut extrinsic_factory: E) where + Fb: Fn(Configuration<(), G>) -> Result, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result, + L: AbstractService, + B: FnMut(&SyncService) -> BlockImportParams, + E: FnMut(&SyncService) -> ::Extrinsic, { const NUM_FULL_NODES: usize = 10; // FIXME: BABE light client support is currently not working. const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 512; let temp = TempDir::new("substrate-sync-test").expect("Error creating test dir"); - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES, - NUM_LIGHT_NODES, - vec![], + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), 30500, ); info!("Checking block sync"); @@ -404,21 +416,22 @@ pub fn sync(spec: FactoryChainSpec, mut block_factory: B, mut extrin ); } -pub fn consensus(spec: FactoryChainSpec, authorities: Vec) where - F: ServiceFactory, - F::FullService: Future, - F::LightService: Future, +pub fn consensus(spec: ChainSpec, full_builder: Fb, light_builder: Lb, authorities: impl IntoIterator) where + Fb: Fn(Configuration<(), G>) -> Result, + F: AbstractService, + Lb: Fn(Configuration<(), G>) -> Result, + L: AbstractService, { const NUM_FULL_NODES: usize = 10; const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds let temp = TempDir::new("substrate-conensus-test").expect("Error creating test dir"); - let mut network = TestNet::::new( + let mut network = TestNet::new( &temp, spec.clone(), - NUM_FULL_NODES / 2, - NUM_LIGHT_NODES / 2, - authorities, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg) })), 30600, ); @@ -441,7 +454,14 @@ pub fn consensus(spec: FactoryChainSpec, authorities: Vec) where ); info!("Adding more peers"); - network.insert_nodes(&temp, NUM_FULL_NODES / 2, NUM_LIGHT_NODES / 2, vec![]); + network.insert_nodes( + &temp, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + ); for (_, service, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } From a44e91f7c0124f73c7cfe80090987dcb80f91b51 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 11:14:25 +0200 Subject: [PATCH 18/32] Fix the node-cli tests --- core/service/test/src/lib.rs | 10 +- node/cli/src/chain_spec.rs | 8 +- node/cli/src/service.rs | 233 ++++++++++++++++++----------------- 3 files changed, 129 insertions(+), 122 deletions(-) diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index 84576908d3387..05b49b6c29d66 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -246,7 +246,7 @@ impl TestNet where executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - self.full_nodes.push((self.nodes, service, addr)); + self.authority_nodes.push((self.nodes, service, addr)); self.nodes += 1; } @@ -359,8 +359,8 @@ pub fn sync(spec: ChainSpec, full_builder: Fb, light_b F: AbstractService, Lb: Fn(Configuration<(), G>) -> Result, L: AbstractService, - B: FnMut(&SyncService) -> BlockImportParams, - E: FnMut(&SyncService) -> ::Extrinsic, + B: FnMut(&F) -> BlockImportParams, + E: FnMut(&F) -> ::Extrinsic, { const NUM_FULL_NODES: usize = 10; // FIXME: BABE light client support is currently not working. @@ -385,7 +385,7 @@ pub fn sync(spec: ChainSpec, full_builder: Fb, light_b if i % 128 == 0 { info!("Generating #{}", i); } - let import_data = block_factory(&first_service); + let import_data = block_factory(&first_service.get()); client.import_block(import_data, HashMap::new()).expect("Error importing test block"); } network.full_nodes[0].2.clone() @@ -408,7 +408,7 @@ pub fn sync(spec: ChainSpec, full_builder: Fb, light_b info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); let best_block = BlockId::number(first_service.get().client().info().chain.best_number); - let extrinsic = extrinsic_factory(&first_service); + let extrinsic = extrinsic_factory(&first_service.get()); first_service.get().transaction_pool().submit_one(&best_block, extrinsic).unwrap(); network.run_until_all_full( |_index, service| service.get().transaction_pool().ready().count() == 1, diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index fca4c78b892ee..f83958eef482c 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -350,8 +350,8 @@ pub fn local_testnet_config() -> ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; + use crate::service::{new_full, new_light}; use service_test; - use crate::service::Factory; fn local_testnet_genesis_instant_single() -> GenesisConfig { testnet_genesis( @@ -395,6 +395,10 @@ pub(crate) mod tests { #[test] #[ignore] fn test_connectivity() { - service_test::connectivity::(integration_test_config_with_two_authorities()); + service_test::connectivity( + integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), + ); } } diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 209acde4232f1..5fe44f86f8763 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -275,9 +275,9 @@ mod tests { use timestamp; use finality_tracker; use keyring::AccountKeyring; - use substrate_service::ServiceFactory; + use substrate_service::AbstractService; use 
service_test::SyncService; - use crate::service::Factory; + use crate::service::{new_full, new_light}; #[cfg(feature = "rhd")] fn test_sync() { @@ -332,14 +332,16 @@ mod tests { let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); OpaqueExtrinsic(v) }; - service_test::sync::( + service_test::sync( chain_spec::integration_test_config(), + |config| new_full(config), + |config| new_light(config), block_factory, extrinsic_factory, ); } - #[test] + /*#[test] #[ignore] fn test_sync() { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); @@ -350,130 +352,131 @@ mod tests { let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); + // For the block factory let mut slot_num = 1u64; - let block_factory = |service: &SyncService<::FullService>| { - let service = service.get(); - let mut inherent_data = service - .config() - .custom - .inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); - inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); - - let parent_id = BlockId::number(service.client().info().chain.best_number); - let parent_header = service.client().header(&parent_id).unwrap().unwrap(); - let mut proposer_factory = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let mut digest = Digest::::default(); - - // even though there's only one authority some slots might be empty, - // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { - inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); - if let Some(babe_pre_digest) = babe::test_helpers::claim_slot( - slot_num, - &parent_header, - &*service.client(), - (278, 1000), - &keystore, - ) { - break babe_pre_digest; - } - - slot_num += 1; - }; - - digest.push(::babe_pre_digest(babe_pre_digest)); - - let mut proposer = proposer_factory.init(&parent_header).unwrap(); - let new_block = futures03::executor::block_on(proposer.propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - )).expect("Error making test block"); - - let (new_header, new_body) = new_block.deconstruct(); - let pre_hash = new_header.hash(); - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let to_sign = pre_hash.encode(); - let signature = alice.sign(&to_sign[..]); - let item = ::babe_seal( - signature.into(), - ); - slot_num += 1; - - BlockImportParams { - origin: BlockOrigin::File, - header: new_header, - justification: None, - post_digests: vec![item], - body: Some(new_body), - finalized: true, - auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } - }; + // For the extrinsics factory let bob = Arc::new(AccountKeyring::Bob.pair()); let charlie = Arc::new(AccountKeyring::Charlie.pair()); - let mut index = 0; - let extrinsic_factory = |service: &SyncService<::FullService>| { - let amount = 5 * CENTS; - let to = AddressPublic::from_raw(bob.public().0); - let from = AddressPublic::from_raw(charlie.public().0); - let genesis_hash = service.get().client().block_hash(0).unwrap().unwrap(); - let best_block_id = BlockId::number(service.get().client().info().chain.best_number); - let version = service.get().client().runtime_version_at(&best_block_id).unwrap().spec_version; - let signer = charlie.clone(); - - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let check_version = system::CheckVersion::new(); - let check_genesis = system::CheckGenesis::new(); - let check_era = system::CheckEra::from(Era::Immortal); - let check_nonce = system::CheckNonce::from(index); - let check_weight = system::CheckWeight::new(); - let take_fees = balances::TakeFees::from(0); - let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees); - - let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash); - let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 { - signer.sign(&blake2_256(payload)[..]) - } else { - signer.sign(payload) - }); - let xt = UncheckedExtrinsic::new_signed( - raw_payload.0, - from.into(), - signature.into(), - extra, - ).encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - index += 1; - OpaqueExtrinsic(v) - }; - - service_test::sync::( + service_test::sync( chain_spec, - block_factory, - extrinsic_factory, + |config| new_full(config), + |config| new_light(config), + |service| { + let service = service.get(); + let mut inherent_data = service + .config() + .custom + .inherent_data_providers + .create_inherent_data() + .expect("Creates inherent data."); + inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); + + let parent_id = BlockId::number(service.client().info().chain.best_number); + let parent_header = service.client().header(&parent_id).unwrap().unwrap(); + let mut proposer_factory = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let mut digest = Digest::::default(); + + // even though there's only one authority some slots might be empty, + // so we must keep trying the next slots until we can claim one. 
+ let babe_pre_digest = loop { + inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); + if let Some(babe_pre_digest) = babe::test_helpers::claim_slot( + slot_num, + &parent_header, + &*service.client(), + (278, 1000), + &keystore, + ) { + break babe_pre_digest; + } + + slot_num += 1; + }; + + digest.push(::babe_pre_digest(babe_pre_digest)); + + let mut proposer = proposer_factory.init(&parent_header).unwrap(); + let new_block = futures03::executor::block_on(proposer.propose( + inherent_data, + digest, + std::time::Duration::from_secs(1), + )).expect("Error making test block"); + + let (new_header, new_body) = new_block.deconstruct(); + let pre_hash = new_header.hash(); + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let to_sign = pre_hash.encode(); + let signature = alice.sign(&to_sign[..]); + let item = ::babe_seal( + signature.into(), + ); + slot_num += 1; + + BlockImportParams { + origin: BlockOrigin::File, + header: new_header, + justification: None, + post_digests: vec![item], + body: Some(new_body), + finalized: true, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + } + }, + |service| { + let amount = 5 * CENTS; + let to = AddressPublic::from_raw(bob.public().0); + let from = AddressPublic::from_raw(charlie.public().0); + let genesis_hash = service.get().client().block_hash(0).unwrap().unwrap(); + let best_block_id = BlockId::number(service.get().client().info().chain.best_number); + let version = service.get().client().runtime_version_at(&best_block_id).unwrap().spec_version; + let signer = charlie.clone(); + + let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); + + let check_version = system::CheckVersion::new(); + let check_genesis = system::CheckGenesis::new(); + let check_era = system::CheckEra::from(Era::Immortal); + let check_nonce = system::CheckNonce::from(index); + let check_weight = system::CheckWeight::new(); + let take_fees = balances::TakeFees::from(0); + let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees); + + let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash); + let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 { + signer.sign(&blake2_256(payload)[..]) + } else { + signer.sign(payload) + }); + let xt = UncheckedExtrinsic::new_signed( + raw_payload.0, + from.into(), + signature.into(), + extra, + ).encode(); + let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); + + index += 1; + OpaqueExtrinsic(v) + }, ); - } + }*/ #[test] #[ignore] fn test_consensus() { - use super::Factory; - - service_test::consensus::( + service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), vec![ "//Alice".into(), "//Bob".into(), From 00e5e358e5866fe6130330d01ca7f2f0ebf76d91 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 11:53:52 +0200 Subject: [PATCH 19/32] Remove the old API --- core/cli/src/lib.rs | 124 +----------- core/finality-grandpa/Cargo.toml | 6 +- core/finality-grandpa/src/lib.rs | 4 - .../src/service_integration.rs | 49 ----- core/service/src/chain_ops.rs | 62 +----- core/service/src/components.rs | 190 ++---------------- core/service/src/lib.rs | 103 ++-------- node-template/src/service.rs | 5 + node/cli/src/service.rs | 2 +- 9 files changed, 45 insertions(+), 500 deletions(-) delete mode 100644 core/finality-grandpa/src/service_integration.rs 
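The helpers removed here (the `ServiceFactory`-driven `run` methods, `parse_and_execute`, `new_client`, and the `chain_ops` free functions) are superseded by the `ServiceBuilder` path. A rough sketch of the replacement pattern, assuming a `config: Configuration` is already in hand and mirroring the node/cli wiring from earlier in this series (`new_full_start!` is that node's macro, so the names are node-specific):

    // Sketch only: chain tooling that used to call `new_client::<Factory>(&config)`
    // now builds the service builder and borrows the client and select-chain from it.
    let builder = new_full_start!(config).0;
    let client = builder.client().clone();
    let select_chain = builder.select_chain()
        .expect("new_full_start! always registers a select chain; qed")
        .clone();
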
diff --git a/core/cli/src/lib.rs b/core/cli/src/lib.rs index b5e5eb0c30cfe..6e9955ca1a48b 100644 --- a/core/cli/src/lib.rs +++ b/core/cli/src/lib.rs @@ -28,10 +28,9 @@ pub mod informant; use client::ExecutionStrategies; use service::{ - config::Configuration, ServiceFactory, + config::Configuration, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert, - FactoryFullConfiguration, RuntimeGenesis, - FactoryGenesis, PruningMode, ChainSpec, + RuntimeGenesis, PruningMode, ChainSpec, }; use network::{ self, multiaddr::Protocol, @@ -317,33 +316,6 @@ pub struct ParseAndPrepareExport<'a> { } impl<'a> ParseAndPrepareExport<'a> { - /// Runs the command and exports from the chain. - pub fn run( - self, - spec_factory: S, - exit: E, - ) -> error::Result<()> - where S: FnOnce(&str) -> Result>>, String>, - F: ServiceFactory, - E: IntoExit - { - let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; - - info!("DB path: {}", config.database_path.display()); - let from = self.params.from.unwrap_or(1); - let to = self.params.to; - let json = self.params.json; - - let file: Box = match self.params.output { - Some(filename) => Box::new(File::create(filename)?), - None => Box::new(stdout()), - }; - - service::chain_ops::export_blocks::( - config, exit.into_exit(), file, from.into(), to.map(Into::into), json - ).map_err(Into::into) - } - /// Runs the command and exports from the chain. pub fn run_with_builder( self, @@ -382,37 +354,6 @@ pub struct ParseAndPrepareImport<'a> { } impl<'a> ParseAndPrepareImport<'a> { - /// Runs the command and imports to the chain. - pub fn run( - self, - spec_factory: S, - exit: E, - ) -> error::Result<()> - where S: FnOnce(&str) -> Result>>, String>, - F: ServiceFactory, - E: IntoExit - { - let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; - config.execution_strategies = ExecutionStrategies { - importing: self.params.execution.into(), - other: self.params.execution.into(), - ..Default::default() - }; - - let file: Box = match self.params.input { - Some(filename) => Box::new(File::open(filename)?), - None => { - let mut buffer = Vec::new(); - stdin().read_to_end(&mut buffer)?; - Box::new(Cursor::new(buffer)) - }, - }; - - let fut = service::chain_ops::import_blocks::(config, exit.into_exit(), file)?; - tokio::run(fut); - Ok(()) - } - /// Runs the command and imports to the chain. pub fn run_with_builder( self, @@ -505,18 +446,6 @@ pub struct ParseAndPrepareRevert<'a> { } impl<'a> ParseAndPrepareRevert<'a> { - /// Runs the command and reverts the chain. - pub fn run( - self, - spec_factory: S - ) -> error::Result<()> - where S: FnOnce(&str) -> Result>>, String>, - F: ServiceFactory { - let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?; - let blocks = self.params.num; - Ok(service::chain_ops::revert_chain::(config, blocks.into())?) - } - /// Runs the command and reverts the chain. pub fn run_with_builder( self, @@ -535,55 +464,6 @@ impl<'a> ParseAndPrepareRevert<'a> { } } -/// Parse command line interface arguments and executes the desired command. -/// -/// # Return value -/// -/// A result that indicates if any error occurred. -/// If no error occurred and a custom subcommand was found, the subcommand is returned. -/// The user needs to handle this subcommand on its own. -/// -/// # Remarks -/// -/// `CC` is a custom subcommand. This needs to be an `enum`! 
If no custom subcommand is required, -/// `NoCustom` can be used as type here. -/// `RP` are custom parameters for the run command. This needs to be a `struct`! The custom -/// parameters are visible to the user as if they were normal run command parameters. If no custom -/// parameters are required, `NoCustom` can be used as type here. -#[deprecated( - note = "Use parse_and_prepare instead; see the source code of parse_and_execute for how to transition" -)] -pub fn parse_and_execute<'a, F, CC, RP, S, RS, E, I, T>( - spec_factory: S, - version: &VersionInfo, - impl_name: &'static str, - args: I, - exit: E, - run_service: RS, -) -> error::Result> -where - F: ServiceFactory, - S: FnOnce(&str) -> Result>>, String>, - CC: StructOpt + Clone + GetLogFilter, - RP: StructOpt + Clone + AugmentClap, - E: IntoExit, - RS: FnOnce(E, RunCmd, RP, FactoryFullConfiguration) -> Result<(), String>, - I: IntoIterator, - T: Into + Clone, -{ - match parse_and_prepare::(version, impl_name, args) { - ParseAndPrepare::Run(cmd) => cmd.run(spec_factory, exit, run_service), - ParseAndPrepare::BuildSpec(cmd) => cmd.run(spec_factory), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run::(spec_factory, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run::(spec_factory, exit), - ParseAndPrepare::PurgeChain(cmd) => cmd.run(spec_factory), - ParseAndPrepare::RevertChain(cmd) => cmd.run::(spec_factory), - ParseAndPrepare::CustomCommand(cmd) => return Ok(Some(cmd)) - }?; - - Ok(None) -} - /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context /// of an optional network config storage directory. fn node_key_config

<P>(params: NodeKeyParams, net_config_dir: &Option<P>
) diff --git a/core/finality-grandpa/Cargo.toml b/core/finality-grandpa/Cargo.toml index 22237c5a0b5a0..145f23085218d 100644 --- a/core/finality-grandpa/Cargo.toml +++ b/core/finality-grandpa/Cargo.toml @@ -23,7 +23,7 @@ serde_json = "1.0" client = { package = "substrate-client", path = "../client" } inherents = { package = "substrate-inherents", path = "../../core/inherents" } network = { package = "substrate-network", path = "../network" } -service = { package = "substrate-service", path = "../service", optional = true } +service = { package = "substrate-service", path = "../service" } srml-finality-tracker = { path = "../../srml/finality-tracker" } fg_primitives = { package = "substrate-finality-grandpa-primitives", path = "primitives" } grandpa = { package = "finality-grandpa", version = "0.9.0", features = ["derive-codec"] } @@ -37,7 +37,3 @@ babe_primitives = { package = "substrate-consensus-babe-primitives", path = "../ env_logger = "0.6" tokio = "0.1.17" tempfile = "3.1" - -[features] -default = ["service-integration"] -service-integration = ["service"] diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index b79b120e35714..01d7d4ead9d48 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -93,10 +93,6 @@ mod light_import; mod observer; mod until_imported; -#[cfg(feature="service-integration")] -mod service_integration; -#[cfg(feature="service-integration")] -pub use service_integration::{LinkHalfForService, BlockImportForService, BlockImportForLightService}; pub use communication::Network; pub use finality_proof::FinalityProofProvider; pub use light_import::light_block_import; diff --git a/core/finality-grandpa/src/service_integration.rs b/core/finality-grandpa/src/service_integration.rs deleted file mode 100644 index 9f19b9204190b..0000000000000 --- a/core/finality-grandpa/src/service_integration.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2018-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -/// Integrate grandpa finality with substrate service - -use client; -use service::{FullBackend, FullExecutor, LightBackend, LightExecutor, ServiceFactory}; - -pub type BlockImportForService = crate::GrandpaBlockImport< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi, - client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, - ::SelectChain, ->; - -pub type LinkHalfForService = crate::LinkHalf< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi, - ::SelectChain ->; - -pub type BlockImportForLightService = crate::light_import::GrandpaLightBlockImport< - LightBackend, - LightExecutor, - ::Block, - ::RuntimeApi, ->; diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index df943d7597a80..bd649bc8e5038 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -16,17 +16,7 @@ //! Chain utilities. 
-use std::{self, io::{Read, Write, Seek}}; -use futures::prelude::*; -use log::{info, warn}; - -use sr_primitives::generic::BlockId; -use sr_primitives::traits::{SaturatedConversion, Zero, One, Header, NumberFor}; -use consensus_common::import_queue::ImportQueue; - -use crate::components::{self, Components, ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis}; -use crate::new_client; -use codec::{Decode, Encode, IoReader}; +use crate::components::RuntimeGenesis; use crate::error; use crate::chain_spec::ChainSpec; @@ -85,24 +75,6 @@ macro_rules! export_blocks { }} } -/// Export a range of blocks to a binary stream. -pub fn export_blocks( - config: FactoryFullConfiguration, - exit: E, - mut output: W, - from: FactoryBlockNumber, - to: Option>, - json: bool -) -> error::Result<()> - where - F: ServiceFactory, - E: Future + Send + 'static, - W: Write, -{ - let client = new_client::(&config)?; - export_blocks!(client, exit, output, from, to, json) -} - #[macro_export] macro_rules! import_blocks { ($block:ty, $client:ident, $queue:ident, $exit:ident, $input:ident) => {{ @@ -231,27 +203,6 @@ impl Link for WaitLink { }} } -/// Returns a future that import blocks from a binary stream. -pub fn import_blocks( - mut config: FactoryFullConfiguration, - exit: E, - input: R -) -> error::Result> - where F: ServiceFactory, E: Future + Send + 'static, R: Read + Seek, -{ - let client = new_client::(&config)?; - // FIXME #1134 this shouldn't need a mutable config. - let select_chain = components::FullComponents::::build_select_chain(&mut config, client.clone())?; - let (mut queue, _) = components::FullComponents::::build_import_queue( - &mut config, - client.clone(), - select_chain, - None - )?; - - import_blocks!(F::Block, client, queue, exit, input) -} - #[macro_export] macro_rules! revert_chain { ($client:ident, $blocks:ident) => {{ @@ -267,17 +218,6 @@ macro_rules! revert_chain { }} } -/// Revert the chain. -pub fn revert_chain( - config: FactoryFullConfiguration, - blocks: FactoryBlockNumber -) -> error::Result<()> - where F: ServiceFactory, -{ - let client = new_client::(&config)?; - revert_chain!(client, blocks) -} - /// Build a chain spec json pub fn build_spec(spec: ChainSpec, raw: bool) -> error::Result where G: RuntimeGenesis, diff --git a/core/service/src/components.rs b/core/service/src/components.rs index 0b25ed5989e86..cef2677ef3c88 100644 --- a/core/service/src/components.rs +++ b/core/service/src/components.rs @@ -16,135 +16,32 @@ //! Substrate service components. 
-use std::{sync::Arc, ops::Deref, ops::DerefMut}; +use std::sync::Arc; use serde::{Serialize, de::DeserializeOwned}; -use crate::chain_spec::ChainSpec; use keystore::KeyStorePtr; -use client_db; -use client::{self, Client, runtime_api}; -use crate::{error, Service}; -use consensus_common::{import_queue::ImportQueue, SelectChain}; -use network::{ - self, OnDemand, FinalityProofProvider, NetworkStateInfo, config::BoxFinalityProofRequestBuilder -}; -use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; -use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool}; +use client::{Client, runtime_api}; +use crate::error; +use network::NetworkStateInfo; +use transaction_pool::txpool::{self, Pool as TransactionPool}; use sr_primitives::{ BuildStorage, traits::{Block as BlockT, Header as HeaderT, NumberFor, ProvideRuntimeApi}, generic::BlockId }; +<<<<<<< HEAD use crate::config::Configuration; use primitives::{Blake2Hasher, H256, traits::BareCryptoStorePtr}; use rpc::{self, system::SystemInfo}; +======= +use primitives::Blake2Hasher; +use rpc::{self, apis::system::SystemInfo}; +>>>>>>> Remove the old API use futures::{prelude::*, future::Executor}; use futures03::{FutureExt as _, channel::mpsc, compat::Compat}; -// Type aliases. -// These exist mainly to avoid typing `::Foo` all over the code. - -/// Network service type for `Components`. -pub type NetworkService = network::NetworkService< - ComponentBlock, - <::Factory as ServiceFactory>::NetworkProtocol, - ComponentExHash ->; - -/// Code executor type for a factory. -pub type CodeExecutor = NativeExecutor<::RuntimeDispatch>; - -/// Full client backend type for a factory. -pub type FullBackend = client_db::Backend<::Block>; - -/// Full client executor type for a factory. -pub type FullExecutor = client::LocalCallExecutor< - client_db::Backend<::Block>, - CodeExecutor, ->; - -/// Light client backend type for a factory. -pub type LightBackend = client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher, ->; - -/// Light client executor type for a factory. -pub type LightExecutor = client::light::call_executor::RemoteOrLocalCallExecutor< - ::Block, - client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher - >, - client::light::call_executor::RemoteCallExecutor< - client::light::blockchain::Blockchain< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block> - >, - network::OnDemand<::Block>, - >, - client::LocalCallExecutor< - client::light::backend::Backend< - client_db::light::LightStorage<::Block>, - network::OnDemand<::Block>, - Blake2Hasher - >, - CodeExecutor - > ->; - -/// Full client type for a factory. -pub type FullClient = Client, FullExecutor, ::Block, ::RuntimeApi>; - -/// Light client type for a factory. -pub type LightClient = Client, LightExecutor, ::Block, ::RuntimeApi>; - -/// `ChainSpec` specialization for a factory. -pub type FactoryChainSpec = ChainSpec<::Genesis>; - -/// `Genesis` specialization for a factory. -pub type FactoryGenesis = ::Genesis; - -/// `Block` type for a factory. -pub type FactoryBlock = ::Block; - -/// `Extrinsic` type for a factory. -pub type FactoryExtrinsic = <::Block as BlockT>::Extrinsic; - -/// `Number` type for a factory. -pub type FactoryBlockNumber = < as BlockT>::Header as HeaderT>::Number; - -/// Full `Configuration` type for a factory. 
-pub type FactoryFullConfiguration = Configuration<::Configuration, FactoryGenesis>; - -/// Client type for `Components`. -pub type ComponentClient = Client< - ::Backend, - ::Executor, - FactoryBlock<::Factory>, - ::RuntimeApi, ->; - -/// A offchain workers storage backend type. -pub type ComponentOffchainStorage = < - ::Backend as client::backend::Backend, Blake2Hasher> ->::OffchainStorage; - -/// Block type for `Components` -pub type ComponentBlock = <::Factory as ServiceFactory>::Block; - -/// Extrinsic hash type for `Components` -pub type ComponentExHash = <::TransactionPoolApi as txpool::ChainApi>::Hash; - -/// Extrinsic type. -pub type ComponentExtrinsic = as BlockT>::Extrinsic; - -/// Extrinsic pool API type for `Components`. -pub type PoolApi = ::TransactionPoolApi; - /// A set of traits for the runtime genesis config. pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} impl RuntimeGenesis for T {} +<<<<<<< HEAD /// A transport-agnostic handler of the RPC queries. pub type RpcHandler = rpc_servers::RpcHandler; @@ -201,6 +98,8 @@ impl StartRpc for C where } } +======= +>>>>>>> Remove the old API pub(crate) fn start_rpc( client: Arc>, system_send_back: mpsc::UnboundedSender>, @@ -239,15 +138,6 @@ where )) } -/// Something that can maintain transaction pool on every imported block. -pub trait MaintainTransactionPool { - fn maintain_transaction_pool( - id: &BlockId>, - client: &ComponentClient, - transaction_pool: &TransactionPool, - ) -> error::Result<()>; -} - pub(crate) fn maintain_transaction_pool( id: &BlockId, client: &Client, @@ -274,52 +164,6 @@ pub(crate) fn maintain_transaction_pool( Ok(()) } -impl MaintainTransactionPool for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue>, -{ - fn maintain_transaction_pool( - id: &BlockId>, - client: &ComponentClient, - transaction_pool: &TransactionPool, - ) -> error::Result<()> { - maintain_transaction_pool(id, client, transaction_pool) - } -} - -pub trait OffchainWorker { - fn offchain_workers( - number: &FactoryBlockNumber, - offchain: &offchain::OffchainWorkers< - ComponentClient, - ComponentOffchainStorage, - ComponentBlock - >, - pool: &Arc>, - network_state: &Arc, - is_validator: bool, - ) -> error::Result + Send>>; -} - -impl OffchainWorker for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi>, -{ - fn offchain_workers( - number: &FactoryBlockNumber, - offchain: &offchain::OffchainWorkers< - ComponentClient, - ComponentOffchainStorage, - ComponentBlock - >, - pool: &Arc>, - network_state: &Arc, - is_validator: bool, - ) -> error::Result + Send>> { - offchain_workers(number, offchain, pool, network_state, is_validator) - } -} - pub(crate) fn offchain_workers( number: &NumberFor, offchain: &offchain::OffchainWorkers< @@ -346,6 +190,7 @@ where Ok(Box::new(Compat::new(future))) } +<<<<<<< HEAD /// The super trait that combines all required traits a `Service` needs to implement. pub trait ServiceTrait: Deref> @@ -808,10 +653,15 @@ impl Components for LightComponents { } } +======= +/// Alias for a an implementation of `futures::future::Executor`. 
+pub type TaskExecutor = Arc + Send>> + Send + Sync>; + +>>>>>>> Remove the old API #[cfg(test)] mod tests { use super::*; - use consensus_common::BlockOrigin; + use consensus_common::{BlockOrigin, SelectChain}; use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; #[test] diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 97ffb93574aa1..f62eb90c93821 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -36,20 +36,16 @@ use std::time::{Duration, Instant}; use futures::sync::mpsc; use parking_lot::Mutex; -use client::{BlockchainEvents, backend::Backend, runtime_api::BlockT, Client}; +use client::{runtime_api::BlockT, Client}; use exit_future::Signal; use futures::prelude::*; use futures03::stream::{StreamExt as _, TryStreamExt as _}; -use keystore::Store as Keystore; -use network::{NetworkService, NetworkState, NetworkStateInfo, specialization::NetworkSpecialization}; -use log::{log, info, warn, debug, error, Level}; +use network::{NetworkService, NetworkState, specialization::NetworkSpecialization}; +use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; use primitives::{Blake2Hasher, H256}; use sr_primitives::generic::BlockId; -use sr_primitives::traits::{Header, NumberFor, SaturatedConversion}; -use substrate_executor::NativeExecutor; -use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; -use tel::{telemetry, SUBSTRATE_INFO}; +use sr_primitives::traits::NumberFor; pub use self::error::Error; pub use self::factory::{ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert}; @@ -59,6 +55,7 @@ pub use transaction_pool::txpool::{ self, Pool as TransactionPool, Options as TransactionPoolOptions, ChainApi, IntoPoolError }; pub use client::FinalityNotifications; +<<<<<<< HEAD pub use rpc::Metadata as RpcMetadata; pub use components::{ @@ -70,6 +67,10 @@ pub use components::{ ComponentExHash, ComponentExtrinsic, FactoryExtrinsic, InitialSessionKeys, }; use components::{StartRpc, MaintainTransactionPool, OffchainWorker}; +======= +pub use components::RuntimeGenesis; + +>>>>>>> Remove the old API #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] @@ -79,24 +80,6 @@ pub use futures::future::Executor; const DEFAULT_PROTOCOL_ID: &str = "sup"; -/// Substrate service. -pub struct Service { - inner: NewService< - FactoryFullConfiguration, - ComponentBlock, - ComponentClient, - Components::SelectChain, - NetworkStatus>, - ComponentNetworkService, - TransactionPool, - offchain::OffchainWorkers< - ComponentClient, - ComponentOffchainStorage, - ComponentBlock - >, - >, -} - /// Substrate service. pub struct NewService { client: Arc, @@ -134,19 +117,6 @@ pub struct NewService { marker: PhantomData, } -/// Creates bare client without any networking. -pub fn new_client( - config: &FactoryFullConfiguration, -) -> Result>>, error::Error> { - let executor = NativeExecutor::new(config.default_heap_pages); - - components::FullComponents::::build_client( - config, - executor, - None, - ).map(|r| r.0) -} - /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { @@ -485,6 +455,7 @@ macro_rules! new_impl { mod factory; +<<<<<<< HEAD impl Service { /// Creates a new service. pub fn new( @@ -544,6 +515,8 @@ impl Service { } } +======= +>>>>>>> Remove the old API /// Abstraction over a Substrate service. 
pub trait AbstractService: 'static + Future + Executor + Send>> + Send { @@ -620,35 +593,6 @@ pub trait AbstractService: 'static + Future + fn on_exit(&self) -> ::exit_future::Exit; } -impl Deref for Service -where FactoryFullConfiguration: Send { - type Target = NewService< - FactoryFullConfiguration, - ComponentBlock, - ComponentClient, - Components::SelectChain, - NetworkStatus>, - ComponentNetworkService, - TransactionPool, - offchain::OffchainWorkers< - ComponentClient, - ComponentOffchainStorage, - ComponentBlock - >, - >; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl DerefMut for Service -where FactoryFullConfiguration: Send { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - impl AbstractService for NewService, TSc, NetworkStatus, NetworkService, TransactionPool, TOc> where TCfg: 'static + Send, @@ -775,15 +719,6 @@ NewService { } } -impl Future for Service where Components: components::Components { - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll { - self.inner.poll() - } -} - impl Executor + Send>> for NewService { fn execute( @@ -799,17 +734,6 @@ NewService { } } -impl Executor + Send>> - for Service where Components: components::Components -{ - fn execute( - &self, - future: Box + Send> - ) -> Result<(), futures::future::ExecuteError + Send>>> { - self.inner.execute(future) - } -} - impl AbstractService for T where T: 'static + Deref + DerefMut + Future + Send + Executor + Send>>, @@ -1172,6 +1096,7 @@ where } } +<<<<<<< HEAD /// Constructs a service factory with the given name that implements the `ServiceFactory` trait. /// The required parameters are required to be given in the exact order. Some parameters are followed /// by `{}` blocks. These blocks are required and used to initialize the given parameter. @@ -1391,6 +1316,8 @@ macro_rules! construct_service_factory { } } +======= +>>>>>>> Remove the old API #[cfg(test)] mod tests { use super::*; diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 039ed228d65fa..43651e294ee3b 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -1,3 +1,8 @@ +<<<<<<< HEAD +======= +//! Service implementation. Specialized wrapper over Substrate service. + +>>>>>>> Remove the old API #![warn(unused_extern_crates)] //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 5fe44f86f8763..ea0897e7f65a3 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -16,7 +16,7 @@ #![warn(unused_extern_crates)] -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +//! Service implementation. Specialized wrapper over substrate service. 
use std::sync::Arc; use std::time::Duration; From 98252b40441e81123d805f7650cb54b68b2f5002 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 14:41:25 +0200 Subject: [PATCH 20/32] Remove the components module --- core/service/src/chain_ops.rs | 2 +- core/service/src/chain_spec.rs | 2 +- core/service/src/components.rs | 702 --------------------------------- core/service/src/factory.rs | 152 ++++++- core/service/src/lib.rs | 311 +-------------- node-template/src/service.rs | 248 +----------- 6 files changed, 158 insertions(+), 1259 deletions(-) delete mode 100644 core/service/src/components.rs diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index bd649bc8e5038..312fa90f8e849 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -16,7 +16,7 @@ //! Chain utilities. -use crate::components::RuntimeGenesis; +use crate::RuntimeGenesis; use crate::error; use crate::chain_spec::ChainSpec; diff --git a/core/service/src/chain_spec.rs b/core/service/src/chain_spec.rs index 1683876c3f86f..8b35b0bac9581 100644 --- a/core/service/src/chain_spec.rs +++ b/core/service/src/chain_spec.rs @@ -24,7 +24,7 @@ use serde::{Serialize, Deserialize}; use primitives::storage::{StorageKey, StorageData}; use sr_primitives::{BuildStorage, StorageOverlay, ChildrenStorageOverlay}; use serde_json as json; -use crate::components::RuntimeGenesis; +use crate::RuntimeGenesis; use network::Multiaddr; use tel::TelemetryEndpoints; diff --git a/core/service/src/components.rs b/core/service/src/components.rs deleted file mode 100644 index cef2677ef3c88..0000000000000 --- a/core/service/src/components.rs +++ /dev/null @@ -1,702 +0,0 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Substrate service components. - -use std::sync::Arc; -use serde::{Serialize, de::DeserializeOwned}; -use keystore::KeyStorePtr; -use client::{Client, runtime_api}; -use crate::error; -use network::NetworkStateInfo; -use transaction_pool::txpool::{self, Pool as TransactionPool}; -use sr_primitives::{ - BuildStorage, traits::{Block as BlockT, Header as HeaderT, NumberFor, ProvideRuntimeApi}, generic::BlockId -}; -<<<<<<< HEAD -use crate::config::Configuration; -use primitives::{Blake2Hasher, H256, traits::BareCryptoStorePtr}; -use rpc::{self, system::SystemInfo}; -======= -use primitives::Blake2Hasher; -use rpc::{self, apis::system::SystemInfo}; ->>>>>>> Remove the old API -use futures::{prelude::*, future::Executor}; -use futures03::{FutureExt as _, channel::mpsc, compat::Compat}; - -/// A set of traits for the runtime genesis config. -pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} -impl RuntimeGenesis for T {} - -<<<<<<< HEAD -/// A transport-agnostic handler of the RPC queries. -pub type RpcHandler = rpc_servers::RpcHandler; - -/// Something that can create and store initial session keys from given seeds. 
-pub trait InitialSessionKeys { - /// Generate the initial session keys for the given seeds and store them in - /// an internal keystore. - fn generate_initial_session_keys( - client: Arc>, - seeds: Vec, - ) -> error::Result<()>; -} - -impl InitialSessionKeys for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: session::SessionKeys>, -{ - fn generate_initial_session_keys( - client: Arc>, - seeds: Vec, - ) -> error::Result<()> { - session::generate_initial_session_keys(client, seeds).map_err(Into::into) - } -} - -/// Something that can start the RPC service. -pub trait StartRpc { - fn start_rpc( - client: Arc>, - system_send_back: mpsc::UnboundedSender>>, - system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc>, - rpc_extensions: impl rpc::RpcExtension, - keystore: KeyStorePtr, - ) -> RpcHandler; -} - -impl StartRpc for C where - ComponentClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - runtime_api::Metadata> + session::SessionKeys>, -{ - fn start_rpc( - client: Arc>, - system_send_back: mpsc::UnboundedSender>>, - rpc_system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc>, - rpc_extensions: impl rpc::RpcExtension, - keystore: KeyStorePtr, - ) -> RpcHandler { - start_rpc(client, system_send_back, rpc_system_info, task_executor, transaction_pool, rpc_extensions, keystore) - } -} - -======= ->>>>>>> Remove the old API -pub(crate) fn start_rpc( - client: Arc>, - system_send_back: mpsc::UnboundedSender>, - rpc_system_info: SystemInfo, - task_executor: TaskExecutor, - transaction_pool: Arc>, - rpc_extensions: impl rpc::RpcExtension, - keystore: KeyStorePtr, -) -> RpcHandler -where - Block: BlockT::Out>, - Backend: client::backend::Backend + 'static, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::Metadata + session::SessionKeys, - Api: Send + Sync + 'static, - Executor: client::CallExecutor + Send + Sync + Clone + 'static, - PoolApi: txpool::ChainApi + 'static { - use rpc::{chain, state, author, system}; - let subscriptions = rpc::Subscriptions::new(task_executor.clone()); - let chain = chain::Chain::new(client.clone(), subscriptions.clone()); - let state = state::State::new(client.clone(), subscriptions.clone()); - let author = rpc::author::Author::new( - client, - transaction_pool, - subscriptions, - keystore, - ); - let system = system::System::new(rpc_system_info, system_send_back); - - rpc_servers::rpc_handler(( - state::StateApi::to_delegate(state), - chain::ChainApi::to_delegate(chain), - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions, - )) -} - -pub(crate) fn maintain_transaction_pool( - id: &BlockId, - client: &Client, - transaction_pool: &TransactionPool, -) -> error::Result<()> where - Block: BlockT::Out>, - Backend: client::backend::Backend, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue, - Executor: client::CallExecutor, - PoolApi: txpool::ChainApi, -{ - // Avoid calling into runtime if there is nothing to prune from the pool anyway. - if transaction_pool.status().is_empty() { - return Ok(()) - } - - if let Some(block) = client.block(id)? 
{ - let parent_id = BlockId::hash(*block.block.header().parent_hash()); - let extrinsics = block.block.extrinsics(); - transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; - } - - Ok(()) -} - -pub(crate) fn offchain_workers( - number: &NumberFor, - offchain: &offchain::OffchainWorkers< - Client, - >::OffchainStorage, - Block - >, - pool: &Arc>, - network_state: &Arc, - is_validator: bool, -) -> error::Result + Send>> -where - Block: BlockT::Out>, - Backend: client::backend::Backend + 'static, - Api: 'static, - >::OffchainStorage: 'static, - Client: ProvideRuntimeApi + Send + Sync, - as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi, - Executor: client::CallExecutor + 'static, - PoolApi: txpool::ChainApi + 'static, -{ - let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) - .map(|()| Ok(())); - Ok(Box::new(Compat::new(future))) -} - -<<<<<<< HEAD -/// The super trait that combines all required traits a `Service` needs to implement. -pub trait ServiceTrait: - Deref> - + Send - + 'static - + StartRpc - + MaintainTransactionPool - + OffchainWorker - + InitialSessionKeys -{} -impl ServiceTrait for T where - T: Deref> - + Send - + 'static - + StartRpc - + MaintainTransactionPool - + OffchainWorker - + InitialSessionKeys -{} - -/// Alias for a an implementation of `futures::future::Executor`. -pub type TaskExecutor = Arc + Send>> + Send + Sync>; - -/// A collection of types and methods to build a service on top of the substrate service. -pub trait ServiceFactory: 'static + Sized { - /// Block type. - type Block: BlockT; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// Network protocol extensions. - type NetworkProtocol: network::specialization::NetworkSpecialization; - /// Chain runtime. - type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static; - /// Extrinsic pool backend type for the full client. - type FullTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + Send + 'static; - /// Extrinsic pool backend type for the light client. - type LightTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + 'static; - /// Genesis configuration for the runtime. - type Genesis: RuntimeGenesis; - /// Other configuration for service members. - type Configuration: Default; - /// RPC initialisation. - type RpcExtensions: rpc::RpcExtension + Clone; - /// Extended full service type. - type FullService: ServiceTrait>; - /// Extended light service type. - type LightService: ServiceTrait>; - /// ImportQueue for full client - type FullImportQueue: ImportQueue + 'static; - /// ImportQueue for light clients - type LightImportQueue: ImportQueue + 'static; - /// The Fork Choice Strategy for the chain - type SelectChain: SelectChain + 'static; - - //TODO: replace these with a constructor trait. that TransactionPool implements. (#1242) - /// Extrinsic pool constructor for the full client. - fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - /// Extrinsic pool constructor for the light client. - fn build_light_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - - /// Build network protocol. - fn build_network_protocol(config: &FactoryFullConfiguration) - -> Result; - - /// Build finality proof provider for serving network requests on full node. 
- fn build_finality_proof_provider( - client: Arc> - ) -> Result>>, error::Error>; - - /// Build the Fork Choice algorithm for full client - fn build_select_chain( - config: &mut FactoryFullConfiguration, - client: Arc>, - ) -> Result; - - /// Build full service. - fn new_full(config: FactoryFullConfiguration) - -> Result; - /// Build light service. - fn new_light(config: FactoryFullConfiguration) - -> Result; - - /// ImportQueue for a full client - fn build_full_import_queue( - config: &mut FactoryFullConfiguration, - _client: Arc>, - _select_chain: Self::SelectChain, - _transaction_pool: Option>>, - ) -> Result { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// ImportQueue for a light client - fn build_light_import_queue( - config: &mut FactoryFullConfiguration, - _client: Arc> - ) -> Result<(Self::LightImportQueue, BoxFinalityProofRequestBuilder), error::Error> { - if let Some(name) = config.chain_spec.consensus_engine() { - match name { - _ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into()) - } - - } else { - Err("Chain Specification doesn't contain any consensus_engine name".into()) - } - } - - /// Create custom RPC method handlers for full node. - fn build_full_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions; - - /// Create custom RPC method handlers for light node. - fn build_light_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions; -} - -/// A collection of types and function to generalize over full / light client type. -pub trait Components: Sized + 'static { - /// Associated service factory. - type Factory: ServiceFactory; - /// Client backend. - type Backend: 'static + client::backend::Backend, Blake2Hasher>; - /// Client executor. - type Executor: 'static + client::CallExecutor, Blake2Hasher> + Send + Sync + Clone; - /// The type that implements the runtime API. - type RuntimeApi: Send + Sync; - /// The type that can start all runtime-dependent services. - type RuntimeServices: ServiceTrait; - /// The type that can extend the RPC methods. - type RpcExtensions: rpc::RpcExtension + Clone; - // TODO: Traitify transaction pool and allow people to implement their own. (#1242) - /// Extrinsic pool type. - type TransactionPoolApi: 'static + txpool::ChainApi< - Hash = as BlockT>::Hash, - Block = FactoryBlock - >; - /// Our Import Queue - type ImportQueue: ImportQueue> + 'static; - /// The Fork Choice Strategy for the chain - type SelectChain: SelectChain>; - - /// Create client. - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - keystore: Option, - ) -> Result< - ( - Arc>, - Option>>> - ), - error::Error - >; - - /// Create extrinsic pool. - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error>; - - /// Build the queue that imports blocks from the network, and optionally a way for the network - /// to build requests for proofs of finality. - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Option, - _transaction_pool: Option>>, - ) -> Result<(Self::ImportQueue, Option>>), error::Error>; - - /// Finality proof provider for serving network requests. 
- fn build_finality_proof_provider( - client: Arc> - ) -> Result::Block>>>, error::Error>; - - /// Build fork choice selector - fn build_select_chain( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result, error::Error>; - - /// Build RPC extensions - fn build_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions; -} - -/// A struct that implement `Components` for the full client. -pub struct FullComponents { - service: Service>, -} - -impl FullComponents { - /// Create new `FullComponents` - pub fn new( - config: FactoryFullConfiguration - ) -> Result { - Ok( - Self { - service: Service::new(config)?, - } - ) - } -} - -impl Deref for FullComponents { - type Target = Service; - - fn deref(&self) -> &Self::Target { - &self.service - } -} - -impl DerefMut for FullComponents { - fn deref_mut(&mut self) -> &mut Service { - &mut self.service - } -} - -impl Future for FullComponents { - type Item = (); - type Error = super::Error; - - fn poll(&mut self) -> Poll { - self.service.poll() - } -} - -impl Executor + Send>> -for FullComponents { - fn execute( - &self, - future: Box + Send> - ) -> Result<(), futures::future::ExecuteError + Send>>> { - self.service.execute(future) - } -} - -impl Components for FullComponents { - type Factory = Factory; - type Executor = FullExecutor; - type Backend = FullBackend; - type TransactionPoolApi = ::FullTransactionPoolApi; - type ImportQueue = Factory::FullImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::FullService; - type RpcExtensions = Factory::RpcExtensions; - type SelectChain = Factory::SelectChain; - - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - keystore: Option, - ) -> Result< - (Arc>, Option>>>), - error::Error, - > - { - let db_settings = client_db::DatabaseSettings { - cache_size: config.database_cache_size.map(|u| u as usize), - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - path: config.database_path.clone(), - pruning: config.pruning.clone(), - }; - - Ok(( - Arc::new( - client_db::new_client( - db_settings, - executor, - &config.chain_spec, - config.execution_strategies.clone(), - keystore, - )? - ), - None, - )) - } - - fn build_transaction_pool( - config: TransactionPoolOptions, - client: Arc> - ) -> Result, error::Error> { - Factory::build_full_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Option, - transaction_pool: Option>>, - ) -> Result<(Self::ImportQueue, Option>>), error::Error> { - let select_chain = select_chain - .ok_or(error::Error::SelectChainRequired)?; - Factory::build_full_import_queue(config, client, select_chain, transaction_pool) - .map(|queue| (queue, None)) - } - - fn build_select_chain( - config: &mut FactoryFullConfiguration, - client: Arc> - ) -> Result, error::Error> { - Self::Factory::build_select_chain(config, client).map(Some) - } - - fn build_finality_proof_provider( - client: Arc> - ) -> Result::Block>>>, error::Error> { - Factory::build_finality_proof_provider(client) - } - - fn build_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions { - Factory::build_full_rpc_extensions(client, transaction_pool) - } -} - -/// A struct that implement `Components` for the light client. 
-pub struct LightComponents { - service: Service>, -} - -impl LightComponents { - /// Create new `LightComponents` - pub fn new( - config: FactoryFullConfiguration, - ) -> Result { - Ok( - Self { - service: Service::new(config)?, - } - ) - } -} - -impl Deref for LightComponents { - type Target = Service; - - fn deref(&self) -> &Self::Target { - &self.service - } -} - -impl DerefMut for LightComponents { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.service - } -} - -impl Future for LightComponents { - type Item = (); - type Error = super::Error; - - fn poll(&mut self) -> Poll { - self.service.poll() - } -} - -impl Executor + Send>> -for LightComponents { - fn execute( - &self, - future: Box + Send> - ) -> Result<(), futures::future::ExecuteError + Send>>> { - self.service.execute(future) - } -} - -impl Components for LightComponents { - type Factory = Factory; - type Executor = LightExecutor; - type Backend = LightBackend; - type TransactionPoolApi = ::LightTransactionPoolApi; - type ImportQueue = ::LightImportQueue; - type RuntimeApi = Factory::RuntimeApi; - type RuntimeServices = Factory::LightService; - type RpcExtensions = Factory::RpcExtensions; - type SelectChain = Factory::SelectChain; - - fn build_client( - config: &FactoryFullConfiguration, - executor: CodeExecutor, - _: Option, - ) - -> Result< - ( - Arc>, - Option>>> - ), error::Error> - { - let db_settings = client_db::DatabaseSettings { - cache_size: None, - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - path: config.database_path.clone(), - pruning: config.pruning.clone(), - }; - - let db_storage = client_db::light::LightStorage::new(db_settings)?; - let light_blockchain = client::light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - client::light::new_fetch_checker(light_blockchain.clone(), executor.clone()) - ); - let fetcher = Arc::new(network::OnDemand::new(fetch_checker)); - let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone()); - let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?; - Ok((Arc::new(client), Some(fetcher))) - } - - fn build_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, error::Error> - { - Factory::build_light_transaction_pool(config, client) - } - - fn build_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc>, - _select_chain: Option, - _transaction_pool: Option>>, - ) -> Result<(Self::ImportQueue, Option>>), error::Error> { - Factory::build_light_import_queue(config, client) - .map(|(queue, builder)| (queue, Some(builder))) - } - - fn build_finality_proof_provider( - _client: Arc> - ) -> Result::Block>>>, error::Error> { - Ok(None) - } - - fn build_select_chain( - _config: &mut FactoryFullConfiguration, - _client: Arc> - ) -> Result, error::Error> { - Ok(None) - } - - fn build_rpc_extensions( - client: Arc>, - transaction_pool: Arc>, - ) -> Self::RpcExtensions { - Factory::build_light_rpc_extensions(client, transaction_pool) - } -} - -======= -/// Alias for a an implementation of `futures::future::Executor`. 
-pub type TaskExecutor = Arc + Send>> + Send + Sync>; - ->>>>>>> Remove the old API -#[cfg(test)] -mod tests { - use super::*; - use consensus_common::{BlockOrigin, SelectChain}; - use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; - - #[test] - fn should_remove_transactions_from_the_pool() { - let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); - let client = Arc::new(client); - let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); - let transaction = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - let best = longest_chain.best_chain().unwrap(); - - // store the transaction in the pool - pool.submit_one(&BlockId::hash(best.hash()), transaction.clone()).unwrap(); - - // import the block - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push(transaction.clone()).unwrap(); - let block = builder.bake().unwrap(); - let id = BlockId::hash(block.header().hash()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should clean up the queue - assert_eq!(pool.status().ready, 1); - maintain_transaction_pool( - &id, - &client, - &pool, - ).unwrap(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - } -} diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index 3d02ce0b9c481..29c4128dfbb75 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -14,21 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{NewService, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID}; -use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, components::maintain_transaction_pool}; -use crate::{components, TransactionPoolAdapter}; -use crate::config::{Configuration, Roles}; +use crate::{NewService, NetworkStatus, NetworkState, error::{self, Error}, DEFAULT_PROTOCOL_ID}; +use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, TransactionPoolAdapter}; +use crate::TaskExecutor; +use crate::config::Configuration; use client::{BlockchainEvents, Client, runtime_api}; use codec::{Decode, Encode, IoReader}; use consensus_common::import_queue::ImportQueue; use futures::{prelude::*, sync::mpsc}; -use futures03::{StreamExt as _, TryStreamExt as _}; -use keystore::Store as Keystore; +use futures03::{FutureExt as _, compat::Compat, StreamExt as _, TryStreamExt as _}; +use keystore::{Store as Keystore, KeyStorePtr}; use log::{info, warn}; use network::{FinalityProofProvider, OnDemand, NetworkService, NetworkStateInfo}; use network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpecialization}; use parking_lot::{Mutex, RwLock}; use primitives::{Blake2Hasher, H256, Hasher}; +use rpc::{self, system::SystemInfo}; use sr_primitives::{BuildStorage, generic::BlockId}; use sr_primitives::traits::{Block as BlockT, ProvideRuntimeApi, NumberFor, One, Zero, Header, SaturatedConversion}; use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; @@ -36,7 +37,7 @@ use serde::{Serialize, de::DeserializeOwned}; use std::{io::{Read, Write, Seek}, marker::PhantomData, sync::Arc, sync::atomic::AtomicBool}; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use tel::{telemetry, SUBSTRATE_INFO}; -use transaction_pool::txpool::{ChainApi, Pool as TransactionPool}; +use 
transaction_pool::txpool::{self, ChainApi, Pool as TransactionPool}; /// Aggregator for the components required to build a service. /// @@ -641,8 +642,141 @@ ServiceBuilder< )) }, |h, c, tx| maintain_transaction_pool(h, c, tx), - |n, o, p, ns, v| components::offchain_workers(n, o, p, ns, v), - |c, ssb, si, te, tp, ext, ks| components::start_rpc(c, ssb, si, te, tp, ext, ks), + |n, o, p, ns, v| offchain_workers(n, o, p, ns, v), + |c, ssb, si, te, tp, ext, ks| start_rpc(c, ssb, si, te, tp, ext, ks), ) } } + +pub(crate) fn start_rpc( + client: Arc>, + system_send_back: futures03::channel::mpsc::UnboundedSender>, + rpc_system_info: SystemInfo, + task_executor: TaskExecutor, + transaction_pool: Arc>, + rpc_extensions: impl rpc::RpcExtension, + keystore: KeyStorePtr, +) -> rpc_servers::RpcHandler +where + Block: BlockT::Out>, + Backend: client::backend::Backend + 'static, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: runtime_api::Metadata + session::SessionKeys, + Api: Send + Sync + 'static, + Executor: client::CallExecutor + Send + Sync + Clone + 'static, + PoolApi: txpool::ChainApi + 'static { + use rpc::{chain, state, author, system}; + let subscriptions = rpc::Subscriptions::new(task_executor.clone()); + let chain = chain::Chain::new(client.clone(), subscriptions.clone()); + let state = state::State::new(client.clone(), subscriptions.clone()); + let author = rpc::author::Author::new( + client, + transaction_pool, + subscriptions, + keystore, + ); + let system = system::System::new(rpc_system_info, system_send_back); + + rpc_servers::rpc_handler(( + state::StateApi::to_delegate(state), + chain::ChainApi::to_delegate(chain), + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions, + )) +} + +pub(crate) fn maintain_transaction_pool( + id: &BlockId, + client: &Client, + transaction_pool: &TransactionPool, +) -> error::Result<()> where + Block: BlockT::Out>, + Backend: client::backend::Backend, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: runtime_api::TaggedTransactionQueue, + Executor: client::CallExecutor, + PoolApi: txpool::ChainApi, +{ + // Avoid calling into runtime if there is nothing to prune from the pool anyway. + if transaction_pool.status().is_empty() { + return Ok(()) + } + + if let Some(block) = client.block(id)? 
{ + let parent_id = BlockId::hash(*block.block.header().parent_hash()); + let extrinsics = block.block.extrinsics(); + transaction_pool.prune(id, &parent_id, extrinsics).map_err(|e| format!("{:?}", e))?; + } + + Ok(()) +} + +pub(crate) fn offchain_workers( + number: &NumberFor, + offchain: &offchain::OffchainWorkers< + Client, + >::OffchainStorage, + Block + >, + pool: &Arc>, + network_state: &Arc, + is_validator: bool, +) -> error::Result + Send>> +where + Block: BlockT::Out>, + Backend: client::backend::Backend + 'static, + Api: 'static, + >::OffchainStorage: 'static, + Client: ProvideRuntimeApi + Send + Sync, + as ProvideRuntimeApi>::Api: offchain::OffchainWorkerApi, + Executor: client::CallExecutor + 'static, + PoolApi: txpool::ChainApi + 'static, +{ + let future = offchain.on_block_imported(number, pool, network_state.clone(), is_validator) + .map(|()| Ok(())); + Ok(Box::new(Compat::new(future))) +} + +#[cfg(test)] +mod tests { + use super::*; + use consensus_common::{BlockOrigin, SelectChain}; + use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; + + #[test] + fn should_remove_transactions_from_the_pool() { + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = TransactionPool::new(Default::default(), ::transaction_pool::ChainApi::new(client.clone())); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + let best = longest_chain.best_chain().unwrap(); + + // store the transaction in the pool + pool.submit_one(&BlockId::hash(best.hash()), transaction.clone()).unwrap(); + + // import the block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push(transaction.clone()).unwrap(); + let block = builder.bake().unwrap(); + let id = BlockId::hash(block.header().hash()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should clean up the queue + assert_eq!(pool.status().ready, 1); + maintain_transaction_pool( + &id, + &client, + &pool, + ).unwrap(); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } +} diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index f62eb90c93821..9b04532aaf663 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -19,7 +19,6 @@ #![warn(missing_docs)] -mod components; mod chain_spec; pub mod config; #[macro_use] @@ -33,6 +32,7 @@ use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::ops::DerefMut; use std::time::{Duration, Instant}; +use serde::{Serialize, de::DeserializeOwned}; use futures::sync::mpsc; use parking_lot::Mutex; @@ -44,6 +44,7 @@ use network::{NetworkService, NetworkState, specialization::NetworkSpecializatio use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; use primitives::{Blake2Hasher, H256}; +use sr_primitives::BuildStorage; use sr_primitives::generic::BlockId; use sr_primitives::traits::NumberFor; @@ -55,22 +56,7 @@ pub use transaction_pool::txpool::{ self, Pool as TransactionPool, Options as TransactionPoolOptions, ChainApi, IntoPoolError }; pub use client::FinalityNotifications; -<<<<<<< HEAD pub use rpc::Metadata as RpcMetadata; - -pub use components::{ - ServiceFactory, FullBackend, FullExecutor, LightBackend, - LightExecutor, Components, PoolApi, ComponentClient, ComponentOffchainStorage, - ComponentBlock, FullClient, LightClient, FullComponents, LightComponents, - CodeExecutor, 
NetworkService as ComponentNetworkService, FactoryChainSpec, FactoryBlock, - FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis, - ComponentExHash, ComponentExtrinsic, FactoryExtrinsic, InitialSessionKeys, -}; -use components::{StartRpc, MaintainTransactionPool, OffchainWorker}; -======= -pub use components::RuntimeGenesis; - ->>>>>>> Remove the old API #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] @@ -117,6 +103,13 @@ pub struct NewService { marker: PhantomData, } +/// A set of traits for the runtime genesis config. +pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} +impl RuntimeGenesis for T {} + +/// Alias for a an implementation of `futures::future::Executor`. +pub type TaskExecutor = Arc + Send>> + Send + Sync>; + /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { @@ -455,68 +448,6 @@ macro_rules! new_impl { mod factory; -<<<<<<< HEAD -impl Service { - /// Creates a new service. - pub fn new( - mut config: FactoryFullConfiguration, - ) -> Result { - let inner = new_impl!( - ComponentBlock, - config, - |mut config: &mut FactoryFullConfiguration| -> Result<_, error::Error> { - // Create client - let executor = NativeExecutor::new(config.default_heap_pages); - - let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?; - - let (client, on_demand) = Components::build_client(&config, executor, Some(keystore.clone()))?; - let select_chain = Components::build_select_chain(&mut config, client.clone())?; - - let transaction_pool = Arc::new( - Components::build_transaction_pool(config.transaction_pool.clone(), client.clone())? - ); - - let (import_queue, finality_proof_request_builder) = Components::build_import_queue( - &mut config, - client.clone(), - select_chain.clone(), - Some(transaction_pool.clone()), - )?; - let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?; - - Components::RuntimeServices::generate_initial_session_keys( - client.clone(), - config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; - - let network_protocol = ::build_network_protocol(&config)?; - let rpc_extensions = Components::build_rpc_extensions(client.clone(), transaction_pool.clone()); - - Ok(( - client, - on_demand, - keystore, - select_chain, - import_queue, - finality_proof_request_builder, - finality_proof_provider, - network_protocol, - transaction_pool, - rpc_extensions - )) - }, - Components::RuntimeServices::maintain_transaction_pool, - Components::RuntimeServices::offchain_workers, - Components::RuntimeServices::start_rpc, - ); - - inner.map(|inner| Service { inner }) - } -} - -======= ->>>>>>> Remove the old API /// Abstraction over a Substrate service. pub trait AbstractService: 'static + Future + Executor + Send>> + Send { @@ -947,7 +878,7 @@ NewService { /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. #[cfg(not(target_os = "unknown"))] -fn start_rpc_servers components::RpcHandler>( +fn start_rpc_servers rpc_servers::RpcHandler>( config: &Configuration, mut gen_handler: H ) -> Result, error::Error> { @@ -1096,228 +1027,6 @@ where } } -<<<<<<< HEAD -/// Constructs a service factory with the given name that implements the `ServiceFactory` trait. -/// The required parameters are required to be given in the exact order. Some parameters are followed -/// by `{}` blocks. These blocks are required and used to initialize the given parameter. 
-/// In these block it is required to write a closure that takes the same number of arguments, -/// the corresponding function in the `ServiceFactory` trait provides. -/// -/// # Example -/// -/// ``` -/// # use substrate_service::{ -/// # construct_service_factory, Service, FullBackend, FullExecutor, LightBackend, LightExecutor, -/// # FullComponents, LightComponents, FactoryFullConfiguration, FullClient -/// # }; -/// # use transaction_pool::{self, txpool::{Pool as TransactionPool}}; -/// # use network::{config::DummyFinalityProofRequestBuilder, construct_simple_protocol}; -/// # use client::{self, LongestChain}; -/// # use consensus_common::import_queue::{BasicQueue, Verifier}; -/// # use consensus_common::{BlockOrigin, BlockImportParams, well_known_cache_keys::Id as CacheKeyId}; -/// # use node_runtime::{GenesisConfig, RuntimeApi}; -/// # use std::sync::Arc; -/// # use node_primitives::Block; -/// # use babe_primitives::AuthorityPair as BabePair; -/// # use grandpa_primitives::AuthorityPair as GrandpaPair; -/// # use sr_primitives::Justification; -/// # use sr_primitives::traits::Block as BlockT; -/// # use grandpa; -/// # construct_simple_protocol! { -/// # pub struct NodeProtocol where Block = Block { } -/// # } -/// # struct MyVerifier; -/// # impl Verifier for MyVerifier { -/// # fn verify( -/// # &mut self, -/// # origin: BlockOrigin, -/// # header: B::Header, -/// # justification: Option, -/// # body: Option>, -/// # ) -> Result<(BlockImportParams, Option)>>), String> { -/// # unimplemented!(); -/// # } -/// # } -/// type FullChainApi = transaction_pool::ChainApi< -/// client::Client, FullExecutor, Block, RuntimeApi>, Block>; -/// type LightChainApi = transaction_pool::ChainApi< -/// client::Client, LightExecutor, Block, RuntimeApi>, Block>; -/// -/// construct_service_factory! { -/// struct Factory { -/// // Declare the block type -/// Block = Block, -/// RuntimeApi = RuntimeApi, -/// // Declare the network protocol and give an initializer. -/// NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, -/// RuntimeDispatch = node_executor::Executor, -/// FullTransactionPoolApi = FullChainApi -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// LightTransactionPoolApi = LightChainApi -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// Genesis = GenesisConfig, -/// Configuration = (), -/// FullService = FullComponents -/// { |config| >::new(config) }, -/// // Setup as Consensus Authority (if the role and key are given) -/// AuthoritySetup = { -/// |service: Self::FullService| { -/// Ok(service) -/// }}, -/// LightService = LightComponents -/// { |config| >::new(config) }, -/// FullImportQueue = BasicQueue -/// { |_, client, _, _| Ok(BasicQueue::new(MyVerifier, Box::new(client), None, None)) }, -/// LightImportQueue = BasicQueue -/// { |_, client| { -/// let fprb = Box::new(DummyFinalityProofRequestBuilder::default()) as Box<_>; -/// Ok((BasicQueue::new(MyVerifier, Box::new(client), None, None), fprb)) -/// }}, -/// SelectChain = LongestChain, Self::Block> -/// { |config: &FactoryFullConfiguration, client: Arc>| { -/// #[allow(deprecated)] -/// Ok(LongestChain::new(client.backend().clone())) -/// }}, -/// FinalityProofProvider = { |client: Arc>| { -/// Ok(Some(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)) -/// }}, -/// RpcExtensions = (), -/// } -/// } -/// ``` -#[macro_export] -macro_rules! 
construct_service_factory { - ( - $(#[$attr:meta])* - struct $name:ident { - Block = $block:ty, - RuntimeApi = $runtime_api:ty, - NetworkProtocol = $protocol:ty { $( $protocol_init:tt )* }, - RuntimeDispatch = $dispatch:ty, - FullTransactionPoolApi = $full_transaction:ty { $( $full_transaction_init:tt )* }, - LightTransactionPoolApi = $light_transaction:ty { $( $light_transaction_init:tt )* }, - Genesis = $genesis:ty, - Configuration = $config:ty, - FullService = $full_service:ty { $( $full_service_init:tt )* }, - AuthoritySetup = { $( $authority_setup:tt )* }, - LightService = $light_service:ty { $( $light_service_init:tt )* }, - FullImportQueue = $full_import_queue:ty - { $( $full_import_queue_init:tt )* }, - LightImportQueue = $light_import_queue:ty - { $( $light_import_queue_init:tt )* }, - SelectChain = $select_chain:ty - { $( $select_chain_init:tt )* }, - FinalityProofProvider = { $( $finality_proof_provider_init:tt )* }, - RpcExtensions = $rpc_extensions_ty:ty - $( { $( $rpc_extensions:tt )* } )?, - } - ) => { - $( #[$attr] )* - pub struct $name {} - - #[allow(unused_variables)] - impl $crate::ServiceFactory for $name { - type Block = $block; - type RuntimeApi = $runtime_api; - type NetworkProtocol = $protocol; - type RuntimeDispatch = $dispatch; - type FullTransactionPoolApi = $full_transaction; - type LightTransactionPoolApi = $light_transaction; - type Genesis = $genesis; - type Configuration = $config; - type FullService = $full_service; - type LightService = $light_service; - type FullImportQueue = $full_import_queue; - type LightImportQueue = $light_import_queue; - type SelectChain = $select_chain; - type RpcExtensions = $rpc_extensions_ty; - - fn build_full_transaction_pool( - config: $crate::TransactionPoolOptions, - client: $crate::Arc<$crate::FullClient> - ) -> $crate::Result<$crate::TransactionPool, $crate::Error> - { - ( $( $full_transaction_init )* ) (config, client) - } - - fn build_light_transaction_pool( - config: $crate::TransactionPoolOptions, - client: $crate::Arc<$crate::LightClient> - ) -> $crate::Result<$crate::TransactionPool, $crate::Error> - { - ( $( $light_transaction_init )* ) (config, client) - } - - fn build_network_protocol(config: &$crate::FactoryFullConfiguration) - -> $crate::Result - { - ( $( $protocol_init )* ) (config) - } - - fn build_select_chain( - config: &mut $crate::FactoryFullConfiguration, - client: Arc<$crate::FullClient> - ) -> $crate::Result { - ( $( $select_chain_init )* ) (config, client) - } - - fn build_full_import_queue( - config: &mut $crate::FactoryFullConfiguration, - client: $crate::Arc<$crate::FullClient>, - select_chain: Self::SelectChain, - transaction_pool: Option>>, - ) -> $crate::Result { - ( $( $full_import_queue_init )* ) (config, client, select_chain, transaction_pool) - } - - fn build_light_import_queue( - config: &mut FactoryFullConfiguration, - client: Arc<$crate::LightClient>, - ) -> Result<(Self::LightImportQueue, $crate::BoxFinalityProofRequestBuilder<$block>), $crate::Error> { - ( $( $light_import_queue_init )* ) (config, client) - } - - fn build_finality_proof_provider( - client: Arc<$crate::FullClient> - ) -> Result>>, $crate::Error> { - ( $( $finality_proof_provider_init )* ) (client) - } - - fn new_light( - config: $crate::FactoryFullConfiguration - ) -> $crate::Result - { - ( $( $light_service_init )* ) (config) - } - - fn new_full( - config: $crate::FactoryFullConfiguration - ) -> Result - { - ( $( $full_service_init )* ) (config).and_then(|service| { - ($( $authority_setup )*)(service) - }) - } - - fn 
build_full_rpc_extensions( - client: Arc<$crate::FullClient>, - transaction_pool: Arc<$crate::TransactionPool>, - ) -> Self::RpcExtensions { - $( ( $( $rpc_extensions )* ) (client, transaction_pool) )? - } - - fn build_light_rpc_extensions( - client: Arc<$crate::LightClient>, - transaction_pool: Arc<$crate::TransactionPool>, - ) -> Self::RpcExtensions { - $( ( $( $rpc_extensions )* ) (client, transaction_pool) )? - } - } - } -} - -======= ->>>>>>> Remove the old API #[cfg(test)] mod tests { use super::*; diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 43651e294ee3b..dbec0e63a689d 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -1,29 +1,20 @@ -<<<<<<< HEAD -======= -//! Service implementation. Specialized wrapper over Substrate service. - ->>>>>>> Remove the old API -#![warn(unused_extern_crates)] - //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. use std::sync::Arc; use std::time::Duration; -use substrate_client::{self as client, LongestChain}; -use babe::{import_queue, start_babe, BabeImportQueue, Config}; +use substrate_client::LongestChain; +use babe::{import_queue, start_babe, Config}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use futures::prelude::*; use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi, WASM_BINARY}; use substrate_service::{ - FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, - FullClient, LightClient, LightBackend, FullExecutor, LightExecutor, error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder, + TelemetryOnConnect, }; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; use substrate_executor::native_executor_instance; -use substrate_service::{ServiceFactory, construct_service_factory, TelemetryOnConnect}; pub use substrate_executor::NativeExecutor; // Our native executor instance. @@ -39,43 +30,6 @@ construct_simple_protocol! { pub struct NodeProtocol where Block = Block { } } -type BabeBlockImportForService = babe::BabeBlockImport< - FullBackend, - FullExecutor, - ::Block, - grandpa::BlockImportForService, - ::RuntimeApi, - client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, ->; - -pub struct NodeConfig { - /// GRANDPA and BABE connection to import block. - // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state - pub import_setup: Option<( - BabeBlockImportForService, - grandpa::LinkHalfForService, - babe::BabeLink, - )>, - /// Tasks that were created by previous setup steps and should be spawned. - pub tasks_to_spawn: Option + Send>>>, - inherent_data_providers: InherentDataProviders, -} - -impl Default for NodeConfig where F: ServiceFactory { - fn default() -> NodeConfig { - NodeConfig { - import_setup: None, - inherent_data_providers: InherentDataProviders::new(), - tasks_to_spawn: None, - } - } -} - /// Starts a `ServiceBuilder` for a full service. 
/// /// Use this macro if you don't actually need the full service, but just the builder in order to @@ -279,199 +233,3 @@ pub fn new_light(config: Configuration, FullExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - LightTransactionPoolApi = - transaction_pool::ChainApi< - client::Client, LightExecutor, Block, RuntimeApi>, - Block - > { - |config, client| - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - }, - Genesis = GenesisConfig, - Configuration = NodeConfig, - FullService = FullComponents { - |config: FactoryFullConfiguration| FullComponents::::new(config) - }, - AuthoritySetup = { - |mut service: Self::FullService| { - let (block_import, link_half, babe_link) = - service.config_mut().custom.import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); - } - } - - if service.config().roles.is_authority() { - let proposer = basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: service.config() - .custom.inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - - // the BABE authoring task is considered infallible, i.e. if it - // fails we take down the service with it. - service.spawn_essential_task(select); - } - - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; - - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), - inherent_data_providers: - service.config().custom.inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. 
- service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &service.config().custom.inherent_data_providers, - service.network(), - )?; - }, - } - - Ok(service) - } - }, - LightService = LightComponents - { |config| >::new(config) }, - FullImportQueue = BabeImportQueue { - | - config: &mut FactoryFullConfiguration, - client: Arc>, - select_chain: Self::SelectChain, - transaction_pool: Option>>, - | { - let (block_import, link_half) = - grandpa::block_import::<_, _, _, RuntimeApi, FullClient, _>( - client.clone(), client.clone(), select_chain - )?; - let justification_import = block_import.clone(); - let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue( - Config::get_or_compute(&*client)?, - block_import, - Some(Box::new(justification_import)), - None, - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - transaction_pool, - )?; - config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]); - Ok(import_queue) - } - }, - LightImportQueue = BabeImportQueue - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - let fetch_checker = client.backend().blockchain().fetcher() - .upgrade() - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient>( - client.clone(), Arc::new(fetch_checker), client.clone() - )?; - - let finality_proof_import = block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) = import_queue::<_, _, _, _, _, _, TransactionPool>( - Config::get_or_compute(&*client)?, - block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - client, - config.custom.inherent_data_providers.clone(), - None, - )?; - - Ok((import_queue, finality_proof_request_builder)) - }}, - SelectChain = LongestChain, Self::Block> - { |config: &FactoryFullConfiguration, client: Arc>| { - #[allow(deprecated)] - Ok(LongestChain::new(client.backend().clone())) - } - }, - FinalityProofProvider = { |client: Arc>| { - Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)) - }}, - RpcExtensions = (), - } -} From 1bded021ae2b5fb4a5a9f2f0215e9018d4c18668 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 14:42:10 +0200 Subject: [PATCH 21/32] Fix indentation on chain_ops --- core/service/src/chain_ops.rs | 50 +++++++++++++++++------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index 312fa90f8e849..3a3677798b6ad 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -85,38 +85,38 @@ macro_rules! 
import_blocks { use sr_primitives::traits::Block; use futures03::TryFutureExt as _; -struct WaitLink { - imported_blocks: u64, - has_error: bool, -} + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } -impl WaitLink { - fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } } } -} -impl Link for WaitLink { - fn blocks_processed( - &mut self, - imported: usize, - _count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - self.imported_blocks += imported as u64; - - for result in results { - if let (Err(err), hash) = result { - warn!("There was an error importing block with hash {:?}: {:?}", hash, err); - self.has_error = true; - break; + impl Link for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!("There was an error importing block with hash {:?}: {:?}", hash, err); + self.has_error = true; + break; + } } } } -} let (exit_send, exit_recv) = std::sync::mpsc::channel(); ::std::thread::spawn(move || { From bd34a60dfd3718722d867dc7c820da239955efd0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 14:53:25 +0200 Subject: [PATCH 22/32] Line widths --- core/service/src/factory.rs | 43 ++++++++++++++++++++++++++---------- core/service/src/lib.rs | 9 +++++--- core/service/test/src/lib.rs | 27 +++++++++++++++++----- node-template/src/cli.rs | 15 ++++++++----- node/cli/src/lib.rs | 12 ++++++---- 5 files changed, 76 insertions(+), 30 deletions(-) diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index 29c4128dfbb75..113495cc1b695 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -263,10 +263,17 @@ impl( mut self, - builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) -> Result - ) -> Result, Error> + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) + -> Result + ) -> Result, Error> where TSc: Clone { - let import_queue = builder(&mut self.config, self.client.clone(), self.select_chain.clone(), self.transaction_pool.clone())?; + let import_queue = builder( + &mut self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone() + )?; Ok(ServiceBuilder { config: self.config, @@ -373,7 +380,12 @@ impl Result<(UImpQu, Option), Error> ) -> Result, Error> where TSc: Clone { - let (import_queue, fprb) = builder(&mut self.config, self.client.clone(), self.select_chain.clone(), self.transaction_pool.clone())?; + let (import_queue, fprb) = builder( + &mut self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone() + )?; Ok(ServiceBuilder { config: self.config, @@ -488,8 +500,9 @@ pub trait ServiceBuilderRevert { ) -> Result<(), Error>; } -impl ServiceBuilderImport for - ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +impl + ServiceBuilderImport for ServiceBuilder, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> where TBl: BlockT::Out>, TBackend: 'static + client::backend::Backend + Send, @@ -509,8 +522,9 @@ where } } -impl ServiceBuilderExport for - ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +impl + ServiceBuilderExport for ServiceBuilder, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> where TBl: BlockT::Out>, TBackend: 'static + 
client::backend::Backend + Send, @@ -531,8 +545,9 @@ where } } -impl ServiceBuilderRevert for - ServiceBuilder, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> +impl + ServiceBuilderRevert for ServiceBuilder, + TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> where TBl: BlockT::Out>, TBackend: 'static + client::backend::Backend + Send, @@ -567,7 +582,10 @@ ServiceBuilder< > where Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: - runtime_api::Metadata + offchain::OffchainWorkerApi + runtime_api::TaggedTransactionQueue + session::SessionKeys, + runtime_api::Metadata + + offchain::OffchainWorkerApi + + runtime_api::TaggedTransactionQueue + + session::SessionKeys, TBl: BlockT::Out>, TRtApi: 'static + Send + Sync, TCfg: Default, @@ -661,7 +679,8 @@ where Block: BlockT::Out>, Backend: client::backend::Backend + 'static, Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: runtime_api::Metadata + session::SessionKeys, + as ProvideRuntimeApi>::Api: + runtime_api::Metadata + session::SessionKeys, Api: Send + Sync + 'static, Executor: client::CallExecutor + Send + Sync + Clone + 'static, PoolApi: txpool::ChainApi + 'static { diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 9b04532aaf663..972060daab979 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -242,8 +242,9 @@ macro_rules! new_impl { .map(|v| Ok::<_, ()>(v)).compat() .for_each(move |notification| { let number = *notification.header.number(); + let txpool = txpool.upgrade(); - if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) { + if let (Some(ref txpool), Some(ref client)) = (&txpool, wclient.upgrade()) { $maintain_transaction_pool( &BlockId::hash(notification.hash), &*client, @@ -251,7 +252,8 @@ macro_rules! new_impl { ).map_err(|e| warn!("Pool error processing new block: {:?}", e))?; } - if let (Some(txpool), Some(offchain)) = (txpool.upgrade(), offchain.as_ref().and_then(|o| o.upgrade())) { + let offchain = offchain.as_ref().and_then(|o| o.upgrade()); + if let (Some(txpool), Some(offchain)) = (txpool, offchain) { let future = $offchain_workers( &number, &offchain, @@ -525,7 +527,8 @@ pub trait AbstractService: 'static + Future + } impl AbstractService for - NewService, TSc, NetworkStatus, NetworkService, TransactionPool, TOc> + NewService, TSc, NetworkStatus, + NetworkService, TransactionPool, TOc> where TCfg: 'static + Send, TBl: BlockT, TBackend: 'static + client::backend::Backend, diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index 05b49b6c29d66..fbff470ba5089 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -298,10 +298,12 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light info!("Checking star topology"); let first_address = network.full_nodes[0].2.clone(); for (_, service, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } network.run_until_all_full( |_index, service| service.get().network().num_connected() == NUM_FULL_NODES - 1 @@ -334,13 +336,15 @@ pub fn connectivity(spec: ChainSpec, 
full_builder: Fb, light for i in 0..max_nodes { if i != 0 { if let Some((_, service, node_id)) = network.full_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); address = node_id.clone(); } } if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()).expect("Error adding reserved peer"); + service.get().network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); address = node_id.clone(); } } @@ -354,7 +358,13 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light } } -pub fn sync(spec: ChainSpec, full_builder: Fb, light_builder: Lb, mut block_factory: B, mut extrinsic_factory: E) where +pub fn sync( + spec: ChainSpec, + full_builder: Fb, + light_builder: Lb, + mut block_factory: B, + mut extrinsic_factory: E +) where Fb: Fn(Configuration<(), G>) -> Result, F: AbstractService, Lb: Fn(Configuration<(), G>) -> Result, @@ -416,7 +426,12 @@ pub fn sync(spec: ChainSpec, full_builder: Fb, light_b ); } -pub fn consensus(spec: ChainSpec, full_builder: Fb, light_builder: Lb, authorities: impl IntoIterator) where +pub fn consensus( + spec: ChainSpec, + full_builder: Fb, + light_builder: Lb, + authorities: impl IntoIterator +) where Fb: Fn(Configuration<(), G>) -> Result, F: AbstractService, Lb: Fn(Configuration<(), G>) -> Result, diff --git a/node-template/src/cli.rs b/node-template/src/cli.rs index d60b59a8c5533..f6edbb2cc3ee3 100644 --- a/node-template/src/cli.rs +++ b/node-template/src/cli.rs @@ -15,7 +15,8 @@ pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> E: IntoExit, { match parse_and_prepare::(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, + |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by {}, 2017, 2018", version.author); @@ -37,10 +38,13 @@ pub fn run(args: I, exit: E, version: VersionInfo) -> error::Result<()> }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(_) => Ok(()) }?; @@ -97,7 +101,8 @@ impl IntoExit for Exit { let exit_send_cell = RefCell::new(Some(exit_send)); ctrlc::set_handler(move || { - if let Some(exit_send) = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; 
qed").take() { + let exit_send = exit_send_cell.try_borrow_mut().expect("signal handler not reentrant; qed").take(); + if let Some(exit_send) = exit_send { exit_send.send(()).expect("Error sending exit notification"); } }).expect("Error setting Ctrl-C handler"); diff --git a/node/cli/src/lib.rs b/node/cli/src/lib.rs index 5008061c86bc2..b7679be1764e0 100644 --- a/node/cli/src/lib.rs +++ b/node/cli/src/lib.rs @@ -159,7 +159,8 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul E: IntoExit, { match parse_and_prepare::(&version, "substrate-node", args) { - ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, |exit, _cli_args, _custom_args, config| { + ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit, + |exit, _cli_args, _custom_args, config| { info!("{}", version.name); info!(" version {}", config.full_version()); info!(" by Parity Technologies, 2017-2019"); @@ -182,10 +183,13 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul }.map_err(|e| format!("{:?}", e)) }), ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec), - ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), - ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), + ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec, exit), ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec), - ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| Ok(new_full_start!(config).0), load_spec), + ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config| + Ok(new_full_start!(config).0), load_spec), ParseAndPrepare::CustomCommand(CustomSubcommands::Factory(cli_args)) => { let mut config = cli::create_config_with_db_path::<(), _, _>( load_spec, From 0ebb4fef66e83d064e46d8347dadf87f9dee853e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 14:58:07 +0200 Subject: [PATCH 23/32] Fix bad line widths commit --- core/service/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 972060daab979..b20eb8706890e 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -244,7 +244,7 @@ macro_rules! 
new_impl { let number = *notification.header.number(); let txpool = txpool.upgrade(); - if let (Some(ref txpool), Some(ref client)) = (&txpool, wclient.upgrade()) { + if let (Some(txpool), Some(client)) = (txpool.as_ref(), wclient.upgrade()) { $maintain_transaction_pool( &BlockId::hash(notification.hash), &*client, From 7f4c9530a70de439a101c9699d4ab9feea04bbaa Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 15:02:37 +0200 Subject: [PATCH 24/32] =?UTF-8?q?Line=20widths=20again=20=F0=9F=A4=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core/service/src/factory.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/service/src/factory.rs b/core/service/src/factory.rs index 113495cc1b695..14016c750f862 100644 --- a/core/service/src/factory.rs +++ b/core/service/src/factory.rs @@ -406,8 +406,10 @@ impl( self, - builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) -> Result<(UImpQu, UFprb), Error> - ) -> Result, Error> + builder: impl FnOnce(&mut Configuration, Arc, Option, Arc) + -> Result<(UImpQu, UFprb), Error> + ) -> Result, Error> where TSc: Clone { self.with_import_queue_and_opt_fprb(|cfg, cl, sc, tx| builder(cfg, cl, sc, tx).map(|(q, f)| (q, Some(f)))) } From 1dbe6f67eed7e60032eda488ec824b6d54c29f12 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 15:22:50 +0200 Subject: [PATCH 25/32] Fix the sync test --- core/service/test/src/lib.rs | 85 +++++++------- node/cli/src/service.rs | 215 ++++++++++++++++++----------------- 2 files changed, 154 insertions(+), 146 deletions(-) diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index fbff470ba5089..870f287bff8f2 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -41,10 +41,10 @@ use consensus::{BlockImportParams, BlockImport}; /// Maximum duration of single wait call. 
const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet { +struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, SyncService, Multiaddr)>, - full_nodes: Vec<(usize, SyncService, Multiaddr)>, + authority_nodes: Vec<(usize, SyncService, U, Multiaddr)>, + full_nodes: Vec<(usize, SyncService, U, Multiaddr)>, light_nodes: Vec<(usize, SyncService, Multiaddr)>, chain_spec: ChainSpec, base_port: u16, @@ -81,8 +81,8 @@ impl> Future for SyncService { } } -impl TestNet -where F: Send + 'static, L: Send +'static +impl TestNet +where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static { pub fn run_until_all_full( &mut self, @@ -98,7 +98,7 @@ where F: Send + 'static, L: Send +'static let interval = Interval::new_interval(Duration::from_millis(100)) .map_err(|_| ()) .for_each(move |_| { - let full_ready = full_nodes.iter().all(|&(ref id, ref service, _)| + let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| full_predicate(*id, service) ); @@ -195,18 +195,18 @@ fn node_config ( } } -impl TestNet where +impl TestNet where F: AbstractService, L: AbstractService, { fn new( temp: &TempDir, spec: ChainSpec, - full: impl Iterator) -> Result>, + full: impl Iterator) -> Result<(F, U), Error>>, light: impl Iterator) -> Result>, - authorities: impl Iterator) -> Result)>, + authorities: impl Iterator) -> Result<(F, U), Error>)>, base_port: u16 - ) -> TestNet { + ) -> TestNet { let _ = env_logger::try_init(); fdlimit::raise_fd_limit(); let runtime = Runtime::new().expect("Error creating tokio runtime"); @@ -226,9 +226,9 @@ impl TestNet where fn insert_nodes( &mut self, temp: &TempDir, - full: impl Iterator) -> Result>, + full: impl Iterator) -> Result<(F, U), Error>>, light: impl Iterator) -> Result>, - authorities: impl Iterator) -> Result)> + authorities: impl Iterator) -> Result<(F, U), Error>)> ) { let executor = self.runtime.executor(); @@ -242,22 +242,24 @@ impl TestNet where &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(authority(node_config).expect("Error creating test node service")); + let (service, user_data) = authority(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - self.authority_nodes.push((self.nodes, service, addr)); + self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } for full in full { let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(full(node_config).expect("Error creating test node service")); + let (service, user_data) = full(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); - self.full_nodes.push((self.nodes, service, addr)); + self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -288,16 +290,16 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light let mut network = TestNet::new( &temp, spec.clone(), - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, 
())) }), (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30400, ); info!("Checking star topology"); - let first_address = network.full_nodes[0].2.clone(); - for (_, service, _) in network.full_nodes.iter().skip(1) { + let first_address = network.full_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } @@ -323,19 +325,19 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30400, ); info!("Checking linked topology"); - let mut address = network.full_nodes[0].2.clone(); + let mut address = network.full_nodes[0].3.clone(); let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); for i in 0..max_nodes { if i != 0 { - if let Some((_, service, node_id)) = network.full_nodes.get(i) { + if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { service.get().network().add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); @@ -358,19 +360,20 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light } } -pub fn sync( +pub fn sync( spec: ChainSpec, full_builder: Fb, light_builder: Lb, mut block_factory: B, mut extrinsic_factory: E ) where - Fb: Fn(Configuration<(), G>) -> Result, + Fb: Fn(Configuration<(), G>) -> Result<(F, U), Error>, F: AbstractService, Lb: Fn(Configuration<(), G>) -> Result, L: AbstractService, - B: FnMut(&F) -> BlockImportParams, - E: FnMut(&F) -> ::Extrinsic, + B: FnMut(&F, &U) -> BlockImportParams, + E: FnMut(&F, &U) -> ::Extrinsic, + U: Clone + Send + 'static, { const NUM_FULL_NODES: usize = 10; // FIXME: BABE light client support is currently not working. @@ -390,19 +393,20 @@ pub fn sync( info!("Checking block sync"); let first_address = { let first_service = &network.full_nodes[0].1; + let first_user_data = &network.full_nodes[0].2; let mut client = first_service.get().client(); for i in 0 .. 
NUM_BLOCKS { if i % 128 == 0 { info!("Generating #{}", i); } - let import_data = block_factory(&first_service.get()); + let import_data = block_factory(&first_service.get(), first_user_data); client.import_block(import_data, HashMap::new()).expect("Error importing test block"); } - network.full_nodes[0].2.clone() + network.full_nodes[0].3.clone() }; info!("Running sync"); - for (_, service, _) in network.full_nodes.iter().skip(1) { + for (_, service, _, _) in network.full_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { @@ -417,8 +421,9 @@ pub fn sync( info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); + let first_user_data = &network.full_nodes[0].2; let best_block = BlockId::number(first_service.get().client().info().chain.best_number); - let extrinsic = extrinsic_factory(&first_service.get()); + let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); first_service.get().transaction_pool().submit_one(&best_block, extrinsic).unwrap(); network.run_until_all_full( |_index, service| service.get().transaction_pool().ready().count() == 1, @@ -444,21 +449,21 @@ pub fn consensus( let mut network = TestNet::new( &temp, spec.clone(), - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), - authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg) })), + authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30600, ); info!("Checking consensus"); - let first_address = network.authority_nodes[0].2.clone(); - for (_, service, _) in network.full_nodes.iter() { + let first_address = network.authority_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } - for (_, service, _) in network.authority_nodes.iter().skip(1) { + for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } network.run_until_all_full( @@ -471,13 +476,13 @@ pub fn consensus( info!("Adding more peers"); network.insert_nodes( &temp, - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
- (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), ); - for (_, service, _) in network.full_nodes.iter() { + for (_, service, _, _) in network.full_nodes.iter() { service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index ea0897e7f65a3..5be831088f8a1 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -19,13 +19,11 @@ //! Service implementation. Specialized wrapper over substrate service. use std::sync::Arc; -use std::time::Duration; -use babe::{import_queue, start_babe, Config}; +use babe::{import_queue, Config}; use client::{self, LongestChain}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_executor; -use futures::prelude::*; use node_primitives::Block; use node_runtime::{GenesisConfig, RuntimeApi}; use substrate_service::{ @@ -34,7 +32,6 @@ use substrate_service::{ use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; -use substrate_service::TelemetryOnConnect; construct_simple_protocol! { /// Demo protocol attachment for substrate. @@ -100,103 +97,114 @@ macro_rules! new_full_start { }} } -/// Builds a new service for a full client. -pub fn new_full(config: Configuration) --> Result { +/// Creates a full service from the configuration. +/// +/// We need to use a macro because the test suit doesn't work with an opaque service. It expects +/// concrete types instead. +macro_rules! new_full { + ($config:expr) => {{ + use futures::Future; - let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config); + let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!($config); - let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? - .with_finality_proof_provider(|client| - Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _) - )? - .build()?; - - let (block_import, link_half, babe_link) = import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - // spawn any futures that were created in the previous setup steps - if let Some(tasks) = tasks_to_spawn.take() { - for task in tasks { - service.spawn_task( - task.select(service.on_exit()) - .map(|_| ()) - .map_err(|_| ()) - ); + let service = builder.with_network_protocol(|_| Ok(crate::service::NodeProtocol::new()))? + .with_finality_proof_provider(|client| + Ok(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _) + )? + .build()?; + + let (block_import, link_half, babe_link) = import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); + + // spawn any futures that were created in the previous setup steps + if let Some(tasks) = tasks_to_spawn.take() { + for task in tasks { + service.spawn_task( + task.select(service.on_exit()) + .map(|_| ()) + .map_err(|_| ()) + ); + } } - } - - if service.config().roles.is_authority() { - let proposer = substrate_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let babe_config = babe::BabeParams { - config: Config::get_or_compute(&*client)?, - keystore: service.keystore(), - client, - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - force_authoring: service.config().force_authoring, - time_source: babe_link, - }; - - let babe = start_babe(babe_config)?; - let select = babe.select(service.on_exit()).then(|_| Ok(())); - service.spawn_task(Box::new(select)); - } + if service.config().roles.is_authority() { + let proposer = substrate_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 4096, - name: Some(service.config().name.clone()), - keystore: Some(service.keystore()), - }; + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(substrate_service::Error::SelectChainRequired)?; - match (service.config().roles.is_authority(), service.config().disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task(Box::new(grandpa::run_grandpa_observer( - config, - link_half, - service.network(), - service.on_exit(), - )?)); - }, - (true, false) => { - // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: link_half, - network: service.network(), + let babe_config = babe::BabeParams { + config: babe::Config::get_or_compute(&*client)?, + keystore: service.keystore(), + client, + select_chain, + block_import, + env: proposer, + sync_oracle: service.network(), inherent_data_providers: inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), + force_authoring: service.config().force_authoring, + time_source: babe_link, }; - service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?)); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &inherent_data_providers, - service.network(), - )?; - }, - } - Ok(service) + let babe = babe::start_babe(babe_config)?; + let select = babe.select(service.on_exit()).then(|_| Ok(())); + service.spawn_task(Box::new(select)); + } + + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 4096, + name: Some(service.config().name.clone()), + keystore: Some(service.keystore()), + }; + + match (service.config().roles.is_authority(), service.config().disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task(Box::new(grandpa::run_grandpa_observer( + config, + link_half, + 
service.network(), + service.on_exit(), + )?)); + }, + (true, false) => { + // start the full GRANDPA voter + let telemetry_on_connect = substrate_service::TelemetryOnConnect { + telemetry_connection_sinks: service.telemetry_on_connect_stream(), + }; + let grandpa_config = grandpa::GrandpaParams { + config: config, + link: link_half, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(telemetry_on_connect), + }; + service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?)); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; + }, + } + + Ok((service, inherent_data_providers)) + }} +} + +/// Builds a new service for a full client. +pub fn new_full(config: Configuration) +-> Result { + new_full!(config).map(|(service, _)| service) } /// Builds a new service for a light client. @@ -276,7 +284,6 @@ mod tests { use finality_tracker; use keyring::AccountKeyring; use substrate_service::AbstractService; - use service_test::SyncService; use crate::service::{new_full, new_light}; #[cfg(feature = "rhd")] @@ -341,7 +348,7 @@ mod tests { ); } - /*#[test] + #[test] #[ignore] fn test_sync() { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); @@ -362,14 +369,10 @@ mod tests { service_test::sync( chain_spec, - |config| new_full(config), + |config| new_full!(config), |config| new_light(config), - |service| { - let service = service.get(); - let mut inherent_data = service - .config() - .custom - .inherent_data_providers + |service, inherent_data_providers| { + let mut inherent_data = inherent_data_providers .create_inherent_data() .expect("Creates inherent data."); inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64); @@ -431,13 +434,13 @@ mod tests { fork_choice: ForkChoiceStrategy::LongestChain, } }, - |service| { + |service, _| { let amount = 5 * CENTS; let to = AddressPublic::from_raw(bob.public().0); let from = AddressPublic::from_raw(charlie.public().0); - let genesis_hash = service.get().client().block_hash(0).unwrap().unwrap(); - let best_block_id = BlockId::number(service.get().client().info().chain.best_number); - let version = service.get().client().runtime_version_at(&best_block_id).unwrap().spec_version; + let genesis_hash = service.client().block_hash(0).unwrap().unwrap(); + let best_block_id = BlockId::number(service.client().info().chain.best_number); + let version = service.client().runtime_version_at(&best_block_id).unwrap().spec_version; let signer = charlie.clone(); let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); @@ -468,7 +471,7 @@ mod tests { OpaqueExtrinsic(v) }, ); - }*/ + } #[test] #[ignore] From 69f313119ba4368c7bfc7e8c4d6861d00c736bf7 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 13 Aug 2019 22:05:46 +0200 Subject: [PATCH 26/32] Apply suggestions from code review Co-Authored-By: Gavin Wood --- node-template/src/service.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/node-template/src/service.rs b/node-template/src/service.rs index dbec0e63a689d..509a81f1d3378 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -82,7 +82,8 @@ macro_rules! new_full_start { /// Builds a new service for a full client. 
pub fn new_full(config: Configuration) --> Result { + -> Result +{ let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config); @@ -188,7 +189,8 @@ pub fn new_full(config: Configuration(config: Configuration) --> Result { + -> Result +{ let inherent_data_providers = InherentDataProviders::new(); ServiceBuilder::new_light::(config)? From f814f4e058d2e35fe0ad5a6ce3db46b06a7fe353 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 26 Aug 2019 14:30:38 +0200 Subject: [PATCH 27/32] Address some concerns --- core/service/src/lib.rs | 83 ++--------------------------------------- 1 file changed, 4 insertions(+), 79 deletions(-) diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index b20eb8706890e..7e4aac51761ca 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -30,7 +30,6 @@ use std::marker::PhantomData; use std::net::SocketAddr; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, Ordering}; -use std::ops::DerefMut; use std::time::{Duration, Instant}; use serde::{Serialize, de::DeserializeOwned}; use futures::sync::mpsc; @@ -458,7 +457,7 @@ pub trait AbstractService: 'static + Future + /// Backend storage for the client. type Backend: 'static + client::backend::Backend; /// How to execute calls towards the runtime. - type Executor: 'static + client::CallExecutor + Send + Sync + Clone; + type CallExecutor: 'static + client::CallExecutor + Send + Sync + Clone; /// API that the runtime provides. type RuntimeApi: Send + Sync; /// Configuration struct of the service. @@ -508,7 +507,7 @@ pub trait AbstractService: 'static + Future + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send>; /// Get shared client instance. - fn client(&self) -> Arc>; + fn client(&self) -> Arc>; /// Get clone of select chain. 
fn select_chain(&self) -> Option; @@ -541,7 +540,7 @@ where TCfg: 'static + Send, { type Block = TBl; type Backend = TBackend; - type Executor = TExec; + type CallExecutor = TExec; type RuntimeApi = TRtApi; type Config = TCfg; type SelectChain = TSc; @@ -594,7 +593,7 @@ where TCfg: 'static + Send, Box::new(self.rpc_handlers.handle_request(request, mem.metadata.clone())) } - fn client(&self) -> Arc> { + fn client(&self) -> Arc> { self.client.clone() } @@ -668,80 +667,6 @@ NewService { } } -impl AbstractService for T -where T: 'static + Deref + DerefMut + Future + Send + - Executor + Send>>, - T::Target: AbstractService { - type Block = <::Target as AbstractService>::Block; - type Backend = <::Target as AbstractService>::Backend; - type Executor = <::Target as AbstractService>::Executor; - type RuntimeApi = <::Target as AbstractService>::RuntimeApi; - type Config = <::Target as AbstractService>::Config; - type SelectChain = <::Target as AbstractService>::SelectChain; - type TransactionPoolApi = <::Target as AbstractService>::TransactionPoolApi; - type NetworkSpecialization = <::Target as AbstractService>::NetworkSpecialization; - - fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { - (**self).telemetry_on_connect_stream() - } - - fn config(&self) -> &Self::Config { - (**self).config() - } - - fn config_mut(&mut self) -> &mut Self::Config { - (&mut **self).config_mut() - } - - fn telemetry(&self) -> Option { - (**self).telemetry() - } - - fn spawn_task(&self, task: impl Future + Send + 'static) { - (**self).spawn_task(task) - } - - fn spawn_essential_task(&self, task: impl Future + Send + 'static) { - (**self).spawn_essential_task(task) - } - - fn spawn_task_handle(&self) -> SpawnTaskHandle { - (**self).spawn_task_handle() - } - - fn keystore(&self) -> keystore::KeyStorePtr { - (**self).keystore() - } - - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Box, Error = ()> + Send> { - (**self).rpc_query(mem, request) - } - - fn client(&self) -> Arc> { - (**self).client() - } - - fn select_chain(&self) -> Option { - (**self).select_chain() - } - - fn network(&self) -> Arc> { - (**self).network() - } - - fn network_status(&self) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { - (**self).network_status() - } - - fn transaction_pool(&self) -> Arc> { - (**self).transaction_pool() - } - - fn on_exit(&self) -> ::exit_future::Exit { - (**self).on_exit() - } -} - /// Builds a never-ending future that continuously polls the network. /// /// The `status_sink` contain a list of senders to send a periodic network status to. 
From 36d3f27fee1f1ae653fbaffaa8eaec637a647bdd Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 26 Aug 2019 14:37:42 +0200 Subject: [PATCH 28/32] Remove TelemetryOnConnect --- Cargo.lock | 1 - core/finality-grandpa/Cargo.toml | 1 - core/finality-grandpa/src/lib.rs | 5 ++--- core/service/src/lib.rs | 13 ++----------- node-template/src/service.rs | 10 ++-------- node/cli/src/service.rs | 5 +---- 6 files changed, 7 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2967d167da1f7..c16c68630c265 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4769,7 +4769,6 @@ dependencies = [ "substrate-keystore 2.0.0", "substrate-network 2.0.0", "substrate-primitives 2.0.0", - "substrate-service 2.0.0", "substrate-telemetry 2.0.0", "substrate-test-runtime-client 2.0.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/core/finality-grandpa/Cargo.toml b/core/finality-grandpa/Cargo.toml index 145f23085218d..393ee45db5776 100644 --- a/core/finality-grandpa/Cargo.toml +++ b/core/finality-grandpa/Cargo.toml @@ -23,7 +23,6 @@ serde_json = "1.0" client = { package = "substrate-client", path = "../client" } inherents = { package = "substrate-inherents", path = "../../core/inherents" } network = { package = "substrate-network", path = "../network" } -service = { package = "substrate-service", path = "../service" } srml-finality-tracker = { path = "../../srml/finality-tracker" } fg_primitives = { package = "substrate-finality-grandpa-primitives", path = "primitives" } grandpa = { package = "finality-grandpa", version = "0.9.0", features = ["derive-codec"] } diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index 01d7d4ead9d48..d6f4d768472a3 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -103,7 +103,6 @@ use environment::{Environment, VoterSetState}; use import::GrandpaBlockImport; use until_imported::UntilGlobalMessageBlocksImported; use communication::NetworkBridge; -use service::TelemetryOnConnect; use fg_primitives::{AuthoritySignature, SetId, AuthorityWeight}; // Re-export these two because it's just so damn convenient. @@ -480,7 +479,7 @@ pub struct GrandpaParams, N, RA, SC, X> { /// Handle to a future that will resolve on exit. pub on_exit: X, /// If supplied, can be used to hook on telemetry connection established events. - pub telemetry_on_connect: Option, + pub telemetry_on_connect: Option>, } /// Run a GRANDPA voter as a task. Provide configuration and a link to a @@ -527,7 +526,7 @@ pub fn run_grandpa_voter, N, RA, SC, X>( let telemetry_task = if let Some(telemetry_on_connect) = telemetry_on_connect { let authorities = persistent_data.authority_set.clone(); - let events = telemetry_on_connect.telemetry_connection_sinks + let events = telemetry_on_connect .for_each(move |_| { telemetry!(CONSENSUS_INFO; "afg.authority_set"; "authority_set_id" => ?authorities.set_id(), diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 7e4aac51761ca..53a99f3677af5 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -129,15 +129,6 @@ impl Executor + Send>> for SpawnTaskHandle } } -/// Stream of events for connection established to a telemetry server. -pub type TelemetryOnConnectNotifications = mpsc::UnboundedReceiver<()>; - -/// Used to hook on telemetry connection established events. -pub struct TelemetryOnConnect { - /// Event stream. - pub telemetry_connection_sinks: TelemetryOnConnectNotifications, -} - macro_rules! 
new_impl { ( $block:ty, @@ -470,7 +461,7 @@ pub trait AbstractService: 'static + Future + type NetworkSpecialization: NetworkSpecialization; /// Get event stream for telemetry connection established events. - fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications; + fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()>; /// Returns the configuration passed on construction. fn config(&self) -> &Self::Config; @@ -555,7 +546,7 @@ where TCfg: 'static + Send, &mut self.config } - fn telemetry_on_connect_stream(&self) -> TelemetryOnConnectNotifications { + fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()> { let (sink, stream) = mpsc::unbounded(); self._telemetry_on_connect_sinks.lock().push(sink); stream diff --git a/node-template/src/service.rs b/node-template/src/service.rs index 509a81f1d3378..07c0aa26cff31 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -7,10 +7,7 @@ use babe::{import_queue, start_babe, Config}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use futures::prelude::*; use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi, WASM_BINARY}; -use substrate_service::{ - error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder, - TelemetryOnConnect, -}; +use substrate_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::construct_simple_protocol; @@ -159,16 +156,13 @@ pub fn new_full(config: Configuration { // start the full GRANDPA voter - let telemetry_on_connect = TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; let grandpa_config = grandpa::GrandpaParams { config: config, link: link_half, network: service.network(), inherent_data_providers: inherent_data_providers.clone(), on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), }; // the GRANDPA voter task is considered infallible, i.e. diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index 5be831088f8a1..c47e764c4294e 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -175,16 +175,13 @@ macro_rules! 
new_full { }, (true, false) => { // start the full GRANDPA voter - let telemetry_on_connect = substrate_service::TelemetryOnConnect { - telemetry_connection_sinks: service.telemetry_on_connect_stream(), - }; let grandpa_config = grandpa::GrandpaParams { config: config, link: link_half, network: service.network(), inherent_data_providers: inherent_data_providers.clone(), on_exit: service.on_exit(), - telemetry_on_connect: Some(telemetry_on_connect), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), }; service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?)); }, From 9b034c48dcb89cf375f0ef3ee32fc87790faa9c5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 26 Aug 2019 14:38:26 +0200 Subject: [PATCH 29/32] Remove informant::start --- core/cli/src/informant.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/core/cli/src/informant.rs b/core/cli/src/informant.rs index d8f0471a89f75..52a5f67c26d59 100644 --- a/core/cli/src/informant.rs +++ b/core/cli/src/informant.rs @@ -22,16 +22,9 @@ use futures03::{StreamExt as _, TryStreamExt as _}; use log::{info, warn}; use sr_primitives::{generic::BlockId, traits::Header}; use service::AbstractService; -use tokio::runtime::TaskExecutor; mod display; -/// Spawn informant on the event loop -#[deprecated(note = "Please use informant::build instead, and then create the task manually")] -pub fn start(service: &impl AbstractService, exit: ::exit_future::Exit, handle: TaskExecutor) { - handle.spawn(exit.until(build(service)).map(|_| ())); -} - /// Creates an informant in the form of a `Future` that must be polled regularly. pub fn build(service: &impl AbstractService) -> impl Future { let client = service.client(); From df96677577f04118b74912ef9293efecda49e0f7 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 26 Aug 2019 14:44:02 +0200 Subject: [PATCH 30/32] Update jsonrpc --- Cargo.lock | 117 ++++++++++++++++++++++-------------- core/rpc-servers/Cargo.toml | 8 +-- core/rpc/Cargo.toml | 8 +-- node/cli/Cargo.toml | 2 +- node/rpc-client/Cargo.toml | 2 +- node/rpc/Cargo.toml | 8 +-- 6 files changed, 87 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c16c68630c265..34b51a107300f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1344,6 +1344,16 @@ dependencies = [ "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "idna" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "impl-codec" version = "0.4.0" @@ -1421,24 +1431,25 @@ dependencies = [ [[package]] name = "jsonrpc-client-transports" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "websocket 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-core" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1450,15 +1461,15 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-client-transports 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-client-transports 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-derive" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro-crate 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1469,12 +1480,12 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1483,10 +1494,10 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1494,12 +1505,12 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1510,15 +1521,15 @@ dependencies = [ 
[[package]] name = "jsonrpc-ws-server" -version = "13.0.0" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2298,7 +2309,7 @@ dependencies = [ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-executor 2.0.0", "node-primitives 2.0.0", @@ -2388,10 +2399,10 @@ name = "node-rpc" version = "2.0.0" dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "node-runtime 2.0.0", @@ -2412,7 +2423,7 @@ dependencies = [ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "node-primitives 2.0.0", "substrate-rpc 2.0.0", @@ -2934,6 +2945,11 @@ name = "percent-encoding" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pin-utils" version = "0.1.0-alpha.4" @@ -4973,10 +4989,10 @@ dependencies = [ "derive_more 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 13.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5002,10 +5018,10 @@ dependencies = [ name = "substrate-rpc-servers" version = "2.0.0" dependencies = [ - "jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-ws-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-pubsub 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-ws-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", @@ -5744,7 +5760,7 @@ name = "twox-hash" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5843,6 +5859,16 @@ dependencies = [ "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "url" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "utf8-ranges" version = "1.0.3" @@ -6155,7 +6181,7 @@ dependencies = [ [[package]] name = "ws" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6167,7 +6193,7 @@ dependencies = [ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -6387,6 +6413,7 @@ dependencies = [ "checksum hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)" = "7cb44cbce9d8ee4fb36e4c0ad7b794ac44ebaad924b9c8291a63215bb44c2c8f" "checksum hyper-tls 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" "checksum impl-codec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "78c441b3d2b5e24b407161e76d482b7bbd29b5da357707839ac40d95152f031f" "checksum impl-serde 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5158079de9d4158e0ce1de3ae0bd7be03904efc40b3d7dd8b8c301cbf6b52b56" "checksum impl-serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d26be4b97d738552ea423f76c4f681012ff06c3fa36fa968656b3679f60b4a1" @@ -6398,14 +6425,14 @@ dependencies = [ "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" "checksum js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "da3ea71161651a4cd97d999b2da139109c537b15ab33abc8ae4ead38deac8a03" -"checksum jsonrpc-client-transports 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0bb6fd4acf48d1f17eb7b0e27ab7043c16f063ad0aa7020ec92a431648286c2f" -"checksum jsonrpc-core 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34d379861584fe4e3678f6ae9ee60b41726df2989578c1dc0f90190dfc92dbe0" -"checksum jsonrpc-core-client 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6b0a3dc76953d88cdb47f5fe4ae21abcabc8d7edf4951ebce42db5c722d6698" -"checksum jsonrpc-derive 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9e2d4475549bc0126690788ed5107573c8917f97db5298f0043fb73d46fc498" -"checksum jsonrpc-http-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad55e8dd67c2c5b16436738b0baf319a6b353feba7401dbc1508a0bd8bd451f" -"checksum jsonrpc-pubsub 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "583f5930821dbc043236fe5d672d496ead7ff83d21351146598386c66fe8722a" -"checksum jsonrpc-server-utils 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "04f18ca34046c249751fe90428e77e9570beaa03b33a108e74418a586063d07d" -"checksum jsonrpc-ws-server 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aee1265de937bd53ad0fc95ff5817314922ce009fa99a04a09fdf449b140ddf6" +"checksum jsonrpc-client-transports 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "39577db48b004cffb4c5b8e5c9b993c177c52599ecbee88711e815acf65144db" +"checksum jsonrpc-core 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd42951eb35079520ee29b7efbac654d85821b397ef88c8151600ef7e2d00217" +"checksum jsonrpc-core-client 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f047c10738edee7c3c6acf5241a0ce33df32ef9230c1a7fb03e4a77ee72c992f" +"checksum jsonrpc-derive 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29f9149f785deaae92a4c834a9a1a83a4313b8cfedccf15362cd4cf039a64501" +"checksum jsonrpc-http-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4edd28922653d79e4f6c0f5d0a1034a4edbc5f9cf6cad8ec85e2a685713e3708" +"checksum jsonrpc-pubsub 13.1.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "e2c08b444cc0ed70263798834343d0ac875e664257df8079160f23ac1ea79446" +"checksum jsonrpc-server-utils 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44561bfdd31401bad790527f1e951dde144f2341ddc3e1b859d32945e1a34eff" +"checksum jsonrpc-ws-server 13.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d230ff76a8e4a3fb068aab6ba23d0c4e7d6e3b41bca524daa33988b04b065265" "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum keccak-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3468207deea1359a0e921591ae9b4c928733d94eb9d6a2eeda994cfd59f42cf8" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" @@ -6510,6 +6537,7 @@ dependencies = [ "checksum pbkdf2 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" "checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" "checksum pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c1d2cfa5a714db3b5f24f0915e74fcdf91d09d496ba61329705dda7774d2af" "checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b" @@ -6664,6 +6692,7 @@ dependencies = [ "checksum unsigned-varint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2c64cdf40b4a9645534a943668681bcb219faf51874d4b65d2e0abda1b10a2ab" "checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" "checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" "checksum utf8-ranges 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9d50aa7650df78abf942826607c62468ce18d9019673d4a2ebe1865dbb96ffde" "checksum vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "33dd455d0f96e90a75803cfeb7f948768c08d70a6de9a8d2362461935698bf95" "checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" @@ -6698,7 +6727,7 @@ dependencies = [ "checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum wincolor 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" -"checksum ws 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ec91ea61b83ce033c43c06c52ddc7532f465c0153281610d44c58b74083aee1a" +"checksum ws 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a6f5bb86663ff4d1639408410f50bf6050367a8525d644d49a6894cd618a631" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum x25519-dalek 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7ee1585dc1484373cbc1cee7aafda26634665cf449436fd6e24bfd1fad230538" "checksum xdg 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57" diff --git a/core/rpc-servers/Cargo.toml b/core/rpc-servers/Cargo.toml index 54a4b68eab9bf..d4befd52e9f7a 100644 --- a/core/rpc-servers/Cargo.toml +++ b/core/rpc-servers/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -jsonrpc-core = "13.0.0" -pubsub = { package = "jsonrpc-pubsub", version = "13.0.0" } +jsonrpc-core = "13.1.0" +pubsub = { package = "jsonrpc-pubsub", version = "13.1.0" } log = "0.4" serde = "1.0" sr-primitives = { path = "../sr-primitives" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -http = { package = "jsonrpc-http-server", version = "13.0.0" } -ws = { package = "jsonrpc-ws-server", version = "13.0.0" } +http = { package = "jsonrpc-http-server", version = "13.1.0" } +ws = { package = "jsonrpc-ws-server", version = "13.1.0" } diff --git a/core/rpc/Cargo.toml b/core/rpc/Cargo.toml index 0a9cf108c9ede..f35408c7b3c9c 100644 --- a/core/rpc/Cargo.toml +++ b/core/rpc/Cargo.toml @@ -8,10 +8,10 @@ edition = "2018" derive_more = "0.14.0" futures = "0.1" futures03 = { package = "futures-preview", version = "0.3.0-alpha.17", features = ["compat"] } -jsonrpc-core = "13.0.0" -jsonrpc-core-client = "13.0.0" -jsonrpc-pubsub = "13.0.0" -jsonrpc-derive = "13.0.0" +jsonrpc-core = "13.1.0" +jsonrpc-core-client = "13.1.0" +jsonrpc-pubsub = "13.1.0" +jsonrpc-derive = "13.1.0" log = "0.4" parking_lot = "0.9.0" codec = { package = "parity-scale-codec", version = "1.0.0" } diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 7b4ebb0c5f32d..1f35f7b86b41c 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -11,7 +11,7 @@ log = "0.4" tokio = "0.1.7" futures = "0.1" exit-future = "0.1" -jsonrpc-core = "13.0.0" +jsonrpc-core = "13.1.0" cli = { package = "substrate-cli", path = "../../core/cli" } codec = { package = "parity-scale-codec", version = "1.0.0" } sr-io = { path = "../../core/sr-io" } diff --git a/node/rpc-client/Cargo.toml b/node/rpc-client/Cargo.toml index bc492bc003394..b98df224dfcf1 100644 --- a/node/rpc-client/Cargo.toml +++ b/node/rpc-client/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" env_logger = "0.6" futures = "0.1.26" hyper = "0.12" -jsonrpc-core-client = { version = "13.0.0", features = ["http", "ws"] } +jsonrpc-core-client = { version = "13.1.0", features = ["http", "ws"] } log = "0.4" node-primitives = { path = "../primitives" } substrate-rpc = { path = "../../core/rpc", version = "2.0.0" } diff --git a/node/rpc/Cargo.toml b/node/rpc/Cargo.toml index 6042380c8379f..55371daad6b0f 100644 --- a/node/rpc/Cargo.toml +++ b/node/rpc/Cargo.toml @@ -6,10 +6,10 @@ edition = "2018" [dependencies] client = { package = "substrate-client", path = 
"../../core/client" } -jsonrpc-core = "13.0.0" -jsonrpc-core-client = "13.0.0" -jsonrpc-derive = "13.0.0" -jsonrpc-pubsub = "13.0.0" +jsonrpc-core = "13.1.0" +jsonrpc-core-client = "13.1.0" +jsonrpc-derive = "13.1.0" +jsonrpc-pubsub = "13.1.0" keyring = { package = "substrate-keyring", path = "../../core/keyring" } log = "0.4" node-primitives = { path = "../primitives" } From 1695b7389aca99084da53cfc0099d82b69602262 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 26 Aug 2019 14:55:24 +0200 Subject: [PATCH 31/32] Rename factory to builder --- core/service/src/{factory.rs => builder.rs} | 0 core/service/src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename core/service/src/{factory.rs => builder.rs} (100%) diff --git a/core/service/src/factory.rs b/core/service/src/builder.rs similarity index 100% rename from core/service/src/factory.rs rename to core/service/src/builder.rs diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 53a99f3677af5..363ad9cfdada3 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -48,7 +48,7 @@ use sr_primitives::generic::BlockId; use sr_primitives::traits::NumberFor; pub use self::error::Error; -pub use self::factory::{ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert}; +pub use self::builder::{ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert}; pub use config::{Configuration, Roles, PruningMode}; pub use chain_spec::{ChainSpec, Properties}; pub use transaction_pool::txpool::{ @@ -438,7 +438,7 @@ macro_rules! new_impl { }} } -mod factory; +mod builder; /// Abstraction over a Substrate service. pub trait AbstractService: 'static + Future + From 93740858f2505efc020976e578b081b4da57ea16 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 26 Aug 2019 14:58:41 +0200 Subject: [PATCH 32/32] =?UTF-8?q?Line=20widths=20=F0=9F=98=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core/service/src/builder.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/core/service/src/builder.rs b/core/service/src/builder.rs index 14016c750f862..3b079e549d8cd 100644 --- a/core/service/src/builder.rs +++ b/core/service/src/builder.rs @@ -233,7 +233,8 @@ impl( mut self, select_chain_builder: impl FnOnce(&mut Configuration, Arc) -> Result, Error> - ) -> Result, Error> { + ) -> Result, Error> { let select_chain = select_chain_builder(&mut self.config, self.client.clone())?; Ok(ServiceBuilder { @@ -256,7 +257,8 @@ impl( self, builder: impl FnOnce(&mut Configuration, Arc) -> Result - ) -> Result, Error> { + ) -> Result, Error> { self.with_opt_select_chain(|cfg, cl| builder(cfg, cl).map(Option::Some)) } @@ -295,7 +297,8 @@ impl( self, network_protocol_builder: impl FnOnce(&Configuration) -> Result - ) -> Result, Error> { + ) -> Result, Error> { let network_protocol = network_protocol_builder(&self.config)?; Ok(ServiceBuilder { @@ -378,7 +381,8 @@ impl, Arc, Option, Arc) -> Result<(UImpQu, Option), Error> - ) -> Result, Error> + ) -> Result, Error> where TSc: Clone { let (import_queue, fprb) = builder( &mut self.config, @@ -418,7 +422,8 @@ impl( self, transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc) -> Result - ) -> Result, Error> { + ) -> Result, Error> { let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?; Ok(ServiceBuilder { @@ -441,7 +446,8 @@ impl( self, rpc_ext_builder: impl FnOnce(Arc, Arc) -> 
URpc - ) -> Result, Error> { + ) -> Result, Error> { let rpc_extensions = rpc_ext_builder(self.client.clone(), self.transaction_pool.clone()); Ok(ServiceBuilder {
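
Editor's note (not part of the patch series): the hunks in [PATCH 31/32] and [PATCH 32/32] only rename `factory.rs` to `builder.rs` and re-wrap long signatures, but they also show the shape of the API that the rename settles on: each `with_*` method on `ServiceBuilder` consumes `self`, hands the current configuration and client to a caller-supplied closure, and returns a new `ServiceBuilder` (or an `Error`). The standalone sketch below illustrates that consuming, closure-driven builder pattern with made-up stand-in names (`Builder`, `Config`, `Client`, `SelectChain`, `with_select_chain`); it is an assumption-laden illustration of the pattern, not code from the patch, and the real generic parameters and method names in `core/service/src/builder.rs` differ.

// Standalone illustration of the consuming, closure-driven builder pattern
// used by ServiceBuilder above. All types here are stand-ins, not Substrate types.
use std::sync::Arc;

#[derive(Debug)]
struct Error(String);

struct Config { dev: bool }
struct Client;
struct SelectChain;

struct Builder<TSc> {
    config: Config,
    client: Arc<Client>,
    select_chain: TSc,
}

impl Builder<()> {
    fn new(config: Config) -> Self {
        Builder { config, client: Arc::new(Client), select_chain: () }
    }
}

impl<TSc> Builder<TSc> {
    // Consumes the builder, runs the user closure against the config and a clone of
    // the client handle, and returns a new builder carrying the produced component,
    // mirroring how each `with_*` method in the hunks threads `self.config` and
    // `self.client` into its `*_builder` closure.
    fn with_select_chain<USc>(
        self,
        builder: impl FnOnce(&Config, Arc<Client>) -> Result<USc, Error>,
    ) -> Result<Builder<USc>, Error> {
        let select_chain = builder(&self.config, self.client.clone())?;
        Ok(Builder {
            config: self.config,
            client: self.client,
            select_chain,
        })
    }
}

fn main() -> Result<(), Error> {
    let builder = Builder::new(Config { dev: true })
        .with_select_chain(|cfg, _client| {
            if cfg.dev { Ok(SelectChain) } else { Err(Error("no select chain".into())) }
        })?;
    let _ = builder.select_chain; // the component is now part of the builder's type state
    Ok(())
}

One consequence of this shape, visible in the re-wrapped signatures above, is that every `with_*` call changes a type parameter of the returned builder, so a service assembled without one of its components fails to compile rather than failing at runtime.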