diff --git a/Cargo.lock b/Cargo.lock index b7310b66beaa4..4e55e59a6def6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2783,18 +2783,41 @@ dependencies = [ [[package]] name = "jsonrpsee" version = "0.3.0" -source = "git+https://github.com/paritytech/jsonrpsee?branch=master#c69e0dd871cf25104f827da5da1f1832641419af" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" dependencies = [ + "jsonrpsee-http-client", "jsonrpsee-http-server", + "jsonrpsee-proc-macros 0.3.0 (git+https://github.com/paritytech/jsonrpsee?branch=master)", "jsonrpsee-types 0.3.0 (git+https://github.com/paritytech/jsonrpsee?branch=master)", "jsonrpsee-utils", + "jsonrpsee-ws-client 0.3.0 (git+https://github.com/paritytech/jsonrpsee?branch=master)", "jsonrpsee-ws-server", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.3.0" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" +dependencies = [ + "async-trait", + "fnv", + "futures 0.3.16", + "hyper", + "hyper-rustls", + "jsonrpsee-types 0.3.0 (git+https://github.com/paritytech/jsonrpsee?branch=master)", + "jsonrpsee-utils", + "log", + "serde", + "serde_json", + "thiserror", + "tokio", + "url", +] + [[package]] name = "jsonrpsee-http-server" version = "0.3.0" -source = "git+https://github.com/paritytech/jsonrpsee?branch=master#c69e0dd871cf25104f827da5da1f1832641419af" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" dependencies = [ "futures-channel", "futures-util", @@ -2826,6 +2849,19 @@ dependencies = [ "syn", ] +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.3.0" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" +dependencies = [ + "Inflector", + "bae", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "jsonrpsee-types" version = "0.3.0" @@ -2847,8 +2883,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" version = "0.3.0" -source = "git+https://github.com/paritytech/jsonrpsee?branch=master#c69e0dd871cf25104f827da5da1f1832641419af" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" dependencies = [ + "anyhow", "async-trait", "beef", "futures-channel", @@ -2864,7 +2901,7 @@ dependencies = [ [[package]] name = "jsonrpsee-utils" version = "0.3.0" -source = "git+https://github.com/paritytech/jsonrpsee?branch=master#c69e0dd871cf25104f827da5da1f1832641419af" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" dependencies = [ "beef", "futures-channel", @@ -2905,10 +2942,33 @@ dependencies = [ "url", ] +[[package]] +name = "jsonrpsee-ws-client" +version = "0.3.0" +source = "git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" +dependencies = [ + "async-trait", + "fnv", + "futures 0.3.16", + "jsonrpsee-types 0.3.0 (git+https://github.com/paritytech/jsonrpsee?branch=master)", + "log", + "pin-project 1.0.5", + "rustls", + "rustls-native-certs", + "serde", + "serde_json", + "soketto 0.6.0", + "thiserror", + "tokio", + "tokio-rustls", + "tokio-util", + "url", +] + [[package]] name = "jsonrpsee-ws-server" version = "0.3.0" -source = "git+https://github.com/paritytech/jsonrpsee?branch=master#c69e0dd871cf25104f827da5da1f1832641419af" +source = 
"git+https://github.com/paritytech/jsonrpsee?branch=master#3ad1cc215e0788121a36116bb5f2d38cc3d8a4cb" dependencies = [ "futures-channel", "futures-util", @@ -4185,6 +4245,18 @@ dependencies = [ "sp-keystore", ] +[[package]] +name = "node-rpc-client" +version = "2.0.0" +dependencies = [ + "futures 0.3.16", + "jsonrpsee", + "node-primitives", + "sc-rpc", + "sp-tracing", + "tokio", +] + [[package]] name = "node-runtime" version = "3.0.0-dev" @@ -6549,8 +6621,8 @@ name = "remote-externalities" version = "0.10.0-dev" dependencies = [ "env_logger 0.9.0", - "jsonrpsee-proc-macros", - "jsonrpsee-ws-client", + "jsonrpsee-proc-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpsee-ws-client 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log", "pallet-elections-phragmen", "parity-scale-codec", @@ -9200,6 +9272,21 @@ dependencies = [ "structopt", ] +[[package]] +name = "substrate-frame-rpc-support" +version = "3.0.0" +dependencies = [ + "frame-support", + "frame-system", + "futures 0.3.16", + "jsonrpsee", + "parity-scale-codec", + "sc-rpc-api", + "serde", + "sp-storage", + "tokio", +] + [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 9e07e27fbb4fa..f583c2b087c0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,8 +11,7 @@ members = [ "bin/node/executor", "bin/node/primitives", "bin/node/rpc", - # TODO(niklasad1): bring back once rpsee macros is a thing. - # "bin/node/rpc-client", + "bin/node/rpc-client", "bin/node/runtime", "bin/node/testing", "bin/utils/chain-spec-builder", @@ -198,8 +197,7 @@ members = [ "utils/frame/remote-externalities", "utils/frame/frame-utilities-cli", "utils/frame/try-runtime/cli", - # TODO(niklasad1): port this to jsonrpsee - # "utils/frame/rpc/support", + "utils/frame/rpc/support", "utils/frame/rpc/system", "utils/prometheus", "utils/wasm-builder", diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e9e1ad45308d4..20cbc7b8b22ff 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -34,13 +34,13 @@ use sp_runtime::traits::Block as BlockT; use std::sync::Arc; use jsonrpsee::RpcModule; -use pallet_contracts_rpc::ContractsRpc; -use pallet_mmr_rpc::MmrRpc; -use pallet_transaction_payment_rpc::TransactionPaymentRpc; -use sc_consensus_babe_rpc::BabeRpc; -use sc_finality_grandpa_rpc::GrandpaRpc; -use sc_sync_state_rpc::SyncStateRpc; -use substrate_frame_rpc_system::{SystemRpc, SystemRpcBackendFull}; +use pallet_contracts_rpc::{ContractsApiServer, ContractsRpc}; +use pallet_mmr_rpc::{MmrApiServer, MmrRpc}; +use pallet_transaction_payment_rpc::{TransactionPaymentApiServer, TransactionPaymentRpc}; +use sc_consensus_babe_rpc::{BabeApiServer, BabeRpc}; +use sc_finality_grandpa_rpc::{GrandpaApiServer, GrandpaRpc}; +use sc_sync_state_rpc::{SyncStateRpc, SyncStateRpcApiServer}; +use substrate_frame_rpc_system::{SystemApiServer, SystemRpc, SystemRpcBackendFull}; type FullClient = sc_service::TFullClient>; @@ -180,8 +180,7 @@ pub fn new_partial( Some(shared_authority_set.clone()), ), ) - .into_rpc_module() - .expect("TODO: error handling"); + .into_rpc(); let babe_rpc = BabeRpc::new( client2.clone(), @@ -191,8 +190,7 @@ pub fn new_partial( select_chain2, deny_unsafe, ) - .into_rpc_module() - .expect("TODO: error handling"); + .into_rpc(); let sync_state_rpc = SyncStateRpc::new( chain_spec, client2.clone(), @@ -201,21 +199,13 @@ pub fn new_partial( deny_unsafe, ) .expect("TODO: error handling") - .into_rpc_module() - .expect("TODO: error 
handling"); - let transaction_payment_rpc = TransactionPaymentRpc::new(client2.clone()) - .into_rpc_module() - .expect("TODO: error handling"); + .into_rpc(); + let transaction_payment_rpc = TransactionPaymentRpc::new(client2.clone()).into_rpc(); let system_rpc_backend = SystemRpcBackendFull::new(client2.clone(), transaction_pool2.clone(), deny_unsafe); - let system_rpc = SystemRpc::new(Box::new(system_rpc_backend)) - .into_rpc_module() - .expect("TODO: error handling"); - let mmr_rpc = MmrRpc::new(client2.clone()).into_rpc_module().expect("TODO: error handling"); - let contracts_rpc = ContractsRpc::new(client2.clone()) - .into_rpc_module() - .expect("TODO: error handling"); - + let system_rpc = SystemRpc::new(Box::new(system_rpc_backend)).into_rpc(); + let mmr_rpc = MmrRpc::new(client2.clone()).into_rpc(); + let contracts_rpc = ContractsRpc::new(client2.clone()).into_rpc(); let mut module = RpcModule::new(()); module.merge(grandpa_rpc).expect("TODO: error handling"); module.merge(babe_rpc).expect("TODO: error handling"); diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index a5255769158a4..e368e812c183e 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -12,9 +12,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.16" -jsonrpc-core-client = { version = "18.0.0", default-features = false, features = [ - "http", -] } +jsonrpsee = { git = "https://github.com/paritytech/jsonrpsee", branch = "master", features = ["client", "macros"] } +tokio = { version = "1.10", features = ["full"] } node-primitives = { version = "2.0.0", path = "../primitives" } sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } diff --git a/bin/node/rpc-client/src/main.rs b/bin/node/rpc-client/src/main.rs index 6d0b88799f54c..5b372a5c0f73a 100644 --- a/bin/node/rpc-client/src/main.rs +++ b/bin/node/rpc-client/src/main.rs @@ -22,21 +22,22 @@ //! This module shows how you can write a Rust RPC client that connects to a running //! substrate node and use statically typed RPC wrappers. -use futures::{Future, TryFutureExt}; -use jsonrpc_core_client::{transports::http, RpcError}; +use futures::TryFutureExt; +use jsonrpsee::{types::Error, ws_client::WsClientBuilder}; use node_primitives::Hash; -use sc_rpc::author::{hash::ExtrinsicOrHash, AuthorClient}; +use sc_rpc::author::{hash::ExtrinsicOrHash, AuthorApiClient}; -fn main() -> Result<(), RpcError> { +#[tokio::main] +async fn main() -> Result<(), Error> { sp_tracing::try_init_simple(); - futures::executor::block_on(async { - let uri = "http://localhost:9933"; - - http::connect(uri) - .and_then(|client: AuthorClient| remove_all_extrinsics(client)) - .await - }) + // TODO(niklasad1): https://github.com/paritytech/jsonrpsee/issues/448 + // changed this to the WS client because the jsonrpsee proc macros + // requires the trait bound `SubscriptionClient` which is not implemented for the HTTP client. + WsClientBuilder::default() + .build("ws://localhost:9944") + .and_then(|client| remove_all_extrinsics(client)) + .await } /// Remove all pending extrinsics from the node. @@ -47,17 +48,19 @@ fn main() -> Result<(), RpcError> { /// /// As the result of running the code the entire content of the transaction pool is going /// to be removed and the extrinsics are going to be temporarily banned. 
-fn remove_all_extrinsics( - client: AuthorClient, -) -> impl Future> { - client - .pending_extrinsics() - .and_then(move |pending| { - client.remove_extrinsic( - pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect(), - ) - }) - .map_ok(|removed| { - println!("Removed extrinsics: {:?}", removed); - }) +async fn remove_all_extrinsics(client: C) -> Result<(), Error> +where + C: AuthorApiClient + Sync, +{ + let pending_exts = client.pending_extrinsics().await?; + let removed = client + .remove_extrinsic( + pending_exts + .into_iter() + .map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())) + .collect(), + ) + .await?; + println!("Removed extrinsics: {:?}", removed); + Ok(()) } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index eaf49f2bbd134..0baab0dbf212a 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -18,10 +18,10 @@ //! RPC api for babe. -use futures::{FutureExt as _, TryFutureExt as _}; +use futures::TryFutureExt; use jsonrpsee::{ - types::error::{CallError, Error as JsonRpseeError}, - RpcModule, + proc_macros::rpc, + types::{async_trait, Error as JsonRpseeError, JsonRpcResult}, }; use sc_consensus_babe::{authorship, Config, Epoch}; @@ -38,6 +38,15 @@ use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{collections::HashMap, sync::Arc}; +/// Provides rpc methods for interacting with Babe. +#[rpc(client, server, namespace = "babe")] +pub trait BabeApi { + /// Returns data about which slots (primary or secondary) can be claimed in the current epoch + /// with the keys in the keystore. + #[method(name = "epochAuthorship")] + async fn epoch_authorship(&self) -> JsonRpcResult>; +} + /// Provides RPC methods for interacting with Babe. pub struct BabeRpc { /// shared reference to the client. @@ -54,16 +63,7 @@ pub struct BabeRpc { deny_unsafe: DenyUnsafe, } -impl BabeRpc -where - B: BlockT, - C: ProvideRuntimeApi - + HeaderBackend - + HeaderMetadata - + 'static, - C::Api: BabeRuntimeApi, - SC: SelectChain + Clone + 'static, -{ +impl BabeRpc { /// Creates a new instance of the BabeRpc handler. pub fn new( client: Arc, @@ -75,78 +75,76 @@ where ) -> Self { Self { client, shared_epoch_changes, keystore, babe_config, select_chain, deny_unsafe } } +} - /// Convert this [`BabeRpc`] to an [`RpcModule`]. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - // Returns data about which slots (primary or secondary) can be claimed in the current epoch - // with the keys in the keystore. 
- module.register_async_method("babe_epochAuthorship", |_params, babe| { - async move { - babe.deny_unsafe.check_if_safe()?; - let header = babe.select_chain.best_chain().map_err(Error::Consensus).await?; - let epoch_start = babe - .client - .runtime_api() - .current_epoch_start(&BlockId::Hash(header.hash())) - .map_err(|err| Error::StringError(format!("{:?}", err)))?; - - let epoch = epoch_data( - &babe.shared_epoch_changes, - &babe.client, - &babe.babe_config, - *epoch_start, - &babe.select_chain, - ) - .await?; - let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); - let mut claims: HashMap = HashMap::new(); - - let keys = { - epoch - .authorities - .iter() - .enumerate() - .filter_map(|(i, a)| { - if SyncCryptoStore::has_keys( - &*babe.keystore, - &[(a.0.to_raw_vec(), AuthorityId::ID)], - ) { - Some((a.0.clone(), i)) - } else { - None - } - }) - .collect::>() - }; - - for slot in *epoch_start..*epoch_end { - if let Some((claim, key)) = authorship::claim_slot_using_keys( - slot.into(), - &epoch, - &babe.keystore, - &keys, +#[async_trait] +impl BabeApiServer for BabeRpc +where + B: BlockT, + C: ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + 'static, + C::Api: BabeRuntimeApi, + SC: SelectChain + Clone + 'static, +{ + async fn epoch_authorship(&self) -> JsonRpcResult> { + self.deny_unsafe.check_if_safe()?; + let header = self.select_chain.best_chain().map_err(Error::Consensus).await?; + let epoch_start = self + .client + .runtime_api() + .current_epoch_start(&BlockId::Hash(header.hash())) + .map_err(|err| Error::StringError(format!("{:?}", err)))?; + + let epoch = epoch_data( + &self.shared_epoch_changes, + &self.client, + &self.babe_config, + *epoch_start, + &self.select_chain, + ) + .await?; + let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); + let mut claims: HashMap = HashMap::new(); + + let keys = { + epoch + .authorities + .iter() + .enumerate() + .filter_map(|(i, a)| { + if SyncCryptoStore::has_keys( + &*self.keystore, + &[(a.0.to_raw_vec(), AuthorityId::ID)], ) { - match claim { - PreDigest::Primary { .. } => { - claims.entry(key).or_default().primary.push(slot); - }, - PreDigest::SecondaryPlain { .. } => { - claims.entry(key).or_default().secondary.push(slot); - }, - PreDigest::SecondaryVRF { .. } => { - claims.entry(key).or_default().secondary_vrf.push(slot.into()); - }, - }; + Some((a.0.clone(), i)) + } else { + None } - } - - Ok(claims) + }) + .collect::>() + }; + + for slot in *epoch_start..*epoch_end { + if let Some((claim, key)) = + authorship::claim_slot_using_keys(slot.into(), &epoch, &self.keystore, &keys) + { + match claim { + PreDigest::Primary { .. } => { + claims.entry(key).or_default().primary.push(slot); + }, + PreDigest::SecondaryPlain { .. } => { + claims.entry(key).or_default().secondary.push(slot); + }, + PreDigest::SecondaryVRF { .. } => { + claims.entry(key).or_default().secondary_vrf.push(slot.into()); + }, + }; } - .boxed() - })?; + } - Ok(module) + Ok(claims) } } @@ -172,9 +170,9 @@ pub enum Error { impl std::error::Error for Error {} -impl From for CallError { +impl From for JsonRpseeError { fn from(error: Error) -> Self { - CallError::Failed(Box::new(error)) + JsonRpseeError::to_call_error(error) } } diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index bc6a266520363..fa8aa34d8824a 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -20,7 +20,7 @@ //! This is suitable for a testing environment. 
use futures::channel::{mpsc::SendError, oneshot}; -use jsonrpsee::types::error::CallError; +use jsonrpsee::types::error::{CallError, Error as JsonRpseeError}; use sc_consensus::ImportResult; use sp_blockchain::Error as BlockchainError; use sp_consensus::Error as ConsensusError; @@ -76,24 +76,58 @@ pub enum Error { Other(Box), } -impl Error { - fn to_code(&self) -> i32 { +impl From for JsonRpseeError { + fn from(err: Error) -> Self { use Error::*; - match self { - BlockImportError(_) => codes::BLOCK_IMPORT_FAILED, - BlockNotFound(_) => codes::BLOCK_NOT_FOUND, - EmptyTransactionPool => codes::EMPTY_TRANSACTION_POOL, - ConsensusError(_) => codes::CONSENSUS_ERROR, - InherentError(_) => codes::INHERENTS_ERROR, - BlockchainError(_) => codes::BLOCKCHAIN_ERROR, - SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, - _ => codes::UNKNOWN_ERROR, + match err { + BlockImportError(e) => CallError::Custom { + code: codes::BLOCK_IMPORT_FAILED, + message: format!("{:?}", e), + data: None, + } + .into(), + BlockNotFound(e) => CallError::Custom { + code: codes::BLOCK_NOT_FOUND, + message: format!("{:?}", e), + data: None, + } + .into(), + EmptyTransactionPool => CallError::Custom { + code: codes::EMPTY_TRANSACTION_POOL, + message: "Empty transaction pool".to_string(), + data: None, + } + .into(), + ConsensusError(e) => CallError::Custom { + code: codes::CONSENSUS_ERROR, + message: format!("{:?}", e), + data: None, + } + .into(), + InherentError(e) => CallError::Custom { + code: codes::INHERENTS_ERROR, + message: format!("{:?}", e), + data: None, + } + .into(), + BlockchainError(e) => CallError::Custom { + code: codes::BLOCKCHAIN_ERROR, + message: format!("{:?}", e), + data: None, + } + .into(), + SendError(_) | Canceled(_) => CallError::Custom { + code: codes::SERVER_SHUTTING_DOWN, + message: "Server is shutting down".to_string(), + data: None, + } + .into(), + _ => CallError::Custom { + code: codes::UNKNOWN_ERROR, + message: "Unknown error".to_string(), + data: None, + } + .into(), } } } - -/// Helper method to convert error type to JsonCallError. -pub fn to_call_error(err: impl Into) -> CallError { - let err = err.into(); - CallError::Custom { code: err.to_code(), message: err.to_string(), data: None } -} diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index dfb4da9c77ea3..b5c7ca911e7e8 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -18,28 +18,19 @@ //! RPC interface for the `ManualSeal` Engine. -use crate::error::{to_call_error, Error}; +use crate::error::Error; use futures::{ channel::{mpsc, oneshot}, - FutureExt, SinkExt, + SinkExt, +}; +use jsonrpsee::{ + proc_macros::rpc, + types::{async_trait, Error as JsonRpseeError, JsonRpcResult}, }; -use jsonrpsee::types::{Error as JsonRpseeError, RpcModule}; use sc_consensus::ImportedAux; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use sp_runtime::EncodedJustification; -/// Helper macro to bail early in async context when you want to -/// return `Box::pin(future::err(e))` once an error occurs. -/// Because `Try` is not implemented for it. -macro_rules! unwrap_or_fut_err { - ( $e:expr ) => { - match $e { - Ok(x) => x, - Err(e) => return Box::pin(futures::future::err(e)), - } - }; -} - /// Sender passed to the authorship task to report errors or successes. 
pub type Sender = Option>>; @@ -73,6 +64,27 @@ pub enum EngineCommand { }, } +/// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. +#[rpc(client, server, namespace = "engine")] +pub trait ManualSealApi { + /// Instructs the manual-seal authorship task to create a new block + #[method(name = "createBlock")] + async fn create_block( + &self, + create_empty: bool, + finalize: bool, + parent_hash: Option, + ) -> JsonRpcResult>; + + /// Instructs the manual-seal authorship task to finalize a block + #[method(name = "finalizeBlock")] + async fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> JsonRpcResult; +} + /// A struct that implements the [`ManualSealApi`]. pub struct ManualSeal { import_block_channel: mpsc::Sender>, @@ -94,63 +106,43 @@ impl ManualSeal { } } -// TODO(niklasad1): this should be replaced with a proc macro impl. -impl ManualSeal { - /// Convert a [`ManualSealApi`] to an [`RpcModule`]. Registers all the RPC methods available - /// with the RPC server. - pub fn into_rpc_module(self) -> std::result::Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - module.register_async_method::, _>( - "engine_createBlock", - |params, engine| { - let mut seq = params.sequence(); - - let create_empty = unwrap_or_fut_err!(seq.next()); - let finalize = unwrap_or_fut_err!(seq.next()); - let parent_hash = unwrap_or_fut_err!(seq.optional_next()); - let mut sink = engine.import_block_channel.clone(); - - async move { - let (sender, receiver) = oneshot::channel(); - // NOTE: this sends a Result over the channel. - let command = EngineCommand::SealNewBlock { - create_empty, - finalize, - parent_hash, - sender: Some(sender), - }; - - sink.send(command).await.map_err(|e| to_call_error(e))?; - - match receiver.await { - Ok(Ok(rx)) => Ok(rx), - Ok(Err(e)) => Err(to_call_error(e)), - Err(e) => Err(to_call_error(e)), - } - } - .boxed() - }, - )?; - - module.register_async_method("engine_finalizeBlock", |params, engine| { - let mut seq = params.sequence(); - - let hash = unwrap_or_fut_err!(seq.next()); - let justification = unwrap_or_fut_err!(seq.optional_next()); - let mut sink = engine.import_block_channel.clone(); - - async move { - let (sender, receiver) = oneshot::channel(); - let command = - EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }; - sink.send(command).await.map_err(|e| to_call_error(e))?; - receiver.await.map(|_| true).map_err(|e| to_call_error(e)) - } - .boxed() - })?; - - Ok(module) +#[async_trait] +impl ManualSealApiServer for ManualSeal { + async fn create_block( + &self, + create_empty: bool, + finalize: bool, + parent_hash: Option, + ) -> JsonRpcResult> { + let mut sink = self.import_block_channel.clone(); + let (sender, receiver) = oneshot::channel(); + // NOTE: this sends a Result over the channel. 
+ let command = EngineCommand::SealNewBlock { + create_empty, + finalize, + parent_hash, + sender: Some(sender), + }; + + sink.send(command).await?; + + match receiver.await { + Ok(Ok(rx)) => Ok(rx), + Ok(Err(e)) => Err(e.into()), + Err(e) => Err(JsonRpseeError::to_call_error(e)), + } + } + + async fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> JsonRpcResult { + let mut sink = self.import_block_channel.clone(); + let (sender, receiver) = oneshot::channel(); + let command = EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }; + sink.send(command).await?; + receiver.await.map(|_| true).map_err(|e| JsonRpseeError::to_call_error(e)) } } diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 5bbf46198de24..6d5621f846d8b 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -24,8 +24,9 @@ use log::warn; use std::sync::Arc; use jsonrpsee::{ - types::error::{CallError, Error as JsonRpseeError}, - RpcModule, SubscriptionSink, + proc_macros::rpc, + types::{async_trait, error::Error as JsonRpseeError, JsonRpcResult}, + SubscriptionSink, }; mod error; @@ -37,10 +38,33 @@ use sc_finality_grandpa::GrandpaJustificationStream; use sc_rpc::SubscriptionTaskExecutor; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use finality::RpcFinalityProofProvider; +use finality::{EncodedFinalityProof, RpcFinalityProofProvider}; use notification::JustificationNotification; use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; +/// Provides RPC methods for interacting with GRANDPA. +#[rpc(client, server, namespace = "grandpa")] +pub trait GrandpaApi { + /// Returns the state of the current best round state as well as the + /// ongoing background rounds. + #[method(name = "roundState")] + async fn round_state(&self) -> JsonRpcResult; + + /// Returns the block most recently finalized by Grandpa, alongside + /// side its justification. + #[subscription( + name = "subscribeJustifications" + aliases = "grandpa_justifications" + item = Notification + )] + fn subscribe_justifications(&self); + + /// Prove finality for the given block number by returning the Justification for the last block + /// in the set and all the intermediary headers to link them together. + #[method(name = "proveFinality")] + async fn prove_finality(&self, block: Number) -> JsonRpcResult>; +} + /// Provides RPC methods for interacting with GRANDPA. 
pub struct GrandpaRpc { executor: Arc, @@ -49,14 +73,8 @@ pub struct GrandpaRpc { justification_stream: GrandpaJustificationStream, finality_proof_provider: Arc, } - -impl +impl GrandpaRpc -where - VoterState: ReportVoterState + Send + Sync + 'static, - AuthoritySet: ReportAuthoritySet + Send + Sync + 'static, - Block: BlockT, - ProofProvider: RpcFinalityProofProvider + Send + Sync + 'static, { /// Prepare a new [`GrandpaApi`] pub fn new( @@ -68,69 +86,59 @@ where ) -> Self { Self { executor, authority_set, voter_state, justification_stream, finality_proof_provider } } +} + +#[async_trait] +impl + GrandpaApiServer> + for GrandpaRpc +where + VoterState: ReportVoterState + Send + Sync + 'static, + AuthoritySet: ReportAuthoritySet + Send + Sync + 'static, + Block: BlockT, + ProofProvider: RpcFinalityProofProvider + Send + Sync + 'static, +{ + async fn round_state(&self) -> JsonRpcResult { + ReportedRoundStates::from(&self.authority_set, &self.voter_state) + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - /// Convert this [`GrandpaApi`] to an [`RpcModule`]. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - // Returns the state of the current best round state as well as the - // ongoing background rounds. - module.register_method("grandpa_roundState", |_params, grandpa| { - ReportedRoundStates::from(&grandpa.authority_set, &grandpa.voter_state) - .map_err(to_jsonrpsee_call_error) - })?; - - // Prove finality for the given block number by returning the [`Justification`] for the last - // block in the set and all the intermediary headers to link them together. - module.register_method("grandpa_proveFinality", |params, grandpa| { - let block: NumberFor = params.one()?; - grandpa - .finality_proof_provider - .rpc_prove_finality(block) - .map_err(|finality_err| error::Error::ProveFinalityFailed(finality_err)) - .map_err(to_jsonrpsee_call_error) - })?; - - // Returns the block most recently finalized by Grandpa, alongside its justification. - module.register_subscription( - "grandpa_subscribeJustifications", - "grandpa_unsubscribeJustifications", - |_params, mut sink: SubscriptionSink, ctx: Arc>| { - let stream = ctx.justification_stream.subscribe().map( - |x: sc_finality_grandpa::GrandpaJustification| { - JustificationNotification::from(x) - }, - ); - - fn log_err(err: JsonRpseeError) -> bool { - log::error!( - "Could not send data to grandpa_justifications subscription. Error: {:?}", - err - ); - false - } - - let fut = async move { - stream - .take_while(|justification| { - future::ready(sink.send(justification).map_or_else(log_err, |_| true)) - }) - .for_each(|_| future::ready(())) - .await; - } - .boxed(); - ctx.executor.execute(fut); - Ok(()) + fn subscribe_justifications(&self, mut sink: SubscriptionSink) { + let stream = self.justification_stream.subscribe().map( + |x: sc_finality_grandpa::GrandpaJustification| { + JustificationNotification::from(x) }, - )?; + ); + + fn log_err(err: JsonRpseeError) -> bool { + log::error!( + "Could not send data to grandpa_justifications subscription. Error: {:?}", + err + ); + false + } - Ok(module) + let fut = async move { + stream + .take_while(|justification| { + future::ready(sink.send(justification).map_or_else(log_err, |_| true)) + }) + .for_each(|_| future::ready(())) + .await; + } + .boxed(); + self.executor.execute(fut); } -} -// TODO: (dp) make available to other code? 
-fn to_jsonrpsee_call_error(err: error::Error) -> CallError { - CallError::Failed(Box::new(err)) + async fn prove_finality( + &self, + block: NumberFor, + ) -> JsonRpcResult> { + self.finality_proof_provider + .rpc_prove_finality(block) + .map_err(|finality_err| error::Error::ProveFinalityFailed(finality_err)) + .map_err(|e| JsonRpseeError::to_call_error(e)) + } } #[cfg(test)] diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 05286b7f14b58..3de0a93f50cb6 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -25,7 +25,7 @@ sc-chain-spec = { path = "../chain-spec", version = "4.0.0-dev" } serde = { version = "1.0.126", features = ["derive"] } serde_json = "1.0.41" -jsonrpsee = { git = "https://github.com/paritytech/jsonrpsee", branch = "master", features = ["server"] } +jsonrpsee = { git = "https://github.com/paritytech/jsonrpsee", branch = "master", features = ["full"] } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-rpc = { version = "4.0.0-dev", path = "../../primitives/rpc" } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index 4eb84a730fa8a..f37a51ef05a12 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ -18,7 +18,10 @@ //! Authoring RPC module errors. -use jsonrpsee::types::{error::CallError, to_json_raw_value, JsonRawValue}; +use jsonrpsee::types::{ + error::{CallError, Error as JsonRpseeError}, + to_json_raw_value, JsonRawValue, +}; use sp_runtime::transaction_validity::InvalidTransaction; /// Author RPC Result type. @@ -99,81 +102,81 @@ const UNSUPPORTED_KEY_TYPE: i32 = POOL_INVALID_TX + 7; /// it is not propagable and the local node does not author blocks. const POOL_UNACTIONABLE: i32 = POOL_INVALID_TX + 8; -impl From for CallError { +impl From for JsonRpseeError { fn from(e: Error) -> Self { use sc_transaction_pool_api::error::Error as PoolError; match e { - Error::BadFormat(e) => Self::Custom { + Error::BadFormat(e) => CallError::Custom { code: BAD_FORMAT, message: format!("Extrinsic has invalid format: {}", e).into(), data: None, - }, - Error::Verification(e) => Self::Custom { + }.into(), + Error::Verification(e) => CallError::Custom { code: VERIFICATION_ERROR, message: format!("Verification Error: {}", e).into(), data: JsonRawValue::from_string(format!("\"{:?}\"", e)).ok(), - }, - Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => Self::Custom { + }.into(), + Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => CallError::Custom { code: POOL_INVALID_TX, message: "Invalid Transaction".into(), data: JsonRawValue::from_string(format!("\"Custom error: {}\"", e)).ok(), - }, + }.into(), Error::Pool(PoolError::InvalidTransaction(e)) => { - Self::Custom { + CallError::Custom { code: POOL_INVALID_TX, message: "Invalid Transaction".into(), data: to_json_raw_value(&e).ok(), } - }, - Error::Pool(PoolError::UnknownTransaction(e)) => Self::Custom { + }.into(), + Error::Pool(PoolError::UnknownTransaction(e)) => CallError::Custom { code: POOL_UNKNOWN_VALIDITY, message: "Unknown Transaction Validity".into(), data: to_json_raw_value(&e).ok(), - }, - Error::Pool(PoolError::TemporarilyBanned) => Self::Custom { + }.into(), + Error::Pool(PoolError::TemporarilyBanned) => CallError::Custom { code: (POOL_TEMPORARILY_BANNED), message: "Transaction is temporarily banned".into(), data: None, - }, - Error::Pool(PoolError::AlreadyImported(hash)) => Self::Custom { + }.into(), + 
Error::Pool(PoolError::AlreadyImported(hash)) => CallError::Custom { code: (POOL_ALREADY_IMPORTED), message: "Transaction Already Imported".into(), data: JsonRawValue::from_string(format!("\"{:?}\"", hash)).ok(), - }, - Error::Pool(PoolError::TooLowPriority { old, new }) => Self::Custom { + }.into(), + Error::Pool(PoolError::TooLowPriority { old, new }) => CallError::Custom { code: (POOL_TOO_LOW_PRIORITY), message: format!("Priority is too low: ({} vs {})", old, new), data: to_json_raw_value(&"The transaction has too low priority to replace another transaction already in the pool.").ok(), - }, - Error::Pool(PoolError::CycleDetected) => Self::Custom { + }.into(), + Error::Pool(PoolError::CycleDetected) => CallError::Custom { code: (POOL_CYCLE_DETECTED), message: "Cycle Detected".into(), data: None, - }, - Error::Pool(PoolError::ImmediatelyDropped) => Self::Custom { + }.into(), + Error::Pool(PoolError::ImmediatelyDropped) => CallError::Custom { code: (POOL_IMMEDIATELY_DROPPED), message: "Immediately Dropped".into(), data: to_json_raw_value(&"The transaction couldn't enter the pool because of the limit").ok(), - }, - Error::Pool(PoolError::Unactionable) => Self::Custom { + }.into(), + Error::Pool(PoolError::Unactionable) => CallError::Custom { code: (POOL_UNACTIONABLE), message: "Unactionable".into(), data: to_json_raw_value( &"The transaction is unactionable since it is not propagable and \ the local node does not author blocks" ).ok(), - }, - Error::UnsupportedKeyType => Self::Custom { + }.into(), + Error::UnsupportedKeyType => CallError::Custom { code: UNSUPPORTED_KEY_TYPE, message: "Unknown key type crypto" .into(), data: to_json_raw_value( &"The crypto for the given key type is unknown, please add the public key to the \ request to insert the key successfully." ).ok(), - }, + }.into(), Error::UnsafeRpcCalled(e) => e.into(), - e => Self::Failed(Box::new(e)), + e => e.into(), } } } diff --git a/client/rpc-api/src/author/mod.rs b/client/rpc-api/src/author/mod.rs index 37bbda978193a..a94cf6ccd8f49 100644 --- a/client/rpc-api/src/author/mod.rs +++ b/client/rpc-api/src/author/mod.rs @@ -18,5 +18,62 @@ //! Substrate block-author/full-node API. +use jsonrpsee::{proc_macros::rpc, types::JsonRpcResult}; +use sc_transaction_pool_api::TransactionStatus; +use sp_core::Bytes; + pub mod error; pub mod hash; + +/// Substrate authoring RPC API +#[rpc(client, server, namespace = "author")] +pub trait AuthorApi { + /// Submit hex-encoded extrinsic for inclusion in block. + #[method(name = "submitExtrinsic")] + async fn submit_extrinsic(&self, extrinsic: Bytes) -> JsonRpcResult; + + /// Insert a key into the keystore. + #[method(name = "insertKey")] + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> JsonRpcResult<()>; + + /// Generate new session keys and returns the corresponding public keys. + #[method(name = "rotateKeys")] + fn rotate_keys(&self) -> JsonRpcResult; + + /// Checks if the keystore has private keys for the given session public keys. + /// + /// `session_keys` is the SCALE encoded session keys object from the runtime. + /// + /// Returns `true` iff all private keys could be found. + #[method(name = "hasSessionKeys")] + fn has_session_keys(&self, session_keys: Bytes) -> JsonRpcResult; + + /// Checks if the keystore has private keys for the given public key and key type. + /// + /// Returns `true` if a private key could be found. 
+ #[method(name = "hasKey")] + fn has_key(&self, public_key: Bytes, key_type: String) -> JsonRpcResult; + + /// Returns all pending extrinsics, potentially grouped by sender. + #[method(name = "pendingExtrinsics")] + fn pending_extrinsics(&self) -> JsonRpcResult>; + + /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting. + #[method(name = "removeExtrinsic")] + fn remove_extrinsic( + &self, + bytes_or_hash: Vec>, + ) -> JsonRpcResult>; + + /// Submit an extrinsic to watch. + /// + /// See [`TransactionStatus`](sc_transaction_pool_api::TransactionStatus) for details on + /// transaction life cycle. + #[subscription( + name = "submitAndWatchExtrinsic", + aliases = "author_extrinsicUpdate", + unsubscribe_aliases = "author_unwatchExtrinsic", + item = TransactionStatus + )] + fn watch_extrinsic(&self, bytes: Bytes); +} diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index 1b01228497516..f35261446b665 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -18,7 +18,7 @@ //! Error helpers for Chain RPC module. -use jsonrpsee::types::error::CallError; +use jsonrpsee::types::error::{CallError, Error as JsonRpseeError}; /// Chain RPC Result type. pub type Result = std::result::Result; @@ -45,20 +45,12 @@ impl std::error::Error for Error { /// Base error code for all chain errors. const BASE_ERROR: i32 = 3000; -impl From for CallError { +impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::Other(message) => Self::Custom { code: BASE_ERROR + 1, message, data: None }, - e => Self::Failed(Box::new(e)), - } - } -} - -impl From for jsonrpsee::types::Error { - fn from(e: Error) -> Self { - match e { - Error::Other(msg) => Self::Custom(msg), - Error::Client(e) => Self::Custom(e.to_string()), + Error::Other(message) => + CallError::Custom { code: BASE_ERROR + 1, message, data: None }.into(), + e => e.into(), } } } diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 1364896b0aa0e..8ab7b73baf973 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -18,4 +18,58 @@ //! Substrate blockchain API. +use jsonrpsee::{proc_macros::rpc, types::JsonRpcResult}; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; + pub mod error; + +#[rpc(client, server, namespace = "chain")] +pub trait ChainApi { + /// Get header. + #[method(name = "getHeader")] + async fn header(&self, hash: Option) -> JsonRpcResult>; + + /// Get header and body of a relay chain block. + #[method(name = "getBlock")] + async fn block(&self, hash: Option) -> JsonRpcResult>; + + /// Get hash of the n-th block in the canon chain. + /// + /// By default returns latest block hash. + #[method(name = "getBlockHash", aliases = "chain_getHead")] + fn block_hash( + &self, + hash: Option>, + ) -> JsonRpcResult>>; + + /// Get hash of the last finalized block in the canon chain. + #[method(name = "getFinalizedHead", aliases = "chain_getFinalisedHead")] + fn finalized_head(&self) -> JsonRpcResult; + + /// All head subscription. + #[subscription( + name = "allHead", + aliases = "chain_subscribeAllHeads", + unsubscribe_aliases = "chain_unsubscribeAllHeads", + item = Header + )] + fn subscribe_all_heads(&self); + + /// New head subscription. 
+ #[subscription( + name = "newHead", + aliases = "subscribe_newHead, chain_subscribeNewHead, chain_subscribeNewHeads", + unsubscribe_aliases = "chain_unsubscribeNewHead, chain_unsubscribeNewHeads", + item = Header + )] + fn subscribe_new_heads(&self); + + /// Finalized head subscription. + #[subscription( + name = "finalizedHead", + aliases = "chain_subscribeFinalisedHeads, chain_subscribeFinalizedHeads", + unsubscribe_aliases = "chain_unsubscribeFinalizedHeads, chain_unsubscribeFinalisedHeads", + item = Header + )] + fn subscribe_finalized_heads(&self); +} diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index e88d24e0337db..76c6b593b6578 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -16,4 +16,72 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Substrate state API. +use crate::state::ReadProof; +use jsonrpsee::{proc_macros::rpc, types::JsonRpcResult}; +use sp_core::storage::{PrefixedStorageKey, StorageData, StorageKey}; + +/// Substrate child state API +/// +/// Note that all `PrefixedStorageKey` are deserialized +/// from json and not guaranteed valid. +#[rpc(client, server, namespace = "childstate")] +pub trait ChildStateApi { + /// DEPRECATED: Please use `getKeysPaged` with proper paging support. + /// Returns the keys with prefix from a child storage, leave empty to get all the keys + #[method(name = "getKeys")] + async fn storage_keys( + &self, + child_storage_key: PrefixedStorageKey, + prefix: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the keys with prefix from a child storage with pagination support. + /// Up to `count` keys will be returned. + /// If `start_key` is passed, return next keys in storage in lexicographic order. + #[method(name = "getKeysPaged", aliases = "getKeysPagedAt")] + async fn storage_keys_paged( + &self, + child_storage_key: PrefixedStorageKey, + prefix: Option, + count: u32, + start_key: Option, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns a child storage entry at a specific block's state. + #[method(name = "getStorage")] + async fn storage( + &self, + child_storage_key: PrefixedStorageKey, + key: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the hash of a child storage entry at a block's state. + #[method(name = "getStorageHash")] + async fn storage_hash( + &self, + child_storage_key: PrefixedStorageKey, + key: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the size of a child storage entry at a block's state. + #[method(name = "getStorageSize")] + async fn storage_size( + &self, + child_storage_key: PrefixedStorageKey, + key: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns proof of storage for child key entries at a specific block's state. + #[method(name = "state_getChildReadProof")] + async fn read_child_proof( + &self, + child_storage_key: PrefixedStorageKey, + keys: Vec, + hash: Option, + ) -> JsonRpcResult>; +} diff --git a/client/rpc-api/src/offchain/mod.rs b/client/rpc-api/src/offchain/mod.rs index 646268e23e906..9069583f4cfcb 100644 --- a/client/rpc-api/src/offchain/mod.rs +++ b/client/rpc-api/src/offchain/mod.rs @@ -18,4 +18,19 @@ //! Substrate offchain API. 
+use jsonrpsee::{proc_macros::rpc, types::JsonRpcResult}; +use sp_core::{offchain::StorageKind, Bytes}; + pub mod error; + +/// Substrate offchain RPC API +#[rpc(client, server, namespace = "offchain")] +pub trait OffchainApi { + /// Set offchain local storage under given key and prefix. + #[method(name = "localStorageSet")] + fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> JsonRpcResult<()>; + + /// Get offchain local storage under given key and prefix. + #[method(name = "localStorageGet")] + fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> JsonRpcResult>; +} diff --git a/client/rpc-api/src/policy.rs b/client/rpc-api/src/policy.rs index 628651f93e450..c0a21ac2eddcb 100644 --- a/client/rpc-api/src/policy.rs +++ b/client/rpc-api/src/policy.rs @@ -21,7 +21,7 @@ //! Contains a `DenyUnsafe` type that can be used to deny potentially unsafe //! RPC when accessed externally. -use jsonrpsee::types::error::CallError; +use jsonrpsee::types::error::{CallError, Error as JsonRpseeError}; /// Signifies whether a potentially unsafe RPC should be denied. #[derive(Clone, Copy, Debug)] @@ -57,6 +57,12 @@ impl std::error::Error for UnsafeRpcError {} impl From for CallError { fn from(e: UnsafeRpcError) -> CallError { - CallError::Failed(Box::new(e)) + CallError::from_std_error(e) + } +} + +impl From for JsonRpseeError { + fn from(e: UnsafeRpcError) -> JsonRpseeError { + JsonRpseeError::to_call_error(e) } } diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index 8e824ea41e963..e70019db3c3a5 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -63,27 +63,16 @@ impl std::error::Error for Error { /// Base code for all state errors. const BASE_ERROR: i32 = 4000; -impl From for CallError { +impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { Error::InvalidBlockRange { .. } => - Self::Custom { code: BASE_ERROR + 1, message: e.to_string(), data: None }, + CallError::Custom { code: BASE_ERROR + 1, message: e.to_string(), data: None } + .into(), Error::InvalidCount { .. } => - Self::Custom { code: BASE_ERROR + 2, message: e.to_string(), data: None }, - e => Self::Failed(Box::new(e)), + CallError::Custom { code: BASE_ERROR + 2, message: e.to_string(), data: None } + .into(), + e => e.into(), } } } - -/// TODO(niklasad1): better errors -impl From for JsonRpseeError { - fn from(e: Error) -> Self { - Self::Custom(e.to_string()) - } -} - -impl From for Error { - fn from(e: JsonRpseeError) -> Self { - Self::Client(Box::new(e)) - } -} diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 6f22488664fa7..aed8aeeddd6e2 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -18,7 +18,288 @@ //! Substrate state API. +use jsonrpsee::{proc_macros::rpc, types::JsonRpcResult}; +use sp_core::{ + storage::{StorageChangeSet, StorageData, StorageKey}, + Bytes, +}; +use sp_version::RuntimeVersion; + pub mod error; pub mod helpers; pub use self::helpers::ReadProof; + +/// Substrate state API +#[rpc(client, server, namespace = "state")] +pub trait StateApi { + /// Call a contract at a block's state. + #[method(name = "call", aliases = "state_callAt")] + async fn call(&self, name: String, bytes: Bytes, hash: Option) -> JsonRpcResult; + + /// DEPRECATED: Please use `getKeysPaged` with proper paging support. + /// Returns the keys with prefix, leave empty to get all the keys. 
+ #[method(name = "getKeys")] + async fn storage_keys( + &self, + prefix: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the keys with prefix, leave empty to get all the keys + #[method(name = "getPairs")] + async fn storage_pairs( + &self, + prefix: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the keys with prefix with pagination support. + /// Up to `count` keys will be returned. + /// If `start_key` is passed, return next keys in storage in lexicographic order. + #[method(name = "getKeysPaged", aliases = "state_getKeysPagedAt")] + async fn storage_keys_paged( + &self, + prefix: Option, + count: u32, + start_key: Option, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns a storage entry at a specific block's state. + #[method(name = "getStorage", aliases = "state_getStorageAt")] + async fn storage( + &self, + key: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the hash of a storage entry at a block's state. + #[method(name = "getStorageHash", aliases = "state_getStorageHashAt")] + async fn storage_hash( + &self, + key: StorageKey, + hash: Option, + ) -> JsonRpcResult>; + + /// Returns the size of a storage entry at a block's state. + #[method(name = "getStorageSize", aliases = "state_getStorageSizeAt")] + async fn storage_size(&self, key: StorageKey, hash: Option) + -> JsonRpcResult>; + + /// Returns the runtime metadata as an opaque blob. + #[method(name = "getMetadata")] + async fn metadata(&self, hash: Option) -> JsonRpcResult; + + /// Get the runtime version. + #[method(name = "getRuntimeVersion", aliases = "chain_getRuntimeVersion")] + async fn runtime_version(&self, hash: Option) -> JsonRpcResult; + + /// Query historical storage entries (by key) starting from a block given as the second + /// parameter. + /// + /// NOTE This first returned result contains the initial state of storage for all keys. + /// Subsequent values in the vector represent changes to the previous state (diffs). + #[method(name = "queryStorage")] + async fn query_storage( + &self, + keys: Vec, + block: Hash, + hash: Option, + ) -> JsonRpcResult>>; + + /// Query storage entries (by key) starting at block hash given as the second parameter. + #[method(name = "queryStorageAt")] + async fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> JsonRpcResult>>; + + /// Returns proof of storage entries at a specific block's state. + #[method(name = "getReadProof")] + async fn read_proof( + &self, + keys: Vec, + hash: Option, + ) -> JsonRpcResult>; + + /// New runtime version subscription + #[subscription( + name = "runtimeVersion", + aliases = "state_subscribeRuntimeVersion, chain_subscribeRuntimeVersion", + unsubscribe_aliases = "state_unsubscribeRuntimeVersion, chain_unsubscribeRuntimeVersion", + item = RuntimeVersion, + )] + fn subscribe_runtime_version(&self); + + /// New storage subscription + #[subscription( + name = "storage", + aliases = "state_subscribeStorage", + unsubscribe_aliases = "state_unsubscribeStorage", + item = StorageChangeSet, + )] + fn subscribe_storage(&self, keys: Option>); + + /// The `traceBlock` RPC provides a way to trace the re-execution of a single + /// block, collecting Spans and Events from both the client and the relevant WASM runtime. + /// The Spans and Events are conceptually equivalent to those from the [Tracing][1] crate. 
+ /// + /// The structure of the traces follows that of the block execution pipeline, so meaningful + /// interpretation of the traces requires an understanding of the Substrate chain's block + /// execution. + /// + /// [Link to conceptual map of trace structure for Polkadot and Kusama block execution.][2] + /// + /// [1]: https://crates.io/crates/tracing + /// [2]: https://docs.google.com/drawings/d/1vZoJo9jaXlz0LmrdTOgHck9_1LsfuQPRmTr-5g1tOis/edit?usp=sharing + /// + /// ## Node requirements + /// + /// - Fully synced archive node (i.e. a node that is not actively doing a "major" sync). + /// - [Tracing enabled WASM runtimes](#creating-tracing-enabled-wasm-runtimes) for all runtime + /// versions + /// for which tracing is desired. + /// + /// ## Node recommendations + /// + /// - Use fast SSD disk storage. + /// - Run node flags to increase DB read speed (i.e. `--state-cache-size`, `--db-cache`). + /// + /// ## Creating tracing enabled WASM runtimes + /// + /// - Checkout commit of chain version to compile with WASM traces + /// - [diener][1] can help to peg commit of substrate to what the chain expects. + /// - Navigate to the `runtime` folder/package of the chain + /// - Add feature `with-tracing = ["frame-executive/with-tracing", "sp-io/with-tracing"]` + /// under `[features]` to the `runtime` packages' `Cargo.toml`. + /// - Compile the runtime with `cargo build --release --features with-tracing` + /// - Tracing-enabled WASM runtime should be found in + /// `./target/release/wbuild/{{chain}}-runtime` + /// and be called something like `{{your_chain}}_runtime.compact.wasm`. This can be + /// renamed/modified however you like, as long as it retains the `.wasm` extension. + /// - Run the node with the wasm blob overrides by placing them in a folder with all your + /// runtimes, + /// and passing the path of this folder to your chain, e.g.: + /// - `./target/release/polkadot --wasm-runtime-overrides /home/user/my-custom-wasm-runtimes` + /// + /// You can also find some pre-built tracing enabled wasm runtimes in [substrate-archive][2] + /// + /// [Source.][3] + /// + /// [1]: https://crates.io/crates/diener + /// [2]: https://github.com/paritytech/substrate-archive/tree/master/wasm-tracing + /// [3]: https://github.com/paritytech/substrate-archive/wiki + /// + /// ## RPC Usage + /// + /// The RPC allows for two filtering mechanisms: tracing targets and storage key prefixes. + /// The filtering of spans and events takes place after they are all collected; so while filters + /// do not reduce time for actual block re-execution, they reduce the response payload size. + /// + /// Note: storage events primarily come from _primitives/state-machine/src/ext.rs_. + /// The default filters can be overridden, see the [params section](#params) for details. 
+ /// + /// ### `curl` example + /// + /// - Get tracing spans and events + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "pallet,frame,state", "", ""]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with all `storage_keys` + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "", ""]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with `storage_keys` ('f0c365c3cf59d671eb72da0e7a4113c4') + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "f0c365c3cf59d671eb72da0e7a4113c4", ""]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with `storage_keys` ('f0c365c3cf59d671eb72da0e7a4113c4') and method + /// ('Put') + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "f0c365c3cf59d671eb72da0e7a4113c4", "Put"]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with all `storage_keys` and method ('Put') + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "", "Put"]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// ### Params + /// + /// - `block` (param index 0): Hash of the block to trace. + /// - `targets` (param index 1): String of comma separated (no spaces) targets. Specified + /// targets match with trace targets by prefix (i.e if a target is in the beginning + /// of a trace target it is considered a match). If an empty string is specified no + /// targets will be filtered out. The majority of targets correspond to Rust module names, + /// and the ones that do not are typically "hardcoded" into span or event location + /// somewhere in the Substrate source code. ("Non-hardcoded" targets typically come from frame + /// support macros.) + /// - `storage_keys` (param index 2): String of comma separated (no spaces) hex encoded + /// (no `0x` prefix) storage keys. If an empty string is specified no events will + /// be filtered out. If anything other than an empty string is specified, events + /// will be filtered by storage key (so non-storage events will **not** show up). + /// You can specify any length of a storage key prefix (i.e. if a specified storage + /// key is in the beginning of an events storage key it is considered a match). + /// Example: for balance tracking on Polkadot & Kusama you would likely want + /// to track changes to account balances with the frame_system::Account storage item, + /// which is a map from `AccountId` to `AccountInfo`. The key filter for this would be + /// the storage prefix for the map: + /// `26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9` + /// - `methods` (param index 3): String of comma separated (no spaces) tracing event method. 
+ /// If an empty string is specified no events will be filtered out. If anything other than + /// an empty string is specified, events will be filtered by method (so non-method events will + /// **not** show up). + /// + /// Additionally you would want to track the extrinsic index, which is under the + /// `:extrinsic_index` key. The key for this would be the aforementioned string as bytes + /// in hex: `3a65787472696e7369635f696e646578`. + /// The following are some resources to learn more about storage keys in substrate: + /// [substrate storage][1], [transparent keys in substrate][2], + /// [querying substrate storage via rpc][3]. + /// + /// [1]: https://substrate.dev/docs/en/knowledgebase/advanced/storage#storage-map-key + /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ + /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ + /// + /// ### Maximum payload size + /// + /// The maximum payload size allowed is 15mb. Payloads over this size will return a + /// object with a simple error message. If you run into issues with payload size you can + /// narrow down the traces using a smaller set of targets and/or storage keys. + /// + /// If you are having issues with maximum payload size you can use the flag + /// `-ltracing=trace` to get some logging during tracing. + #[method(name = "traceBlock")] + async fn trace_block( + &self, + block: Hash, + targets: Option, + storage_keys: Option, + methods: Option, + ) -> JsonRpcResult; +} diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index 70a80291d9aba..101452e83c5d5 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -18,5 +18,106 @@ //! Substrate system API. +use jsonrpsee::{ + proc_macros::rpc, + types::{JsonRpcResult, JsonValue}, +}; + +pub use self::helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}; + pub mod error; pub mod helpers; + +/// Substrate system RPC API +#[rpc(client, server, namespace = "system")] +pub trait SystemApi { + /// Get the node's implementation name. Plain old string. + #[method(name = "name")] + fn system_name(&self) -> JsonRpcResult; + + /// Get the node implementation's version. Should be a semver string. + #[method(name = "version")] + fn system_version(&self) -> JsonRpcResult; + + /// Get the chain's name. Given as a string identifier. + #[method(name = "chain")] + fn system_chain(&self) -> JsonRpcResult; + + /// Get the chain's type. + #[method(name = "chainType")] + fn system_type(&self) -> JsonRpcResult; + + /// Get a custom set of properties as a JSON object, defined in the chain spec. + #[method(name = "properties")] + fn system_properties(&self) -> JsonRpcResult; + + /// Return health status of the node. + /// + /// Node is considered healthy if it is: + /// - connected to some peers (unless running in dev mode) + /// - not performing a major sync + #[method(name = "health")] + async fn system_health(&self) -> JsonRpcResult; + + /// Returns the base58-encoded PeerId of the node. + #[method(name = "localPeerId")] + async fn system_local_peer_id(&self) -> JsonRpcResult; + + /// Returns the multi-addresses that the local node is listening on + /// + /// The addresses include a trailing `/p2p/` with the local PeerId, and are thus suitable to + /// be passed to `addReservedPeer` or as a bootnode address for example. 
+ #[method(name = "localListenAddresses")] + async fn system_local_listen_addresses(&self) -> JsonRpcResult>; + + /// Returns currently connected peers + #[method(name = "peers")] + async fn system_peers(&self) -> JsonRpcResult>>; + + /// Returns current state of the network. + /// + /// **Warning**: This API is not stable. Please do not programmatically interpret its output, + /// as its format might change at any time. + // TODO: the future of this call is uncertain: https://github.com/paritytech/substrate/issues/1890 + // https://github.com/paritytech/substrate/issues/5541 + #[method(name = "unstable_networkState")] + async fn system_network_state(&self) -> JsonRpcResult; + + /// Adds a reserved peer. Returns the empty string or an error. The string + /// parameter should encode a `p2p` multiaddr. + /// + /// `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` + /// is an example of a valid, passing multiaddr with PeerId attached. + #[method(name = "addReservedPeer")] + async fn system_add_reserved_peer(&self, peer: String) -> JsonRpcResult<()>; + + /// Remove a reserved peer. Returns the empty string or an error. The string + /// should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. + #[method(name = "removeReservedPeer")] + async fn system_remove_reserved_peer(&self, peer_id: String) -> JsonRpcResult<()>; + + /// Returns the list of reserved peers + #[method(name = "reservedPeers")] + async fn system_reserved_peers(&self) -> JsonRpcResult>; + + /// Returns the roles the node is running as. + #[method(name = "nodeRoles")] + async fn system_node_roles(&self) -> JsonRpcResult>; + + /// Returns the state of the syncing of the node: starting block, current best block, highest + /// known block. + #[method(name = "syncState")] + async fn system_sync_state(&self) -> JsonRpcResult>; + + /// Adds the supplied directives to the current log filter + /// + /// The syntax is identical to the CLI `=`: + /// + /// `sync=debug,state=trace` + #[method(name = "addLogFilter")] + fn system_add_log_filter(&self, directives: String) -> JsonRpcResult<()>; + + /// Resets the log filter to Substrate defaults + #[method(name = "resetLogFilter")] + fn system_reset_log_filter(&self) -> JsonRpcResult<()>; +} diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs deleted file mode 100644 index 43380977455df..0000000000000 --- a/client/rpc-servers/src/middleware.rs +++ /dev/null @@ -1,249 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Middleware for RPC requests. 
- -use std::collections::HashSet; - -use jsonrpc_core::{FutureOutput, FutureResponse, Metadata, Middleware as RequestMiddleware}; -use prometheus_endpoint::{ - register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, -}; - -use futures::{future::Either, Future, FutureExt}; -use pubsub::PubSubMetadata; - -use crate::RpcHandler; - -/// Metrics for RPC middleware -#[derive(Debug, Clone)] -pub struct RpcMetrics { - requests_started: CounterVec, - requests_finished: CounterVec, - calls_time: HistogramVec, - calls_started: CounterVec, - calls_finished: CounterVec, -} - -impl RpcMetrics { - /// Create an instance of metrics - pub fn new(metrics_registry: Option<&Registry>) -> Result, PrometheusError> { - if let Some(r) = metrics_registry { - Ok(Some(Self { - requests_started: register( - CounterVec::new( - Opts::new( - "rpc_requests_started", - "Number of RPC requests (not calls) received by the server.", - ), - &["protocol"], - )?, - r, - )?, - requests_finished: register( - CounterVec::new( - Opts::new( - "rpc_requests_finished", - "Number of RPC requests (not calls) processed by the server.", - ), - &["protocol"], - )?, - r, - )?, - calls_time: register( - HistogramVec::new( - HistogramOpts::new( - "rpc_calls_time", - "Total time [μs] of processed RPC calls", - ), - &["protocol", "method"], - )?, - r, - )?, - calls_started: register( - CounterVec::new( - Opts::new( - "rpc_calls_started", - "Number of received RPC calls (unique un-batched requests)", - ), - &["protocol", "method"], - )?, - r, - )?, - calls_finished: register( - CounterVec::new( - Opts::new( - "rpc_calls_finished", - "Number of processed RPC calls (unique un-batched requests)", - ), - &["protocol", "method", "is_error"], - )?, - r, - )?, - })) - } else { - Ok(None) - } - } -} - -/// Instantiates a dummy `IoHandler` given a builder function to extract supported method names. -pub fn method_names(gen_handler: F) -> Result, E> -where - F: FnOnce(RpcMiddleware) -> Result, E>, - M: PubSubMetadata, -{ - let io = gen_handler(RpcMiddleware::new(None, HashSet::new(), "dummy"))?; - Ok(io.iter().map(|x| x.0.clone()).collect()) -} - -/// Middleware for RPC calls -pub struct RpcMiddleware { - metrics: Option, - known_rpc_method_names: HashSet, - transport_label: String, -} - -impl RpcMiddleware { - /// Create an instance of middleware. - /// - /// - `metrics`: Will be used to report statistics. - /// - `transport_label`: The label that is used when reporting the statistics. 
- pub fn new( - metrics: Option, - known_rpc_method_names: HashSet, - transport_label: &str, - ) -> Self { - RpcMiddleware { metrics, known_rpc_method_names, transport_label: transport_label.into() } - } -} - -impl RequestMiddleware for RpcMiddleware { - type Future = FutureResponse; - type CallFuture = FutureOutput; - - fn on_request( - &self, - request: jsonrpc_core::Request, - meta: M, - next: F, - ) -> Either - where - F: Fn(jsonrpc_core::Request, M) -> X + Send + Sync, - X: Future> + Send + 'static, - { - let metrics = self.metrics.clone(); - let transport_label = self.transport_label.clone(); - if let Some(ref metrics) = metrics { - metrics.requests_started.with_label_values(&[transport_label.as_str()]).inc(); - } - let r = next(request, meta); - Either::Left( - async move { - let r = r.await; - if let Some(ref metrics) = metrics { - metrics.requests_finished.with_label_values(&[transport_label.as_str()]).inc(); - } - r - } - .boxed(), - ) - } - - fn on_call( - &self, - call: jsonrpc_core::Call, - meta: M, - next: F, - ) -> Either - where - F: Fn(jsonrpc_core::Call, M) -> X + Send + Sync, - X: Future> + Send + 'static, - { - #[cfg(not(target_os = "unknown"))] - let start = std::time::Instant::now(); - let name = call_name(&call, &self.known_rpc_method_names).to_owned(); - let metrics = self.metrics.clone(); - let transport_label = self.transport_label.clone(); - log::trace!(target: "rpc_metrics", "[{}] {} call: {:?}", transport_label, name, &call); - if let Some(ref metrics) = metrics { - metrics - .calls_started - .with_label_values(&[transport_label.as_str(), name.as_str()]) - .inc(); - } - let r = next(call, meta); - Either::Left( - async move { - let r = r.await; - #[cfg(not(target_os = "unknown"))] - let micros = start.elapsed().as_micros(); - // seems that std::time is not implemented for browser target - #[cfg(target_os = "unknown")] - let micros = 1; - if let Some(ref metrics) = metrics { - metrics - .calls_time - .with_label_values(&[transport_label.as_str(), name.as_str()]) - .observe(micros as _); - metrics - .calls_finished - .with_label_values(&[ - transport_label.as_str(), - name.as_str(), - if is_success(&r) { "true" } else { "false" }, - ]) - .inc(); - } - log::debug!( - target: "rpc_metrics", - "[{}] {} call took {} μs", - transport_label, - name, - micros, - ); - r - } - .boxed(), - ) - } -} - -fn call_name<'a>(call: &'a jsonrpc_core::Call, known_methods: &HashSet) -> &'a str { - // To prevent bloating metric with all invalid method names we filter them out here. - let only_known = |method: &'a String| { - if known_methods.contains(method) { - method.as_str() - } else { - "invalid method" - } - }; - - match call { - jsonrpc_core::Call::Invalid { .. 
} => "invalid call", - jsonrpc_core::Call::MethodCall(ref call) => only_known(&call.method), - jsonrpc_core::Call::Notification(ref notification) => only_known(¬ification.method), - } -} - -fn is_success(output: &Option) -> bool { - match output { - Some(jsonrpc_core::Output::Success(..)) => true, - _ => false, - } -} diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index c3a2c26759b46..8beebe903f1c1 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -26,15 +26,15 @@ use std::{convert::TryInto, sync::Arc}; use crate::SubscriptionTaskExecutor; use codec::{Decode, Encode}; -use futures::{FutureExt, StreamExt}; +use futures::StreamExt; use jsonrpsee::{ - types::error::{CallError as RpseeCallError, Error as JsonRpseeError}, - RpcModule, + types::{async_trait, error::Error as JsonRpseeError, JsonRpcResult}, + SubscriptionSink, }; use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::{ - error::IntoPoolError, InPoolTransaction, TransactionFor, TransactionPool, TransactionSource, - TxHash, + error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool, + TransactionSource, TxHash, }; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -74,7 +74,8 @@ impl Author { } } -impl Author +#[async_trait] +impl AuthorApiServer, BlockHash

> for Author where P: TransactionPool + Sync + Send + 'static, Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, @@ -82,157 +83,127 @@ where P::Hash: Unpin, ::Hash: Unpin, { - /// Convert a [`Author`] to an [`RpcModule`]. Registers all the RPC methods available with the - /// RPC server. - pub fn into_rpc_module(self) -> std::result::Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - module.register_method("author_insertKey", |params, author| { - author.deny_unsafe.check_if_safe()?; - let (key_type, suri, public): (String, String, Bytes) = params.parse()?; - let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; - SyncCryptoStore::insert_unknown(&*author.keystore, key_type, &suri, &public[..]) - .map_err(|_| Error::KeyStoreUnavailable)?; - Ok(()) - })?; - - module.register_method::("author_rotateKeys", |_params, author| { - author.deny_unsafe.check_if_safe()?; - - let best_block_hash = author.client.info().best_hash; - author - .client - .runtime_api() - .generate_session_keys(&generic::BlockId::Hash(best_block_hash), None) - .map(Into::into) - .map_err(|api_err| Error::Client(Box::new(api_err)).into()) - })?; - - module.register_method("author_hasSessionKeys", |params, author| { - author.deny_unsafe.check_if_safe()?; - - let session_keys: Bytes = params.one()?; - let best_block_hash = author.client.info().best_hash; - let keys = author - .client - .runtime_api() - .decode_session_keys( - &generic::BlockId::Hash(best_block_hash), - session_keys.to_vec(), - ) - .map_err(|e| RpseeCallError::Failed(Box::new(e)))? - .ok_or_else(|| Error::InvalidSessionKeys)?; - - Ok(SyncCryptoStore::has_keys(&*author.keystore, &keys)) - })?; - - module.register_method("author_hasKey", |params, author| { - author.deny_unsafe.check_if_safe()?; - - let (public_key, key_type) = params.parse::<(Vec, String)>()?; - let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; - Ok(SyncCryptoStore::has_keys(&*author.keystore, &[(public_key, key_type)])) - })?; - - module.register_async_method::, _>( - "author_submitExtrinsic", - |params, author| { - let ext: Bytes = match params.one() { - Ok(ext) => ext, - Err(e) => return Box::pin(futures::future::err(e)), - }; - async move { - let xt = match Decode::decode(&mut &ext[..]) { - Ok(xt) => xt, - Err(err) => return Err(RpseeCallError::Failed(err.into())), - }; - let best_block_hash = author.client.info().best_hash; - author - .pool - .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) - .await - .map_err(|e| { - e.into_pool_error() - .map(|e| RpseeCallError::Failed(Box::new(e))) - .unwrap_or_else(|e| RpseeCallError::Failed(Box::new(e))) - }) - } - .boxed() - }, - )?; - - module.register_method::, _>("author_pendingExtrinsics", |_, author| { - Ok(author.pool.ready().map(|tx| tx.data().encode().into()).collect()) - })?; - - module.register_method::>, _>( - "author_removeExtrinsic", - |params, author| { - author.deny_unsafe.check_if_safe()?; - - let bytes_or_hash: Vec>> = params.parse()?; - let hashes = bytes_or_hash - .into_iter() - .map(|x| match x { - hash::ExtrinsicOrHash::Hash(h) => Ok(h), - hash::ExtrinsicOrHash::Extrinsic(bytes) => { - let xt = Decode::decode(&mut &bytes[..])?; - Ok(author.pool.hash_of(&xt)) - }, - }) - .collect::>>()?; - - Ok(author - .pool - .remove_invalid(&hashes) - .into_iter() - .map(|tx| tx.hash().clone()) - .collect()) - }, - )?; - - module.register_subscription( - "author_extrinsicUpdate", - "author_unwatchExtrinsic", - |params, mut sink, ctx| { - let 
xt: Bytes = params.one()?; - let best_block_hash = ctx.client.info().best_hash; - let dxt = TransactionFor::

::decode(&mut &xt[..]) - .map_err(|e| JsonRpseeError::Custom(e.to_string()))?; - - let executor = ctx.executor.clone(); - let fut = async move { - let stream = match ctx - .pool - .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) - .await - { - Ok(stream) => stream, - Err(e) => { - let _ = sink.send(&format!( - "txpool subscription failed: {:?}; subscription useless", - e - )); - return - }, - }; - - stream - .for_each(|item| { - let _ = sink.send(&item); - futures::future::ready(()) - }) - .await; - }; - - executor.execute(Box::pin(fut)); - Ok(()) - }, - )?; + async fn submit_extrinsic(&self, ext: Bytes) -> JsonRpcResult> { + let xt = match Decode::decode(&mut &ext[..]) { + Ok(xt) => xt, + Err(err) => return Err(JsonRpseeError::to_call_error(err)), + }; + let best_block_hash = self.client.info().best_hash; + self.pool + .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) + .await + .map_err(|e| { + e.into_pool_error() + .map(|e| JsonRpseeError::to_call_error(e)) + .unwrap_or_else(|e| JsonRpseeError::to_call_error(e)) + }) + } + + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> JsonRpcResult<()> { + self.deny_unsafe.check_if_safe()?; + + let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; + SyncCryptoStore::insert_unknown(&*self.keystore, key_type, &suri, &public[..]) + .map_err(|_| Error::KeyStoreUnavailable)?; + Ok(()) + } + + fn rotate_keys(&self) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; + + let best_block_hash = self.client.info().best_hash; + self.client + .runtime_api() + .generate_session_keys(&generic::BlockId::Hash(best_block_hash), None) + .map(Into::into) + .map_err(|api_err| Error::Client(Box::new(api_err)).into()) + } + + fn has_session_keys(&self, session_keys: Bytes) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; - module.register_alias("author_submitAndWatchExtrinsic", "author_extrinsicUpdate")?; + let best_block_hash = self.client.info().best_hash; + let keys = self + .client + .runtime_api() + .decode_session_keys(&generic::BlockId::Hash(best_block_hash), session_keys.to_vec()) + .map_err(|e| JsonRpseeError::to_call_error(e))? + .ok_or_else(|| Error::InvalidSessionKeys)?; - Ok(module) + Ok(SyncCryptoStore::has_keys(&*self.keystore, &keys)) + } + + fn has_key(&self, public_key: Bytes, key_type: String) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; + + let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; + Ok(SyncCryptoStore::has_keys(&*self.keystore, &[(public_key.to_vec(), key_type)])) + } + + fn pending_extrinsics(&self) -> JsonRpcResult> { + Ok(self.pool.ready().map(|tx| tx.data().encode().into()).collect()) + } + + fn remove_extrinsic( + &self, + bytes_or_hash: Vec>>, + ) -> JsonRpcResult>> { + self.deny_unsafe.check_if_safe()?; + let hashes = bytes_or_hash + .into_iter() + .map(|x| match x { + hash::ExtrinsicOrHash::Hash(h) => Ok(h), + hash::ExtrinsicOrHash::Extrinsic(bytes) => { + let xt = Decode::decode(&mut &bytes[..])?; + Ok(self.pool.hash_of(&xt)) + }, + }) + .collect::>>()?; + + Ok(self + .pool + .remove_invalid(&hashes) + .into_iter() + .map(|tx| tx.hash().clone()) + .collect()) + } + + fn watch_extrinsic(&self, mut sink: SubscriptionSink, xt: Bytes) { + let best_block_hash = self.client.info().best_hash; + let dxt = match TransactionFor::

::decode(&mut &xt[..]) { + Ok(dxt) => dxt, + Err(e) => { + log::error!("[watch_extrinsic sub] failed to decode extrinsic: {:?}", e); + return + }, + }; + + let executor = self.executor.clone(); + let pool = self.pool.clone(); + let fut = async move { + let stream = match pool + .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) + .await + { + Ok(stream) => stream, + Err(e) => { + let _ = sink.send(&format!( + "txpool subscription failed: {:?}; subscription useless", + e + )); + return + }, + }; + + stream + .for_each(|item| { + let _ = sink.send(&item); + futures::future::ready(()) + }) + .await; + }; + + executor.execute(Box::pin(fut)); } } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index ec0ef15636b4e..f5c6e379ac269 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -29,11 +29,9 @@ use std::sync::Arc; use crate::SubscriptionTaskExecutor; -use futures::FutureExt; use jsonrpsee::{ - types::error::{CallError as JsonRpseeCallError, Error as JsonRpseeError}, - ws_server::SubscriptionSink, - RpcModule, + types::{async_trait, JsonRpcResult}, + SubscriptionSink, }; use sc_client_api::{ light::{Fetcher, RemoteBlockchain}, @@ -158,90 +156,34 @@ pub struct Chain { backend: Box>, } -impl Chain +// TODO(niklasad1): check if those DeserializeOwned bounds are really required. +#[async_trait] +impl ChainApiServer, Block::Hash, Block::Header, SignedBlock> + for Chain where - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, Block: BlockT + 'static, - ::Header: Unpin, + Block::Header: Unpin, + Client: HeaderBackend + BlockchainEvents + 'static, { - /// Convert a [`Chain`] to an [`RpcModule`]. Registers all the RPC methods available with the - /// RPC server. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut rpc_module = RpcModule::new(self); - - rpc_module.register_async_method("chain_getHeader", |params, chain| { - let hash = params.one().ok(); - async move { chain.header(hash).await.map_err(rpc_err) }.boxed() - })?; - - rpc_module.register_async_method("chain_getBlock", |params, chain| { - let hash = params.one().ok(); - async move { chain.block(hash).await.map_err(rpc_err) }.boxed() - })?; - - rpc_module.register_method("chain_getBlockHash", |params, chain| { - let hash = params.one().ok(); - chain.block_hash(hash).map_err(rpc_err) - })?; - - rpc_module.register_alias("chain_getHead", "chain_getBlockHash")?; - - rpc_module.register_method("chain_getFinalizedHead", |_, chain| { - chain.finalized_head().map_err(rpc_err) - })?; - - rpc_module.register_alias("chain_getFinalisedHead", "chain_getFinalizedHead")?; - - rpc_module.register_subscription( - "chain_allHead", - "chain_unsubscribeAllHeads", - |_params, sink, ctx| ctx.backend.subscribe_all_heads(sink).map_err(Into::into), - )?; - - rpc_module.register_alias("chain_subscribeAllHeads", "chain_allHead")?; - - rpc_module.register_subscription( - "chain_newHead", - "chain_unsubscribeNewHead", - |_params, sink, ctx| ctx.backend.subscribe_new_heads(sink).map_err(Into::into), - )?; - - rpc_module.register_subscription( - "chain_finalizedHead", - "chain_unsubscribeFinalizedHeads", - |_params, sink, ctx| ctx.backend.subscribe_finalized_heads(sink).map_err(Into::into), - )?; - - rpc_module.register_alias("chain_subscribeNewHead", "chain_newHead")?; - rpc_module.register_alias("chain_subscribeNewHeads", "chain_newHead")?; - rpc_module.register_alias("chain_unsubscribeNewHeads", "chain_unsubscribeNewHead")?; - 
rpc_module.register_alias("chain_subscribeFinalisedHeads", "chain_finalizedHead")?; - rpc_module.register_alias("chain_subscribeFinalizedHeads", "chain_finalizedHead")?; - rpc_module - .register_alias("chain_unsubscribeFinalisedHeads", "chain_unsubscribeFinalizedHeads")?; - - Ok(rpc_module) - } - - /// TODO: document this - pub async fn header(&self, hash: Option) -> Result, Error> { - self.backend.header(hash).await + async fn header(&self, hash: Option) -> JsonRpcResult> { + self.backend.header(hash).await.map_err(Into::into) } - /// TODO: document this - async fn block(&self, hash: Option) -> Result>, Error> { - self.backend.block(hash).await + async fn block(&self, hash: Option) -> JsonRpcResult>> { + self.backend.block(hash).await.map_err(Into::into) } - /// TODO: document this fn block_hash( &self, number: Option>, - ) -> Result>, Error> { + ) -> JsonRpcResult>> { match number { - None => self.backend.block_hash(None).map(ListOrValue::Value), - Some(ListOrValue::Value(number)) => - self.backend.block_hash(Some(number)).map(ListOrValue::Value), + None => self.backend.block_hash(None).map(ListOrValue::Value).map_err(Into::into), + Some(ListOrValue::Value(number)) => self + .backend + .block_hash(Some(number)) + .map(ListOrValue::Value) + .map_err(Into::into), Some(ListOrValue::List(list)) => Ok(ListOrValue::List( list.into_iter() .map(|number| self.backend.block_hash(Some(number))) @@ -250,16 +192,23 @@ where } } - /// TODO: document this - fn finalized_head(&self) -> Result { - self.backend.finalized_head() + fn finalized_head(&self) -> JsonRpcResult { + self.backend.finalized_head().map_err(Into::into) + } + + fn subscribe_all_heads(&self, sink: SubscriptionSink) { + let _ = self.backend.subscribe_all_heads(sink); + } + + fn subscribe_new_heads(&self, sink: SubscriptionSink) { + let _ = self.backend.subscribe_new_heads(sink); + } + + fn subscribe_finalized_heads(&self, sink: SubscriptionSink) { + let _ = self.backend.subscribe_finalized_heads(sink); } } fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } - -fn rpc_err(err: Error) -> JsonRpseeCallError { - JsonRpseeCallError::Failed(Box::new(err)) -} diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index ea5d14fb4cd25..2d0666714e131 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -60,7 +60,7 @@ macro_rules! unwrap_or_fut_err { ( $e:expr ) => { match $e { Ok(x) => x, - Err(e) => return Box::pin(future::err(e)), + Err(e) => return Box::pin(future::err(e.into())), } }; } diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs index 3e935b4a19ec4..72519f14e0320 100644 --- a/client/rpc/src/offchain/mod.rs +++ b/client/rpc/src/offchain/mod.rs @@ -22,10 +22,7 @@ mod tests; use self::error::Error; -use jsonrpsee::{ - types::error::{CallError as JsonRpseeCallError, Error as JsonRpseeError}, - RpcModule, -}; +use jsonrpsee::types::{async_trait, Error as JsonRpseeError, JsonRpcResult}; use parking_lot::RwLock; /// Re-export the API for backward compatibility. pub use sc_rpc_api::offchain::*; @@ -44,46 +41,33 @@ pub struct Offchain { deny_unsafe: DenyUnsafe, } -impl Offchain { +impl Offchain { /// Create new instance of Offchain API. pub fn new(storage: T, deny_unsafe: DenyUnsafe) -> Self { Offchain { storage: Arc::new(RwLock::new(storage)), deny_unsafe } } +} - /// Convert this to a RPC module. 
- pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut ctx = RpcModule::new(self); - - ctx.register_method("offchain_localStorageSet", |params, offchain| { - offchain.deny_unsafe.check_if_safe()?; - let (kind, key, value): (StorageKind, Bytes, Bytes) = - params.parse().map_err(|_| JsonRpseeCallError::InvalidParams)?; - let prefix = match kind { - StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, - StorageKind::LOCAL => return Err(to_jsonrpsee_error(Error::UnavailableStorageKind)), - }; - offchain.storage.write().set(prefix, &*key, &*value); - Ok(()) - })?; - - ctx.register_method("offchain_localStorageGet", |params, offchain| { - offchain.deny_unsafe.check_if_safe()?; - let (kind, key): (StorageKind, Bytes) = - params.parse().map_err(|_| JsonRpseeCallError::InvalidParams)?; - - let prefix = match kind { - StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, - StorageKind::LOCAL => return Err(to_jsonrpsee_error(Error::UnavailableStorageKind)), - }; +#[async_trait] +impl OffchainApiServer for Offchain { + fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> JsonRpcResult<()> { + let prefix = match kind { + StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, + StorageKind::LOCAL => + return Err(JsonRpseeError::to_call_error(Error::UnavailableStorageKind)), + }; + self.storage.write().set(prefix, &*key, &*value); + Ok(()) + } - let bytes: Option = offchain.storage.read().get(prefix, &*key).map(Into::into); - Ok(bytes) - })?; + fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> JsonRpcResult> { + let prefix = match kind { + StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, + StorageKind::LOCAL => + return Err(JsonRpseeError::to_call_error(Error::UnavailableStorageKind)), + }; - Ok(ctx) + let bytes: Option = self.storage.read().get(prefix, &*key).map(Into::into); + Ok(bytes) } } - -fn to_jsonrpsee_error(err: Error) -> JsonRpseeCallError { - JsonRpseeCallError::Failed(Box::new(err)) -} diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index f4d991854031c..8646be1e2ffde 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -26,13 +26,11 @@ mod tests; use std::sync::Arc; -use crate::{unwrap_or_fut_err, SubscriptionTaskExecutor}; +use crate::SubscriptionTaskExecutor; -use futures::{future, FutureExt}; use jsonrpsee::{ - types::error::{CallError as JsonRpseeCallError, Error as JsonRpseeError}, + types::{async_trait, error::Error as JsonRpseeError, JsonRpcResult}, ws_server::SubscriptionSink, - RpcModule, }; use sc_client_api::light::{Fetcher, RemoteBlockchain}; @@ -245,203 +243,173 @@ pub struct StateApi { deny_unsafe: DenyUnsafe, } -impl StateApi +#[async_trait] +impl StateApiServer for StateApi where Block: BlockT + 'static, - Client: - BlockchainEvents + CallApiAt + HeaderBackend + Send + Sync + 'static, + Client: Send + Sync + 'static, { - /// Convert this to a RPC module. 
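The `OffchainApiServer` implementation above keeps the original parameter order (kind, key, value). A hedged sketch of a matching request, assuming the `offchain_localStorageSet` name used by the removed registration, `"PERSISTENT"` as the string form of `StorageKind`, `0x`-prefixed hex for the `Bytes` arguments, and the default local HTTP port from the earlier curl examples:

```text
curl \
  -H "Content-Type: application/json" \
  -d '{"id":1, "jsonrpc":"2.0", "method": "offchain_localStorageSet", \
  "params": ["PERSISTENT", "0x6b6579", "0x76616c7565"]}' \
  http://localhost:9933/
```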
- pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - module.register_async_method("state_call", |params, state| { - let mut seq = params.sequence(); - - let method = unwrap_or_fut_err!(seq.next()); - let data = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { state.backend.call(block, method, data).await.map_err(call_err) }.boxed() - })?; - - module.register_alias("state_callAt", "state_call")?; - - module.register_async_method("state_getKeys", |params, state| { - let mut seq = params.sequence(); - - let key_prefix = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); + async fn call( + &self, + method: String, + data: Bytes, + block: Option, + ) -> JsonRpcResult { + self.backend + .call(block, method, data) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - async move { state.backend.storage_keys(block, key_prefix).await.map_err(call_err) } - .boxed() - })?; + async fn storage_keys( + &self, + key_prefix: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_keys(block, key_prefix) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - module.register_async_method("state_getPairs", |params, state| { - let mut seq = params.sequence(); + async fn storage_pairs( + &self, + key_prefix: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.deny_unsafe.check_if_safe()?; + self.backend + .storage_pairs(block, key_prefix) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); + async fn storage_keys_paged( + &self, + prefix: Option, + count: u32, + start_key: Option, + block: Option, + ) -> JsonRpcResult> { + if count > STORAGE_KEYS_PAGED_MAX_COUNT { + return Err(JsonRpseeError::to_call_error(Error::InvalidCount { + value: count, + max: STORAGE_KEYS_PAGED_MAX_COUNT, + })) + } + self.backend + .storage_keys_paged(block, prefix, count, start_key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - async move { - state.deny_unsafe.check_if_safe()?; - state.backend.storage_pairs(block, key).await.map_err(call_err) - } - .boxed() - })?; + async fn storage( + &self, + key: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage(block, key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - module.register_async_method("state_getKeysPaged", |params, state| { - let mut seq = params.sequence(); + async fn storage_hash( + &self, + key: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_hash(block, key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - let prefix = unwrap_or_fut_err!(seq.optional_next()); - let count = unwrap_or_fut_err!(seq.next()); - let start_key = unwrap_or_fut_err!(seq.optional_next()); - let block = unwrap_or_fut_err!(seq.optional_next()); + async fn storage_size( + &self, + key: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_size(block, key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - async move { - if count > STORAGE_KEYS_PAGED_MAX_COUNT { - return Err(JsonRpseeCallError::Failed(Box::new(Error::InvalidCount { - value: count, - max: STORAGE_KEYS_PAGED_MAX_COUNT, - }))) - } - state - .backend - .storage_keys_paged(block, prefix, count, start_key) - .await - .map_err(call_err) - } - .boxed() - })?; + async fn metadata(&self, block: Option) -> 
JsonRpcResult { + self.backend.metadata(block).await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - module.register_alias("state_getKeysPagedAt", "state_getKeysPaged")?; + async fn runtime_version(&self, at: Option) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; + self.backend + .runtime_version(at) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - module.register_async_method("state_getStorage", |params, state| { - let mut seq = params.sequence(); + async fn query_storage( + &self, + keys: Vec, + from: Block::Hash, + to: Option, + ) -> JsonRpcResult>> { + self.deny_unsafe.check_if_safe()?; + self.backend + .query_storage(from, to, keys) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); + async fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> JsonRpcResult>> { + self.deny_unsafe.check_if_safe()?; + self.backend + .query_storage_at(keys, at) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - async move { state.backend.storage(block, key).await.map_err(call_err) }.boxed() - })?; + async fn read_proof( + &self, + keys: Vec, + block: Option, + ) -> JsonRpcResult> { + self.deny_unsafe.check_if_safe()?; + self.backend + .read_proof(block, keys) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - module.register_alias("state_getStorageAt", "state_getStorage")?; - - module.register_async_method("state_getStorageHash", |params, state| { - let mut seq = params.sequence(); + // TODO(niklasad1): use methods (goes probably away by merging to master) + async fn trace_block( + &self, + block: Block::Hash, + targets: Option, + storage_keys: Option, + _methods: Option, + ) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; + self.backend + .trace_block(block, targets, storage_keys) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { state.backend.storage_hash(block, key).await.map_err(call_err) }.boxed() - })?; - - module.register_alias("state_getStorageHashAt", "state_getStorageHash")?; - - module.register_async_method("state_getStorageSize", |params, state| { - let mut seq = params.sequence(); - - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { state.backend.storage_size(block, key).await.map_err(call_err) }.boxed() - })?; - - module.register_alias("state_getStorageSizeAt", "state_getStorageSize")?; - - module.register_async_method("state_getMetadata", |params, state| { - let maybe_block = params.one().ok(); - async move { state.backend.metadata(maybe_block).await.map_err(call_err) }.boxed() - })?; - - module.register_async_method("state_getRuntimeVersion", |params, state| { - let at = params.one().ok(); - async move { - state.deny_unsafe.check_if_safe()?; - state.backend.runtime_version(at).await.map_err(call_err) - } - .boxed() - })?; - - module.register_alias("chain_getRuntimeVersion", "state_getRuntimeVersion")?; - - module.register_async_method("state_queryStorage", |params, state| { - let mut seq = params.sequence(); - - let keys = unwrap_or_fut_err!(seq.next()); - let from = unwrap_or_fut_err!(seq.next()); - let to = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.deny_unsafe.check_if_safe()?; - state.backend.query_storage(from, to, keys).await.map_err(call_err) - } - .boxed() - })?; - - 
module.register_async_method("state_queryStorageAt", |params, state| { - let mut seq = params.sequence(); - - let keys = unwrap_or_fut_err!(seq.next()); - let at = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.deny_unsafe.check_if_safe()?; - state.backend.query_storage_at(keys, at).await.map_err(call_err) - } - .boxed() - })?; - - module.register_async_method("state_getReadProof", |params, state| { - let mut seq = params.sequence(); - - let keys = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.deny_unsafe.check_if_safe()?; - state.backend.read_proof(block, keys).await.map_err(call_err) - } - .boxed() - })?; - - module.register_async_method("state_traceBlock", |params, state| { - let mut seq = params.sequence(); - - let block = unwrap_or_fut_err!(seq.next()); - let targets = unwrap_or_fut_err!(seq.optional_next()); - let storage_keys = unwrap_or_fut_err!(seq.optional_next()); + fn subscribe_runtime_version(&self, sink: SubscriptionSink) { + if let Err(e) = self.backend.subscribe_runtime_version(sink) { + log::error!("[subscribe_runtimeVersion]: error {:?}", e); + } + } - async move { - state.deny_unsafe.check_if_safe()?; - state.backend.trace_block(block, targets, storage_keys).await.map_err(call_err) - } - .boxed() - })?; - - module.register_subscription( - "state_runtimeVersion", - "state_unsubscribeRuntimeVersion", - |_params, sink, ctx| ctx.backend.subscribe_runtime_version(sink).map_err(Into::into), - )?; - - module.register_alias("chain_subscribeRuntimeVersion", "state_runtimeVersion")?; - module.register_alias("state_subscribeRuntimeVersion", "state_runtimeVersion")?; - module - .register_alias("chain_unsubscribeRuntimeVersion", "state_unsubscribeRuntimeVersion")?; - - module.register_subscription( - "state_storage", - "state_unsubscribeStorage", - |params, sink, ctx| { - let keys = params.one::>().ok(); - ctx.backend.subscribe_storage(sink, keys).map_err(Into::into) - }, - )?; - module.register_alias("chain_subscribeStorage", "state_storage")?; - module.register_alias("state_subscribeStorage", "state_storage")?; - - Ok(module) + fn subscribe_storage(&self, sink: SubscriptionSink, keys: Option>) { + if let Err(e) = self.backend.subscribe_storage(sink, keys) { + log::error!("[subscribe_storage]: error {:?}", e); + } } } @@ -511,122 +479,87 @@ pub struct ChildState { backend: Box>, } -impl ChildState +#[async_trait] +impl ChildStateApiServer for ChildState where Block: BlockT + 'static, Client: Send + Sync + 'static, { - /// Convert this to a RPC module. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - // DEPRECATED: Please use `childstate_getKeysPaged` with proper paging support. - // Returns the keys with prefix from a child storage, leave empty to get all the keys - module.register_async_method("childstate_getKeys", |params, state| { - let mut seq = params.sequence(); - - let storage_key = unwrap_or_fut_err!(seq.next()); - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.backend.storage_keys(block, storage_key, key) - .await - .map_err(call_err) - }.boxed() - })?; - - // Returns the keys with prefix from a child storage with pagination support. - // Up to `count` keys will be returned. - // If `start_key` is passed, return next keys in storage in lexicographic order. 
- module.register_async_method("childstate_getKeysPaged", |params, state| { - // TODO: (dp) what is the order of the params here? https://polkadot.js.org/docs/substrate/rpc/#getkeyspagedkey-storagekey-count-u32-startkey-storagekey-at-blockhash-vecstoragekey is a bit unclear on what the `prefix` is here. - let mut seq = params.sequence(); - - let storage_key = unwrap_or_fut_err!(seq.next()); - let prefix = unwrap_or_fut_err!(seq.optional_next()); - let count = unwrap_or_fut_err!(seq.next()); - let start_key = unwrap_or_fut_err!(seq.optional_next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state - .backend - .storage_keys_paged(block, storage_key, prefix, count, start_key) - .await - .map_err(call_err) - } - .boxed() - })?; - - module.register_alias("childstate_getKeysPagedAt", "childstate_getKeysPaged")?; - - // Returns a child storage entry at a specific block's state. - module.register_async_method("childstate_getStorage", |params, state| { - let mut seq = params.sequence(); - - let storage_key = unwrap_or_fut_err!(seq.next()); - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { state.backend.storage(block, storage_key, key).await.map_err(call_err) } - .boxed() - })?; - - // Returns the hash of a child storage entry at a block's state. - module.register_async_method("childstate_getStorageHash", |params, state| { - let mut seq = params.sequence(); - - let storage_key = unwrap_or_fut_err!(seq.next()); - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.backend.storage_hash(block, storage_key, key) - .await - .map_err(call_err) - }.boxed() - })?; - - // Returns the size of a child storage entry at a block's state. - module.register_async_method("childstate_getStorageSize", |params, state| { - let mut seq = params.sequence(); - - let storage_key = unwrap_or_fut_err!(seq.next()); - let key = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.backend.storage_size(block, storage_key, key) - .await - .map_err(call_err) - }.boxed() - })?; - - // Returns proof of storage for child key entries at a specific block's state. 
- module.register_async_method("state_getChildReadProof", |params, state| { - let mut seq = params.sequence(); - - let storage_key = unwrap_or_fut_err!(seq.next()); - let keys = unwrap_or_fut_err!(seq.next()); - let block = unwrap_or_fut_err!(seq.optional_next()); - - async move { - state.backend.read_child_proof(block, storage_key, keys).await.map_err(call_err) - } - .boxed() - })?; - - module.register_alias("childstate_getChildReadProof", "state_getChildReadProof")?; - - Ok(module) + async fn storage_keys( + &self, + storage_key: PrefixedStorageKey, + key_prefix: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_keys(block, storage_key, key_prefix) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } + + async fn storage_keys_paged( + &self, + storage_key: PrefixedStorageKey, + prefix: Option, + count: u32, + start_key: Option, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_keys_paged(block, storage_key, prefix, count, start_key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } + + async fn storage( + &self, + storage_key: PrefixedStorageKey, + key: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage(block, storage_key, key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } + + async fn storage_hash( + &self, + storage_key: PrefixedStorageKey, + key: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_hash(block, storage_key, key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } + + async fn storage_size( + &self, + storage_key: PrefixedStorageKey, + key: StorageKey, + block: Option, + ) -> JsonRpcResult> { + self.backend + .storage_size(block, storage_key, key) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) + } + + async fn read_child_proof( + &self, + child_storage_key: PrefixedStorageKey, + keys: Vec, + block: Option, + ) -> JsonRpcResult> { + self.backend + .read_child_proof(block, child_storage_key, keys) + .await + .map_err(|e| JsonRpseeError::to_call_error(e)) } } fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } - -fn call_err(err: Error) -> JsonRpseeCallError { - JsonRpseeCallError::Failed(Box::new(err)) -} diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index aebb8fe8f3c4f..3397bc508cdb5 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use super::{ use crate::SubscriptionTaskExecutor; use futures::{future, FutureExt, StreamExt}; -use jsonrpsee::ws_server::SubscriptionSink; +use jsonrpsee::SubscriptionSink; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider, StorageProvider, @@ -521,7 +521,8 @@ where }) .unwrap_or_default(); if !changes.is_empty() { - sink.send(&StorageChangeSet { block, changes })?; + sink.send(&StorageChangeSet { block, changes }) + .map_err(|e| Error::Client(Box::new(e)))?; } let fut = async move { diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index b34a05c3715c1..dad3a30457544 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -21,11 +21,8 @@ #[cfg(test)] mod tests; -use futures::{channel::oneshot, FutureExt}; -use jsonrpsee::{ - types::error::{CallError as JsonRpseeCallError, Error as JsonRpseeError}, - RpcModule, -}; +use futures::channel::oneshot; +use jsonrpsee::types::{async_trait, error::Error as JsonRpseeError, JsonRpcResult, JsonValue}; use 
sc_rpc_api::DenyUnsafe; use sc_tracing::logging; use sp_runtime::traits::{self, Header as HeaderT}; @@ -80,201 +77,115 @@ impl System { ) -> Self { System { info, send_back, deny_unsafe } } +} - /// Convert to a RPC Module. - pub fn into_rpc_module(self) -> std::result::Result, JsonRpseeError> { - let mut rpc_module = RpcModule::new(self); - - // Get the node's implementation name. Plain old string. - rpc_module.register_method("system_name", |_, system| Ok(system.info.impl_name.clone()))?; - - // Get the node implementation's version. Should be a semver string. - rpc_module - .register_method("system_version", |_, system| Ok(system.info.impl_version.clone()))?; - - // Get the chain's name. Given as a string identifier. - rpc_module - .register_method("system_chain", |_, system| Ok(system.info.chain_name.clone()))?; - - // Get the chain's type. - rpc_module - .register_method("system_chainType", |_, system| Ok(system.info.chain_type.clone()))?; +#[async_trait] +impl SystemApiServer::Number> for System { + fn system_name(&self) -> JsonRpcResult { + Ok(self.info.impl_name.clone()) + } - // Get a custom set of properties as a JSON object, defined in the chain spec. - rpc_module - .register_method("system_properties", |_, system| Ok(system.info.properties.clone()))?; + fn system_version(&self) -> JsonRpcResult { + Ok(self.info.impl_version.clone()) + } - // Return health status of the node. - // - // Node is considered healthy if it is: - // - connected to some peers (unless running in dev mode) - // - not performing a major sync - rpc_module.register_async_method("system_health", |_, system| { - async move { - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::Health(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + fn system_chain(&self) -> JsonRpcResult { + Ok(self.info.chain_name.clone()) + } - // Returns the base58-encoded PeerId of the node. - rpc_module.register_async_method("system_localPeerId", |_, system| { - async move { - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::LocalPeerId(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + fn system_type(&self) -> JsonRpcResult { + Ok(self.info.chain_type.clone()) + } - // Returns the multiaddresses that the local node is listening on - // - // The addresses include a trailing `/p2p/` with the local PeerId, and are thus suitable to - // be passed to `system_addReservedPeer` or as a bootnode address for example. - rpc_module.register_async_method("system_localListenAddresses", |_, system| { - async move { - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::LocalListenAddresses(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + fn system_properties(&self) -> JsonRpcResult { + Ok(self.info.properties.clone()) + } - // Returns currently connected peers - rpc_module.register_async_method("system_peers", |_, system| { - async move { - system.deny_unsafe.check_if_safe()?; - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::Peers(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + async fn system_health(&self) -> JsonRpcResult { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::Health(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - // Returns current state of the network. - // - // **Warning**: This API is not stable. 
Please do not programmatically interpret its output, - // as its format might change at any time. - // TODO: the future of this call is uncertain: https://github.com/paritytech/substrate/issues/1890 - // https://github.com/paritytech/substrate/issues/5541 - rpc_module.register_async_method("system_unstable_networkState", |_, system| { - async move { - system.deny_unsafe.check_if_safe()?; - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::NetworkState(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + async fn system_local_peer_id(&self) -> JsonRpcResult { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - // Adds a reserved peer. Returns the empty string or an error. The string - // parameter should encode a `p2p` multiaddr. - // - // `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` - // is an example of a valid, passing multiaddr with PeerId attached. - rpc_module.register_async_method("system_addReservedPeer", |param, system| { - let peer = match param.one() { - Ok(peer) => peer, - Err(e) => return Box::pin(futures::future::err(e)), - }; - async move { - system.deny_unsafe.check_if_safe()?; - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::NetworkAddReservedPeer(peer, tx)); - match rx.await { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(to_call_error(e)), - Err(e) => Err(to_call_error(e)), - } - } - .boxed() - })?; + async fn system_local_listen_addresses(&self) -> JsonRpcResult> { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::LocalListenAddresses(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - // Remove a reserved peer. Returns the empty string or an error. The string - // should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. 
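A sketch of the corresponding request for the reserved-peer call documented above, reusing the PeerId from the doc comment; the local HTTP port is assumed as in the earlier examples:

```text
curl \
  -H "Content-Type: application/json" \
  -d '{"id":1, "jsonrpc":"2.0", "method": "system_removeReservedPeer", \
  "params": ["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"]}' \
  http://localhost:9933/
```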
- rpc_module.register_async_method::<(), _>( - "system_removeReservedPeer", - |param, system| { - let peer = match param.one() { - Ok(peer) => peer, - Err(e) => return Box::pin(futures::future::err(e)), - }; + async fn system_peers( + &self, + ) -> JsonRpcResult::Number>>> { + self.deny_unsafe.check_if_safe()?; + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::Peers(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - async move { - system.deny_unsafe.check_if_safe()?; - let (tx, rx) = oneshot::channel(); - let _ = system - .send_back - .unbounded_send(Request::NetworkRemoveReservedPeer(peer, tx)); - match rx.await { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(to_call_error(e)), - Err(e) => Err(to_call_error(e)), - } - } - .boxed() - }, - )?; + async fn system_network_state(&self) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - // Returns the list of reserved peers - rpc_module.register_async_method("system_reservedPeers", |_, system| { - async move { - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::NetworkReservedPeers(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + async fn system_add_reserved_peer(&self, peer: String) -> JsonRpcResult<()> { + self.deny_unsafe.check_if_safe()?; + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NetworkAddReservedPeer(peer, tx)); + match rx.await { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(JsonRpseeError::to_call_error(e)), + Err(e) => Err(JsonRpseeError::to_call_error(e)), + } + } - // Returns the roles the node is running as. - rpc_module.register_async_method("system_nodeRoles", |_, system| { - async move { - system.deny_unsafe.check_if_safe()?; - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::NodeRoles(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + async fn system_remove_reserved_peer(&self, peer: String) -> JsonRpcResult<()> { + self.deny_unsafe.check_if_safe()?; + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NetworkRemoveReservedPeer(peer, tx)); + match rx.await { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(JsonRpseeError::to_call_error(e)), + Err(e) => Err(JsonRpseeError::to_call_error(e)), + } + } - // Returns the state of the syncing of the node: starting block, current best block, highest - // known block. 
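The sync-state call described in the comment above returns a small JSON object; a rough request/response sketch follows. The field names assume the camelCase serialization of `SyncState`, and the block numbers are made up:

```text
curl \
  -H "Content-Type: application/json" \
  -d '{"id":1, "jsonrpc":"2.0", "method": "system_syncState", "params": []}' \
  http://localhost:9933/

{"jsonrpc":"2.0","result":{"startingBlock":0,"currentBlock":110,"highestBlock":120},"id":1}
```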
- rpc_module.register_async_method("system_syncState", |_, system| { - async move { - system.deny_unsafe.check_if_safe()?; - let (tx, rx) = oneshot::channel(); - let _ = system.send_back.unbounded_send(Request::SyncState(tx)); - rx.await.map_err(to_call_error) - } - .boxed() - })?; + async fn system_reserved_peers(&self) -> JsonRpcResult> { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NetworkReservedPeers(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - // Adds the supplied directives to the current log filter - // - // The syntax is identical to the CLI `=`: - // - // `sync=debug,state=trace` - rpc_module.register_method("system_addLogFilter", |param, system| { - system.deny_unsafe.check_if_safe()?; + async fn system_node_roles(&self) -> JsonRpcResult> { + self.deny_unsafe.check_if_safe()?; + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NodeRoles(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - let directives = param.one().map_err(|_| JsonRpseeCallError::InvalidParams)?; - logging::add_directives(directives); - logging::reload_filter() - .map_err(|e| JsonRpseeCallError::Failed(anyhow::anyhow!("{:?}", e).into())) - })?; + async fn system_sync_state(&self) -> JsonRpcResult::Number>> { + self.deny_unsafe.check_if_safe()?; + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::SyncState(tx)); + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) + } - // Resets the log filter to Substrate defaults - rpc_module.register_method("system_resetLogFilter", |_, system| { - system.deny_unsafe.check_if_safe()?; - logging::reset_log_filter() - .map_err(|e| JsonRpseeCallError::Failed(anyhow::anyhow!("{:?}", e).into())) - })?; + fn system_add_log_filter(&self, directives: String) -> JsonRpcResult<()> { + self.deny_unsafe.check_if_safe()?; - Ok(rpc_module) + logging::add_directives(&directives); + logging::reload_filter().map_err(|e| anyhow::anyhow!("{:?}", e).into()) } -} -fn to_call_error(err: E) -> JsonRpseeCallError { - JsonRpseeCallError::Failed(Box::new(err)) + fn system_reset_log_filter(&self) -> JsonRpcResult<()> { + self.deny_unsafe.check_if_safe()?; + logging::reset_log_filter().map_err(|e| anyhow::anyhow!("{:?}", e).into()) + } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 90d8308d41ad2..daa99f956839c 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -46,7 +46,14 @@ use sc_network::{ warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, NetworkService, }; -use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; +use sc_rpc::{ + author::AuthorApiServer, + chain::ChainApiServer, + offchain::OffchainApiServer, + state::{ChildStateApiServer, StateApiServer}, + system::SystemApiServer, + DenyUnsafe, SubscriptionTaskExecutor, +}; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; use sp_api::{CallApiAt, ProvideRuntimeApi}; @@ -686,7 +693,7 @@ where { const UNIQUE_METHOD_NAMES_PROOF: &str = "Method names are unique; qed"; - // TODO(niklasad1): expose CORS to jsonrpsee to handle this propely. + // TODO(niklasad1): fix CORS. 
let deny_unsafe = DenyUnsafe::No; let system_info = sc_rpc::system::SystemInfo { @@ -709,8 +716,7 @@ where remote_blockchain.clone(), on_demand.clone(), ) - .into_rpc_module() - .expect(UNIQUE_METHOD_NAMES_PROOF); + .into_rpc(); let (state, child_state) = sc_rpc::state::new_light( client.clone(), task_executor.clone(), @@ -718,16 +724,10 @@ where on_demand, deny_unsafe, ); - ( - chain, - state.into_rpc_module().expect(UNIQUE_METHOD_NAMES_PROOF), - child_state.into_rpc_module().expect(UNIQUE_METHOD_NAMES_PROOF), - ) + (chain, state.into_rpc(), child_state.into_rpc()) } else { // Full nodes - let chain = sc_rpc::chain::new_full(client.clone(), task_executor.clone()) - .into_rpc_module() - .expect(UNIQUE_METHOD_NAMES_PROOF); + let chain = sc_rpc::chain::new_full(client.clone(), task_executor.clone()).into_rpc(); let (state, child_state) = sc_rpc::state::new_full( client.clone(), @@ -735,8 +735,8 @@ where deny_unsafe, config.rpc_max_payload, ); - let state = state.into_rpc_module().expect(UNIQUE_METHOD_NAMES_PROOF); - let child_state = child_state.into_rpc_module().expect(UNIQUE_METHOD_NAMES_PROOF); + let state = state.into_rpc(); + let child_state = child_state.into_rpc(); (chain, state, child_state) }; @@ -748,17 +748,12 @@ where deny_unsafe, task_executor.clone(), ) - .into_rpc_module() - .expect(UNIQUE_METHOD_NAMES_PROOF); + .into_rpc(); - let system = sc_rpc::system::System::new(system_info, system_rpc_tx, deny_unsafe) - .into_rpc_module() - .expect(UNIQUE_METHOD_NAMES_PROOF); + let system = sc_rpc::system::System::new(system_info, system_rpc_tx, deny_unsafe).into_rpc(); if let Some(storage) = offchain_storage { - let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe) - .into_rpc_module() - .expect(UNIQUE_METHOD_NAMES_PROOF); + let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe).into_rpc(); rpc_api.merge(offchain).expect(UNIQUE_METHOD_NAMES_PROOF); } diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index ca30f409a88e4..dd2a20c1147d1 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -42,8 +42,8 @@ #![deny(unused_crate_dependencies)] use jsonrpsee::{ - types::error::{CallError, Error as JsonRpseeError}, - RpcModule, + proc_macros::rpc, + types::{error::Error as JsonRpseeError, JsonRpcResult}, }; use sc_client_api::StorageData; use sp_blockchain::HeaderBackend; @@ -90,7 +90,7 @@ fn serialize_encoded( /// chain-spec as an extension. pub type LightSyncStateExtension = Option; -/// Hardcoded infomation that allows light clients to sync quickly. +/// Hardcoded information that allows light clients to sync quickly. #[derive(serde::Serialize, Clone)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] @@ -109,6 +109,16 @@ pub struct LightSyncState { sc_finality_grandpa::AuthoritySet<::Hash, NumberFor>, } +/// An api for sync state RPC calls. +#[rpc(client, server, namespace = "sync_state")] +pub trait SyncStateRpcApi { + /// Returns the JSON serialized chainspec running the node, with a sync state. + // NOTE(niklasad1): I changed to `JsonValue` -> `String` as the chainspec + // already returns a JSON String. + #[method(name = "genSyncSpec")] + fn system_gen_sync_spec(&self, raw: bool) -> JsonRpcResult; +} + /// An api for sync state RPC calls. pub struct SyncStateRpc { chain_spec: Box, @@ -140,39 +150,6 @@ where } } - /// Convert this [`SyncStateRpc`] to a RPC module. 
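The `sync_state` trait above exposes a single method, so the full name on the wire stays `sync_state_genSyncSpec`, matching the name used by the removed hand-written registration below. A hedged request sketch, with the boolean `raw` flag as the only parameter and the usual local HTTP port assumed:

```text
curl \
  -H "Content-Type: application/json" \
  -d '{"id":1, "jsonrpc":"2.0", "method": "sync_state_genSyncSpec", "params": [true]}' \
  http://localhost:9933/
```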
- pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - // Returns the json-serialized chainspec running the node, with a sync state. - module.register_method("sync_state_genSyncSpec", |params, sync_state| { - sync_state.deny_unsafe.check_if_safe()?; - - let raw = params.one()?; - let current_sync_state = - sync_state.build_sync_state().map_err(|e| CallError::Failed(Box::new(e)))?; - let mut chain_spec = sync_state.chain_spec.cloned_box(); - - let extension = sc_chain_spec::get_extension_mut::( - chain_spec.extensions_mut(), - ) - .ok_or_else(|| { - CallError::Failed( - anyhow::anyhow!("Could not find `LightSyncState` chain-spec extension!").into(), - ) - })?; - - let val = serde_json::to_value(&current_sync_state) - .map_err(|e| CallError::Failed(Box::new(e)))?; - *extension = Some(val); - - chain_spec - .as_json(raw) - .map_err(|e| CallError::Failed(anyhow::anyhow!(e).into())) - })?; - Ok(module) - } - fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; let finalized_header = self @@ -192,3 +169,32 @@ where }) } } + +impl SyncStateRpcApiServer for SyncStateRpc +where + Block: BlockT, + Backend: HeaderBackend + sc_client_api::AuxStore + 'static, +{ + fn system_gen_sync_spec(&self, raw: bool) -> JsonRpcResult { + self.deny_unsafe.check_if_safe()?; + + let current_sync_state = + self.build_sync_state().map_err(|e| JsonRpseeError::to_call_error(e))?; + let mut chain_spec = self.chain_spec.cloned_box(); + + let extension = sc_chain_spec::get_extension_mut::( + chain_spec.extensions_mut(), + ) + .ok_or_else(|| { + JsonRpseeError::from(anyhow::anyhow!( + "Could not find `LightSyncState` chain-spec extension!" + )) + })?; + + let val = serde_json::to_value(&current_sync_state) + .map_err(|e| JsonRpseeError::to_call_error(e))?; + *extension = Some(val); + + chain_spec.as_json(raw).map_err(|e| anyhow::anyhow!(e).into()) + } +} diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index e032824dd4a8c..36eb88d2b0975 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -23,8 +23,12 @@ use std::{marker::PhantomData, sync::Arc}; use codec::Codec; use jsonrpsee::{ - types::error::{CallError, Error as JsonRpseeError}, - RpcModule, + proc_macros::rpc, + types::{ + async_trait, + error::{CallError, Error as JsonRpseeError}, + JsonRpcResult, + }, }; use pallet_contracts_primitives::{ Code, ContractExecResult, ContractInstantiateResult, RentProjection, }; @@ -65,20 +69,22 @@ const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; /// A private newtype for converting `ContractAccessError` into an RPC error. struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); -impl From for CallError { - fn from(e: ContractAccessError) -> CallError { +impl From for JsonRpseeError { + fn from(e: ContractAccessError) -> Self { use pallet_contracts_primitives::ContractAccessError::*; match e.0 { DoesntExist => CallError::Custom { code: CONTRACT_DOESNT_EXIST, message: "The specified contract doesn't exist.".into(), data: None, - }, + } + .into(), IsTombstone => CallError::Custom { code: CONTRACT_IS_A_TOMBSTONE, message: "The contract is a tombstone and doesn't have any storage.".into(), data: None, - }, + } + .into(), } } } @@ -108,6 +114,59 @@ pub struct InstantiateRequest { salt: Bytes, } +/// Contracts RPC methods. +#[rpc(client, server, namespace = "contracts")] +pub trait ContractsApi { + /// Executes a call to a contract.
+ /// + /// This call is performed locally without submitting any transactions. Thus executing this + /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. + /// + /// This method is useful for calling getter-like methods on contracts. + #[method(name = "call")] + fn call( + &self, + call_request: CallRequest, + at: Option, + ) -> JsonRpcResult; + + /// Instantiate a new contract. + /// + /// This call is performed locally without submitting any transactions. Thus the contract + /// is not actually created. + /// + /// This method is useful for UIs to dry-run contract instantiations. + #[method(name = "instantiate")] + fn instantiate( + &self, + instantiate_request: InstantiateRequest, + at: Option, + ) -> JsonRpcResult>; + + /// Returns the value under a specified storage `key` in a contract given by `address` param, + /// or `None` if it is not set. + #[method(name = "getStorage")] + fn get_storage( + &self, + address: AccountId, + key: H256, + at: Option, + ) -> JsonRpcResult>; + + /// Returns the projected time a given contract will be able to sustain paying its rent. + /// + /// The returned projection is relevant for the given block, i.e. it is as if the contract was + /// accessed at the beginning of that block. + /// + /// Returns `None` if the contract is exempted from rent. + #[method(name = "rentProjection")] + fn rent_projection( + &self, + address: AccountId, + at: Option, + ) -> JsonRpcResult>; +} + /// Contracts RPC methods. pub struct ContractsRpc { client: Arc, @@ -117,7 +176,30 @@ pub struct ContractsRpc { _hash: PhantomData, } -impl ContractsRpc +impl + ContractsRpc +{ + /// Create new `Contracts` with the given reference to the client. + pub fn new(client: Arc) -> Self { + Self { + client, + _block: Default::default(), + _account_id: Default::default(), + _balance: Default::default(), + _hash: Default::default(), + } + } +} + +#[async_trait] +impl + ContractsApiServer< + ::Hash, + <::Header as HeaderT>::Number, + AccountId, + Balance, + Hash, + > for ContractsRpc where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, @@ -132,169 +214,112 @@ where Balance: Codec + TryFrom + Send + Sync + 'static, Hash: traits::MaybeSerializeDeserialize + Codec + Send + Sync + 'static, { - pub fn new(client: Arc) -> Self { - Self { - client, - _block: Default::default(), - _account_id: Default::default(), - _balance: Default::default(), - _hash: Default::default(), - } + fn call( + &self, + call_request: CallRequest, + at: Option<::Hash>, + ) -> JsonRpcResult { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + + let CallRequest { origin, dest, value, gas_limit, input_data } = call_request; + + let value: Balance = decode_hex(value, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + limit_gas(gas_limit)?; + + let exec_result = api + .call(&at, origin, dest, value, gas_limit, input_data.to_vec()) + .map_err(runtime_error_into_rpc_err)?; + + Ok(exec_result) + } + + fn instantiate( + &self, + instantiate_request: InstantiateRequest, + at: Option<::Hash>, + ) -> JsonRpcResult< + ContractInstantiateResult::Header as HeaderT>::Number>, + > { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + let InstantiateRequest { origin, endowment, gas_limit, code, data, salt } = + instantiate_request; + + let endowment: Balance = decode_hex(endowment, "balance")?; + let 
gas_limit: Weight = decode_hex(gas_limit, "weight")?; + limit_gas(gas_limit)?; + + let exec_result = api + .instantiate(&at, origin, endowment, gas_limit, code, data.to_vec(), salt.to_vec()) + .map_err(runtime_error_into_rpc_err)?; + + Ok(exec_result) + } + + fn get_storage( + &self, + address: AccountId, + key: H256, + at: Option<::Hash>, + ) -> JsonRpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + let result = api + .get_storage(&at, address, key.into()) + .map_err(runtime_error_into_rpc_err)? + .map_err(ContractAccessError)? + .map(Bytes); + + Ok(result) } - /// Convert a [`ContractsRpc`] to an [`RpcModule`]. Registers all the RPC methods available with - /// the RPC server. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - // Executes a call to a contract. - // - // This call is performed locally without submitting any transactions. Thus executing this - // won't change any state. Nonetheless, calling state-changing contracts is still possible. - // - // This method is useful for calling getter-like methods on contracts. - module.register_method( - "contracts_call", - |params, contracts| -> Result { - let (call_request, at): (CallRequest, Option<::Hash>) = - params.parse()?; - let api = contracts.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| contracts.client.info().best_hash)); - - let CallRequest { origin, dest, value, gas_limit, input_data } = call_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: Weight = decode_hex(gas_limit, "weight")?; - limit_gas(gas_limit)?; - - let exec_result = api - .call(&at, origin, dest, value, gas_limit, input_data.to_vec()) - .map_err(runtime_error_into_rpc_err)?; - - Ok(exec_result) - }, - )?; - - // Instantiate a new contract. - // - // This call is performed locally without submitting any transactions. Thus the contract - // is not actually created. - // - // This method is useful for UIs to dry-run contract instantiations. - module.register_method( - "contracts_instantiate", - |params, - contracts| - -> Result< - ContractInstantiateResult< - AccountId, - <::Header as HeaderT>::Number, - >, - CallError, - > { - let (instantiate_request, at): ( - InstantiateRequest, - Option<::Hash>, - ) = params.parse()?; - - let api = contracts.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| contracts.client.info().best_hash)); - let InstantiateRequest { origin, endowment, gas_limit, code, data, salt } = - instantiate_request; - - let endowment: Balance = decode_hex(endowment, "balance")?; - let gas_limit: Weight = decode_hex(gas_limit, "weight")?; - limit_gas(gas_limit)?; - - let exec_result = api - .instantiate( - &at, - origin, - endowment, - gas_limit, - code, - data.to_vec(), - salt.to_vec(), - ) - .map_err(runtime_error_into_rpc_err)?; - - Ok(exec_result) - }, - )?; - - // Returns the value under a specified storage `key` in a contract given by `address` param, - // or `None` if it is not set. - module.register_method( - "contracts_getStorage", - |params, contracts| -> Result, CallError> { - let (address, key, at): (AccountId, H256, Option<::Hash>) = - params.parse()?; - - let api = contracts.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| contracts.client.info().best_hash)); - let result = api - .get_storage(&at, address, key.into()) - .map_err(runtime_error_into_rpc_err)? - .map_err(ContractAccessError)? 
- .map(Bytes); - - Ok(result) - }, - )?; - - // Returns the projected time a given contract will be able to sustain paying its rent. - // - // The returned projection is relevant for the given block, i.e. it is as if the contract - // was accessed at the beginning of that block. - // - // Returns `None` if the contract is exempted from rent. - module.register_method( - "contracts_rentProjection", - |params, - contracts| - -> Result::Header as HeaderT>::Number>, CallError> { - let (address, at): (AccountId, Option<::Hash>) = params.parse()?; - - let api = contracts.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| contracts.client.info().best_hash)); - - let result = api - .rent_projection(&at, address) - .map_err(runtime_error_into_rpc_err)? - .map_err(ContractAccessError)?; - - Ok(match result { - RentProjection::NoEviction => None, - RentProjection::EvictionAt(block_num) => Some(block_num), - }) - }, - )?; - - Ok(module) + fn rent_projection( + &self, + address: AccountId, + at: Option<::Hash>, + ) -> JsonRpcResult::Header as HeaderT>::Number>> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + + let result = api + .rent_projection(&at, address) + .map_err(runtime_error_into_rpc_err)? + .map_err(ContractAccessError)?; + + Ok(match result { + RentProjection::NoEviction => None, + RentProjection::EvictionAt(block_num) => Some(block_num), + }) } } /// Converts a runtime trap into an RPC error. -fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> CallError { +fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> JsonRpseeError { CallError::Custom { code: RUNTIME_ERROR, message: "Runtime error".into(), data: to_raw_value(&format!("{:?}", err)).ok(), } + .into() } fn decode_hex>( from: H, name: &str, -) -> Result { - from.try_into().map_err(|_| CallError::Custom { - code: -32602, // TODO: was `ErrorCode::InvalidParams` - message: format!("{:?} does not fit into the {} type", from, name), - data: None, +) -> Result { + from.try_into().map_err(|_| { + CallError::Custom { + code: -32602, // TODO: was `ErrorCode::InvalidParams` + message: format!("{:?} does not fit into the {} type", from, name), + data: None, + } + .into() }) } -fn limit_gas(gas_limit: Weight) -> Result<(), CallError> { +fn limit_gas(gas_limit: Weight) -> Result<(), JsonRpseeError> { if gas_limit > GAS_LIMIT { Err(CallError::Custom { code: -32602, // TODO: was `ErrorCode::InvalidParams,` @@ -303,7 +328,8 @@ fn limit_gas(gas_limit: Weight) -> Result<(), CallError> { gas_limit, GAS_LIMIT ), data: None, - }) + } + .into()) } else { Ok(()) } diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index d0bf494d6196b..ce019fec5e1e9 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -24,8 +24,8 @@ use std::{marker::PhantomData, sync::Arc}; use codec::{Codec, Encode}; use jsonrpsee::{ - types::{error::CallError, Error as JsonRpseeError}, - RpcModule, + proc_macros::rpc, + types::{async_trait, error::CallError, JsonRpcResult}, }; use pallet_mmr_primitives::{Error as MmrError, Proof}; use serde::{Deserialize, Serialize}; @@ -64,54 +64,65 @@ impl LeafProof { } } +/// MMR RPC methods. +#[rpc(client, server, namespace = "mmr")] +pub trait MmrApi { + /// Generate MMR proof for given leaf index. + /// + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// MMR proof for leaf at given `leaf_index`. 
+ /// Optionally, a block hash at which the runtime should be queried can be specified. + /// + /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of + /// the leaf). Both parameters are SCALE-encoded. + #[method(name = "generateProof")] + fn generate_proof( + &self, + leaf_index: u64, + at: Option, + ) -> JsonRpcResult>; +} + /// MMR RPC methods. pub struct MmrRpc { client: Arc, _marker: PhantomData, } -impl MmrRpc +impl MmrRpc { + /// Create new `Mmr` with the given reference to the client. + pub fn new(client: Arc) -> Self { + Self { client, _marker: Default::default() } + } +} + +#[async_trait] +impl MmrApiServer<::Hash> + for MmrRpc where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, Client::Api: MmrRuntimeApi, MmrHash: Codec + Send + Sync + 'static, { - /// Create a new [`MmrRpc`]. - pub fn new(client: Arc) -> Self { - MmrRpc { client, _marker: Default::default() } - } - - /// Convert this [`MmrRpc`] to an [`RpcModule`]. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - // Generate MMR proof for given leaf index. - // - // This method calls into a runtime with MMR pallet included and attempts to generate - // MMR proof for leaf at given `leaf_index`. - // Optionally, a block hash at which the runtime should be queried can be specified. - // - // Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of - // the leaf). Both parameters are SCALE-encoded. - module.register_method("mmr_generateProof", |params, mmr| { - let (leaf_index, at): (u64, Option<::Hash>) = params.parse()?; - let api = mmr.client.runtime_api(); - let block_hash = at.unwrap_or_else(|| mmr.client.info().best_hash); - - let (leaf, proof) = api - .generate_proof_with_context( - &BlockId::hash(block_hash), - sp_core::ExecutionContext::OffchainCall(None), - leaf_index, - ) - .map_err(runtime_error_into_rpc_error)? - .map_err(mmr_error_into_rpc_error)?; - - Ok(LeafProof::new(block_hash, leaf, proof)) - })?; - - Ok(module) + fn generate_proof( + &self, + leaf_index: u64, + at: Option<::Hash>, + ) -> JsonRpcResult> { + let api = self.client.runtime_api(); + let block_hash = at.unwrap_or_else(|| self.client.info().best_hash); + + let (leaf, proof) = api + .generate_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_index, + ) + .map_err(runtime_error_into_rpc_error)? 
+ .map_err(mmr_error_into_rpc_error)?; + + Ok(LeafProof::new(block_hash, leaf, proof)) } } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index e1ff4102f295b..ee9c500ffc55f 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -21,8 +21,12 @@ use std::{convert::TryInto, sync::Arc}; use codec::{Codec, Decode}; use jsonrpsee::{ - types::error::{CallError, Error as JsonRpseeError}, - RpcModule, + proc_macros::rpc, + types::{ + async_trait, + error::{CallError, Error as JsonRpseeError}, + JsonRpcResult, + }, }; pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; @@ -35,6 +39,19 @@ use sp_runtime::{ traits::{Block as BlockT, MaybeDisplay}, }; +#[rpc(client, server, namespace = "payment")] +pub trait TransactionPaymentApi { + #[method(name = "queryInfo")] + fn query_info(&self, encoded_xt: Bytes, at: Option) -> JsonRpcResult; + + #[method(name = "queryFeeDetails")] + fn query_fee_details( + &self, + encoded_xt: Bytes, + at: Option, + ) -> JsonRpcResult>; +} + /// Provides RPC methods to query a dispatchable's class, weight and fee. pub struct TransactionPaymentRpc { /// Shared reference to the client. @@ -43,72 +60,69 @@ pub struct TransactionPaymentRpc { _balance_marker: std::marker::PhantomData, } -impl TransactionPaymentRpc +impl TransactionPaymentRpc { + /// Creates a new instance of the TransactionPaymentRpc helper. + pub fn new(client: Arc) -> Self { + Self { client, _block_marker: Default::default(), _balance_marker: Default::default() } + } +} + +#[async_trait] +impl + TransactionPaymentApiServer<::Hash, RuntimeDispatchInfo> + for TransactionPaymentRpc where Block: BlockT, C: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, C::Api: TransactionPaymentRuntimeApi, Balance: Codec + MaybeDisplay + Copy + TryInto + Send + Sync + 'static, { - /// Creates a new instance of the TransactionPaymentRpc helper. - pub fn new(client: Arc) -> Self { - Self { client, _block_marker: Default::default(), _balance_marker: Default::default() } + fn query_info( + &self, + encoded_xt: Bytes, + at: Option, + ) -> JsonRpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + + let encoded_len = encoded_xt.len() as u32; + + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt) + .map_err(|codec_err| CallError::from_std_error(codec_err))?; + api.query_info(&at, uxt, encoded_len) + .map_err(|api_err| JsonRpseeError::to_call_error(api_err)) } - /// Convert this [`TransactionPaymentRpc`] to an [`RpcModule`]. 
- pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - module.register_method::, _>( - "payment_queryInfo", - |params, trx_payment| { - let (encoded_xt, at): (Bytes, Option<::Hash>) = params.parse()?; - - let api = trx_payment.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| trx_payment.client.info().best_hash)); - - let encoded_len = encoded_xt.len() as u32; - - let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt) - .map_err(|codec_err| CallError::Failed(Box::new(codec_err)))?; - api.query_info(&at, uxt, encoded_len) - .map_err(|api_err| CallError::Failed(Box::new(api_err))) + fn query_fee_details( + &self, + encoded_xt: Bytes, + at: Option, + ) -> JsonRpcResult> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); + + let encoded_len = encoded_xt.len() as u32; + + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt) + .map_err(|codec_err| CallError::from_std_error(codec_err))?; + let fee_details = api + .query_fee_details(&at, uxt, encoded_len) + .map_err(|api_err| CallError::from_std_error(api_err))?; + + let try_into_rpc_balance = + |value: Balance| value.try_into().map_err(|_try_err| CallError::InvalidParams); + + Ok(FeeDetails { + inclusion_fee: if let Some(inclusion_fee) = fee_details.inclusion_fee { + Some(InclusionFee { + base_fee: try_into_rpc_balance(inclusion_fee.base_fee)?, + len_fee: try_into_rpc_balance(inclusion_fee.len_fee)?, + adjusted_weight_fee: try_into_rpc_balance(inclusion_fee.adjusted_weight_fee)?, + }) + } else { + None }, - )?; - - module.register_method("payment_queryFeeDetails", |params, trx_payment| { - let (encoded_xt, at): (Bytes, Option<::Hash>) = params.parse()?; - - let api = trx_payment.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| trx_payment.client.info().best_hash)); - - let encoded_len = encoded_xt.len() as u32; - - let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt) - .map_err(|codec_err| CallError::Failed(Box::new(codec_err)))?; - let fee_details = api - .query_fee_details(&at, uxt, encoded_len) - .map_err(|api_err| CallError::Failed(Box::new(api_err)))?; - - let try_into_rpc_balance = - |value: Balance| value.try_into().map_err(|_try_err| CallError::InvalidParams); - - Ok(FeeDetails { - inclusion_fee: if let Some(inclusion_fee) = fee_details.inclusion_fee { - Some(InclusionFee { - base_fee: try_into_rpc_balance(inclusion_fee.base_fee)?, - len_fee: try_into_rpc_balance(inclusion_fee.len_fee)?, - adjusted_weight_fee: try_into_rpc_balance( - inclusion_fee.adjusted_weight_fee, - )?, - }) - } else { - None - }, - tip: Default::default(), - }) - })?; - - Ok(module) + tip: Default::default(), + }) } } diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs index 519e2e7be5eae..1085979a0fe9d 100644 --- a/test-utils/test-runner/src/client.rs +++ b/test-utils/test-runner/src/client.rs @@ -22,7 +22,7 @@ use jsonrpsee::types::RpcModule; use manual_seal::{ consensus::babe::{BabeConsensusDataProvider, SlotTimestampProvider}, import_queue, - rpc::ManualSeal, + rpc::{ManualSeal, ManualSealApiServer}, run_manual_seal, EngineCommand, ManualSealParams, }; use sc_client_api::backend::Backend; @@ -187,9 +187,9 @@ where let rpc_sink = command_sink.clone(); let rpc_builder = Box::new(move |_, _| -> RpcModule<()> { - let seal = ManualSeal::new(rpc_sink).into_rpc_module().expect("TODO; error handling"); + let seal = ManualSeal::new(rpc_sink).into_rpc(); let 
mut module = RpcModule::new(()); - module.merge(seal).expect("TODO: error handling"); + module.merge(seal).expect("only one module; qed"); module }); diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index aa9f1bbef8024..93ee6e3e8c892 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.16" -jsonrpc-client-transports = { version = "18.0.0", features = ["http"] } +jsonrpsee = { git = "https://github.com/paritytech/jsonrpsee", branch = "master", features = ["client", "types"] } codec = { package = "parity-scale-codec", version = "2.0.0" } serde = "1" frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index 584f35e8d5d8f..07f2881f6287c 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -23,6 +23,8 @@ use codec::{DecodeAll, FullCodec, FullEncode}; use core::marker::PhantomData; use frame_support::storage::generator::{StorageDoubleMap, StorageMap, StorageValue}; +use jsonrpsee::types::Error as RpcError; +use sc_rpc_api::state::StateApiClient; use serde::{de::DeserializeOwned, Serialize}; use sp_storage::{StorageData, StorageKey}; @@ -108,10 +110,6 @@ impl StorageQuery { Self { key: StorageKey(St::storage_double_map_final_key(key1, key2)), _spook: PhantomData } } - /* - - TODO(niklasad1): should be ported to jsonrpsee - /// Send this query over RPC, await the typed result. /// /// Hash should be ::Hash. @@ -122,15 +120,18 @@ impl StorageQuery { /// /// block_index indicates the block for which state will be queried. A value of None indicates /// the latest block. - pub async fn get( + pub async fn get( self, - state_client: &StateClient, + state_client: &StateClient, block_index: Option, - ) -> Result, RpcError> { + ) -> Result, RpcError> + where + Hash: Send + Sync + 'static + DeserializeOwned + Serialize, + StateClient: StateApiClient + Sync, + { let opt: Option = state_client.storage(self.key, block_index).await?; opt.map(|encoded| V::decode_all(&encoded.0)) .transpose() - .map_err(|decode_err| RpcError::Other(Box::new(decode_err))) + .map_err(|decode_err| RpcError::Custom(decode_err.to_string())) } - */ } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 0851d89726e6a..1c4e4ae75ee01 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -20,10 +20,9 @@ use std::{fmt::Display, marker::PhantomData, sync::Arc}; use codec::{self, Codec, Decode, Encode}; -use futures::{future, FutureExt}; use jsonrpsee::{ - types::{error::CallError, Error as JsonRpseeError}, - RpcModule, + proc_macros::rpc, + types::{async_trait, error::CallError, Error as JsonRpseeError, JsonRpcResult}, }; use sc_client_api::light::{self, future_header, RemoteBlockchain, RemoteCallRequest}; use sc_rpc_api::DenyUnsafe; @@ -35,12 +34,36 @@ use sp_runtime::{generic::BlockId, traits}; pub use frame_system_rpc_runtime_api::AccountNonceApi; +/// System RPC methods. +#[rpc(client, server, namespace = "system")] +pub trait SystemApi { + /// Returns the next valid index (aka nonce) for given account. + /// + /// This method takes into consideration all pending transactions + /// currently in the pool and if no transactions are found in the pool + /// it fallbacks to query the index from the runtime (aka. state nonce). 
+ #[method(name = "system_accountNextIndex", aliases = "system_nextIndex")] + async fn nonce(&self, account: AccountId) -> JsonRpcResult; + + /// Dry run an extrinsic at a given block. Return SCALE encoded ApplyExtrinsicResult. + #[method(name = "system_dryRun", aliases = "system_dryRunAt")] + async fn dry_run(&self, extrinsic: Bytes, at: Option) -> JsonRpcResult; +} + /// System RPC methods. pub struct SystemRpc { backend: Box>, } -impl SystemRpc +impl SystemRpc { + pub fn new(backend: Box>) -> Self { + Self { backend } + } +} + +#[async_trait] +impl SystemApiServer + for SystemRpc where AccountId: Clone + Display + Codec + traits::MaybeSerializeDeserialize + Send + 'static, BlockHash: Send + traits::MaybeSerializeDeserialize + 'static, @@ -53,61 +76,28 @@ where + traits::MaybeSerialize + 'static, { - pub fn new(backend: Box>) -> Self { - Self { backend } + async fn nonce(&self, account: AccountId) -> JsonRpcResult { + self.backend.nonce(account).await } - /// Convert this [`SystemRpc`] to an [`RpcModule`]. - pub fn into_rpc_module(self) -> Result, JsonRpseeError> { - let mut module = RpcModule::new(self); - - // Returns the next valid index (aka nonce) for given account. - // - // This method takes into consideration all pending transactions - // currently in the pool and if no transactions are found in the pool - // it fallbacks to query the index from the runtime (aka. state nonce). - module.register_async_method("system_accountNextIndex", |params, system| { - let account = match params.one() { - Ok(a) => a, - Err(e) => return Box::pin(future::err(e)), - }; - - async move { system.backend.nonce(account).await }.boxed() - })?; - - // Dry run an extrinsic at a given block. Return SCALE encoded ApplyExtrinsicResult. - module.register_async_method("system_dryRun", |params, system| { - let mut seq = params.sequence(); - - let extrinsic = match seq.next() { - Ok(params) => params, - Err(e) => return Box::pin(future::err(e)), - }; - - let at = match seq.optional_next() { - Ok(at) => at, - Err(e) => return Box::pin(future::err(e)), - }; - - async move { system.backend.dry_run(extrinsic, at).await }.boxed() - })?; - - module.register_alias("account_nextIndex", "system_accountNextIndex")?; - module.register_alias("system_dryRunAt", "system_dryRun")?; - - Ok(module) + async fn dry_run(&self, extrinsic: Bytes, at: Option) -> JsonRpcResult { + self.backend.dry_run(extrinsic, at).await } } /// Blockchain backend API -#[async_trait::async_trait] +#[async_trait] pub trait SystemRpcBackend: Send + Sync + 'static where AccountId: Clone + Display + Codec, Index: Clone + Display + Codec + Send + traits::AtLeast32Bit + 'static, { - async fn nonce(&self, account: AccountId) -> Result; - async fn dry_run(&self, extrinsic: Bytes, at: Option) -> Result; + async fn nonce(&self, account: AccountId) -> Result; + async fn dry_run( + &self, + extrinsic: Bytes, + at: Option, + ) -> Result; } /// A full-client backend for [`SystemRpc`]. 
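Every server ported in this diff follows the same recipe as the `SystemApi` trait above: declare the API with `#[rpc(client, server, namespace = "...")]`, implement the generated `*ApiServer` trait on a concrete struct, and call the generated `into_rpc()` to get an `RpcModule` that the service builder merges into the root module. The sketch below shows that recipe end to end; the `HelloApi` trait, `HelloRpc` struct and `build_rpc_module` function are hypothetical stand-ins, and the macro surface is assumed to match the jsonrpsee revision pinned by this change.

use jsonrpsee::proc_macros::rpc;
use jsonrpsee::types::{async_trait, JsonRpcResult};
use jsonrpsee::RpcModule;

/// Hypothetical API, not part of this change. The macro generates both a
/// `HelloApiClient` and a `HelloApiServer` trait from this declaration.
#[rpc(client, server, namespace = "hello")]
pub trait HelloApi {
    /// With the `hello` namespace this method should be exposed as `hello_greet`.
    #[method(name = "greet")]
    async fn greet(&self, name: String) -> JsonRpcResult<String>;
}

/// Concrete server type, analogous to `SystemRpc`, `MmrRpc`, `ContractsRpc`, etc.
pub struct HelloRpc;

#[async_trait]
impl HelloApiServer for HelloRpc {
    async fn greet(&self, name: String) -> JsonRpcResult<String> {
        Ok(format!("Hello, {}!", name))
    }
}

/// Mirrors what `gen_handler` does in `builder.rs`: one root module, with each
/// generated per-API module merged in, relying on unique method names.
pub fn build_rpc_module() -> RpcModule<()> {
    let mut module = RpcModule::new(());
    module.merge(HelloRpc.into_rpc()).expect("method names are unique; qed");
    module
}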
@@ -160,13 +150,13 @@ where AccountId: Clone + std::fmt::Display + Codec + Send + 'static, Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, { - async fn nonce(&self, account: AccountId) -> Result { + async fn nonce(&self, account: AccountId) -> Result { let api = self.client.runtime_api(); let best = self.client.info().best_hash; let at = BlockId::hash(best); let nonce = api .account_nonce(&at, account.clone()) - .map_err(|api_err| CallError::Failed(Box::new(api_err)))?; + .map_err(|api_err| CallError::from_std_error(api_err))?; Ok(adjust_nonce(&*self.pool, account, nonce)) } @@ -174,7 +164,7 @@ where &self, extrinsic: Bytes, at: Option<::Hash>, - ) -> Result { + ) -> Result { self.deny_unsafe.check_if_safe()?; let api = self.client.runtime_api(); let at = BlockId::::hash(at.unwrap_or_else(|| self.client.info().best_hash)); @@ -193,7 +183,7 @@ where } } -#[async_trait::async_trait] +#[async_trait] impl SystemRpcBackend<::Hash, AccountId, Index> for SystemRpcBackendLight @@ -206,14 +196,14 @@ where AccountId: Clone + Display + Codec + Send + 'static, Index: Clone + Display + Codec + Send + traits::AtLeast32Bit + 'static, { - async fn nonce(&self, account: AccountId) -> Result { + async fn nonce(&self, account: AccountId) -> Result { let best_hash = self.client.info().best_hash; let best_id = BlockId::hash(best_hash); let best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id) .await - .map_err(|blockchain_err| CallError::Failed(Box::new(blockchain_err)))? + .map_err(|blockchain_err| CallError::from_std_error(blockchain_err))? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", best_hash))) - .map_err(|client_err| CallError::Failed(Box::new(client_err)))?; + .map_err(|client_err| CallError::from_std_error(client_err))?; let call_data = account.encode(); let nonce = self .fetcher @@ -225,10 +215,10 @@ where retry_count: None, }) .await - .map_err(|blockchain_err| CallError::Failed(Box::new(blockchain_err)))?; + .map_err(|blockchain_err| CallError::from_std_error(blockchain_err))?; let nonce: Index = Decode::decode(&mut &nonce[..]) - .map_err(|codec_err| CallError::Failed(Box::new(codec_err)))?; + .map_err(|codec_err| CallError::from_std_error(codec_err))?; Ok(adjust_nonce(&*self.pool, account, nonce)) } @@ -237,13 +227,14 @@ where &self, _extrinsic: Bytes, _at: Option<::Hash>, - ) -> Result { + ) -> Result { Err(CallError::Custom { code: -32601, /* TODO: (dp) We have this in jsonrpsee too somewhere. This is * jsonrpsee::ErrorCode::MethodNotFound */ message: "Not implemented for light clients".into(), data: None, - }) + } + .into()) } }
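Method-specific failures throughout this diff (the contracts error codes, `runtime_error_into_rpc_err`, the light-client `dry_run` just above) are all built the same way: construct a `CallError::Custom` and convert it into the top-level `JsonRpseeError` with `.into()`. A small sketch of such a helper follows; the function name and error code are made up for illustration.

use jsonrpsee::types::error::{CallError, Error as JsonRpseeError};
use serde_json::value::to_raw_value;

/// Builds a method-specific error in the same style as `runtime_error_into_rpc_err`.
fn custom_error(detail: &str) -> JsonRpseeError {
    CallError::Custom {
        // Arbitrary application error code; real codes live next to each RPC implementation.
        code: -32000,
        message: "something went wrong".into(),
        // `to_raw_value` requires serde_json's "raw_value" feature.
        data: to_raw_value(&detail).ok(),
    }
    .into()
}

fn main() {
    // A failing storage lookup, for instance, would surface to the RPC caller like this:
    let err = custom_error("storage item not found");
    println!("{:?}", err);
}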