Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
122 changes: 122 additions & 0 deletions rpcs/src/methods/chain_head.rs
Original file line number Diff line number Diff line change
Expand Up @@ -292,6 +292,128 @@ impl<T: RpcConfig> ChainHeadRpcMethods<T> {
.await
}

/// Fetch the block body (ie the extrinsics in the block) given its hash.
///
/// Returns the hexadecimal-encoded SCALE-encoded extrinsics found in the block,
/// or `None` if no block with that hash exists.
pub async fn archive_v1_body(&self, block_hash: T::Hash) -> Result<Option<Vec<Bytes>>, Error> {
    let params = rpc_params![block_hash];
    self.client.request("archive_v1_body", params).await
}

/// Call the `archive_v1_call` method and return the response.
pub async fn archive_v1_call(
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hmm, we could perhaps have a shared implementation for this for both archive_v1_call and archive_unstable_call but doesn't matter that much since we will remove the unstable stuff eventually :)

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I contemplated that, but then it'd make it messier to remove them, whereas at the mo we can just delete the unstable ones nice and cleanly :)

&self,
block_hash: T::Hash,
function: &str,
call_parameters: &[u8],
) -> Result<ArchiveCallResult, Error> {
use serde::de::Error as _;

// We deserialize to this intermediate shape, since
// we can't have a boolean tag to denote variants.
#[derive(Deserialize)]
struct Response {
success: bool,
value: Option<Bytes>,
error: Option<String>,
// This was accidentally used instead of value in Substrate,
// so to support those impls we try it here if needed:
result: Option<Bytes>,
}

let res: Response = self
.client
.request(
"archive_v1_call",
rpc_params![block_hash, function, to_hex(call_parameters)],
)
.await?;

let value = res.value.or(res.result);
match (res.success, value, res.error) {
(true, Some(value), _) => Ok(ArchiveCallResult::Success(value)),
(false, _, err) => Ok(ArchiveCallResult::Error(err.unwrap_or(String::new()))),
(true, None, _) => {
let m = "archive_v1_call: 'success: true' response should have `value: 0x1234` alongside it";
Err(Error::Deserialization(serde_json::Error::custom(m)))
}
}
}

/// Return the finalized block height of the chain.
pub async fn archive_v1_finalized_height(&self) -> Result<usize, Error> {
    let params = rpc_params![];
    self.client.request("archive_v1_finalizedHeight", params).await
}

/// Return the genesis hash.
pub async fn archive_v1_genesis_hash(&self) -> Result<T::Hash, Error> {
    let params = rpc_params![];
    self.client.request("archive_v1_genesisHash", params).await
}

/// Given a block height, return the hashes of the zero or more blocks at that height.
///
/// For heights at or below the latest finalized block, exactly one entry is returned.
/// Above the latest finalized block, 0, 1 or multiple blocks may exist at a given
/// height, since forks can occur there.
pub async fn archive_v1_hash_by_height(&self, height: usize) -> Result<Vec<T::Hash>, Error> {
    let params = rpc_params![height];
    self.client.request("archive_v1_hashByHeight", params).await
}

/// Fetch the header for a block with the given hash, or `None` if no block with that hash exists.
pub async fn archive_v1_header(&self, block_hash: T::Hash) -> Result<Option<T::Header>, Error> {
let maybe_encoded_header: Option<Bytes> = self
.client
.request("archive_v1_header", rpc_params![block_hash])
.await?;

let Some(encoded_header) = maybe_encoded_header else {
return Ok(None);
};

let header =
<T::Header as codec::Decode>::decode(&mut &*encoded_header.0).map_err(Error::Decode)?;
Ok(Some(header))
}

/// Query the node storage and return a subscription which streams corresponding storage events back.
pub async fn archive_v1_storage(
    &self,
    block_hash: T::Hash,
    items: impl IntoIterator<Item = StorageQuery<&[u8]>>,
    child_key: Option<&[u8]>,
) -> Result<ArchiveStorageSubscription<T::Hash>, Error> {
    // Hex-encode each query key so the queries can be sent over JSON-RPC.
    let mut encoded_items: Vec<StorageQuery<String>> = Vec::new();
    for query in items {
        encoded_items.push(StorageQuery {
            key: to_hex(query.key),
            query_type: query.query_type,
        });
    }

    let sub = self
        .client
        .subscribe(
            "archive_v1_storage",
            rpc_params![block_hash, encoded_items, child_key.map(to_hex)],
            "archive_v1_stopStorage",
        )
        .await?;

    Ok(ArchiveStorageSubscription { sub, done: false })
}

// Dev note: we continue to support the latest "unstable" archive methods because
// they will be around for a while before the stable ones make it into a release.
// The below are just a copy-paste of the v1 methods, above, but calling the
// "unstable" RPCs instead. Eventually we'll remove them.
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe open a tracking issue for that so we don't forget


/// Fetch the block body (ie the extrinsics in the block) given its hash.
///
/// Returns an array of the hexadecimal-encoded scale-encoded extrinsics found in the block,
Expand Down
32 changes: 14 additions & 18 deletions testing/integration-tests/src/full_client/client/archive_rpcs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ async fn fetch_finalized_blocks<T: Config>(
}

#[subxt_test]
async fn archive_unstable_body() {
async fn archive_v1_body() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;
let mut blocks = fetch_finalized_blocks(&ctx, 3).await;
Expand All @@ -52,7 +52,7 @@ async fn archive_unstable_body() {
.iter()
.map(|e| e.bytes().to_vec());
let archive_block_bodies = rpc
.archive_unstable_body(block.hash())
.archive_v1_body(block.hash())
.await
.unwrap()
.into_iter()
Expand All @@ -67,7 +67,7 @@ async fn archive_unstable_body() {
}

#[subxt_test]
async fn archive_unstable_call() {
async fn archive_v1_call() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;
let mut blocks = fetch_finalized_blocks(&ctx, 3).await;
Expand All @@ -82,7 +82,7 @@ async fn archive_unstable_call() {
.unwrap()
.encode();
let archive_metadata_versions = rpc
.archive_unstable_call(block.hash(), "Metadata_metadata_versions", &[])
.archive_v1_call(block.hash(), "Metadata_metadata_versions", &[])
.await
.unwrap()
.as_success()
Expand All @@ -94,7 +94,7 @@ async fn archive_unstable_call() {
}

#[subxt_test]
async fn archive_unstable_finalized_height() {
async fn archive_v1_finalized_height() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;

Expand All @@ -105,7 +105,7 @@ async fn archive_unstable_finalized_height() {
let mut last_block_height = None;
loop {
// Fetch archive block height.
let archive_block_height = rpc.archive_unstable_finalized_height().await.unwrap();
let archive_block_height = rpc.archive_v1_finalized_height().await.unwrap();

// On a dev node we expect blocks to be finalized 1 by 1, so panic
// if the height we fetch has grown by more than 1.
Expand All @@ -126,18 +126,18 @@ async fn archive_unstable_finalized_height() {
}

#[subxt_test]
async fn archive_unstable_genesis_hash() {
async fn archive_v1_genesis_hash() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;

let chain_head_genesis_hash = rpc.chainspec_v1_genesis_hash().await.unwrap();
let archive_genesis_hash = rpc.archive_unstable_genesis_hash().await.unwrap();
let archive_genesis_hash = rpc.archive_v1_genesis_hash().await.unwrap();

assert_eq!(chain_head_genesis_hash, archive_genesis_hash);
}

#[subxt_test]
async fn archive_unstable_hash_by_height() {
async fn archive_v1_hash_by_height() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;
let mut blocks = fetch_finalized_blocks(&ctx, 3).await;
Expand All @@ -147,7 +147,7 @@ async fn archive_unstable_hash_by_height() {
let subxt_block_hash = block.hash();

let archive_block_hash = rpc
.archive_unstable_hash_by_height(subxt_block_height)
.archive_v1_hash_by_height(subxt_block_height)
.await
.unwrap();

Expand All @@ -158,7 +158,7 @@ async fn archive_unstable_hash_by_height() {
}

#[subxt_test]
async fn archive_unstable_header() {
async fn archive_v1_header() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;
let mut blocks = fetch_finalized_blocks(&ctx, 3).await;
Expand All @@ -167,18 +167,14 @@ async fn archive_unstable_header() {
let block_hash = block.hash();

let subxt_block_header = block.header();
let archive_block_header = rpc
.archive_unstable_header(block_hash)
.await
.unwrap()
.unwrap();
let archive_block_header = rpc.archive_v1_header(block_hash).await.unwrap().unwrap();

assert_eq!(subxt_block_header, &archive_block_header);
}
}

#[subxt_test]
async fn archive_unstable_storage() {
async fn archive_v1_storage() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;
let api = ctx.client();
Expand Down Expand Up @@ -214,7 +210,7 @@ async fn archive_unstable_storage() {
];

let mut res = rpc
.archive_unstable_storage(block_hash, storage_query, None)
.archive_v1_storage(block_hash, storage_query, None)
.await
.unwrap();

Expand Down
Loading