Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions modules/parachains/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -404,6 +404,7 @@ mod tests {
use frame_support::{
assert_noop, assert_ok,
dispatch::DispatchResultWithPostInfo,
storage::generator::{StorageDoubleMap, StorageMap},
traits::{Get, OnInitialize},
weights::Weight,
};
Expand Down Expand Up @@ -809,4 +810,27 @@ mod tests {
);
});
}

#[test]
fn storage_keys_computed_properly() {
	// The key of a `BestParaHeads` entry, as computed by the pallet itself, must match
	// the key computed by the `bp-parachains` primitives helper.
	let expected_best_head_key =
		bp_parachains::best_parachain_head_hash_storage_key_at_target("Parachains", ParaId(42)).0;
	assert_eq!(
		BestParaHeads::<TestRuntime>::storage_map_final_key(ParaId(42)).to_vec(),
		expected_best_head_key,
	);

	// Same check for entries of the `ImportedParaHeads` double map.
	let expected_imported_head_key = bp_parachains::imported_parachain_head_storage_key_at_target(
		"Parachains",
		ParaId(42),
		ParaHash::from([21u8; 32]),
	)
	.0;
	assert_eq!(
		ImportedParaHeads::<TestRuntime>::storage_double_map_final_key(
			ParaId(42),
			ParaHash::from([21u8; 32]),
		)
		.to_vec(),
		expected_imported_head_key,
	);
}
}
18 changes: 17 additions & 1 deletion primitives/parachains/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ pub fn parachain_head_storage_key_at_source(
/// Returns runtime storage key of best known parachain head at the target chain.
///
/// The head is stored by the `pallet-bridge-parachains` pallet in the `BestParaHeads` map.
pub fn parachain_head_storage_key_at_target(
pub fn best_parachain_head_hash_storage_key_at_target(
bridge_parachains_pallet_name: &str,
para_id: ParaId,
) -> StorageKey {
Expand All @@ -66,3 +66,19 @@ pub fn parachain_head_storage_key_at_target(
&para_id.encode(),
)
}

/// Returns runtime storage key of the parachain head with given hash at the target chain.
///
/// The head is stored by the `pallet-bridge-parachains` pallet in the `ImportedParaHeads` map.
pub fn imported_parachain_head_storage_key_at_target(
bridge_parachains_pallet_name: &str,
para_id: ParaId,
head_hash: ParaHash,
) -> StorageKey {
bp_runtime::storage_double_map_final_key::<Blake2_128Concat, Blake2_128Concat>(
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not very familiar with storage values, but considering these fixes, I was wondering if we could use a safer approach here. I'm thinking of the following issues:

  1. As seen here, we can use different hashers for the storage key vs the StorageDoubleMap that is defined in the pallet. This seems to be covered by unit tests however.
  2. We can use different types when we decode the value. For example here the correct ones would be ParaId, ParaHash, but we might by mistake use something else when decoding. Not sure if this is covered by unit tests.

So I was wondering if we could use a safer approach to cover these issues. Maybe define a new structure to hold all these hashers and types and use if both when defining the StorageDoubleMap and when computing the key and reading the value. A scaled-down version of StorageDoubleMap maybe.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yep, that's a good idea. I'm not sure, though, that you'll be able to do that - all storage declarations are handled by a procedural macro and it may happen that you can't specify e.g. MyMap::Hasher there. But you can try to experiment of course :)

bridge_parachains_pallet_name,
"ImportedParaHeads",
&para_id.encode(),
&head_hash.encode(),
)
}
32 changes: 32 additions & 0 deletions primitives/runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,38 @@ pub fn storage_map_final_key<H: StorageHasher>(
StorageKey(final_key)
}

/// Compute the final runtime storage key of a `StorageDoubleMap` entry, given the pallet
/// prefix, the map name and the two already-encoded map keys.
///
/// This mirrors `frame_support::storage::generator::StorageDoubleMap::storage_double_map_final_key`
/// for the selected hashers. We keep a standalone copy because calling the original method
/// requires access to the runtime and pallet instance, which is not always available.
pub fn storage_double_map_final_key<H1: StorageHasher, H2: StorageHasher>(
	pallet_prefix: &str,
	map_name: &str,
	key1: &[u8],
	key2: &[u8],
) -> StorageKey {
	// Final key layout: twox128(pallet) ++ twox128(map) ++ H1(key1) ++ H2(key2).
	let hashed_pallet_prefix = frame_support::Twox128::hash(pallet_prefix.as_bytes());
	let hashed_map_name = frame_support::Twox128::hash(map_name.as_bytes());
	let hashed_key1 = H1::hash(key1);
	let hashed_key2 = H2::hash(key2);

	// Reserve the exact capacity up front to avoid reallocations while appending.
	let final_key_len = hashed_pallet_prefix.len() +
		hashed_map_name.len() +
		hashed_key1.as_ref().len() +
		hashed_key2.as_ref().len();
	let mut final_key = Vec::with_capacity(final_key_len);
	final_key.extend_from_slice(&hashed_pallet_prefix[..]);
	final_key.extend_from_slice(&hashed_map_name[..]);
	final_key.extend_from_slice(hashed_key1.as_ref());
	final_key.extend_from_slice(hashed_key2.as_ref());

	StorageKey(final_key)
}

/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false;
/// }`) is computed.
///
Expand Down
22 changes: 19 additions & 3 deletions relays/lib-substrate-relay/src/parachains/source.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,10 @@ use async_trait::async_trait;
use bp_parachains::parachain_head_storage_key_at_source;
use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId};
use codec::Decode;
use parachains_relay::parachains_loop::{ParaHashAtSource, SourceClient};
use parachains_relay::{
parachains_loop::{ParaHashAtSource, SourceClient},
parachains_loop_metrics::ParachainsLoopMetrics,
};
use relay_substrate_client::{
Chain, Client, Error as SubstrateError, HeaderIdOf, HeaderOf, RelayChain,
};
Expand Down Expand Up @@ -100,6 +103,7 @@ where
async fn parachain_head(
&self,
at_block: HeaderIdOf<P::SourceRelayChain>,
metrics: Option<&ParachainsLoopMetrics>,
para_id: ParaId,
) -> Result<ParaHashAtSource, Self::Error> {
// we don't need to support many parachains now
Expand All @@ -111,9 +115,11 @@ where
)))
}

Ok(match self.on_chain_parachain_header(at_block, para_id).await? {
let mut para_header_number_at_source = None;
let para_hash_at_source = match self.on_chain_parachain_header(at_block, para_id).await? {
Some(parachain_header) => {
let mut parachain_head = ParaHashAtSource::Some(parachain_header.hash());
para_header_number_at_source = Some(*parachain_header.number());
Comment on lines +119 to +122
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: para_hash_at_source and parachain_head seem to be almost the same variable. I think we can delete one of them.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, but they have different scopes. Feel free to change the code if you think your version is better, though :)

// never return head that is larger than requested. This way we'll never sync
// headers past `maximal_header_id`
if let Some(ref maximal_header_id) = self.maximal_header_id {
Expand All @@ -125,19 +131,29 @@ where
// we don't want this header yet => let's report previously requested
// header
parachain_head = ParaHashAtSource::Some(maximal_header_id.1);
para_header_number_at_source = Some(maximal_header_id.0);
},
Some(_) => (),
None => {
// on-demand relay has not yet asked us to sync anything let's do that
parachain_head = ParaHashAtSource::Unavailable;
para_header_number_at_source = None;
},
}
}

parachain_head
},
None => ParaHashAtSource::None,
})
};

if let (Some(metrics), Some(para_header_number_at_source)) =
(metrics, para_header_number_at_source)
{
metrics.update_best_parachain_block_at_source(para_id, para_header_number_at_source);
}

Ok(para_hash_at_source)
}

async fn prove_parachain_heads(
Expand Down
51 changes: 44 additions & 7 deletions relays/lib-substrate-relay/src/parachains/target.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,19 @@ use crate::{
};

use async_trait::async_trait;
use bp_parachains::{parachain_head_storage_key_at_target, BestParaHeadHash};
use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId};
use bp_parachains::{
best_parachain_head_hash_storage_key_at_target, imported_parachain_head_storage_key_at_target,
BestParaHeadHash,
};
use bp_polkadot_core::parachains::{ParaHead, ParaHeadsProof, ParaId};
use codec::{Decode, Encode};
use parachains_relay::parachains_loop::TargetClient;
use parachains_relay::{
parachains_loop::TargetClient, parachains_loop_metrics::ParachainsLoopMetrics,
};
use relay_substrate_client::{
AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf,
HeaderIdOf, RelayChain, SignParam, TransactionEra, TransactionSignScheme, UnsignedTransaction,
HeaderIdOf, HeaderOf, RelayChain, SignParam, TransactionEra, TransactionSignScheme,
UnsignedTransaction,
};
use relay_utils::{relay_loop::Client as RelayClient, HeaderId};
use sp_core::{Bytes, Pair};
Expand Down Expand Up @@ -115,15 +121,46 @@ where
async fn parachain_head(
&self,
at_block: HeaderIdOf<P::TargetChain>,
metrics: Option<&ParachainsLoopMetrics>,
para_id: ParaId,
) -> Result<Option<BestParaHeadHash>, Self::Error> {
let storage_key = parachain_head_storage_key_at_target(
let best_para_head_hash_key = best_parachain_head_hash_storage_key_at_target(
P::SourceRelayChain::PARACHAINS_FINALITY_PALLET_NAME,
para_id,
);
let para_head = self.client.storage_value(storage_key, Some(at_block.1)).await?;
let best_para_head_hash: Option<BestParaHeadHash> =
self.client.storage_value(best_para_head_hash_key, Some(at_block.1)).await?;
if let (Some(metrics), &Some(ref best_para_head_hash)) = (metrics, &best_para_head_hash) {
let imported_para_head_key = imported_parachain_head_storage_key_at_target(
P::SourceRelayChain::PARACHAINS_FINALITY_PALLET_NAME,
para_id,
best_para_head_hash.head_hash,
);
let imported_para_header = self
.client
.storage_value::<ParaHead>(imported_para_head_key, Some(at_block.1))
.await?
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, one more thing. await? returns the error directly, if any, while if HeaderOf::<P::SourceParachain>::decode() returns an error, we also log a message. Is this desired? Or should we log a message in both error cases ?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you're going to make a new method for double-encoded storage values, then let's log it here, where we know what the storage value actually means and where we may log a meaningful error.

Ideally (imo) we'll need to migrate to some crate that would allow us to propagate error with context and log it once at the upper level (inside the relay loop). But that's not a part of your follow-up PR of course :)

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, I will log the error here, but I was wondering if we should log an error both when storage_value() returns an Err and when decode returns an Err. Or should we keep the await? and log an error only when decode() returns an Err ?

The idea with having a new method for double-encoded storage value is not going very well so far. I think I might have to give it up.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, I will log the error here, but I was wondering if we should log an error both when storage_value() returns an Err and when decode returns an Err. Or should we keep the await? and log an error only when decode() returns an Err ?

Imo more detailed logging = better :) The fact that logging occurs at the one branch only is probably the outcome of some bug that I've met. In the end the error is logged at the top level anyway, but this additional trace adds some context. So I'd add the second logging call here.

.and_then(|h| match HeaderOf::<P::SourceParachain>::decode(&mut &h.0[..]) {
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: Would it make sense to add a helper function similar to storage_value for double-encoded storage values instead of doing the decoding here ?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't mind - feel free to add :)

Ok(header) => Some(header),
Err(e) => {
log::error!(
target: "bridge-metrics",
"Failed to decode {} parachain header at {}: {:?}. Metric will have obsolete value",
P::SourceParachain::NAME,
P::TargetChain::NAME,
e,
);

None
},
});
if let Some(imported_para_header) = imported_para_header {
metrics
.update_best_parachain_block_at_target(para_id, *imported_para_header.number());
}
}

Ok(para_head)
Ok(best_para_head_hash)
}

async fn submit_parachain_heads_proof(
Expand Down
29 changes: 23 additions & 6 deletions relays/parachains/src/parachains_loop.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,9 +76,13 @@ pub trait SourceClient<P: ParachainsPipeline>: RelayClient {
async fn ensure_synced(&self) -> Result<bool, Self::Error>;

/// Get parachain head hash at given block.
///
/// The implementation may call `ParachainsLoopMetrics::update_best_parachain_block_at_source`
/// on provided `metrics` object to update corresponding metric value.
async fn parachain_head(
&self,
at_block: HeaderIdOf<P::SourceChain>,
metrics: Option<&ParachainsLoopMetrics>,
para_id: ParaId,
) -> Result<ParaHashAtSource, Self::Error>;

Expand All @@ -103,9 +107,13 @@ pub trait TargetClient<P: ParachainsPipeline>: RelayClient {
) -> Result<HeaderIdOf<P::SourceChain>, Self::Error>;

/// Get parachain head hash at given block.
///
/// The implementation may call `ParachainsLoopMetrics::update_best_parachain_block_at_target`
/// on provided `metrics` object to update corresponding metric value.
async fn parachain_head(
&self,
at_block: HeaderIdOf<P::TargetChain>,
metrics: Option<&ParachainsLoopMetrics>,
para_id: ParaId,
) -> Result<Option<BestParaHeadHash>, Self::Error>;

Expand Down Expand Up @@ -158,7 +166,7 @@ async fn run_until_connection_lost<P: ParachainsPipeline>(
source_client: impl SourceClient<P>,
target_client: impl TargetClient<P>,
sync_params: ParachainSyncParams,
_metrics: Option<ParachainsLoopMetrics>,
metrics: Option<ParachainsLoopMetrics>,
exit_signal: impl Future<Output = ()> + Send,
) -> Result<(), FailedClient>
where
Expand Down Expand Up @@ -213,9 +221,13 @@ where
log::warn!(target: "bridge", "Failed to read best {} block: {:?}", P::SourceChain::NAME, e);
FailedClient::Target
})?;
let heads_at_target =
read_heads_at_target(&target_client, &best_target_block, &sync_params.parachains)
.await?;
let heads_at_target = read_heads_at_target(
&target_client,
metrics.as_ref(),
&best_target_block,
&sync_params.parachains,
)
.await?;
tx_tracker = tx_tracker.take().and_then(|tx_tracker| tx_tracker.update(&heads_at_target));
if tx_tracker.is_some() {
continue
Expand All @@ -238,6 +250,7 @@ where
})?;
let heads_at_source = read_heads_at_source(
&source_client,
metrics.as_ref(),
&best_finalized_relay_block,
&sync_params.parachains,
)
Expand Down Expand Up @@ -398,12 +411,13 @@ fn is_update_required(sync_params: &ParachainSyncParams, updated_ids: &[ParaId])
/// Guarantees that the returning map will have an entry for every parachain from `parachains`.
async fn read_heads_at_source<P: ParachainsPipeline>(
source_client: &impl SourceClient<P>,
metrics: Option<&ParachainsLoopMetrics>,
at_relay_block: &HeaderIdOf<P::SourceChain>,
parachains: &[ParaId],
) -> Result<BTreeMap<ParaId, ParaHashAtSource>, FailedClient> {
let mut para_head_hashes = BTreeMap::new();
for para in parachains {
let para_head = source_client.parachain_head(*at_relay_block, *para).await;
let para_head = source_client.parachain_head(*at_relay_block, metrics, *para).await;
match para_head {
Ok(para_head) => {
para_head_hashes.insert(*para, para_head);
Expand All @@ -428,12 +442,13 @@ async fn read_heads_at_source<P: ParachainsPipeline>(
/// Guarantees that the returning map will have an entry for every parachain from `parachains`.
async fn read_heads_at_target<P: ParachainsPipeline>(
target_client: &impl TargetClient<P>,
metrics: Option<&ParachainsLoopMetrics>,
at_block: &HeaderIdOf<P::TargetChain>,
parachains: &[ParaId],
) -> Result<BTreeMap<ParaId, Option<BestParaHeadHash>>, FailedClient> {
let mut para_best_head_hashes = BTreeMap::new();
for para in parachains {
let para_best_head = target_client.parachain_head(*at_block, *para).await;
let para_best_head = target_client.parachain_head(*at_block, metrics, *para).await;
match para_best_head {
Ok(para_best_head) => {
para_best_head_hashes.insert(*para, para_best_head);
Expand Down Expand Up @@ -638,6 +653,7 @@ mod tests {
async fn parachain_head(
&self,
_at_block: HeaderIdOf<TestChain>,
_metrics: Option<&ParachainsLoopMetrics>,
para_id: ParaId,
) -> Result<ParaHashAtSource, TestError> {
match self.data.lock().await.source_heads.get(&para_id.0).cloned() {
Expand Down Expand Up @@ -684,6 +700,7 @@ mod tests {
async fn parachain_head(
&self,
_at_block: HeaderIdOf<TestChain>,
_metrics: Option<&ParachainsLoopMetrics>,
para_id: ParaId,
) -> Result<Option<BestParaHeadHash>, TestError> {
self.data.lock().await.target_heads.get(&para_id.0).cloned().transpose()
Expand Down
Loading