This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Closed
Changes from all commits (33 commits)
d3e8c97  Initial draft (expenses, Sep 23, 2020)
1eb1297  Add an iterator that helps us get most items (expenses, Sep 28, 2020)
a118cea  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Sep 28, 2020)
ccfb11d  Revert changes to grandpa (expenses, Sep 28, 2020)
b3e8f09  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Sep 29, 2020)
35a580a  Change fields to just be the grandpa authority set and babe epoch cha… (expenses, Sep 29, 2020)
a8f0966  Only use the fields we need from the shared authority set (expenses, Sep 30, 2020)
833c007  Made a start at getting the sync state to load in substrate (expenses, Oct 1, 2020)
bad0a28  Add info to panic (expenses, Oct 1, 2020)
96c1748  babe: example code to filter epoch changes tree (andresilva, Oct 1, 2020)
3e7391b  babe: don't try to prune epoch changes tree to avoid unknown headers (andresilva, Oct 1, 2020)
14cc62a  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Oct 2, 2020)
b2b0860  Filter before saving (expenses, Oct 5, 2020)
447b676  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Oct 5, 2020)
fdd282f  Switch to RPC call (expenses, Oct 6, 2020)
32c12ed  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Oct 6, 2020)
6a5db06  Merge branch 'ashley-improve-sync-state' into ashley-improve-sync-sta… (expenses, Oct 6, 2020)
186e248  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Oct 7, 2020)
a7f607e  Reset flaming fir network (ddorgan, Oct 7, 2020)
40f19e2  Merge remote-tracking branch 'origin/dd-flamingfir-9' into ashley-imp… (expenses, Oct 7, 2020)
7f28aa9  Revert "Only use the fields we need from the shared authority set" (expenses, Oct 9, 2020)
5dd7405  Merge branch 'ashley-improve-sync-state' into HEAD (expenses, Oct 9, 2020)
6e7855b  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Oct 9, 2020)
81ac0ab  Add babe_finalized_block_weight from `ashley-improve-sync-state-WIP-l… (expenses, Oct 9, 2020)
aac528b  Merge branch 'ashley-improve-sync-state' into ashley-improve-sync-sta… (expenses, Oct 9, 2020)
3dd9357  Add sync state to flaming-fir.json (expenses, Oct 9, 2020)
136231b  Fix rpc test (expenses, Oct 9, 2020)
d94adf1  Move sync state rpc stuff into sc-sync-state-rpc (expenses, Oct 12, 2020)
d56cafc  Merge remote-tracking branch 'origin/master' into ashley-improve-sync… (expenses, Oct 12, 2020)
9a842ea  Merge branch 'ashley-improve-sync-state' into ashley-improve-sync-sta… (expenses, Oct 12, 2020)
a9b22d4  Update sync state (expenses, Oct 12, 2020)
76afda0  Comment out CHT creation (expenses, Oct 14, 2020)
f14c1d9  Merge remote-tracking branch 'origin/master' into HEAD (expenses, Oct 15, 2020)
1 change: 1 addition & 0 deletions Cargo.lock

Generated file; diff not rendered by default.

3 changes: 3 additions & 0 deletions client/api/src/backend.rs
@@ -200,6 +200,9 @@ pub trait BlockImportOperation<Block: BlockT> {
/// Mark a block as new head. If both block import and set head are specified, set head
/// overrides block import's best block rule.
fn mark_head(&mut self, id: BlockId<Block>) -> sp_blockchain::Result<()>;

/// Can the block still be imported if the parent block is not found.
fn allow_missing_parent(&mut self, allow: bool);
}

/// Interface for performing operations on the backend.
6 changes: 2 additions & 4 deletions client/api/src/cht.rs
@@ -267,9 +267,7 @@ fn build_pairs<Header, I>(
let mut pairs = Vec::new();
let mut hash_index = Header::Number::zero();
for hash in hashes.into_iter() {
let hash = hash?.ok_or_else(|| ClientError::from(
ClientError::MissingHashRequiredForCHT
))?;
let hash = hash?.unwrap_or_else(|| panic!("MissingHashRequiredForCHT"));
pairs.push((
encode_cht_key(start_num + hash_index).to_vec(),
encode_cht_value(hash)
@@ -283,7 +281,7 @@
if hash_index == cht_size {
Ok(pairs)
} else {
Err(ClientError::MissingHashRequiredForCHT)
panic!("MissingHashRequiredForCHT")
}
}

23 changes: 17 additions & 6 deletions client/api/src/in_mem.rs
@@ -167,10 +167,11 @@ impl<Block: BlockT> Blockchain<Block> {
justification: Option<Justification>,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
new_state: NewBlockState,
allow_missing_parent: bool,
) -> sp_blockchain::Result<()> {
let number = header.number().clone();
if new_state.is_best() {
self.apply_head(&header)?;
self.apply_head(&header, allow_missing_parent)?;
}

{
@@ -231,18 +232,18 @@
None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))),
};

self.apply_head(&header)
self.apply_head(&header, false)
}

fn apply_head(&self, header: &<Block as BlockT>::Header) -> sp_blockchain::Result<()> {
fn apply_head(&self, header: &<Block as BlockT>::Header, allow_missing_parent: bool) -> sp_blockchain::Result<()> {
let hash = header.hash();
let number = header.number();

// Note: this may lock storage, so it must happen before obtaining storage
// write lock.
let best_tree_route = {
let best_hash = self.storage.read().best_hash;
if &best_hash == header.parent_hash() {
if &best_hash == header.parent_hash() || allow_missing_parent {
Contributor comment: I wonder if modifying tree_route to not fail on a missing parent may be better, or if it could break some assumption and make existing code insecure. (A simplified sketch of what the allow_missing_parent flag changes follows this file's diff.)

None
} else {
let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?;
@@ -427,9 +428,10 @@ impl<Block: BlockT> light::Storage<Block> for Blockchain<Block>
_cache: HashMap<CacheKeyId, Vec<u8>>,
state: NewBlockState,
aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
allow_missing_parent: bool,
) -> sp_blockchain::Result<()> {
let hash = header.hash();
self.insert(hash, header, None, None, state)?;
self.insert(hash, header, None, None, state, allow_missing_parent)?;

self.write_aux(aux_ops);
Ok(())
@@ -487,6 +489,7 @@ pub struct BlockImportOperation<Block: BlockT> {
aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
finalized_blocks: Vec<(BlockId<Block>, Option<Justification>)>,
set_head: Option<BlockId<Block>>,
allow_missing_parent: bool,
}

impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperation<Block> where
@@ -581,6 +584,10 @@ impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperatio
self.set_head = Some(block);
Ok(())
}

fn allow_missing_parent(&mut self, allow: bool) {
self.allow_missing_parent = allow;
}
}

/// In-memory backend. Keeps all states and blocks in memory.
@@ -636,6 +643,7 @@ impl<Block: BlockT> backend::Backend<Block> for Backend<Block> where Block::Hash
aux: Default::default(),
finalized_blocks: Default::default(),
set_head: None,
allow_missing_parent: false,
})
}

@@ -671,7 +679,10 @@

self.states.write().insert(hash, new_state);

self.blockchain.insert(hash, header, justification, body, pending_block.state)?;
self.blockchain.insert(
hash, header, justification, body, pending_block.state,
operation.allow_missing_parent,
)?;
}

if !operation.aux.is_empty() {
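The review comment on apply_head above asks whether relaxing tree_route itself would be the better fix. The following is a rough, stand-alone illustration of what the new allow_missing_parent flag actually changes; it is not Substrate code, and Chain, the integer "hashes", and the String error type are invented stand-ins.

#[derive(Default)]
struct Chain {
    best_hash: Option<u64>,
    known: std::collections::HashSet<u64>,
}

impl Chain {
    // Mirrors the shape of `apply_head` above: normally an unknown parent makes
    // the tree-route step fail; with `allow_missing_parent` the step is skipped.
    fn apply_head(&mut self, parent_hash: u64, hash: u64, allow_missing_parent: bool) -> Result<(), String> {
        if let Some(best) = self.best_hash {
            if best != parent_hash && !allow_missing_parent {
                // Stand-in for `sp_blockchain::tree_route`, which errors when the
                // parent header was never imported.
                if !self.known.contains(&parent_hash) {
                    return Err(format!("unknown parent {}", parent_hash));
                }
            }
        }
        self.known.insert(hash);
        self.best_hash = Some(hash);
        Ok(())
    }
}

fn main() {
    let mut chain = Chain::default();
    chain.apply_head(0, 1, false).unwrap();
    // Block 42's parent (41) was never imported: rejected normally, accepted with the flag.
    assert!(chain.apply_head(41, 42, false).is_err());
    assert!(chain.apply_head(41, 42, true).is_ok());
}

Whether skipping the check here or teaching tree_route to tolerate missing parents is safer is exactly the open question raised in the comment above.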
1 change: 1 addition & 0 deletions client/api/src/light.rs
@@ -245,6 +245,7 @@ pub trait Storage<Block: BlockT>: AuxStore + HeaderBackend<Block>
cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
state: NewBlockState,
aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
allow_missing_parent: bool,
) -> ClientResult<()>;

/// Set an existing block as new best block.
8 changes: 8 additions & 0 deletions client/chain-spec/src/chain_spec.rs
@@ -266,6 +266,10 @@ impl<G, E> ChainSpec<G, E> {
fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) {
self.client_spec.light_sync_state = Some(light_sync_state);
}

fn get_light_sync_state(&self) -> Option<&SerializableLightSyncState> {
self.client_spec.light_sync_state.as_ref()
}
}

impl<G, E: serde::de::DeserializeOwned> ChainSpec<G, E> {
@@ -395,6 +399,10 @@ where
fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) {
ChainSpec::set_light_sync_state(self, light_sync_state)
}

fn get_light_sync_state(&self) -> Option<&SerializableLightSyncState> {
ChainSpec::get_light_sync_state(self)
}
}

/// Hardcoded infomation that allows light clients to sync quickly.
2 changes: 2 additions & 0 deletions client/chain-spec/src/lib.rs
@@ -159,6 +159,8 @@ pub trait ChainSpec: BuildStorage + Send + Sync {
fn set_storage(&mut self, storage: Storage);
/// Hardcode infomation to allow light clients to sync quickly into the chain spec.
fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState);
/// Get the light sync state, if available.
fn get_light_sync_state(&self) -> Option<&SerializableLightSyncState>;
}

impl std::fmt::Debug for dyn ChainSpec {
2 changes: 1 addition & 1 deletion client/consensus/babe/src/aux_schema.rs
@@ -110,7 +110,7 @@ pub(crate) fn write_epoch_changes<Block: BlockT, F, R>(
}

/// Write the cumulative chain-weight of a block ot aux storage.
pub(crate) fn write_block_weight<H: Encode, F, R>(
pub fn write_block_weight<H: Encode, F, R>(
Contributor comment: Why not have a specific 'load_snapshot_state' method that is also public, but which makes it clear that it should not be used lightly? It could be part of a trait. (A hypothetical sketch of this suggestion follows this file's diff.)

block_hash: H,
block_weight: BabeBlockWeight,
write_aux: F,
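Regarding the review comment above on making write_block_weight public: the following is a purely hypothetical sketch of the suggested alternative, a single clearly named entry point, possibly on a trait. The trait, its name, and DummyBackend are invented here and are not part of Substrate or of this PR.

trait LoadSnapshotState {
    type Error;

    // Restore consensus aux data (e.g. block weights, epoch changes, authority
    // set) shipped in a sync-state snapshot. Deliberately named so it is not
    // mistaken for a routine block-import helper.
    fn load_snapshot_state(&self, snapshot: &[u8]) -> Result<(), Self::Error>;
}

struct DummyBackend;

impl LoadSnapshotState for DummyBackend {
    type Error = String;

    fn load_snapshot_state(&self, snapshot: &[u8]) -> Result<(), Self::Error> {
        if snapshot.is_empty() {
            return Err("empty snapshot".to_string());
        }
        // A real implementation would decode the snapshot and perform the
        // low-level aux writes (such as the block-weight write above) itself,
        // keeping those writers crate-private.
        Ok(())
    }
}

fn main() {
    DummyBackend.load_snapshot_state(&[1, 2, 3]).unwrap();
}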
4 changes: 4 additions & 0 deletions client/consensus/babe/src/lib.rs
@@ -1275,10 +1275,12 @@ impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client,
// used by pruning may not know about the block that is being
// imported.
let prune_and_import = || {
/*
Contributor comment: I wonder if the light pruning method from https://github.com/paritytech/substrate/pull/6851/files would work here; really just a wild guess.

prune_finalized(
self.client.clone(),
&mut epoch_changes,
)?;
*/

epoch_changes.import(
descendent_query(&*self.client),
@@ -1415,10 +1417,12 @@ pub fn block_import<Client, Block: BlockT, I>(
// NOTE: this isn't entirely necessary, but since we didn't use to prune the
// epoch tree it is useful as a migration, so that nodes prune long trees on
// startup rather than waiting until importing the next epoch change block.
/*
prune_finalized(
client.clone(),
&mut epoch_changes.lock(),
)?;
*/

let import = BabeBlockImport::new(
client,
14 changes: 12 additions & 2 deletions client/consensus/epochs/src/lib.rs
@@ -76,7 +76,7 @@ pub trait Epoch {
/// Descriptor for the next epoch.
type NextEpochDescriptor;
/// Type of the slot number.
type SlotNumber: Ord + Copy;
type SlotNumber: Ord + Copy + std::fmt::Debug;

/// The starting slot of the epoch.
fn start_slot(&self) -> Self::SlotNumber;
@@ -249,7 +249,7 @@ impl<'a, E: Epoch> From<&'a PersistedEpoch<E>> for PersistedEpochHeader<E> {
}

/// Persisted epoch header stored in ForkTree.
#[derive(Encode, Decode, PartialEq, Eq)]
#[derive(Debug, Encode, Decode, PartialEq, Eq)]
pub enum PersistedEpochHeader<E: Epoch> {
/// Genesis persisted epoch header. epoch_0, epoch_1.
Genesis(EpochHeader<E>, EpochHeader<E>),
@@ -329,6 +329,16 @@ impl<Hash, Number, E: Epoch> EpochChanges<Hash, Number, E> where
Self::default()
}

pub fn filter(&mut self, number: Number) {
let epochs = std::mem::replace(&mut self.epochs, BTreeMap::new());

for (k, v) in epochs.into_iter().filter(|((_, n), _)| *n <= number) {
self.epochs.insert(k, v);
}

self.inner = self.inner.filter(number);
}

/// Rebalances the tree of epoch changes so that it is sorted by length of
/// fork (longest fork first).
pub fn rebalance(&mut self) {
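As a stand-alone illustration of what the new EpochChanges::filter above does with its epochs map (keep only entries whose block number is at or below the cutoff): the fork-tree half, self.inner.filter(number), is not modelled here, and the (u64, u32) key and String value are simplified stand-ins rather than the real types.

use std::collections::BTreeMap;

fn filter_epochs(epochs: BTreeMap<(u64, u32), String>, cutoff: u32) -> BTreeMap<(u64, u32), String> {
    // Same idea as the mem::replace + re-insert loop above: rebuild the map
    // from the entries that survive the number check.
    epochs.into_iter().filter(|((_, n), _)| *n <= cutoff).collect()
}

fn main() {
    let mut epochs = BTreeMap::new();
    epochs.insert((0xaa, 10), "epoch A".to_string());
    epochs.insert((0xbb, 20), "epoch B".to_string());
    epochs.insert((0xcc, 30), "epoch C".to_string());

    let filtered = filter_epochs(epochs, 20);
    assert_eq!(filtered.len(), 2); // the entry at number 30 is dropped
}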
2 changes: 2 additions & 0 deletions client/db/src/cache/list_cache.rs
@@ -294,6 +294,7 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
) -> ClientResult<Option<CommitOperation<Block, T>>> {
// this guarantee is currently provided by LightStorage && we're relying on it here
let prev_operation = operations.operations.last();
/*
debug_assert!(
entry_type != EntryType::Final ||
self.best_finalized_block.hash == parent.hash ||
@@ -303,6 +304,7 @@
_ => false,
}
);
*/

// we do not store any values behind finalized
if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
2 changes: 2 additions & 0 deletions client/db/src/lib.rs
@@ -734,6 +734,8 @@ impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block> for Bloc
self.set_head = Some(block);
Ok(())
}

fn allow_missing_parent(&mut self, _allow: bool) {}
}

struct StorageDb<Block: BlockT> {
22 changes: 16 additions & 6 deletions client/db/src/light.rs
@@ -200,7 +200,7 @@ impl<Block: BlockT> HeaderMetadata<Block> for LightStorage<Block> {
header_metadata.clone(),
);
header_metadata
}).ok_or_else(|| ClientError::UnknownBlock(format!("header not found in db: {}", hash)))
}).ok_or_else(|| panic!("header not found in db: {:?}. info: {:?}", hash, self.info()))
}, Ok)
}

@@ -234,12 +234,13 @@ impl<Block: BlockT> LightStorage<Block> {
transaction: &mut Transaction<DbHash>,
route_to: Block::Hash,
best_to: (NumberFor<Block>, Block::Hash),
allow_missing_parent: bool,
) -> ClientResult<()> {
let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?;

// handle reorg.
let meta = self.meta.read();
if meta.best_hash != Default::default() {
if meta.best_hash != Default::default() && !allow_missing_parent {
let tree_route = sp_blockchain::tree_route(self, meta.best_hash, route_to)?;

// update block number to hash lookup entries.
@@ -284,9 +285,10 @@ impl<Block: BlockT> LightStorage<Block> {
transaction: &mut Transaction<DbHash>,
header: &Block::Header,
hash: Block::Hash,
allow_missing_parent: bool,
) -> ClientResult<()> {
let meta = self.meta.read();
if &meta.finalized_hash != header.parent_hash() {
if &meta.finalized_hash != header.parent_hash() && !allow_missing_parent {
return Err(::sp_blockchain::Error::NonSequentialFinalization(
format!("Last finalized {:?} not parent of {:?}",
meta.finalized_hash, hash),
@@ -296,6 +298,7 @@
let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?;
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);

/*
// build new CHT(s) if required
if let Some(new_cht_number) = cht::is_build_required(cht::size(), *header.number()) {
let new_cht_start: NumberFor<Block> = cht::start_number(cht::size(), new_cht_number);
@@ -356,6 +359,7 @@ impl<Block: BlockT> LightStorage<Block> {
prune_block += One::one();
}
}
*/

Ok(())
}
@@ -421,6 +425,7 @@ impl<Block> Storage<Block> for LightStorage<Block>
mut cache_at: HashMap<well_known_cache_keys::Id, Vec<u8>>,
leaf_state: NewBlockState,
aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
allow_missing_parent: bool,
) -> ClientResult<()> {
let mut transaction = Transaction::new();

@@ -439,7 +444,9 @@
let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash)?;

if leaf_state.is_best() {
self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
self.set_head_with_transaction(
&mut transaction, parent_hash, (number, hash), allow_missing_parent,
)?;
}

utils::insert_hash_to_key_mapping(
@@ -473,6 +480,7 @@ impl<Block> Storage<Block> for LightStorage<Block>
&mut transaction,
&header,
hash,
allow_missing_parent,
)?;
}

@@ -513,7 +521,9 @@ impl<Block> Storage<Block> for LightStorage<Block>
let number = header.number();

let mut transaction = Transaction::new();
self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?;
self.set_head_with_transaction(
&mut transaction, hash.clone(), (number.clone(), hash.clone()), false,
)?;
self.db.commit(transaction)?;
self.update_meta(hash, header.number().clone(), true, false);

@@ -528,7 +538,7 @@ impl<Block> Storage<Block> for LightStorage<Block>
let mut transaction = Transaction::new();
let hash = header.hash();
let number = *header.number();
self.note_finalized(&mut transaction, &header, hash.clone())?;
self.note_finalized(&mut transaction, &header, hash.clone(), false)?;
{
let mut cache = self.cache.0.write();
let cache_ops = cache.transaction(&mut transaction)
4 changes: 2 additions & 2 deletions client/finality-grandpa/src/authorities.rs
@@ -108,9 +108,9 @@ pub(crate) struct Status<H, N> {
#[derive(Debug, Clone, Encode, Decode, PartialEq)]
pub struct AuthoritySet<H, N> {
/// The current active authorities.
pub(crate) current_authorities: AuthorityList,
pub current_authorities: AuthorityList,
/// The current set id.
pub(crate) set_id: u64,
pub set_id: u64,
/// Tree of pending standard changes across forks. Standard changes are
/// enacted on finality and must be enacted (i.e. finalized) in-order across
/// a given branch
2 changes: 1 addition & 1 deletion client/finality-grandpa/src/lib.rs
@@ -129,7 +129,7 @@ pub use finality_proof::{FinalityProofFragment, FinalityProofProvider, StorageAn
pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream};
pub use import::GrandpaBlockImport;
pub use justification::GrandpaJustification;
pub use light_import::{light_block_import, GrandpaLightBlockImport};
pub use light_import::{light_block_import, GrandpaLightBlockImport, LightAuthoritySet};
pub use voting_rule::{
BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder
};
6 changes: 3 additions & 3 deletions client/finality-grandpa/src/light_import.rs
@@ -103,9 +103,9 @@ struct LightImportData<Block: BlockT> {

/// Latest authority set tracker.
#[derive(Debug, Encode, Decode)]
struct LightAuthoritySet {
set_id: u64,
authorities: AuthorityList,
pub struct LightAuthoritySet {
pub set_id: u64,
pub authorities: AuthorityList,
}

impl<BE, Block: BlockT, Client> GrandpaLightBlockImport<BE, Block, Client> {