Merged
25 changes: 22 additions & 3 deletions client/finality-grandpa/src/authorities.rs
@@ -423,9 +423,28 @@ where
fork_tree::FinalizationResult::Changed(change) => {
status.changed = true;

// if we are able to finalize any standard change then we can
// discard all pending forced changes (on different forks)
self.pending_forced_changes.clear();
// NOTE: the grandpa runtime module guarantees that there are no
// overlapping pending changes emitted, either forced or standard,
// and taking into account the enactment delay. therefore, an
// invariant of this module is that there is always at most one
// forced change per fork (the first must have been enacted before
// the next one is emitted).
let mut pending_forced_change = None;
for change in &self.pending_forced_changes {
// we will keep a forced change for this fork, which must be for
// a later block otherwise it would have been enacted already,
// and must be a descendent of the finalized block.
if change.effective_number() > finalized_number &&
is_descendent_of(&finalized_hash, &change.canon_hash)
.map_err(fork_tree::Error::Client)?
{
pending_forced_change = Some(change.clone());
break;
}
}

// collect the single forced change into a vec (if any)
self.pending_forced_changes = pending_forced_change.into_iter().collect();

if let Some(change) = change {
afg_log!(initial_sync,
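The rule implemented above can be stated as: once a standard change is finalized, keep at most the one pending forced change that becomes effective after the finalized block and lies on the finalized fork, and drop the rest. Below is a self-contained sketch of that rule using simplified stand-in types, not the crate's real PendingChange; note the diff uses an explicit loop rather than Iterator::find so it can propagate the client error from is_descendent_of with `?`.

/// Simplified stand-in for a pending forced change (sketch only).
#[derive(Clone, Debug, PartialEq)]
struct ForcedChange {
    canon_hash: &'static str,
    effective_number: u64,
}

/// Keep only the first forced change that becomes effective after the
/// finalized block and descends from it; discard all others.
fn surviving_forced_changes(
    pending: Vec<ForcedChange>,
    finalized_hash: &str,
    finalized_number: u64,
    is_descendent_of: impl Fn(&str, &str) -> bool,
) -> Vec<ForcedChange> {
    pending
        .into_iter()
        .find(|change| {
            change.effective_number > finalized_number
                && is_descendent_of(finalized_hash, change.canon_hash)
        })
        .into_iter()
        .collect()
}

fn main() {
    let pending = vec![
        ForcedChange { canon_hash: "B", effective_number: 5 },  // on another fork
        ForcedChange { canon_hash: "C", effective_number: 12 }, // same fork, later
    ];
    // pretend only "C" descends from the finalized block "A" at number 10
    let kept = surviving_forced_changes(pending, "A", 10, |_base: &str, hash: &str| hash == "C");
    assert_eq!(kept, vec![ForcedChange { canon_hash: "C", effective_number: 12 }]);
}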
13 changes: 12 additions & 1 deletion frame/grandpa/src/benchmarking.rs
@@ -19,8 +19,9 @@

#![cfg_attr(not(feature = "std"), no_std)]

use super::*;
use super::{*, Module as Grandpa};
use frame_benchmarking::benchmarks;
use frame_system::RawOrigin;
use sp_core::H256;

benchmarks! {
@@ -62,6 +63,15 @@ benchmarks! {
} verify {
assert!(sp_finality_grandpa::check_equivocation_proof(equivocation_proof2));
}

note_stalled {
let delay = 1000.into();
let best_finalized_block_number = 1.into();

}: _(RawOrigin::Root, delay, best_finalized_block_number)
verify {
assert!(Grandpa::<T>::stalled().is_some());
}
}

#[cfg(test)]
@@ -74,6 +84,7 @@ mod tests {
fn test_benchmarks() {
new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
assert_ok!(test_benchmark_check_equivocation_proof::<Test>());
assert_ok!(test_benchmark_note_stalled::<Test>());
})
}

92 changes: 46 additions & 46 deletions frame/grandpa/src/lib.rs
@@ -43,9 +43,10 @@ use frame_support::{
decl_error, decl_event, decl_module, decl_storage, storage, traits::KeyOwnerProofSystem,
Parameter,
};
use frame_system::{ensure_none, ensure_signed, DigestOf};
use frame_system::{ensure_none, ensure_root, ensure_signed};
use pallet_finality_tracker::OnFinalizationStalled;
use sp_runtime::{
generic::{DigestItem, OpaqueDigestItemId},
generic::DigestItem,
traits::Zero,
DispatchResult, KeyTypeId,
};
@@ -205,7 +206,7 @@ decl_storage! {
State get(fn state): StoredState<T::BlockNumber> = StoredState::Live;

/// Pending change: (signaled at, scheduled change).
PendingChange: Option<StoredPendingChange<T::BlockNumber>>;
PendingChange get(fn pending_change): Option<StoredPendingChange<T::BlockNumber>>;
Contributor (author): I think it's useful to expose this so that we can easily check if there's any pending forced change (standard changes are applied immediately).
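For illustration, a minimal sketch of the kind of check this getter enables, e.g. from a test or a custom runtime API. The helper name is hypothetical and it assumes StoredPendingChange keeps its `forced` marker as a public field, as in this pallet; it is not part of the diff.

/// Hypothetical helper (sketch): returns true if the change currently pending
/// in the pallet was forced, i.e. scheduled via the stall handling rather than
/// a regular session change.
fn has_pending_forced_change<T: Trait>() -> bool {
    Module::<T>::pending_change()
        .map(|change| change.forced.is_some())
        .unwrap_or(false)
}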


/// next block number where we can force a change.
NextForced get(fn next_forced): Option<T::BlockNumber>;
@@ -280,6 +281,24 @@ decl_module! {
)?;
}

/// Note that the current authority set of the GRANDPA finality gadget has
/// stalled. This will trigger a forced authority set change at the beginning
/// of the next session, to be enacted `delay` blocks after that. The delay
/// should be high enough to safely assume that the block signalling the
/// forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters
/// will start the new authority set using the given finalized block as base.
/// Only callable by root.
#[weight = weight_for::note_stalled::<T>()]
fn note_stalled(
origin,
delay: T::BlockNumber,
best_finalized_block_number: T::BlockNumber,
) {
ensure_root(origin)?;

Self::on_stalled(delay, best_finalized_block_number)
}

fn on_finalize(block_number: T::BlockNumber) {
// check for scheduled pending authority set changes
if let Some(pending_change) = <PendingChange<T>>::get() {
@@ -295,7 +314,7 @@ decl_module! {
))
} else {
Self::deposit_log(ConsensusLog::ScheduledChange(
ScheduledChange{
ScheduledChange {
delay: pending_change.delay,
next_authorities: pending_change.next_authorities.clone(),
}
@@ -377,6 +396,11 @@ mod weight_for {
// fetching set id -> session index mappings
.saturating_add(T::DbWeight::get().reads(2))
}

pub fn note_stalled<T: super::Trait>() -> Weight {
Contributor: This is called note_stalled, but it just seems to compute a weight and not do anything else.

Contributor: (oh, I see this is in the weight_for module)

Contributor (author): It's simple enough that I could have declared it in the weight attribute itself, but since we already have the weight_for module I decided to put it there 🤷.

(3 * WEIGHT_PER_MICROS)
.saturating_add(T::DbWeight::get().writes(1))
}
}
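For reference, the inline form mentioned in the thread above might look roughly like this inside decl_module! (a sketch only; it assumes WEIGHT_PER_MICROS and the pallet's DbWeight are in scope, as they are for the existing weight functions):

// Sketch of the inline alternative: attach the weight expression directly to
// the dispatchable instead of going through the `weight_for` module.
#[weight = (3 * WEIGHT_PER_MICROS).saturating_add(T::DbWeight::get().writes(1))]
fn note_stalled(
    origin,
    delay: T::BlockNumber,
    best_finalized_block_number: T::BlockNumber,
) {
    ensure_root(origin)?;
    Self::on_stalled(delay, best_finalized_block_number)
}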

impl<T: Trait> Module<T> {
@@ -580,6 +604,42 @@ impl<T: Trait> Module<T> {
}
}

impl<T: Trait> Module<T> {
Contributor (author): This was unused and the same functionality is implemented on the client where it is needed.

/// Attempt to extract a GRANDPA log from a generic digest.
pub fn grandpa_log(digest: &DigestOf<T>) -> Option<ConsensusLog<T::BlockNumber>> {
let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
digest.convert_first(|l| l.try_to::<ConsensusLog<T::BlockNumber>>(id))
}

/// Attempt to extract a pending set-change signal from a digest.
pub fn pending_change(digest: &DigestOf<T>)
-> Option<ScheduledChange<T::BlockNumber>>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_change())
}

/// Attempt to extract a forced set-change signal from a digest.
pub fn forced_change(digest: &DigestOf<T>)
-> Option<(T::BlockNumber, ScheduledChange<T::BlockNumber>)>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_forced_change())
}

/// Attempt to extract a pause signal from a digest.
pub fn pending_pause(digest: &DigestOf<T>)
-> Option<T::BlockNumber>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_pause())
}

/// Attempt to extract a resume signal from a digest.
pub fn pending_resume(digest: &DigestOf<T>)
-> Option<T::BlockNumber>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_resume())
}
}

impl<T: Trait> sp_runtime::BoundToRuntimeAppPublic for Module<T> {
type Public = AuthorityId;
}
@@ -638,14 +626,26 @@ impl<T: Trait> pallet_session::OneSessionHandler<T::AccountId> for Module<T>
// Always issue a change if `session` says that the validators have changed.
// Even if their session keys are the same as before, the underlying economic
// identities have changed.
let current_set_id = if changed {
let current_set_id = if changed || <Stalled<T>>::exists() {
Contributor (author): Even when the session module reports no changes, if there is a stall we want to schedule a forced change explicitly (regardless of whether it is the same authorities as in the last session).

let next_authorities = validators.map(|(_, k)| (k, 1)).collect::<Vec<_>>();
if let Some((further_wait, median)) = <Stalled<T>>::take() {
let _ = Self::schedule_change(next_authorities, further_wait, Some(median));

let res = if let Some((further_wait, median)) = <Stalled<T>>::take() {
Contributor (author): If we fail to schedule a change (either because there is already a pending change or because it is too soon for a forced change), we'd erroneously increase the set id.

Self::schedule_change(next_authorities, further_wait, Some(median))
} else {
Self::schedule_change(next_authorities, Zero::zero(), None)
};

if res.is_ok() {
CurrentSetId::mutate(|s| {
*s += 1;
*s
})
} else {
let _ = Self::schedule_change(next_authorities, Zero::zero(), None);
// either the session module signalled that the validators have changed
// or the set was stalled. but since we didn't successfully schedule
// an authority set change we do not increment the set id.
Self::current_set_id()
}
CurrentSetId::mutate(|s| { *s += 1; *s })
} else {
// nothing's changed, neither economic conditions nor session keys. update the pointer
// of the current set.
Expand All @@ -663,7 +663,7 @@ impl<T: Trait> pallet_session::OneSessionHandler<T::AccountId> for Module<T>
}
}

impl<T: Trait> pallet_finality_tracker::OnFinalizationStalled<T::BlockNumber> for Module<T> {
impl<T: Trait> OnFinalizationStalled<T::BlockNumber> for Module<T> {
fn on_stalled(further_wait: T::BlockNumber, median: T::BlockNumber) {
// when we record old authority sets, we can use `pallet_finality_tracker::median`
// to figure out _who_ failed. until then, we can't meaningfully guard
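Taken together, the stall handling above could be exercised end to end roughly as follows. This is a sketch only (not part of the diff), reusing the mock helpers (new_test_ext, start_era, start_session, Origin::root()) and assuming the `forced` and `delay` fields of StoredPendingChange are publicly readable.

#[test]
fn stall_forces_a_new_set_at_the_next_session() {
    new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
        start_era(1);

        // root notes the stall: wait a further 1000 blocks and treat block 1
        // as the base that voters consider finalized.
        assert_ok!(Grandpa::note_stalled(Origin::root(), 1000, 1));
        assert!(Grandpa::stalled().is_some());

        // at the next session a forced change is scheduled, even though the
        // validator set itself did not change.
        start_session(Session::current_index() + 1);

        let pending = Grandpa::pending_change().expect("a change should be pending");
        assert!(pending.forced.is_some());
        assert_eq!(pending.delay, 1000);
    });
}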
27 changes: 13 additions & 14 deletions frame/grandpa/src/mock.rs
@@ -365,23 +365,18 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx
}

pub fn start_session(session_index: SessionIndex) {
let mut parent_hash = System::parent_hash();

for i in Session::current_index()..session_index {
System::on_finalize(System::block_number());
Session::on_finalize(System::block_number());
Staking::on_finalize(System::block_number());
System::set_block_number((i + 1).into());
Timestamp::set_timestamp(System::block_number() * 6000);
Grandpa::on_finalize(System::block_number());

// In order to be able to use `System::parent_hash()` in the tests
// we need to first get it via `System::finalize` and then set it
// the `System::initialize`. However, it is needed to be taken into
// consideration that finalizing will prune some data in `System`
// storage including old values `BlockHash` if that reaches above
// `BlockHashCount` capacity.
if System::block_number() > 1 {
let parent_hash = if System::block_number() > 1 {
let hdr = System::finalize();
parent_hash = hdr.hash();
}
hdr.hash()
} else {
System::parent_hash()
};

System::initialize(
&(i as u64 + 1),
@@ -390,9 +385,13 @@ pub fn start_session(session_index: SessionIndex) {
&Default::default(),
Default::default(),
);
System::set_block_number((i + 1).into());
Timestamp::set_timestamp(System::block_number() * 6000);

Session::on_initialize(System::block_number());
System::on_initialize(System::block_number());
Session::on_initialize(System::block_number());
Staking::on_initialize(System::block_number());
Grandpa::on_initialize(System::block_number());
}

assert_eq!(Session::current_index(), session_index);
59 changes: 36 additions & 23 deletions frame/grandpa/src/tests.rs
@@ -342,14 +342,15 @@ fn report_equivocation_current_set_works() {
start_era(1);

let authorities = Grandpa::grandpa_authorities();
let validators = Session::validators();

// make sure that all authorities have the same balance
for i in 0..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
// make sure that all validators have the same balance
for validator in &validators {
assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);

assert_eq!(
Staking::eras_stakers(1, i as u64),
Staking::eras_stakers(1, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
@@ -388,11 +389,12 @@ fn report_equivocation_current_set_works() {
start_era(2);

// check that the balance of 0-th validator is slashed 100%.
assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&0), 0);
let equivocation_validator_id = validators[equivocation_authority_index];

assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0);
assert_eq!(
Staking::eras_stakers(2, 0),
Staking::eras_stakers(2, equivocation_validator_id),
pallet_staking::Exposure {
total: 0,
own: 0,
@@ -401,12 +403,16 @@ fn report_equivocation_current_set_works() {
);

// check that the balances of all other validators are left intact.
for i in 1..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
for validator in &validators {
if *validator == equivocation_validator_id {
continue;
}

assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);

assert_eq!(
Staking::eras_stakers(2, i as u64),
Staking::eras_stakers(2, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
Expand All @@ -425,6 +431,7 @@ fn report_equivocation_old_set_works() {
start_era(1);

let authorities = Grandpa::grandpa_authorities();
let validators = Session::validators();

let equivocation_authority_index = 0;
let equivocation_key = &authorities[equivocation_authority_index].0;
@@ -436,12 +443,12 @@ fn report_equivocation_old_set_works() {
start_era(2);

// make sure that all authorities have the same balance
for i in 0..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
for validator in &validators {
assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);

assert_eq!(
Staking::eras_stakers(2, i as u64),
Staking::eras_stakers(2, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
@@ -474,11 +481,13 @@ fn report_equivocation_old_set_works() {
start_era(3);

// check that the balance of 0-th validator is slashed 100%.
assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&0), 0);
let equivocation_validator_id = validators[equivocation_authority_index];

assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0);

assert_eq!(
Staking::eras_stakers(3, 0),
Staking::eras_stakers(3, equivocation_validator_id),
pallet_staking::Exposure {
total: 0,
own: 0,
Expand All @@ -487,12 +496,16 @@ fn report_equivocation_old_set_works() {
);

// check that the balances of all other validators are left intact.
for i in 1..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
for validator in &validators {
if *validator == equivocation_validator_id {
continue;
}

assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);

assert_eq!(
Staking::eras_stakers(3, i as u64),
Staking::eras_stakers(3, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,