From 9379833d6ce8b44756298f09e0b619b2137dc458 Mon Sep 17 00:00:00 2001
From: Justin Starry
Date: Wed, 5 Mar 2025 00:53:28 +0000
Subject: [PATCH 1/2] feat: new leader schedule by vote delegation (SIMD-0180)

---
 ledger/Cargo.toml                        |   1 +
 ledger/src/leader_schedule.rs            |  17 +-
 ledger/src/leader_schedule/vote_keyed.rs | 251 +++++++++++++++++++++++
 ledger/src/leader_schedule_utils.rs      |  52 +++--
 runtime/src/bank.rs                      |  39 ++++
 runtime/src/bank/tests.rs                |  64 ++++++
 6 files changed, 405 insertions(+), 19 deletions(-)
 create mode 100644 ledger/src/leader_schedule/vote_keyed.rs

diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml
index 34c0f35f136498..cd53b2ace216c2 100644
--- a/ledger/Cargo.toml
+++ b/ledger/Cargo.toml
@@ -96,6 +96,7 @@ criterion = { workspace = true }
 solana-account-decoder = { workspace = true }
 solana-logger = { workspace = true }
 solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
+solana-vote = { workspace = true, features = ["dev-context-only-utils"] }
 spl-pod = { workspace = true }
 test-case = { workspace = true }

diff --git a/ledger/src/leader_schedule.rs b/ledger/src/leader_schedule.rs
index 32191787e7cf6c..bcc91bac2d2fe1 100644
--- a/ledger/src/leader_schedule.rs
+++ b/ledger/src/leader_schedule.rs
@@ -7,7 +7,11 @@ use {
 };
 
 mod identity_keyed;
-pub use identity_keyed::LeaderSchedule as IdentityKeyedLeaderSchedule;
+mod vote_keyed;
+pub use {
+    identity_keyed::LeaderSchedule as IdentityKeyedLeaderSchedule,
+    vote_keyed::LeaderSchedule as VoteKeyedLeaderSchedule,
+};
 
 // Used for testing
 #[derive(Clone, Debug)]
@@ -23,6 +27,15 @@ pub trait LeaderScheduleVariant:
 {
     fn get_slot_leaders(&self) -> &[Pubkey];
     fn get_leader_slots_map(&self) -> &HashMap<Pubkey, Arc<Vec<usize>>>;
+    fn is_vote_keyed(&self) -> bool {
+        false
+    }
+
+    /// Get the vote account address for the given epoch slot index. This is
+    /// guaranteed to be Some if the leader schedule is keyed by vote account
+    fn get_vote_key_at_slot_index(&self, _epoch_slot_index: usize) -> Option<&Pubkey> {
+        None
+    }
 
     fn get_leader_upcoming_slots(
         &self,
@@ -58,7 +71,7 @@ pub trait LeaderScheduleVariant:
     }
 }
 
-// Note: passing in zero stakers will cause a panic.
+// Note: passing in zero keyed stakes will cause a panic.
 fn stake_weighted_slot_leaders(
     mut keyed_stakes: Vec<(&Pubkey, u64)>,
     epoch: Epoch,
diff --git a/ledger/src/leader_schedule/vote_keyed.rs b/ledger/src/leader_schedule/vote_keyed.rs
new file mode 100644
index 00000000000000..f8722d29f189e7
--- /dev/null
+++ b/ledger/src/leader_schedule/vote_keyed.rs
@@ -0,0 +1,251 @@
+use {
+    super::{stake_weighted_slot_leaders, IdentityKeyedLeaderSchedule, LeaderScheduleVariant},
+    solana_pubkey::Pubkey,
+    solana_sdk::clock::Epoch,
+    solana_vote::vote_account::VoteAccountsHashMap,
+    std::{collections::HashMap, ops::Index, sync::Arc},
+};
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct LeaderSchedule {
+    vote_keyed_slot_leaders: Vec<Pubkey>,
+    // cached leader schedule keyed by validator identities created by mapping
+    // vote account addresses to the validator identity designated at the time
+    // of leader schedule generation. This is used to avoid the need to look up
+    // the validator identity address for each slot.
+    identity_keyed_leader_schedule: IdentityKeyedLeaderSchedule,
+}
+
+impl LeaderSchedule {
+    // Note: passing in zero vote accounts will cause a panic.
+    pub fn new(
+        vote_accounts_map: &VoteAccountsHashMap,
+        epoch: Epoch,
+        len: u64,
+        repeat: u64,
+    ) -> Self {
+        let keyed_stakes: Vec<_> = vote_accounts_map
+            .iter()
+            .map(|(vote_pubkey, (stake, _account))| (vote_pubkey, *stake))
+            .collect();
+        let vote_keyed_slot_leaders = stake_weighted_slot_leaders(keyed_stakes, epoch, len, repeat);
+        Self::new_from_schedule(vote_keyed_slot_leaders, vote_accounts_map)
+    }
+
+    fn new_from_schedule(
+        vote_keyed_slot_leaders: Vec<Pubkey>,
+        vote_accounts_map: &VoteAccountsHashMap,
+    ) -> Self {
+        struct SlotLeaderInfo<'a> {
+            vote_account_address: &'a Pubkey,
+            validator_identity_address: &'a Pubkey,
+        }
+
+        let default_pubkey = Pubkey::default();
+        let mut current_slot_leader_info = SlotLeaderInfo {
+            vote_account_address: &default_pubkey,
+            validator_identity_address: &default_pubkey,
+        };
+
+        let slot_leaders: Vec<Pubkey> = vote_keyed_slot_leaders
+            .iter()
+            .map(|vote_account_address| {
+                if vote_account_address != current_slot_leader_info.vote_account_address {
+                    let validator_identity_address = vote_accounts_map
+                        .get(vote_account_address)
+                        .expect("vote account must be in vote_accounts_map")
+                        .1
+                        .node_pubkey();
+                    current_slot_leader_info = SlotLeaderInfo {
+                        vote_account_address,
+                        validator_identity_address,
+                    };
+                }
+                *current_slot_leader_info.validator_identity_address
+            })
+            .collect();
+
+        Self {
+            vote_keyed_slot_leaders,
+            identity_keyed_leader_schedule: IdentityKeyedLeaderSchedule::new_from_schedule(
+                slot_leaders,
+            ),
+        }
+    }
+}
+
+impl LeaderScheduleVariant for LeaderSchedule {
+    fn get_slot_leaders(&self) -> &[Pubkey] {
+        self.identity_keyed_leader_schedule.get_slot_leaders()
+    }
+
+    fn get_leader_slots_map(&self) -> &HashMap<Pubkey, Arc<Vec<usize>>> {
+        self.identity_keyed_leader_schedule.get_leader_slots_map()
+    }
+
+    fn is_vote_keyed(&self) -> bool {
+        true
+    }
+
+    fn get_vote_key_at_slot_index(&self, index: usize) -> Option<&Pubkey> {
+        let slot_vote_addresses = &self.vote_keyed_slot_leaders;
+        Some(&slot_vote_addresses[index % slot_vote_addresses.len()])
+    }
+}
+
+impl Index<u64> for LeaderSchedule {
+    type Output = Pubkey;
+    fn index(&self, index: u64) -> &Pubkey {
+        &self.get_slot_leaders()[index as usize % self.num_slots()]
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {super::*, solana_vote::vote_account::VoteAccount};
+
+    #[test]
+    fn test_index() {
+        let pubkey0 = solana_pubkey::new_rand();
+        let pubkey1 = solana_pubkey::new_rand();
+        let vote_keyed_slot_leaders = vec![pubkey0, pubkey1];
+        let vote_accounts_map: VoteAccountsHashMap = [
+            (pubkey0, (0, VoteAccount::new_random())),
+            (pubkey1, (0, VoteAccount::new_random())),
+        ]
+        .into_iter()
+        .collect();
+
+        let leader_schedule =
+            LeaderSchedule::new_from_schedule(vote_keyed_slot_leaders, &vote_accounts_map);
+        assert_eq!(
+            &leader_schedule[0],
+            vote_accounts_map.get(&pubkey0).unwrap().1.node_pubkey()
+        );
+        assert_eq!(
+            &leader_schedule[1],
+            vote_accounts_map.get(&pubkey1).unwrap().1.node_pubkey()
+        );
+        assert_eq!(
+            &leader_schedule[2],
+            vote_accounts_map.get(&pubkey0).unwrap().1.node_pubkey()
+        );
+    }
+
+    #[test]
+    fn test_get_vote_key_at_slot_index() {
+        let pubkey0 = solana_pubkey::new_rand();
+        let pubkey1 = solana_pubkey::new_rand();
+        let vote_keyed_slot_leaders = vec![pubkey0, pubkey1];
+        let vote_accounts_map: VoteAccountsHashMap = [
+            (pubkey0, (0, VoteAccount::new_random())),
+            (pubkey1, (0, VoteAccount::new_random())),
+        ]
+        .into_iter()
+        .collect();
+
+        let leader_schedule =
+            LeaderSchedule::new_from_schedule(vote_keyed_slot_leaders, &vote_accounts_map);
+        assert_eq!(
+            leader_schedule.get_vote_key_at_slot_index(0),
+            Some(&pubkey0)
+        );
+        assert_eq!(
+            leader_schedule.get_vote_key_at_slot_index(1),
+            Some(&pubkey1)
+        );
+        assert_eq!(
+            leader_schedule.get_vote_key_at_slot_index(2),
+            Some(&pubkey0)
+        );
+    }
+
+    #[test]
+    fn test_leader_schedule_basic() {
+        let num_keys = 10;
+        let vote_accounts_map: HashMap<_, _> = (0..num_keys)
+            .map(|i| (solana_pubkey::new_rand(), (i, VoteAccount::new_random())))
+            .collect();
+
+        let epoch: Epoch = rand::random();
+        let len = num_keys * 10;
+        let leader_schedule = LeaderSchedule::new(&vote_accounts_map, epoch, len, 1);
+        let leader_schedule2 = LeaderSchedule::new(&vote_accounts_map, epoch, len, 1);
+        assert_eq!(leader_schedule.num_slots() as u64, len);
+        // Check that the same schedule is reproducibly generated
+        assert_eq!(leader_schedule, leader_schedule2);
+    }
+
+    #[test]
+    fn test_repeated_leader_schedule() {
+        let num_keys = 10;
+        let vote_accounts_map: HashMap<_, _> = (0..num_keys)
+            .map(|i| (solana_pubkey::new_rand(), (i, VoteAccount::new_random())))
+            .collect();
+
+        let epoch = rand::random::<Epoch>();
+        let len = num_keys * 10;
+        let repeat = 8;
+        let leader_schedule = LeaderSchedule::new(&vote_accounts_map, epoch, len, repeat);
+        assert_eq!(leader_schedule.num_slots() as u64, len);
+        let mut leader_node = Pubkey::default();
+        for (i, node) in leader_schedule.get_slot_leaders().iter().enumerate() {
+            if i % repeat as usize == 0 {
+                leader_node = *node;
+            } else {
+                assert_eq!(leader_node, *node);
+            }
+        }
+    }
+
+    #[test]
+    fn test_repeated_leader_schedule_specific() {
+        let vote_key0 = solana_pubkey::new_rand();
+        let vote_key1 = solana_pubkey::new_rand();
+        let vote_accounts_map: HashMap<_, _> = [
+            (vote_key0, (2, VoteAccount::new_random())),
+            (vote_key1, (1, VoteAccount::new_random())),
+        ]
+        .into_iter()
+        .collect();
+        let alice_pubkey = *vote_accounts_map.get(&vote_key0).unwrap().1.node_pubkey();
+        let bob_pubkey = *vote_accounts_map.get(&vote_key1).unwrap().1.node_pubkey();
+
+        let epoch = 0;
+        let len = 8;
+        // What the schedule looks like without any repeats
+        let leaders1 = LeaderSchedule::new(&vote_accounts_map, epoch, len, 1)
+            .get_slot_leaders()
+            .to_vec();
+
+        // What the schedule looks like with repeats
+        let leaders2 = LeaderSchedule::new(&vote_accounts_map, epoch, len, 2)
+            .get_slot_leaders()
+            .to_vec();
+        assert_eq!(leaders1.len(), leaders2.len());
+
+        let leaders1_expected = vec![
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            bob_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+        ];
+        let leaders2_expected = vec![
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            alice_pubkey,
+            bob_pubkey,
+            bob_pubkey,
+        ];
+
+        assert_eq!(leaders1, leaders1_expected);
+        assert_eq!(leaders2, leaders2_expected);
+    }
+}
diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs
index 187c2bc414f513..21fff172dee1ff 100644
--- a/ledger/src/leader_schedule_utils.rs
+++ b/ledger/src/leader_schedule_utils.rs
@@ -1,5 +1,7 @@
 use {
-    crate::leader_schedule::{IdentityKeyedLeaderSchedule, LeaderSchedule},
+    crate::leader_schedule::{
+        IdentityKeyedLeaderSchedule, LeaderSchedule, VoteKeyedLeaderSchedule,
+    },
     solana_runtime::bank::Bank,
     solana_sdk::{
         clock::{Epoch, Slot, NUM_CONSECUTIVE_LEADER_SLOTS},
@@ -10,15 +12,26 @@ use {
 
 /// Return the leader schedule for the given epoch.
 pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> {
-    bank.epoch_staked_nodes(epoch)
-        .map(|stakes| -> LeaderSchedule {
+    let use_new_leader_schedule = bank.should_use_vote_keyed_leader_schedule(epoch)?;
+    if use_new_leader_schedule {
+        bank.epoch_vote_accounts(epoch).map(|vote_accounts_map| {
+            Box::new(VoteKeyedLeaderSchedule::new(
+                vote_accounts_map,
+                epoch,
+                bank.get_slots_in_epoch(epoch),
+                NUM_CONSECUTIVE_LEADER_SLOTS,
+            )) as LeaderSchedule
+        })
+    } else {
+        bank.epoch_staked_nodes(epoch).map(|stakes| {
             Box::new(IdentityKeyedLeaderSchedule::new(
                 &stakes,
                 epoch,
                 bank.get_slots_in_epoch(epoch),
                 NUM_CONSECUTIVE_LEADER_SLOTS,
-            ))
+            )) as LeaderSchedule
         })
+    }
 }
 
 /// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
@@ -65,27 +78,32 @@ mod tests {
         super::*,
         solana_runtime::genesis_utils::{
            bootstrap_validator_stake_lamports, create_genesis_config_with_leader,
+            deactivate_features,
         },
+        test_case::test_case,
     };
 
-    #[test]
-    fn test_leader_schedule_via_bank() {
+    #[test_case(true; "vote keyed leader schedule")]
+    #[test_case(false; "identity keyed leader schedule")]
+    fn test_leader_schedule_via_bank(use_vote_keyed_leader_schedule: bool) {
         let pubkey = solana_pubkey::new_rand();
-        let genesis_config =
+        let mut genesis_config =
             create_genesis_config_with_leader(0, &pubkey, bootstrap_validator_stake_lamports())
                 .genesis_config;
+
+        if !use_vote_keyed_leader_schedule {
+            deactivate_features(
+                &mut genesis_config,
+                &vec![solana_feature_set::enable_vote_address_leader_schedule::id()],
+            );
+        }
+
         let bank = Bank::new_for_tests(&genesis_config);
+        let leader_schedule = leader_schedule(0, &bank).unwrap();
 
-        let pubkeys_and_stakes: HashMap<_, _> = bank
-            .current_epoch_staked_nodes()
-            .iter()
-            .map(|(pubkey, stake)| (*pubkey, *stake))
-            .collect();
-        let leader_schedule = IdentityKeyedLeaderSchedule::new(
-            &pubkeys_and_stakes,
-            0,
-            genesis_config.epoch_schedule.slots_per_epoch,
-            NUM_CONSECUTIVE_LEADER_SLOTS,
+        assert_eq!(
+            leader_schedule.is_vote_keyed(),
+            use_vote_keyed_leader_schedule
         );
 
         assert_eq!(leader_schedule[0], pubkey);
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 88fe6645a4eb63..644cbac6857add 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -6084,6 +6084,45 @@ impl Bank {
         self.epoch_schedule().get_leader_schedule_epoch(slot)
     }
 
+    /// Returns whether the specified epoch should use the new vote account
+    /// keyed leader schedule
+    pub fn should_use_vote_keyed_leader_schedule(&self, epoch: Epoch) -> Option<bool> {
+        let effective_epoch = self
+            .feature_set
+            .activated_slot(&solana_feature_set::enable_vote_address_leader_schedule::id())
+            .map(|activation_slot| {
+                // If the feature was activated at genesis, then the new leader
+                // schedule should be effective immediately in the first epoch
+                if activation_slot == 0 {
+                    return 0;
+                }
+
+                // Calculate the epoch that the feature became activated in
+                let activation_epoch = self.epoch_schedule.get_epoch(activation_slot);
+
+                // The effective epoch is the epoch immediately after the
+                // activation epoch
+                activation_epoch.wrapping_add(1)
+            });
+
+        // Starting from the effective epoch, always use the new leader schedule
+        if let Some(effective_epoch) = effective_epoch {
+            return Some(epoch >= effective_epoch);
+        }
+
+        // Calculate the max epoch we can cache a leader schedule for
+        let max_cached_leader_schedule = self.get_leader_schedule_epoch(self.slot());
+        if epoch <= max_cached_leader_schedule {
+            // The feature cannot be effective by the specified epoch
+            Some(false)
+        } else {
+            // Cannot determine if an epoch should use the new leader schedule if the
+            // the epoch is too far in the future because we won't know if the feature
+            // will have been activated by then or not.
+            None
+        }
+    }
+
     /// a bank-level cache of vote accounts and stake delegation info
     fn update_stakes_cache(
         &self,
diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs
index d004f70fe7364d..abd4d159e60cda 100644
--- a/runtime/src/bank/tests.rs
+++ b/runtime/src/bank/tests.rs
@@ -14014,3 +14014,67 @@ fn test_rehash_accounts_modified() {
     // let the show begin
     bank.rehash();
 }
+
+#[test]
+fn test_should_use_vote_keyed_leader_schedule() {
+    let genesis_config = genesis_utils::create_genesis_config(10_000).genesis_config;
+    let epoch_schedule = &genesis_config.epoch_schedule;
+    let create_test_bank = |bank_epoch: Epoch, feature_activation_slot: Option<Slot>| -> Bank {
+        let mut bank = Bank::new_for_tests(&genesis_config);
+        bank.epoch = bank_epoch;
+        let mut feature_set = FeatureSet::default();
+        if let Some(feature_activation_slot) = feature_activation_slot {
+            let feature_activation_epoch = bank.epoch_schedule().get_epoch(feature_activation_slot);
+            assert!(feature_activation_epoch <= bank_epoch);
+            feature_set.activate(
+                &solana_feature_set::enable_vote_address_leader_schedule::id(),
+                feature_activation_slot,
+            );
+        }
+        bank.feature_set = Arc::new(feature_set);
+        bank
+    };
+
+    // Test feature activation at genesis
+    let test_bank = create_test_bank(0, Some(0));
+    for epoch in 0..10 {
+        assert_eq!(
+            test_bank.should_use_vote_keyed_leader_schedule(epoch),
+            Some(true),
+        );
+    }
+
+    // Test feature activated in previous epoch
+    let slot_in_prev_epoch = epoch_schedule.get_first_slot_in_epoch(1);
+    let test_bank = create_test_bank(2, Some(slot_in_prev_epoch));
+    for epoch in 0..=(test_bank.epoch + 1) {
+        assert_eq!(
+            test_bank.should_use_vote_keyed_leader_schedule(epoch),
+            Some(epoch >= test_bank.epoch),
+        );
+    }
+
+    // Test feature activated in current epoch
+    let current_epoch_slot = epoch_schedule.get_last_slot_in_epoch(1);
+    let test_bank = create_test_bank(1, Some(current_epoch_slot));
+    for epoch in 0..=(test_bank.epoch + 1) {
+        assert_eq!(
+            test_bank.should_use_vote_keyed_leader_schedule(epoch),
+            Some(epoch > test_bank.epoch),
+        );
+    }
+
+    // Test feature not activated yet
+    let test_bank = create_test_bank(1, None);
+    let max_cached_leader_schedule = epoch_schedule.get_leader_schedule_epoch(test_bank.slot());
+    for epoch in 0..=(max_cached_leader_schedule + 1) {
+        if epoch <= max_cached_leader_schedule {
+            assert_eq!(
+                test_bank.should_use_vote_keyed_leader_schedule(epoch),
+                Some(false),
+            );
+        } else {
+            assert_eq!(test_bank.should_use_vote_keyed_leader_schedule(epoch), None);
+        }
+    }
+}

From c7d917b29f049ec5f9f818ca40636f8e0d0d58fb Mon Sep 17 00:00:00 2001
From: Justin Starry
Date: Tue, 11 Mar 2025 18:06:22 +0000
Subject: [PATCH 2/2] feedback

---
 ledger/src/leader_schedule.rs            | 3 ---
 ledger/src/leader_schedule/vote_keyed.rs | 4 ----
 ledger/src/leader_schedule_utils.rs      | 2 +-
 3 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/ledger/src/leader_schedule.rs b/ledger/src/leader_schedule.rs
index bcc91bac2d2fe1..14f70f7787be14 100644
--- a/ledger/src/leader_schedule.rs
+++ b/ledger/src/leader_schedule.rs
@@ -27,9 +27,6 @@ pub trait LeaderScheduleVariant:
 {
     fn get_slot_leaders(&self) -> &[Pubkey];
     fn get_leader_slots_map(&self) -> &HashMap<Pubkey, Arc<Vec<usize>>>;
-    fn is_vote_keyed(&self) -> bool {
-        false
-    }
 
     /// Get the vote account address for the given epoch slot index. This is
     /// guaranteed to be Some if the leader schedule is keyed by vote account
diff --git a/ledger/src/leader_schedule/vote_keyed.rs b/ledger/src/leader_schedule/vote_keyed.rs
index f8722d29f189e7..059215efbc400a 100644
--- a/ledger/src/leader_schedule/vote_keyed.rs
+++ b/ledger/src/leader_schedule/vote_keyed.rs
@@ -83,10 +83,6 @@ impl LeaderScheduleVariant for LeaderSchedule {
         self.identity_keyed_leader_schedule.get_leader_slots_map()
     }
 
-    fn is_vote_keyed(&self) -> bool {
-        true
-    }
-
     fn get_vote_key_at_slot_index(&self, index: usize) -> Option<&Pubkey> {
         let slot_vote_addresses = &self.vote_keyed_slot_leaders;
         Some(&slot_vote_addresses[index % slot_vote_addresses.len()])
diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs
index 21fff172dee1ff..f49a39a2bc5ae1 100644
--- a/ledger/src/leader_schedule_utils.rs
+++ b/ledger/src/leader_schedule_utils.rs
@@ -102,7 +102,7 @@ mod tests {
         let leader_schedule = leader_schedule(0, &bank).unwrap();
 
         assert_eq!(
-            leader_schedule.is_vote_keyed(),
+            leader_schedule.get_vote_key_at_slot_index(0).is_some(),
             use_vote_keyed_leader_schedule
         );