From cc3afa55883883191ef019f45e6f6814370cad3b Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sun, 17 Mar 2024 15:29:20 -0700 Subject: [PATCH 001/153] Remove public visibility of program cache from bank (#279) --- core/benches/banking_stage.rs | 5 +---- core/src/replay_stage.rs | 6 +---- ledger-tool/src/program.rs | 11 +++------ ledger/src/blockstore_processor.rs | 6 +---- runtime/src/bank.rs | 36 ++++++++++++++++++++++++++++-- runtime/src/bank_forks.rs | 13 ++--------- unified-scheduler-pool/src/lib.rs | 5 +---- 7 files changed, 43 insertions(+), 39 deletions(-) diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 242d3b0ed6b530..9defba6a02d155 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -398,10 +398,7 @@ fn simulate_process_entries( let bank_fork = BankForks::new_rw_arc(bank); let bank = bank_fork.read().unwrap().get_with_scheduler(slot).unwrap(); bank.clone_without_scheduler() - .loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_fork.clone()); + .set_fork_graph_in_program_cache(bank_fork.clone()); for i in 0..(num_accounts / 2) { bank.transfer(initial_lamports, mint_keypair, &keypairs[i * 2].pubkey()) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 015ec5360448f9..90be2dade6a191 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1686,11 +1686,7 @@ impl ReplayStage { root_bank.clear_slot_signatures(slot); // Remove cached entries of the programs that were deployed in this slot. - root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune_by_deployment_slot(slot); + root_bank.prune_program_cache_by_deployment_slot(slot); if let Some(bank_hash) = blockstore.get_bank_hash(slot) { // If a descendant was successfully replayed and chained from a duplicate it must diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index af50d59bca0255..24df2168a338bf 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -514,14 +514,9 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); // Adding `DELAY_VISIBILITY_SLOT_OFFSET` to slots to accommodate for delay visibility of the program - let mut loaded_programs = LoadedProgramsForTxBatch::new( - bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET, - bank.loaded_programs_cache - .read() - .unwrap() - .environments - .clone(), - ); + let slot = bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET; + let mut loaded_programs = + LoadedProgramsForTxBatch::new(slot, bank.get_runtime_environments_for_slot(slot)); for key in cached_account_keys { loaded_programs.replenish(key, bank.load_program(&key, false, bank.epoch())); debug!("Loaded program {}", key); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index a76387f7cb2054..9eace1e7c9cd34 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1674,11 +1674,7 @@ fn load_frozen_forks( root = new_root_bank.slot(); leader_schedule_cache.set_root(new_root_bank); - new_root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune(root, new_root_bank.epoch()); + new_root_bank.prune_program_cache(root, new_root_bank.epoch()); let _ = bank_forks.write().unwrap().set_root( root, accounts_background_request_sender, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d1a1805d0d3a20..6d5c2345f92aca 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -98,7 +98,9 @@ use { 
solana_program_runtime::{ compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, - loaded_programs::{LoadedProgram, LoadedProgramType, LoadedPrograms}, + loaded_programs::{ + LoadedProgram, LoadedProgramType, LoadedPrograms, ProgramRuntimeEnvironments, + }, runtime_config::RuntimeConfig, timings::{ExecuteTimingType, ExecuteTimings}, }, @@ -803,7 +805,7 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, - pub loaded_programs_cache: Arc>>, + loaded_programs_cache: Arc>>, epoch_reward_status: EpochRewardStatus, @@ -1467,6 +1469,36 @@ impl Bank { new } + pub fn set_fork_graph_in_program_cache(&self, fork_graph: Arc>) { + self.loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(fork_graph); + } + + pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) { + self.loaded_programs_cache + .write() + .unwrap() + .prune(new_root_slot, new_root_epoch); + } + + pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) { + self.loaded_programs_cache + .write() + .unwrap() + .prune_by_deployment_slot(deployment_slot); + } + + pub fn get_runtime_environments_for_slot(&self, slot: Slot) -> ProgramRuntimeEnvironments { + let epoch = self.epoch_schedule.get_epoch(slot); + self.loaded_programs_cache + .read() + .unwrap() + .get_environments_for_epoch(epoch) + .clone() + } + /// Epoch in which the new cooldown warmup rate for stake was activated pub fn new_warmup_cooldown_rate_epoch(&self) -> Option { self.feature_set diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 668062c8d31cce..770cd9059a8e57 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -126,12 +126,7 @@ impl BankForks { scheduler_pool: None, })); - root_bank - .loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_forks.clone()); - + root_bank.set_fork_graph_in_program_cache(bank_forks.clone()); bank_forks } @@ -451,11 +446,7 @@ impl BankForks { pub fn prune_program_cache(&self, root: Slot) { if let Some(root_bank) = self.banks.get(&root) { - root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune(root, root_bank.epoch()); + root_bank.prune_program_cache(root, root_bank.epoch()); } } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 09ded82ee88e7d..81a3506ea28480 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -941,10 +941,7 @@ mod tests { let slot = bank.slot(); let bank_fork = BankForks::new_rw_arc(bank); let bank = bank_fork.read().unwrap().get(slot).unwrap(); - bank.loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_fork); + bank.set_fork_graph_in_program_cache(bank_fork); bank } From 1fc4e38a4f3ccb99a610ef9190073ca9ca48700b Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 18 Mar 2024 09:53:44 -0500 Subject: [PATCH 002/153] add stats for write cache flushing (#233) * add stats for write cache flushing * some renames --- accounts-db/src/accounts_db.rs | 64 ++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e706958af8d0f2..34bcdedd2c5499 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1719,6 +1719,8 @@ struct FlushStats { num_flushed: usize, num_purged: usize, total_size: u64, + store_accounts_timing: StoreAccountsTiming, + store_accounts_total_us: u64, } impl FlushStats { @@ -1726,6 +1728,9 
@@ impl FlushStats { saturating_add_assign!(self.num_flushed, other.num_flushed); saturating_add_assign!(self.num_purged, other.num_purged); saturating_add_assign!(self.total_size, other.total_size); + self.store_accounts_timing + .accumulate(&other.store_accounts_timing); + saturating_add_assign!(self.store_accounts_total_us, other.store_accounts_total_us); } } @@ -6050,7 +6055,7 @@ impl AccountsDb { // Note even if force_flush is false, we will still flush all roots <= the // given `requested_flush_root`, even if some of the later roots cannot be used for // cleaning due to an ongoing scan - let (total_new_cleaned_roots, num_cleaned_roots_flushed) = self + let (total_new_cleaned_roots, num_cleaned_roots_flushed, mut flush_stats) = self .flush_rooted_accounts_cache( requested_flush_root, Some((&mut account_bytes_saved, &mut num_accounts_saved)), @@ -6062,7 +6067,7 @@ impl AccountsDb { // banks // If 'should_aggressively_flush_cache', then flush the excess ones to storage - let (total_new_excess_roots, num_excess_roots_flushed) = + let (total_new_excess_roots, num_excess_roots_flushed, flush_stats_aggressively) = if self.should_aggressively_flush_cache() { // Start by flushing the roots // @@ -6071,8 +6076,9 @@ impl AccountsDb { // for `should_clean`. self.flush_rooted_accounts_cache(None, None) } else { - (0, 0) + (0, 0, FlushStats::default()) }; + flush_stats.accumulate(&flush_stats_aggressively); let mut excess_slot_count = 0; let mut unflushable_unrooted_slot_count = 0; @@ -6123,6 +6129,26 @@ impl AccountsDb { ), ("account_bytes_saved", account_bytes_saved, i64), ("num_accounts_saved", num_accounts_saved, i64), + ( + "store_accounts_total_us", + flush_stats.store_accounts_total_us, + i64 + ), + ( + "update_index_us", + flush_stats.store_accounts_timing.update_index_elapsed, + i64 + ), + ( + "store_accounts_elapsed_us", + flush_stats.store_accounts_timing.store_accounts_elapsed, + i64 + ), + ( + "handle_reclaims_elapsed_us", + flush_stats.store_accounts_timing.handle_reclaims_elapsed, + i64 + ), ); } @@ -6130,7 +6156,7 @@ impl AccountsDb { &self, requested_flush_root: Option, should_clean: Option<(&mut usize, &mut usize)>, - ) -> (usize, usize) { + ) -> (usize, usize, FlushStats) { let max_clean_root = should_clean.as_ref().and_then(|_| { // If there is a long running scan going on, this could prevent any cleaning // based on updates from slots > `max_clean_root`. @@ -6161,12 +6187,13 @@ impl AccountsDb { // Iterate from highest to lowest so that we don't need to flush earlier // outdated updates in earlier roots let mut num_roots_flushed = 0; + let mut flush_stats = FlushStats::default(); for &root in cached_roots.iter().rev() { - if self - .flush_slot_cache_with_clean(root, should_flush_f.as_mut(), max_clean_root) - .is_some() + if let Some(stats) = + self.flush_slot_cache_with_clean(root, should_flush_f.as_mut(), max_clean_root) { num_roots_flushed += 1; + flush_stats.accumulate(&stats); } // Regardless of whether this slot was *just* flushed from the cache by the above @@ -6183,7 +6210,7 @@ impl AccountsDb { // so that clean will actually be able to clean the slots. 
let num_new_roots = cached_roots.len(); self.accounts_index.add_uncleaned_roots(cached_roots); - (num_new_roots, num_roots_flushed) + (num_new_roots, num_roots_flushed, flush_stats) } fn do_flush_slot_cache( @@ -6246,18 +6273,23 @@ impl AccountsDb { &HashSet::default(), ); + let mut store_accounts_timing = StoreAccountsTiming::default(); + let mut store_accounts_total_us = 0; if !is_dead_slot { // This ensures that all updates are written to an AppendVec, before any // updates to the index happen, so anybody that sees a real entry in the index, // will be able to find the account in storage let flushed_store = self.create_and_insert_store(slot, total_size, "flush_slot_cache"); - self.store_accounts_frozen( - (slot, &accounts[..]), - Some(hashes), - &flushed_store, - None, - StoreReclaims::Default, - ); + let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self + .store_accounts_frozen( + (slot, &accounts[..]), + Some(hashes), + &flushed_store, + None, + StoreReclaims::Default, + )); + store_accounts_timing = store_accounts_timing_inner; + store_accounts_total_us = store_accounts_total_inner_us; // If the above sizing function is correct, just one AppendVec is enough to hold // all the data for the slot @@ -6273,6 +6305,8 @@ impl AccountsDb { num_flushed, num_purged, total_size, + store_accounts_timing, + store_accounts_total_us, } } From a71a62f2e2496aef310376445986868dc6423a53 Mon Sep 17 00:00:00 2001 From: WGB5445 <919603023@qq.com> Date: Mon, 18 Mar 2024 09:58:28 -0700 Subject: [PATCH 003/153] [solana-install-init] Optimize error message for Windows user permission installation (#234) * feat: check user's permissions in Windows * feat: Remove check fun and check os_err * fmt and optimize code --- install/src/command.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/install/src/command.rs b/install/src/command.rs index 4ae9e7ee38cedd..ffd4d4c0ae3b33 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -1171,13 +1171,17 @@ pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Res release_dir.join("solana-release"), config.active_release_dir(), ) - .map_err(|err| { - format!( + .map_err(|err| match err.raw_os_error() { + #[cfg(windows)] + Some(os_err) if os_err == winapi::shared::winerror::ERROR_PRIVILEGE_NOT_HELD => { + "You need to run this command with administrator privileges.".to_string() + } + _ => format!( "Unable to symlink {:?} to {:?}: {}", release_dir, config.active_release_dir(), err - ) + ), })?; config.save(config_file)?; From 36e97654e3ea12beb37c8a7459b16e15cc94a3f0 Mon Sep 17 00:00:00 2001 From: ryleung-solana <91908731+ryleung-solana@users.noreply.github.com> Date: Tue, 19 Mar 2024 03:05:00 +0800 Subject: [PATCH 004/153] Make the quic server connection table use an async lock, reducing thrashing (#293) Make the quic server connection table use an async lock, reducing lock contention --- streamer/src/nonblocking/quic.rs | 34 ++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 225412dd08b315..c4969006288dbf 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -33,13 +33,26 @@ use { std::{ iter::repeat_with, net::{IpAddr, SocketAddr, UdpSocket}, + // CAUTION: be careful not to introduce any awaits while holding an RwLock. 
         sync::{
             atomic::{AtomicBool, AtomicU64, Ordering},
-            Arc, Mutex, MutexGuard, RwLock,
+            Arc, RwLock,
         },
         time::{Duration, Instant},
     },
-    tokio::{task::JoinHandle, time::timeout},
+    tokio::{
+        // CAUTION: It's kind of sketch that we're mixing async and sync locks (see the RwLock above).
+        // This is done so that sync code can also access the stake table.
+        // Make sure we don't hold a sync lock across an await - including the await to
+        // lock an async Mutex. This does not happen now and should not happen as long as we
+        // don't hold an async Mutex and sync RwLock at the same time (currently true)
+        // but if we do, the scope of the RwLock must always be a subset of the async Mutex
+        // (i.e. lock order is always async Mutex -> RwLock). Also, be careful not to
+        // introduce any other awaits while holding the RwLock.
+        sync::{Mutex, MutexGuard},
+        task::JoinHandle,
+        time::timeout,
+    },
 };
 
 const WAIT_FOR_STREAM_TIMEOUT: Duration = Duration::from_millis(100);
@@ -383,7 +396,7 @@ fn handle_and_cache_new_connection(
     }
 }
 
-fn prune_unstaked_connections_and_add_new_connection(
+async fn prune_unstaked_connections_and_add_new_connection(
     connection: Connection,
     connection_table: Arc<Mutex<ConnectionTable>>,
     max_connections: usize,
@@ -394,7 +407,7 @@ fn prune_unstaked_connections_and_add_new_connection(
     let stats = params.stats.clone();
     if max_connections > 0 {
         let connection_table_clone = connection_table.clone();
-        let mut connection_table = connection_table.lock().unwrap();
+        let mut connection_table = connection_table.lock().await;
         prune_unstaked_connection_table(&mut connection_table, max_connections, stats);
         handle_and_cache_new_connection(
             connection,
@@ -504,7 +517,8 @@ async fn setup_connection(
 
     match params.peer_type {
         ConnectionPeerType::Staked(stake) => {
-            let mut connection_table_l = staked_connection_table.lock().unwrap();
+            let mut connection_table_l = staked_connection_table.lock().await;
+
             if connection_table_l.total_size >= max_staked_connections {
                 let num_pruned =
                     connection_table_l.prune_random(PRUNE_RANDOM_SAMPLE_SIZE, stake);
@@ -535,7 +549,9 @@ async fn setup_connection(
                     &params,
                     wait_for_chunk_timeout,
                     stream_load_ema.clone(),
-                ) {
+                )
+                .await
+                {
                     stats
                         .connection_added_from_staked_peer
                         .fetch_add(1, Ordering::Relaxed);
@@ -557,7 +573,9 @@ async fn setup_connection(
                     &params,
                     wait_for_chunk_timeout,
                     stream_load_ema.clone(),
-                ) {
+                )
+                .await
+                {
                     stats
                         .connection_added_from_unstaked_peer
                         .fetch_add(1, Ordering::Relaxed);
@@ -800,7 +818,7 @@ async fn handle_connection(
         }
     }
 
-    let removed_connection_count = connection_table.lock().unwrap().remove_connection(
+    let removed_connection_count = connection_table.lock().await.remove_connection(
         ConnectionTableKey::new(remote_addr.ip(), params.remote_pubkey),
         remote_addr.port(),
         stable_id,

From 64412096829241e5c70d537403584dbc9e04ee93 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Mon, 18 Mar 2024 12:24:19 -0700
Subject: [PATCH 005/153] [TieredStorage] TieredStorageFile ->
 TieredReadableFile and TieredWritableFile (#260)

#### Problem

The TieredStorageFile struct currently offers new_readonly() and new_writable()
to allow both read and write workloads to share the same struct. However, the
writer now needs to use BufWriter to improve performance as well as to enable
Hasher on writes, so there is a need to split TieredStorageFile into separate
read-only and writable types.

#### Summary of Changes

Refactor TieredStorageFile into TieredReadableFile and TieredWritableFile.
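
As a rough usage sketch of the resulting API (illustration only, not part of
the diff; it assumes a `path` variable and unwraps errors for brevity):

```rust
use crate::tiered_storage::{
    file::{TieredReadableFile, TieredWritableFile},
    footer::TieredStorageFooter,
};

// Write path: the file is opened with create_new, so it refuses to
// overwrite an existing file; all write_* methods live on this type.
let file = TieredWritableFile::new(&path).unwrap();
let footer = TieredStorageFooter::default();
footer.write_footer_block(&file).unwrap();

// Read path: plain read-only access to an existing file.
let file = TieredReadableFile::new(&path);
let footer = TieredStorageFooter::new_from_footer_block(&file).unwrap();
```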
#### Test Plan Existing tiered-storage tests. --- accounts-db/src/tiered_storage/file.rs | 84 +++++++++++++++--------- accounts-db/src/tiered_storage/footer.rs | 12 ++-- accounts-db/src/tiered_storage/hot.rs | 24 +++---- accounts-db/src/tiered_storage/index.rs | 16 ++--- accounts-db/src/tiered_storage/owners.rs | 8 +-- 5 files changed, 83 insertions(+), 61 deletions(-) diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 51801c6133e1f7..5bcf5f62efbbbd 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -9,10 +9,10 @@ use { }; #[derive(Debug)] -pub struct TieredStorageFile(pub File); +pub struct TieredReadableFile(pub File); -impl TieredStorageFile { - pub fn new_readonly(file_path: impl AsRef) -> Self { +impl TieredReadableFile { + pub fn new(file_path: impl AsRef) -> Self { Self( OpenOptions::new() .read(true) @@ -36,30 +36,6 @@ impl TieredStorageFile { )) } - /// Writes `value` to the file. - /// - /// `value` must be plain ol' data. - pub fn write_pod(&self, value: &T) -> IoResult { - // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. - unsafe { self.write_type(value) } - } - - /// Writes `value` to the file. - /// - /// Prefer `write_pod` when possible, because `write_value` may cause - /// undefined behavior if `value` contains uninitialized bytes. - /// - /// # Safety - /// - /// Caller must ensure casting T to bytes is safe. - /// Refer to the Safety sections in std::slice::from_raw_parts() - /// and bytemuck's Pod and NoUninit for more information. - pub unsafe fn write_type(&self, value: &T) -> IoResult { - let ptr = value as *const _ as *const u8; - let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; - self.write_bytes(bytes) - } - /// Reads a value of type `T` from the file. /// /// Type T must be plain ol' data. @@ -95,13 +71,59 @@ impl TieredStorageFile { (&self.0).seek(SeekFrom::End(offset)) } + pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { + (&self.0).read_exact(buffer) + } +} + +#[derive(Debug)] +pub struct TieredWritableFile(pub File); + +impl TieredWritableFile { + pub fn new(file_path: impl AsRef) -> IoResult { + Ok(Self( + OpenOptions::new() + .create_new(true) + .write(true) + .open(file_path)?, + )) + } + + /// Writes `value` to the file. + /// + /// `value` must be plain ol' data. + pub fn write_pod(&self, value: &T) -> IoResult { + // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. + unsafe { self.write_type(value) } + } + + /// Writes `value` to the file. + /// + /// Prefer `write_pod` when possible, because `write_value` may cause + /// undefined behavior if `value` contains uninitialized bytes. + /// + /// # Safety + /// + /// Caller must ensure casting T to bytes is safe. + /// Refer to the Safety sections in std::slice::from_raw_parts() + /// and bytemuck's Pod and NoUninit for more information. 
+ pub unsafe fn write_type(&self, value: &T) -> IoResult { + let ptr = value as *const _ as *const u8; + let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; + self.write_bytes(bytes) + } + + pub fn seek(&self, offset: u64) -> IoResult { + (&self.0).seek(SeekFrom::Start(offset)) + } + + pub fn seek_from_end(&self, offset: i64) -> IoResult { + (&self.0).seek(SeekFrom::End(offset)) + } + pub fn write_bytes(&self, bytes: &[u8]) -> IoResult { (&self.0).write_all(bytes)?; Ok(bytes.len()) } - - pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { - (&self.0).read_exact(buffer) - } } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index 1eb4fbdb3ff2ec..dd786a4e804189 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,7 +1,7 @@ use { crate::tiered_storage::{ error::TieredStorageError, - file::TieredStorageFile, + file::{TieredReadableFile, TieredWritableFile}, index::IndexBlockFormat, mmap_utils::{get_pod, get_type}, owners::OwnersBlockFormat, @@ -186,11 +186,11 @@ impl Default for TieredStorageFooter { impl TieredStorageFooter { pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let file = TieredStorageFile::new_readonly(path); + let file = TieredReadableFile::new(path); Self::new_from_footer_block(&file) } - pub fn write_footer_block(&self, file: &TieredStorageFile) -> TieredStorageResult<()> { + pub fn write_footer_block(&self, file: &TieredWritableFile) -> TieredStorageResult<()> { // SAFETY: The footer does not contain any uninitialized bytes. unsafe { file.write_type(self)? }; file.write_pod(&TieredStorageMagicNumber::default())?; @@ -198,7 +198,7 @@ impl TieredStorageFooter { Ok(()) } - pub fn new_from_footer_block(file: &TieredStorageFile) -> TieredStorageResult { + pub fn new_from_footer_block(file: &TieredReadableFile) -> TieredStorageResult { file.seek_from_end(-(FOOTER_TAIL_SIZE as i64))?; let mut footer_version: u64 = 0; @@ -326,7 +326,7 @@ mod tests { use { super::*, crate::{ - append_vec::test_utils::get_append_vec_path, tiered_storage::file::TieredStorageFile, + append_vec::test_utils::get_append_vec_path, tiered_storage::file::TieredWritableFile, }, memoffset::offset_of, solana_sdk::hash::Hash, @@ -356,7 +356,7 @@ mod tests { // Persist the expected footer. { - let file = TieredStorageFile::new_writable(&path.path).unwrap(); + let file = TieredWritableFile::new(&path.path).unwrap(); expected_footer.write_footer_block(&file).unwrap(); } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 0e1ce6bf9a5a8e..198eccd724f17b 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -7,7 +7,7 @@ use { accounts_hash::AccountHash, tiered_storage::{ byte_block, - file::TieredStorageFile, + file::TieredWritableFile, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter}, index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, @@ -542,7 +542,7 @@ impl HotStorageReader { } fn write_optional_fields( - file: &TieredStorageFile, + file: &TieredWritableFile, opt_fields: &AccountMetaOptionalFields, ) -> TieredStorageResult { let mut size = 0; @@ -558,14 +558,14 @@ fn write_optional_fields( /// The writer that creates a hot accounts file. 
#[derive(Debug)] pub struct HotStorageWriter { - storage: TieredStorageFile, + storage: TieredWritableFile, } impl HotStorageWriter { /// Create a new HotStorageWriter with the specified path. pub fn new(file_path: impl AsRef) -> TieredStorageResult { Ok(Self { - storage: TieredStorageFile::new_writable(file_path)?, + storage: TieredWritableFile::new(file_path)?, }) } @@ -706,7 +706,7 @@ pub mod tests { super::*, crate::tiered_storage::{ byte_block::ByteBlockWriter, - file::TieredStorageFile, + file::TieredWritableFile, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE}, hot::{HotAccountMeta, HotStorageReader}, index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, @@ -892,7 +892,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); expected_footer.write_footer_block(&file).unwrap(); } @@ -928,7 +928,7 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -971,7 +971,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -1016,7 +1016,7 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let cursor = footer .index_block_format @@ -1059,7 +1059,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { @@ -1118,7 +1118,7 @@ pub mod tests { let account_offsets: Vec<_>; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -1237,7 +1237,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; // write accounts blocks diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index c82e65ce6d275a..405866c3f0fb96 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -1,6 +1,6 @@ use { crate::tiered_storage::{ - file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod, + file::TieredWritableFile, footer::TieredStorageFooter, mmap_utils::get_pod, TieredStorageResult, }, bytemuck::{Pod, Zeroable}, @@ -59,7 +59,7 @@ impl IndexBlockFormat { /// the total number of bytes written. 
pub fn write_index_block( &self, - file: &TieredStorageFile, + file: &TieredWritableFile, index_entries: &[AccountIndexWriterEntry], ) -> TieredStorageResult { match self { @@ -147,7 +147,7 @@ mod tests { use { super::*, crate::tiered_storage::{ - file::TieredStorageFile, + file::TieredWritableFile, hot::{HotAccountOffset, HOT_ACCOUNT_ALIGNMENT}, }, memmap2::MmapOptions, @@ -181,7 +181,7 @@ mod tests { .collect(); { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let indexer = IndexBlockFormat::AddressesThenOffsets; let cursor = indexer.write_index_block(&file, &index_entries).unwrap(); footer.owners_block_offset = cursor as u64; @@ -223,7 +223,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -259,7 +259,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -294,7 +294,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -334,7 +334,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index ebe60cc6f8ed0f..ccebdd64ad50aa 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -1,6 +1,6 @@ use { crate::tiered_storage::{ - file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod, + file::TieredWritableFile, footer::TieredStorageFooter, mmap_utils::get_pod, TieredStorageResult, }, indexmap::set::IndexSet, @@ -47,7 +47,7 @@ impl OwnersBlockFormat { /// Persists the provided owners' addresses into the specified file. 
    pub fn write_owners_block(
         &self,
-        file: &TieredStorageFile,
+        file: &TieredWritableFile,
         owners_table: &OwnersTable,
     ) -> TieredStorageResult<usize> {
         match self {
@@ -116,7 +116,7 @@ impl<'a> OwnersTable<'a> {
 #[cfg(test)]
 mod tests {
     use {
-        super::*, crate::tiered_storage::file::TieredStorageFile, memmap2::MmapOptions,
+        super::*, crate::tiered_storage::file::TieredWritableFile, memmap2::MmapOptions,
         std::fs::OpenOptions, tempfile::TempDir,
     };
 
@@ -139,7 +139,7 @@ mod tests {
         };
 
         {
-            let file = TieredStorageFile::new_writable(&path).unwrap();
+            let file = TieredWritableFile::new(&path).unwrap();
             let mut owners_table = OwnersTable::default();
 
             addresses.iter().for_each(|owner_address| {

From 69b40a4d755cc8afe756a2480332b66e41824664 Mon Sep 17 00:00:00 2001
From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
Date: Mon, 18 Mar 2024 13:54:07 -0700
Subject: [PATCH 006/153] Net script fix for expected shred version (#280)

Fix for --expected-shred-version when maybeWaitForSupermajority is on

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
---
 multinode-demo/bootstrap-validator.sh | 3 +++
 net/remote/remote-node.sh             | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh
index 2872af5cc426af..374a9288f11597 100755
--- a/multinode-demo/bootstrap-validator.sh
+++ b/multinode-demo/bootstrap-validator.sh
@@ -88,6 +88,9 @@ while [[ -n $1 ]]; do
   elif [[ $1 == --expected-bank-hash ]]; then
     args+=("$1" "$2")
     shift 2
+  elif [[ $1 == --expected-shred-version ]]; then
+    args+=("$1" "$2")
+    shift 2
   elif [[ $1 == --accounts ]]; then
     args+=("$1" "$2")
     shift 2
diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh
index b7d224088da9f9..71378019730f05 100755
--- a/net/remote/remote-node.sh
+++ b/net/remote/remote-node.sh
@@ -264,7 +264,8 @@ EOF
 
   if [[ -n "$maybeWaitForSupermajority" ]]; then
     bankHash=$(agave-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0)
-    extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash"
+    shredVersion="$(cat "$SOLANA_CONFIG_DIR"/shred-version)"
+    extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash --expected-shred-version $shredVersion"
     echo "$bankHash" > config/bank-hash
   fi
 fi

From 7c49b9c59e726c3287e1937d20b4838673aa8cec Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Mon, 18 Mar 2024 14:27:55 -0700
Subject: [PATCH 007/153] [TieredStorage] Use BufWriter in TieredWritableFile
 (#261)

#### Problem

TieredWritableFile currently uses File instead of BufWriter, which incurs
more syscalls when doing file writes.

#### Summary of Changes

This PR makes TieredWritableFile use BufWriter so that small writes are
batched, reducing the number of syscalls.

#### Test Plan

Existing tiered-storage tests. Will run experiments to verify the performance
improvement.
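
For reference, the core of the change (shown in full in the diff below) is
wrapping the inner File in a std::io::BufWriter, so that repeated small writes
land in an in-memory buffer and reach the OS as fewer, larger write syscalls.
Roughly:

```rust
use std::{
    fs::File,
    io::{BufWriter, Result as IoResult, Write},
};

pub struct TieredWritableFile(pub BufWriter<File>);

impl TieredWritableFile {
    /// Write methods now take &mut self because BufWriter mutates its
    /// internal buffer on every call.
    pub fn write_bytes(&mut self, bytes: &[u8]) -> IoResult<usize> {
        // Typically just a copy into the buffer; the actual write(2)
        // happens when the buffer fills up or the writer is flushed.
        self.0.write_all(bytes)?;
        Ok(bytes.len())
    }
}
```

This is also why the write helpers in the diff change their receivers from
&self to &mut self.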
#### Dependency https://github.com/anza-xyz/agave/pull/260 --- accounts-db/src/tiered_storage.rs | 2 +- accounts-db/src/tiered_storage/file.rs | 24 +++++------ accounts-db/src/tiered_storage/footer.rs | 6 +-- accounts-db/src/tiered_storage/hot.rs | 54 ++++++++++++------------ accounts-db/src/tiered_storage/index.rs | 24 ++++++----- accounts-db/src/tiered_storage/owners.rs | 8 ++-- 6 files changed, 60 insertions(+), 58 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index e15adb388605c2..cc2776ed178cf6 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -125,7 +125,7 @@ impl TieredStorage { if format == &HOT_FORMAT { let result = { - let writer = HotStorageWriter::new(&self.path)?; + let mut writer = HotStorageWriter::new(&self.path)?; writer.write_accounts(accounts, skip) }; diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 5bcf5f62efbbbd..605e55a0b193a1 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -2,7 +2,7 @@ use { bytemuck::{AnyBitPattern, NoUninit}, std::{ fs::{File, OpenOptions}, - io::{Read, Result as IoResult, Seek, SeekFrom, Write}, + io::{BufWriter, Read, Result as IoResult, Seek, SeekFrom, Write}, mem, path::Path, }, @@ -77,22 +77,22 @@ impl TieredReadableFile { } #[derive(Debug)] -pub struct TieredWritableFile(pub File); +pub struct TieredWritableFile(pub BufWriter); impl TieredWritableFile { pub fn new(file_path: impl AsRef) -> IoResult { - Ok(Self( + Ok(Self(BufWriter::new( OpenOptions::new() .create_new(true) .write(true) .open(file_path)?, - )) + ))) } /// Writes `value` to the file. /// /// `value` must be plain ol' data. - pub fn write_pod(&self, value: &T) -> IoResult { + pub fn write_pod(&mut self, value: &T) -> IoResult { // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. unsafe { self.write_type(value) } } @@ -107,22 +107,22 @@ impl TieredWritableFile { /// Caller must ensure casting T to bytes is safe. /// Refer to the Safety sections in std::slice::from_raw_parts() /// and bytemuck's Pod and NoUninit for more information. 
- pub unsafe fn write_type(&self, value: &T) -> IoResult { + pub unsafe fn write_type(&mut self, value: &T) -> IoResult { let ptr = value as *const _ as *const u8; let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; self.write_bytes(bytes) } - pub fn seek(&self, offset: u64) -> IoResult { - (&self.0).seek(SeekFrom::Start(offset)) + pub fn seek(&mut self, offset: u64) -> IoResult { + self.0.seek(SeekFrom::Start(offset)) } - pub fn seek_from_end(&self, offset: i64) -> IoResult { - (&self.0).seek(SeekFrom::End(offset)) + pub fn seek_from_end(&mut self, offset: i64) -> IoResult { + self.0.seek(SeekFrom::End(offset)) } - pub fn write_bytes(&self, bytes: &[u8]) -> IoResult { - (&self.0).write_all(bytes)?; + pub fn write_bytes(&mut self, bytes: &[u8]) -> IoResult { + self.0.write_all(bytes)?; Ok(bytes.len()) } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index dd786a4e804189..fa885f2394ce63 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -190,7 +190,7 @@ impl TieredStorageFooter { Self::new_from_footer_block(&file) } - pub fn write_footer_block(&self, file: &TieredWritableFile) -> TieredStorageResult<()> { + pub fn write_footer_block(&self, file: &mut TieredWritableFile) -> TieredStorageResult<()> { // SAFETY: The footer does not contain any uninitialized bytes. unsafe { file.write_type(self)? }; file.write_pod(&TieredStorageMagicNumber::default())?; @@ -356,8 +356,8 @@ mod tests { // Persist the expected footer. { - let file = TieredWritableFile::new(&path.path).unwrap(); - expected_footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path.path).unwrap(); + expected_footer.write_footer_block(&mut file).unwrap(); } // Reopen the same storage, and expect the persisted footer is diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 198eccd724f17b..c00dff302c9cea 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -542,7 +542,7 @@ impl HotStorageReader { } fn write_optional_fields( - file: &TieredWritableFile, + file: &mut TieredWritableFile, opt_fields: &AccountMetaOptionalFields, ) -> TieredStorageResult { let mut size = 0; @@ -572,7 +572,7 @@ impl HotStorageWriter { /// Persists an account with the specified information and returns /// the stored size of the account. fn write_account( - &self, + &mut self, lamports: u64, owner_offset: OwnerOffset, account_data: &[u8], @@ -599,7 +599,7 @@ impl HotStorageWriter { stored_size += self .storage .write_bytes(&PADDING_BUFFER[0..(padding_len as usize)])?; - stored_size += write_optional_fields(&self.storage, &optional_fields)?; + stored_size += write_optional_fields(&mut self.storage, &optional_fields)?; Ok(stored_size) } @@ -614,7 +614,7 @@ impl HotStorageWriter { U: StorableAccounts<'a, T>, V: Borrow, >( - &self, + &mut self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, skip: usize, ) -> TieredStorageResult> { @@ -677,7 +677,7 @@ impl HotStorageWriter { footer.index_block_offset = cursor as u64; cursor += footer .index_block_format - .write_index_block(&self.storage, &index)?; + .write_index_block(&mut self.storage, &index)?; if cursor % HOT_BLOCK_ALIGNMENT != 0 { // In case it is not yet aligned, it is due to the fact that // the index block has an odd number of entries. 
In such case, @@ -692,9 +692,9 @@ impl HotStorageWriter { footer.owner_count = owners_table.len() as u32; footer .owners_block_format - .write_owners_block(&self.storage, &owners_table)?; + .write_owners_block(&mut self.storage, &owners_table)?; - footer.write_footer_block(&self.storage)?; + footer.write_footer_block(&mut self.storage)?; Ok(stored_infos) } @@ -892,8 +892,8 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); - expected_footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + expected_footer.write_footer_block(&mut file).unwrap(); } // Reopen the same storage, and expect the persisted footer is @@ -928,7 +928,7 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -942,7 +942,7 @@ pub mod tests { // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. footer.index_block_offset = current_offset as u64; - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -971,8 +971,8 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1016,14 +1016,14 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let cursor = footer .index_block_format - .write_index_block(&file, &index_writer_entries) + .write_index_block(&mut file, &index_writer_entries) .unwrap(); footer.owners_block_offset = cursor as u64; - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1059,7 +1059,7 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { @@ -1067,12 +1067,12 @@ pub mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1118,7 +1118,7 @@ pub mod tests { let account_offsets: Vec<_>; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -1141,12 +1141,12 @@ pub mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. 
- footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1237,7 +1237,7 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; // write accounts blocks @@ -1264,7 +1264,7 @@ pub mod tests { footer.index_block_offset = current_offset as u64; current_offset += footer .index_block_format - .write_index_block(&file, &index_writer_entries) + .write_index_block(&mut file, &index_writer_entries) .unwrap(); // write owners block @@ -1275,10 +1275,10 @@ pub mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1358,7 +1358,7 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path().join("test_write_account_and_index_blocks"); let stored_infos = { - let writer = HotStorageWriter::new(&path).unwrap(); + let mut writer = HotStorageWriter::new(&path).unwrap(); writer.write_accounts(&storable_accounts, 0).unwrap() }; diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 405866c3f0fb96..82dbb9332c7550 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -59,7 +59,7 @@ impl IndexBlockFormat { /// the total number of bytes written. pub fn write_index_block( &self, - file: &TieredWritableFile, + file: &mut TieredWritableFile, index_entries: &[AccountIndexWriterEntry], ) -> TieredStorageResult { match self { @@ -181,9 +181,11 @@ mod tests { .collect(); { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let indexer = IndexBlockFormat::AddressesThenOffsets; - let cursor = indexer.write_index_block(&file, &index_entries).unwrap(); + let cursor = indexer + .write_index_block(&mut file, &index_entries) + .unwrap(); footer.owners_block_offset = cursor as u64; } @@ -223,8 +225,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() @@ -259,8 +261,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() @@ -294,8 +296,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() @@ -334,8 +336,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. 
- let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index ccebdd64ad50aa..fa42ffaca97dac 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -47,7 +47,7 @@ impl OwnersBlockFormat { /// Persists the provided owners' addresses into the specified file. pub fn write_owners_block( &self, - file: &TieredWritableFile, + file: &mut TieredWritableFile, owners_table: &OwnersTable, ) -> TieredStorageResult { match self { @@ -139,7 +139,7 @@ mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { @@ -147,12 +147,12 @@ mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new().read(true).open(path).unwrap(); From 2c125005133b2459b62965a0fc20c49c82392350 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 19 Mar 2024 07:28:53 +0800 Subject: [PATCH 008/153] cli: skip no-op program buffer writes (#277) cli: skip no-op program deploy write txs --- cli/src/program.rs | 79 ++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 30 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 92c3c657adc40a..099da9dbaf2438 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -2213,11 +2213,12 @@ fn do_process_program_write_and_deploy( let blockhash = rpc_client.get_latest_blockhash()?; // Initialize buffer account or complete if already partially initialized - let (initial_instructions, balance_needed) = if let Some(account) = rpc_client - .get_account_with_commitment(buffer_pubkey, config.commitment)? - .value + let (initial_instructions, balance_needed, buffer_program_data) = if let Some(mut account) = + rpc_client + .get_account_with_commitment(buffer_pubkey, config.commitment)? + .value { - complete_partial_program_init( + let (ixs, balance_needed) = complete_partial_program_init( loader_id, &fee_payer_signer.pubkey(), buffer_pubkey, @@ -2229,7 +2230,11 @@ fn do_process_program_write_and_deploy( }, min_rent_exempt_program_data_balance, allow_excessive_balance, - )? 
+ )?; + let buffer_program_data = account + .data + .split_off(UpgradeableLoaderState::size_of_buffer_metadata()); + (ixs, balance_needed, buffer_program_data) } else if loader_id == &bpf_loader_upgradeable::id() { ( bpf_loader_upgradeable::create_buffer( @@ -2240,6 +2245,7 @@ fn do_process_program_write_and_deploy( program_len, )?, min_rent_exempt_program_data_balance, + vec![0; program_len], ) } else { ( @@ -2251,6 +2257,7 @@ fn do_process_program_write_and_deploy( loader_id, )], min_rent_exempt_program_data_balance, + vec![0; program_len], ) }; let initial_message = if !initial_instructions.is_empty() { @@ -2281,7 +2288,10 @@ fn do_process_program_write_and_deploy( let mut write_messages = vec![]; let chunk_size = calculate_max_chunk_size(&create_msg); for (chunk, i) in program_data.chunks(chunk_size).zip(0..) { - write_messages.push(create_msg((i * chunk_size) as u32, chunk.to_vec())); + let offset = i * chunk_size; + if chunk != &buffer_program_data[offset..offset + chunk.len()] { + write_messages.push(create_msg(offset as u32, chunk.to_vec())); + } } // Create and add final message @@ -2370,31 +2380,37 @@ fn do_process_program_upgrade( let (initial_message, write_messages, balance_needed) = if let Some(buffer_signer) = buffer_signer { // Check Buffer account to see if partial initialization has occurred - let (initial_instructions, balance_needed) = if let Some(account) = rpc_client - .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? - .value - { - complete_partial_program_init( - &bpf_loader_upgradeable::id(), - &fee_payer_signer.pubkey(), - &buffer_signer.pubkey(), - &account, - UpgradeableLoaderState::size_of_buffer(program_len), - min_rent_exempt_program_data_balance, - true, - )? - } else { - ( - bpf_loader_upgradeable::create_buffer( + let (initial_instructions, balance_needed, buffer_program_data) = + if let Some(mut account) = rpc_client + .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? + .value + { + let (ixs, balance_needed) = complete_partial_program_init( + &bpf_loader_upgradeable::id(), &fee_payer_signer.pubkey(), - buffer_pubkey, - &upgrade_authority.pubkey(), + &buffer_signer.pubkey(), + &account, + UpgradeableLoaderState::size_of_buffer(program_len), min_rent_exempt_program_data_balance, - program_len, - )?, - min_rent_exempt_program_data_balance, - ) - }; + true, + )?; + let buffer_program_data = account + .data + .split_off(UpgradeableLoaderState::size_of_buffer_metadata()); + (ixs, balance_needed, buffer_program_data) + } else { + ( + bpf_loader_upgradeable::create_buffer( + &fee_payer_signer.pubkey(), + buffer_pubkey, + &upgrade_authority.pubkey(), + min_rent_exempt_program_data_balance, + program_len, + )?, + min_rent_exempt_program_data_balance, + vec![0; program_len], + ) + }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new_with_blockhash( @@ -2426,7 +2442,10 @@ fn do_process_program_upgrade( let mut write_messages = vec![]; let chunk_size = calculate_max_chunk_size(&create_msg); for (chunk, i) in program_data.chunks(chunk_size).zip(0..) 
{ - write_messages.push(create_msg((i * chunk_size) as u32, chunk.to_vec())); + let offset = i * chunk_size; + if chunk != &buffer_program_data[offset..offset + chunk.len()] { + write_messages.push(create_msg(offset as u32, chunk.to_vec())); + } } (initial_message, write_messages, balance_needed) From ed573ff60c3a6f13ff0b005a480447ad21a25656 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 18 Mar 2024 17:58:11 -0700 Subject: [PATCH 009/153] add in method for building a `TpuClient` for `LocalCluster` tests (#258) * add in method for building a TpuClient for LocalCluster tests * add cluster trait. leave dependency on solana_client::tpu_client --- Cargo.lock | 1 + bench-tps/tests/bench_tps.rs | 27 ++++------------- client/src/tpu_client.rs | 2 ++ dos/src/main.rs | 39 +++++------------------- local-cluster/Cargo.toml | 1 + local-cluster/src/cluster.rs | 11 +++++-- local-cluster/src/local_cluster.rs | 48 +++++++++++++++++++++++++++++- 7 files changed, 72 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f9f5dbbc541b07..06d28868c2bcff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6420,6 +6420,7 @@ dependencies = [ "solana-ledger", "solana-logger", "solana-pubsub-client", + "solana-quic-client", "solana-rpc-client", "solana-rpc-client-api", "solana-runtime", diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 7a2b0fe20a5b8d..bfff1f7e1250c4 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -7,13 +7,11 @@ use { cli::{Config, InstructionPaddingConfig}, send_batch::generate_durable_nonce_accounts, }, - solana_client::{ - connection_cache::ConnectionCache, - tpu_client::{TpuClient, TpuClientConfig}, - }, + solana_client::tpu_client::{TpuClient, TpuClientConfig}, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_local_cluster::{ + cluster::Cluster, local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, @@ -78,24 +76,9 @@ fn test_bench_tps_local_cluster(config: Config) { cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000); - let ConnectionCache::Quic(cache) = &*cluster.connection_cache else { - panic!("Expected a Quic ConnectionCache."); - }; - - let rpc_pubsub_url = format!("ws://{}/", cluster.entry_point_info.rpc_pubsub().unwrap()); - let rpc_url = format!("http://{}", cluster.entry_point_info.rpc().unwrap()); - - let client = Arc::new( - TpuClient::new_with_connection_cache( - Arc::new(RpcClient::new(rpc_url)), - rpc_pubsub_url.as_str(), - TpuClientConfig::default(), - cache.clone(), - ) - .unwrap_or_else(|err| { - panic!("Could not create TpuClient {err:?}"); - }), - ); + let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + })); let lamports_per_account = 100; diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index 038dd86774ea98..555d3aad88bcb1 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -21,6 +21,8 @@ pub use { solana_tpu_client::tpu_client::{TpuClientConfig, DEFAULT_FANOUT_SLOTS, MAX_FANOUT_SLOTS}, }; +pub type QuicTpuClient = TpuClient; + pub enum TpuClientWrapper { Quic(TpuClient), Udp(TpuClient), diff --git a/dos/src/main.rs b/dos/src/main.rs index 3bf7cce0e782cc..15874a86973f9c 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -818,7 +818,7 @@ fn main() { pub mod test { use { super::*, - solana_client::tpu_client::TpuClient, + solana_client::tpu_client::QuicTpuClient, 
solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_gossip::contact_info::LegacyContactInfo, @@ -827,10 +827,8 @@ pub mod test { local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, - solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc::rpc::JsonRpcConfig, solana_sdk::timing::timestamp, - solana_tpu_client::tpu_client::TpuClientConfig, }; const TEST_SEND_BATCH_SIZE: usize = 1; @@ -838,32 +836,7 @@ pub mod test { // thin wrapper for the run_dos function // to avoid specifying everywhere generic parameters fn run_dos_no_client(nodes: &[ContactInfo], iterations: usize, params: DosClientParameters) { - run_dos::>( - nodes, iterations, None, params, - ); - } - - fn build_tpu_quic_client( - cluster: &LocalCluster, - ) -> Arc> { - let rpc_pubsub_url = format!("ws://{}/", cluster.entry_point_info.rpc_pubsub().unwrap()); - let rpc_url = format!("http://{}", cluster.entry_point_info.rpc().unwrap()); - - let ConnectionCache::Quic(cache) = &*cluster.connection_cache else { - panic!("Expected a Quic ConnectionCache."); - }; - - Arc::new( - TpuClient::new_with_connection_cache( - Arc::new(RpcClient::new(rpc_url)), - rpc_pubsub_url.as_str(), - TpuClientConfig::default(), - cache.clone(), - ) - .unwrap_or_else(|err| { - panic!("Could not create TpuClient with Quic Cache {err:?}"); - }), - ) + run_dos::(nodes, iterations, None, params); } #[test] @@ -1003,7 +976,9 @@ pub mod test { .unwrap(); let nodes_slice = [node]; - let client = build_tpu_quic_client(&cluster); + let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + })); // creates one transaction with 8 valid signatures and sends it 10 times run_dos( @@ -1135,7 +1110,9 @@ pub mod test { .unwrap(); let nodes_slice = [node]; - let client = build_tpu_quic_client(&cluster); + let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + })); // creates one transaction and sends it 10 times // this is done in single thread diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 4248fc02945238..07b30030295e52 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -24,6 +24,7 @@ solana-gossip = { workspace = true } solana-ledger = { workspace = true } solana-logger = { workspace = true } solana-pubsub-client = { workspace = true } +solana-quic-client = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } diff --git a/local-cluster/src/cluster.rs b/local-cluster/src/cluster.rs index 03ec1b7abe13f2..425f65c48e14c5 100644 --- a/local-cluster/src/cluster.rs +++ b/local-cluster/src/cluster.rs @@ -1,11 +1,11 @@ use { - solana_client::thin_client::ThinClient, + solana_client::{thin_client::ThinClient, tpu_client::QuicTpuClient}, solana_core::validator::{Validator, ValidatorConfig}, solana_gossip::{cluster_info::Node, contact_info::ContactInfo}, solana_ledger::shred::Shred, - solana_sdk::{pubkey::Pubkey, signature::Keypair}, + solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Keypair}, solana_streamer::socket::SocketAddrSpace, - std::{path::PathBuf, sync::Arc}, + std::{io::Result, path::PathBuf, sync::Arc}, }; pub struct ValidatorInfo { @@ -38,6 +38,11 @@ impl ClusterValidatorInfo { pub trait Cluster { fn get_node_pubkeys(&self) -> Vec; fn 
     fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
+    fn build_tpu_quic_client(&self) -> Result<QuicTpuClient>;
+    fn build_tpu_quic_client_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<QuicTpuClient>;
     fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>;
     fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo;
     fn restart_node(
diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs
index 9d1b483d85fdd3..400f4f73f78c26 100644
--- a/local-cluster/src/local_cluster.rs
+++ b/local-cluster/src/local_cluster.rs
@@ -7,7 +7,12 @@ use {
     itertools::izip,
     log::*,
     solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs,
-    solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient},
+    solana_client::{
+        connection_cache::ConnectionCache,
+        rpc_client::RpcClient,
+        thin_client::ThinClient,
+        tpu_client::{QuicTpuClient, TpuClient, TpuClientConfig},
+    },
     solana_core::{
         consensus::tower_storage::FileTowerStorage,
         validator::{Validator, ValidatorConfig, ValidatorStartProgress},
@@ -802,6 +807,34 @@ impl LocalCluster {
             ..SnapshotConfig::new_load_only()
         }
     }
+
+    fn build_tpu_client<F>(&self, rpc_client_builder: F) -> Result<QuicTpuClient>
+    where
+        F: FnOnce(String) -> Arc<RpcClient>,
+    {
+        let rpc_pubsub_url = format!("ws://{}/", self.entry_point_info.rpc_pubsub().unwrap());
+        let rpc_url = format!("http://{}", self.entry_point_info.rpc().unwrap());
+
+        let cache = match &*self.connection_cache {
+            ConnectionCache::Quic(cache) => cache,
+            ConnectionCache::Udp(_) => {
+                return Err(Error::new(
+                    ErrorKind::Other,
+                    "Expected a Quic ConnectionCache. Got UDP",
+                ))
+            }
+        };
+
+        let tpu_client = TpuClient::new_with_connection_cache(
+            rpc_client_builder(rpc_url),
+            rpc_pubsub_url.as_str(),
+            TpuClientConfig::default(),
+            cache.clone(),
+        )
+        .map_err(|err| Error::new(ErrorKind::Other, format!("TpuSenderError: {}", err)))?;
+
+        Ok(tpu_client)
+    }
 }
 
 impl Cluster for LocalCluster {
@@ -820,6 +853,19 @@ impl Cluster for LocalCluster {
         })
     }
 
+    fn build_tpu_quic_client(&self) -> Result<QuicTpuClient> {
+        self.build_tpu_client(|rpc_url| Arc::new(RpcClient::new(rpc_url)))
+    }
+
+    fn build_tpu_quic_client_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<QuicTpuClient> {
+        self.build_tpu_client(|rpc_url| {
+            Arc::new(RpcClient::new_with_commitment(rpc_url, commitment_config))
+        })
+    }
+
     fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo {
         let mut node = self.validators.remove(pubkey).unwrap();

From 67c3bff092f88ae20917258cb3aca1e92fa861ea Mon Sep 17 00:00:00 2001
From: Brennan
Date: Mon, 18 Mar 2024 19:37:32 -0700
Subject: [PATCH 010/153] fix polarity for concurrent replay (#297)

* fix polarity for concurrent replay

---
 core/src/replay_stage.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index 90be2dade6a191..8a29d037dedf3c 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -655,14 +655,14 @@ impl ReplayStage {
         };
         // Thread pool to (maybe) replay multiple threads in parallel
         let replay_mode = if replay_slots_concurrently {
-            ForkReplayMode::Serial
-        } else {
             let pool = rayon::ThreadPoolBuilder::new()
                 .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY)
                 .thread_name(|i| format!("solReplayFork{i:02}"))
                 .build()
                 .expect("new rayon threadpool");
             ForkReplayMode::Parallel(pool)
+        } else {
+            ForkReplayMode::Serial
         };
         // Thread pool to replay multiple transactions within one block in parallel
         let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new()

From d4bcdf856e94769c2a3356a9970ca1f08a5c7693 Mon Sep 17 00:00:00 2001
From: Jon C
Date: Tue, 19 Mar 2024 07:20:27 +0100
Subject: [PATCH 011/153] install: Fix check for windows build (#295)

---
 install/src/command.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/install/src/command.rs b/install/src/command.rs
index ffd4d4c0ae3b33..fe7617af6447e8 100644
--- a/install/src/command.rs
+++ b/install/src/command.rs
@@ -1173,7 +1173,7 @@ pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Res
     )
     .map_err(|err| match err.raw_os_error() {
         #[cfg(windows)]
-        Some(os_err) if os_err == winapi::shared::winerror::ERROR_PRIVILEGE_NOT_HELD => {
+        Some(os_err) if os_err == winapi::shared::winerror::ERROR_PRIVILEGE_NOT_HELD as i32 => {
             "You need to run this command with administrator privileges.".to_string()
         }
         _ => format!(

From e8526f60aa80d986111f935717f003d6be7670df Mon Sep 17 00:00:00 2001
From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com>
Date: Tue, 19 Mar 2024 13:44:34 -0300
Subject: [PATCH 012/153] SVM integration test (#307)

---
 svm/src/transaction_processor.rs  |   8 +-
 svm/tests/hello_solana_program.so | Bin 0 -> 35408 bytes
 svm/tests/integration_test.rs     | 272 ++++++++++++++++++++++++++++++
 svm/tests/mock_bank.rs            |   3 +-
 svm/tests/test_program.so         | Bin 170136 -> 0 bytes
 5 files changed, 278 insertions(+), 5 deletions(-)
 create mode 100755 svm/tests/hello_solana_program.so
 create mode 100644 svm/tests/integration_test.rs
 delete mode 100755 svm/tests/test_program.so

diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs
index c42566fc9876f9..d1d68365d01fc2 100644
--- a/svm/src/transaction_processor.rs
+++ b/svm/src/transaction_processor.rs
@@ -1215,7 +1215,7 @@ mod tests {
     fn test_load_program_from_bytes() {
         let mut dir = env::current_dir().unwrap();
         dir.push("tests");
-        dir.push("test_program.so");
+        dir.push("hello_solana_program.so");
         let mut file = File::open(dir.clone()).expect("file not found");
         let metadata = fs::metadata(dir).expect("Unable to read metadata");
         let mut buffer = vec![0; metadata.len() as usize];
@@ -1321,7 +1321,7 @@ mod tests {
 
         let mut dir = env::current_dir().unwrap();
         dir.push("tests");
-        dir.push("test_program.so");
+        dir.push("hello_solana_program.so");
         let mut file = File::open(dir.clone()).expect("file not found");
         let metadata = fs::metadata(dir).expect("Unable to read metadata");
         let mut buffer = vec![0; metadata.len() as usize];
@@ -1394,7 +1394,7 @@ mod tests {
 
         let mut dir = env::current_dir().unwrap();
         dir.push("tests");
-        dir.push("test_program.so");
+        dir.push("hello_solana_program.so");
         let mut file = File::open(dir.clone()).expect("file not found");
         let metadata = fs::metadata(dir).expect("Unable to read metadata");
         let mut buffer = vec![0; metadata.len() as usize];
@@ -1479,7 +1479,7 @@ mod tests {
 
         let mut dir = env::current_dir().unwrap();
         dir.push("tests");
-        dir.push("test_program.so");
+        dir.push("hello_solana_program.so");
         let mut file = File::open(dir.clone()).expect("file not found");
         let metadata = fs::metadata(dir).expect("Unable to read metadata");
         let mut buffer = vec![0; metadata.len() as usize];
diff --git a/svm/tests/hello_solana_program.so b/svm/tests/hello_solana_program.so
new file mode 100755
index 0000000000000000000000000000000000000000..a9da4ff47e5d84903c9ade357361930953992ec3
GIT binary patch
literal 35408
[... 35408 bytes of base85-encoded program binary omitted ...]
zS$>`MAL)L|-m{DMmMpMmmV94_(4Qc=Kyv$9wLg{5MiQ31*+}yHv>f>GCiO?G(|sk8W2Wv;Snrc|KTl(h2UP zrFMuxQ!@Lp_`~chT^Vq_-Gi?_&LH2v#kXD(FGvh&j<_jle_KW{eyjKuM(1%%(SWOUKUG-q8Lq(Z z3n7Q3zl8!M|6u;WW;lrckr3X$@_l+_tO8AllM?3_cv2izO`bFLvH0S zklc{$z4^)gD)ZYU_0Wqc#*_0Z|9yvcE&_XgtRQXh2|dS@VZT{P=N+*esc?$Qfj_0b zlR(D1o%Yqhhg|+4$h(91{za7cf2H>$+={%f(SF&NxBlGuCi*S@^rzMD36d45gv;!K z)tB^qX}$g)`@@o6%lV1vuj%FAr(U1CMLGZLe;{%eeg4Ih^RZi$^XHRtCS|K9J;V9U z8v2q+2T;5C?=}?Gk9UxNL%qe(_2gd}?qPIS+b6l5gTnvf7gHUS4^WluYwAg@0oRTy z{yE7JcmFr?XVni2JVfo_dT~CpC*d!v4@mvNHnM5FXG&?b_63G z?BmQmQ?-@9AZ{kNyRbF&KNc3|e~U9MDC|Nhd!7eDl6lM{5KN%BX_=#Tvu2QJEi`hWbPRdqf~rUa?BUj)eK&qPRaVi{k*b% zbpGD9pDdGc&nSJnFNSu+-h!=XIKN~59uj=`mqz+p(0-O0hrI_F@grnatK*B;)i;Em z`7Nw_W#{e02NL}7ri7l`^C{c6+d2!pv&8ooq?t3k+*&C};{E1L@a+C#67Tf{Ua0$9 zJEyaIH^%?xs2i{x_rFE@_3sJV?*+i`x02)1{i;2;NBmAAafV+{a{M9jQ?2pG){2c1 z?@&1s^aJr+rVUymF!Ev_J}EY${*e50mJkaNrD#-g%rEF*-a$IENB&;Kr}dt8{2c!_ z!M8ZRTJ<5HmGZ(K$w3cvhS%%9s87mcIpqYl^)}_?{kP_KZe1Q5mXOE0xdY>q?U_pa zxcoll-_A3zz9N|)vva8k4u?`KC;HBES}wo0{&bVX1HPOJst+Z3$T;oqBJ@)H$8oMF z51T)e*RH`sheqxR9v?ZBJDjU~RC_~?UOL1C=_zQM!&BLjo? zjurZIhYk%61bvSU7LFVm@9H{ocw#I!+HiMYFmgD^9S-^)8XM~ij^+*>8NC0?u3vihd#cwgtX;SMw)Dd78#cbT=6!c;dVg&o2z7UE z-m>+s`i8r=ePH39%)R$DHZ`}jwzYS3Zs+WSR`{(-@#h7KHj`kCRwBcu7T!uXM+6O-XFk>B`OI689h@HokkXWcoLKvyt4 zI6N{I1|*`9{@nOM65~_3gCyg7f{`QR!N^mJp!kOl?jOsIh4(S~eIPxSo7i`Bu%G8< zG}nJ{JPbyT4vsx_Xk;Q6JT)>loEslMcz7rnAIlva4-O8G9vU1TJUlL4jSY?;89Pi9 zN>j&TUQGX~VdeSmgNJ#(g520p!SMP=#s--ULc4ExWPsQh%N-sX34t*O1K zqp7oLdvjxRQ*(23OLJ>;TXTDJM{{TM_Ljz$rk3WGmX_9*wwCsmj+V}r?X8WiO|8wX zEv>DsZLRIC9j%?M+uIu3n%bJ%TH0FM+S=ONI@&thwzoI7H?=pnx3ssmx3#yoceHo5 zZ|`XAXzFP0Xz6I}XzOV2=;-L|*xuRL+0@zG+0xnC+1AcyKocZi&>+&nKYAEt6$>dNpBP`P;Ka{Mfn2hv>`{;yHF_0=a= zt{97o<8)celYIRJ)dq^Om+G;WB3X{_II}}qs=h&E*QM&O%cIdH+n-SS2dIBJ?JV#A zfUb3?sJ=m1DdX>iuIam(rK0*12xQ({Nd#ThB0WxFOlC9bDC>Fe#Mp z)rqCoZgRr)sH9A!ROEKEa@f<&%3GHxcl}2S<0HfScr|sj#B0Ri3L}U19U2+h=W_c; z#>N?>prbH2zR$k*kB0l)DdSfeyX?VhbR4Endv)9fpZ4HBofxa1^x%^oJnz9bJovo& zBBOW1gZ0k^#Qx`iDuUrpdGJXOzO4FY`12lo-GlA#TN{3l`ci`@J@~u_U-4n`p-9&L zlKM`AuXyk^4{lUnZ1}r8c+P`QZz}I^r5dKS@A2R%A6DOK_?JDnAtyr&#K<-u1xxF=JNKXq?8?CvXvH+t}t2hV$OPh)xe zDG$Et!F_Gz_!rcNn>o?+Kgnk$BK?yLBhn6U-jT_jhC!`uLrN$ zKul7y`V$_!7Xf^tzR!bi)Rfns(v6eRyQJ}!!Ho%dK|b9c+~>guJb2WDCq4MQ2Ve5w z3w7oEUiRPy-FzFr-5%Vi@tW1|_29|7%IVK}@Kuf1to<&Hj|>hp{xNu??p+POzO$VE z_1<#$^4@ZIRM$UiKdW9$^4HMRnr%=hYvuqb=&gORw31Ke4!w}ivqkG+I96) zmS0dr8=v*RM$4gwa6Rg-;&Xx1p&*=BJhTU&)_$XwS5c0>t-YNaSl+F8mZ^9R_y7L@)@JYG literal 0 HcmV?d00001 diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs new file mode 100644 index 00000000000000..700b9c2f6a0ad1 --- /dev/null +++ b/svm/tests/integration_test.rs @@ -0,0 +1,272 @@ +#![cfg(test)] + +use { + crate::mock_bank::MockBankCallback, + solana_bpf_loader_program::syscalls::{SyscallAbort, SyscallLog, SyscallMemcpy, SyscallMemset}, + solana_program_runtime::{ + compute_budget::ComputeBudget, + invoke_context::InvokeContext, + loaded_programs::{ + BlockRelation, ForkGraph, LoadedProgram, LoadedPrograms, ProgramRuntimeEnvironments, + }, + runtime_config::RuntimeConfig, + solana_rbpf::{ + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + vm::Config, + }, + timings::ExecuteTimings, + }, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + bpf_loader, + clock::{Epoch, Slot}, + epoch_schedule::EpochSchedule, + fee::FeeStructure, + hash::Hash, + instruction::CompiledInstruction, + message::{Message, MessageHeader}, + native_loader, + pubkey::Pubkey, + signature::Signature, + transaction::{SanitizedTransaction, Transaction}, + }, + 
+    solana_svm::{
+        account_loader::TransactionCheckResult,
+        transaction_error_metrics::TransactionErrorMetrics,
+        transaction_processor::{ExecutionRecordingConfig, TransactionBatchProcessor},
+    },
+    std::{
+        cmp::Ordering,
+        env,
+        fs::{self, File},
+        io::Read,
+        sync::{Arc, RwLock},
+    },
+};
+
+// This module contains the implementation of TransactionProcessingCallback
+mod mock_bank;
+
+const BPF_LOADER_NAME: &str = "solana_bpf_loader_program";
+const DEPLOYMENT_SLOT: u64 = 0;
+const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot
+const DEPLOYMENT_EPOCH: u64 = 0;
+const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch
+
+struct MockForkGraph {}
+
+impl ForkGraph for MockForkGraph {
+    fn relationship(&self, a: Slot, b: Slot) -> BlockRelation {
+        match a.cmp(&b) {
+            Ordering::Less => BlockRelation::Ancestor,
+            Ordering::Equal => BlockRelation::Equal,
+            Ordering::Greater => BlockRelation::Descendant,
+        }
+    }
+
+    fn slot_epoch(&self, _slot: Slot) -> Option<Epoch> {
+        Some(0)
+    }
+}
+
+fn create_custom_environment<'a>() -> BuiltinProgram<InvokeContext<'a>> {
+    let compute_budget = ComputeBudget::default();
+    let vm_config = Config {
+        max_call_depth: compute_budget.max_call_depth,
+        stack_frame_size: compute_budget.stack_frame_size,
+        enable_address_translation: true,
+        enable_stack_frame_gaps: true,
+        instruction_meter_checkpoint_distance: 10000,
+        enable_instruction_meter: true,
+        enable_instruction_tracing: true,
+        enable_symbol_and_section_labels: true,
+        reject_broken_elfs: true,
+        noop_instruction_rate: 256,
+        sanitize_user_provided_values: true,
+        external_internal_function_hash_collision: false,
+        reject_callx_r10: false,
+        enable_sbpf_v1: true,
+        enable_sbpf_v2: false,
+        optimize_rodata: false,
+        new_elf_parser: false,
+        aligned_memory_mapping: true,
+    };
+
+    // These functions are system calls the compiled contract calls during execution, so they
+    // need to be registered.
+    let mut function_registry = FunctionRegistry::<BuiltinFunction<InvokeContext>>::default();
+    function_registry
+        .register_function_hashed(*b"abort", SyscallAbort::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_log_", SyscallLog::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_memset_", SyscallMemset::vm)
+        .expect("Registration failed");
+
+    BuiltinProgram::new_loader(vm_config, function_registry)
+}
+
+fn create_executable_environment(
+    mock_bank: &mut MockBankCallback,
+) -> (LoadedPrograms<MockForkGraph>, Vec<Pubkey>) {
+    let mut programs_cache = LoadedPrograms::<MockForkGraph>::new(0, 20);
+
+    // We must register the bpf loader account as a loadable account, otherwise programs
+    // won't execute.
+    let account_data = native_loader::create_loadable_account_with_fields(
+        BPF_LOADER_NAME,
+        (5000, DEPLOYMENT_EPOCH),
+    );
+    mock_bank
+        .account_shared_data
+        .insert(bpf_loader::id(), account_data);
+
+    // The bpf loader needs an executable as well
+    programs_cache.assign_program(
+        bpf_loader::id(),
+        Arc::new(LoadedProgram::new_builtin(
+            DEPLOYMENT_SLOT,
+            BPF_LOADER_NAME.len(),
+            solana_bpf_loader_program::Entrypoint::vm,
+        )),
+    );
+
+    programs_cache.environments = ProgramRuntimeEnvironments {
+        program_runtime_v1: Arc::new(create_custom_environment()),
+        // We are not using program runtime v2
+        program_runtime_v2: Arc::new(BuiltinProgram::new_loader(
+            Config::default(),
+            FunctionRegistry::default(),
+        )),
+    };
+
+    programs_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {})));
+
+    // Inform SVM of the registered builtins
+    let registered_built_ins = vec![bpf_loader::id()];
+    (programs_cache, registered_built_ins)
+}
+
+fn prepare_transactions(
+    mock_bank: &mut MockBankCallback,
+) -> (Vec<SanitizedTransaction>, Vec<TransactionCheckResult>) {
+    let mut all_transactions = Vec::new();
+    let mut transaction_checks = Vec::new();
+
+    // A transaction that works without any account
+    let key1 = Pubkey::new_unique();
+    let fee_payer = Pubkey::new_unique();
+    let message = Message {
+        account_keys: vec![fee_payer, key1],
+        header: MessageHeader {
+            num_required_signatures: 1,
+            num_readonly_signed_accounts: 0,
+            num_readonly_unsigned_accounts: 0,
+        },
+        instructions: vec![CompiledInstruction {
+            program_id_index: 1,
+            accounts: vec![],
+            data: vec![],
+        }],
+        recent_blockhash: Hash::default(),
+    };
+
+    let transaction = Transaction {
+        signatures: vec![Signature::new_unique()],
+        message,
+    };
+    let sanitized_transaction =
+        SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap();
+    all_transactions.push(sanitized_transaction);
+    transaction_checks.push((Ok(()), None, Some(20)));
+
+    // Loading the program file
+    let mut dir = env::current_dir().unwrap();
+    dir.push("tests");
+    // File compiled from
+    // https://github.com/solana-developers/program-examples/blob/feb82f254a4633ce2107d06060f2d0558dc987f5/basics/hello-solana/native/program/src/lib.rs
+    dir.push("hello_solana_program.so");
+    let mut file = File::open(dir.clone()).expect("file not found");
+    let metadata = fs::metadata(dir).expect("Unable to read metadata");
+    let mut buffer = vec![0; metadata.len() as usize];
+    file.read_exact(&mut buffer).expect("Buffer overflow");
+
+    // The program account must have funds and hold the executable binary
+    let mut account_data = AccountSharedData::default();
+    // The executable account owner must be one of the loaders.
+    account_data.set_owner(bpf_loader::id());
+    account_data.set_data(buffer);
+    account_data.set_executable(true);
+    account_data.set_lamports(25);
+    mock_bank.account_shared_data.insert(key1, account_data);
+
+    // The transaction fee payer must have enough funds
+    let mut account_data = AccountSharedData::default();
+    account_data.set_lamports(80000);
+    mock_bank
+        .account_shared_data
+        .insert(fee_payer, account_data);
+
+    // TODO: Include these examples as well:
+    // A simple funds transfer between accounts
+    // A transaction that fails
+    // A transaction whose verification has already failed
+
+    (all_transactions, transaction_checks)
+}
+
+#[test]
+fn svm_integration() {
+    let mut mock_bank = MockBankCallback::default();
+    let (transactions, mut check_results) = prepare_transactions(&mut mock_bank);
+    let (programs_cache, builtins) = create_executable_environment(&mut mock_bank);
+    let programs_cache = Arc::new(RwLock::new(programs_cache));
+    let batch_processor = TransactionBatchProcessor::<MockForkGraph>::new(
+        EXECUTION_SLOT,
+        EXECUTION_EPOCH,
+        EpochSchedule::default(),
+        FeeStructure::default(),
+        Arc::new(RuntimeConfig::default()),
+        programs_cache.clone(),
+    );
+
+    let mut error_counter = TransactionErrorMetrics::default();
+    let recording_config = ExecutionRecordingConfig {
+        enable_log_recording: true,
+        enable_return_data_recording: false,
+        enable_cpi_recording: false,
+    };
+    let mut timings = ExecuteTimings::default();
+
+    let result = batch_processor.load_and_execute_sanitized_transactions(
+        &mock_bank,
+        &transactions,
+        check_results.as_mut_slice(),
+        &mut error_counter,
+        recording_config,
+        &mut timings,
+        None,
+        builtins.iter(),
+        None,
+        false,
+    );
+
+    assert_eq!(result.execution_results.len(), 1);
+    assert!(result.execution_results[0]
+        .details()
+        .unwrap()
+        .status
+        .is_ok());
+    let logs = result.execution_results[0]
+        .details()
+        .unwrap()
+        .log_messages
+        .as_ref()
+        .unwrap();
+    assert!(logs.contains(&"Program log: Hello, Solana!".to_string()));
+}
diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs
index 3548b5fbac32da..0c123369e25451 100644
--- a/svm/tests/mock_bank.rs
+++ b/svm/tests/mock_bank.rs
@@ -35,7 +35,8 @@ impl TransactionProcessingCallback for MockBankCallback {
     }
 
     fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
-        todo!()
+        // Mock a hash and a value
+        (Hash::new_unique(), 2)
     }
 
     fn get_rent_collector(&self) -> &RentCollector {
diff --git a/svm/tests/test_program.so b/svm/tests/test_program.so
deleted file mode 100755
index 9336ced24df6b4eec7b37c9530c99003016c28d6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 170136
[... 170136 bytes of base85-encoded binary data omitted ...]
zVs9^IT?}BLwY9wIQeKPz0dnXtOH2Ez$hEvDfx6ka-t^;k3oV)+_K;9CwOu~|f`AER z!$t>*7>K}1h)VK(M2%7bHA;oj$P{a!E&)oY3u+~s;GYWdco>rn^6Lq30*3re?RU=A zu+EJWU=?Xr^Z+=p*y?NZFtX1`ND*`cgbd)(TWC*!IIRZk@$(M+f?{{9Q^dO8Tzw9v z{I&rd<8pNeqPoNZaBeKd3OLCUV{RQ?hxq}I*Y}&dt6M5-+9b&%eUomIG|09U$OvE` ztS;|vbz=)0y8=VTZmO$cav0tTEvt#*-rije7sJF`JAfdPon3Ff;N|5lw#Gcy`n+G$ zm0_5GJR-*oR^AkmPIY&AEtafOe1IodMNW&Bf{pN{rRhX)-+`vU=G*PoH>5{f*y*3O zj)9Is4%<~Y$eVb&6 zd^Y@cK>eo{8~~R)0Peb?JW=^>(I1jz5U|^ecFolVlIKwNMLux@11o+woRvweVVvu>2COfrX$X)#uDB5H5LJ7i~7S@Cv!yVpnxivJCV0 zMn1h7Oi#tO=<;9Ua5YmeWiK@RtW%! z0ymq~4wHhVTQ6fkT?STCB;yF+a)rruhxh`3g67?YWtm3|ShifWF>+H0(g`$kOt%Zd{R#byfg&^46H; zEYYWgwP5fnAv7Pb?bwgHqZb?zl-VLE`<(Gux}@TgjsnhcDwzx`6B!1=!yB3jHF&5K zt07g2=(wl)-@R4>$QPg=SY6&0nC$Lo{l#{D(T@ewq4CQ!_b)4sLooF5Sgk>Fl6z1f zC99wU%dLU6A5dzK=z?EIAluU&~ZVsqu1&1J7 zLt^)^H!_$@D}*rIK>WbysjC9pem9j>r>&=BZ-%F>uwHL_E>T)-kktrG0jbTIjP>Op zcJ@V+wsG0@h<2$B@KSTT zpnh_TLfe9AR+pf_3VFlD=SEd|97)Y!$ll0-?~QeI_#mijmbb5 z(oWY+N3fegZOOW%ZC#>@=ZWOsE&&~i?Q^P?iR%a90Vf~k8l zFk~%*=BWIR%ivxa(h9J;g1x;KeVOT@FN59xA^;hunDIefv$Vrr8rOxjAPRTYjs(~n zGseZXw=Gvu-DAc;*{4joQefK}^txRZNcH&ZxK1xE9G#e7>@6*11sXL6-{ zd3*XtXXg9qPgyzi3`3b{)ALji?htjS_{B!ZRJXZ1DVkaHiY)X-DLdyK=qR{QmBmq`-6I8o5%?d};^6jW)W-nvm3T z3Oh+JwyXI}ws1}XNCySqH9c2x7|#)kRr1t+MG{S!pc4RjEVl4laLJ2Jpbf=aVQxfX zoS(nrXL}DpUrQJmj;@=58kzL9j&068FG~@AK+P~cr@HaI@h}!J?wUms_Rm38(~it} z+=GOJ1}N(r!=s_Sra@2SaU3ZMv>S|RLD=ISFGQ*8F8`l=4zQxw=$5+_=$Tv(=f(pS zXI3n0mG$GIS2hhIjA>+*aqC$xfNVj~?~CRG<6*iN7eNi^F_g_^z1&tbM?{q={KTJK z_svR|0EErRbbCOOxeCbJDH$Dzkvq}C@Oy7l2eXsi{%!y{YbSY8u$n^! zYvo5>SM(ET2`+O#E#V*W|W$a01cFxpY$2E1IB`)=1q z+uPaMcW_AvyQcT-6T9~}Jpc2 Date: Tue, 19 Mar 2024 15:32:07 -0400 Subject: [PATCH 013/153] SVM: minor refactoring to improve code readability (#317) --- svm/src/transaction_processor.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index d1d68365d01fc2..a566802dc12987 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -342,11 +342,11 @@ impl TransactionBatchProcessor { fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( callbacks: &CB, txs: &[SanitizedTransaction], - lock_results: &mut [TransactionCheckResult], + check_results: &mut [TransactionCheckResult], program_owners: &'a [Pubkey], ) -> HashMap { let mut result: HashMap = HashMap::new(); - lock_results.iter_mut().zip(txs).for_each(|etx| { + check_results.iter_mut().zip(txs).for_each(|etx| { if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { if lamports_per_signature.is_some() { tx.message() @@ -361,9 +361,9 @@ impl TransactionBatchProcessor { if let Some(index) = callbacks.account_matches_owners(key, program_owners) { - program_owners - .get(index) - .map(|owner| entry.insert((owner, 1))); + if let Some(owner) = program_owners.get(index) { + entry.insert((owner, 1)); + } } } }); From 228413ca8a6f58699c3385b05fc502bbe988e5fc Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 19 Mar 2024 15:26:13 -0700 Subject: [PATCH 014/153] vote: reuse ff to gate tvc constant update from 8 -> 16 (#322) --- programs/vote/src/vote_state/mod.rs | 2 ++ sdk/program/src/vote/state/mod.rs | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index ba84fa9bc4790e..b95f47e8c1b9c2 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -2173,6 +2173,7 @@ mod tests { let mut feature_set = 
FeatureSet::default(); feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + feature_set.activate(&feature_set::deprecate_unused_legacy_vote_plumbing::id(), 1); // For each vote group, process all vote groups leading up to it and it itself, and ensure that the number of // credits earned is correct for both regular votes and vote state updates @@ -2307,6 +2308,7 @@ mod tests { let mut feature_set = FeatureSet::default(); feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + feature_set.activate(&feature_set::deprecate_unused_legacy_vote_plumbing::id(), 1); // Retroactive voting is only possible with VoteStateUpdate transactions, which is why Vote transactions are // not tested here diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 1bb8c7dc88d91c..d22d5814c2ebd2 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -47,6 +47,9 @@ pub const VOTE_CREDITS_GRACE_SLOTS: u8 = 2; // Maximum number of credits to award for a vote; this number of credits is awarded to votes on slots that land within the grace period. After that grace period, vote credits are reduced. pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 16; +// Previous max per slot +pub const VOTE_CREDITS_MAXIMUM_PER_SLOT_OLD: u8 = 8; + #[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")] #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] pub struct Vote { @@ -597,6 +600,11 @@ impl VoteState { .votes .get(index) .map_or(0, |landed_vote| landed_vote.latency); + let max_credits = if deprecate_unused_legacy_vote_plumbing { + VOTE_CREDITS_MAXIMUM_PER_SLOT + } else { + VOTE_CREDITS_MAXIMUM_PER_SLOT_OLD + }; // If latency is 0, this means that the Lockout was created and stored from a software version that did not // store vote latencies; in this case, 1 credit is awarded @@ -606,13 +614,13 @@ impl VoteState { match latency.checked_sub(VOTE_CREDITS_GRACE_SLOTS) { None | Some(0) => { // latency was <= VOTE_CREDITS_GRACE_SLOTS, so maximum credits are awarded - VOTE_CREDITS_MAXIMUM_PER_SLOT as u64 + max_credits as u64 } Some(diff) => { // diff = latency - VOTE_CREDITS_GRACE_SLOTS, and diff > 0 // Subtract diff from VOTE_CREDITS_MAXIMUM_PER_SLOT which is the number of credits to award - match VOTE_CREDITS_MAXIMUM_PER_SLOT.checked_sub(diff) { + match max_credits.checked_sub(diff) { // If diff >= VOTE_CREDITS_MAXIMUM_PER_SLOT, 1 credit is awarded None | Some(0) => 1, From 8df80d9c12d828edfa9cd45ab58a56717be54cef Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 20 Mar 2024 09:39:33 +1100 Subject: [PATCH 015/153] accounts-db: unpack_archive: unpack accounts straight into their final destination (#289) * accounts-db: unpack_archive: avoid extra iteration on each path We used to do a iterator.clone().any(...) followed by iterator.collect(). Merge the two and avoid an extra iteration and re-parsing of the path. * accounts-db: unpack_archive: unpack accounts straight into their final destination We used to unpack accounts into account_path/accounts/ then rename to account_path/. We now unpack them into their final destination directly and avoid the rename syscall. 
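As a standalone illustration of the single-pass check described in the first
bullet above, here is a minimal sketch of the collect-into-Result idiom. This
is not the actual hardened_unpack.rs code: the checked_parts name and the
unit () error type are illustrative assumptions, and the real caller maps the
failure into UnpackError instead.

use std::path::{Component, Path};

// Validate every component of an archive entry path in one pass.
// Collecting into Result<Vec<_>, _> short-circuits on the first bad
// component, so no iterator.clone().any(...) pre-scan is needed.
fn checked_parts(path: &Path) -> Result<Vec<&str>, ()> {
    path.components()
        .map(|part| match part {
            Component::CurDir => Ok("."),
            Component::Normal(p) => p.to_str().ok_or(()),
            _ => Err(()), // RootDir, Prefix and ParentDir are all rejected
        })
        .collect()
}

fn main() {
    assert_eq!(
        checked_parts(Path::new("accounts/123.456")),
        Ok(vec!["accounts", "123.456"])
    );
    assert!(checked_parts(Path::new("../escape")).is_err());
}

Because the collect short-circuits at the first Err, the old two-pass shape
(clone-and-any to detect a bad component, then a second map/collect to
unwrap) collapses into a single traversal of the path.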
---
 accounts-db/src/hardened_unpack.rs | 76 ++++++++++++++++++++----------
 1 file changed, 50 insertions(+), 26 deletions(-)

diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs
index 39eca4f9cdf3d9..ebdafe675f0512 100644
--- a/accounts-db/src/hardened_unpack.rs
+++ b/accounts-db/src/hardened_unpack.rs
@@ -112,27 +112,26 @@ where
         // first by ourselves when there are odd paths like including `..` or /
         // for our clearer pattern matching reasoning:
         // https://docs.rs/tar/0.4.26/src/tar/entry.rs.html#371
-        let parts = path.components().map(|p| match p {
-            CurDir => Some("."),
-            Normal(c) => c.to_str(),
-            _ => None, // Prefix (for Windows) and RootDir are forbidden
-        });
+        let parts = path
+            .components()
+            .map(|p| match p {
+                CurDir => Ok("."),
+                Normal(c) => c.to_str().ok_or(()),
+                _ => Err(()), // Prefix (for Windows) and RootDir are forbidden
+            })
+            .collect::<Result<Vec<_>, _>>();
 
         // Reject old-style BSD directory entries that aren't explicitly tagged as directories
         let legacy_dir_entry =
             entry.header().as_ustar().is_none() && entry.path_bytes().ends_with(b"/");
         let kind = entry.header().entry_type();
         let reject_legacy_dir_entry = legacy_dir_entry && (kind != Directory);
-
-        if parts.clone().any(|p| p.is_none()) || reject_legacy_dir_entry {
+        let (Ok(parts), false) = (parts, reject_legacy_dir_entry) else {
             return Err(UnpackError::Archive(format!(
                 "invalid path found: {path_str:?}"
             )));
-        }
+        };
 
-        let parts: Vec<_> = parts.map(|p| p.unwrap()).collect();
-        let account_filename =
-            (parts.len() == 2 && parts[0] == "accounts").then(|| PathBuf::from(parts[1]));
         let unpack_dir = match entry_checker(parts.as_slice(), kind) {
             UnpackPath::Invalid => {
                 return Err(UnpackError::Archive(format!(
@@ -159,13 +158,24 @@
         )?;
         total_count = checked_total_count_increment(total_count, limit_count)?;
 
-        let target = sanitize_path(&entry.path()?, unpack_dir)?; // ? handles file system errors
-        if target.is_none() {
+        let account_filename = match parts.as_slice() {
+            ["accounts", account_filename] => Some(PathBuf::from(account_filename)),
+            _ => None,
+        };
+        let entry_path = if let Some(account) = account_filename {
+            // Special case account files. We're unpacking an account entry inside one of the
+            // account_paths returned by `entry_checker`. We want to unpack into
+            // account_path/<account> instead of account_path/accounts/<account> so we strip the
+            // accounts/ prefix.
+            sanitize_path(&account, unpack_dir)
+        } else {
+            sanitize_path(&path, unpack_dir)
+        }?; // ? handles file system errors
+        let Some(entry_path) = entry_path else {
             continue; // skip it
-        }
-        let target = target.unwrap();
+        };
 
-        let unpack = entry.unpack(target);
+        let unpack = entry.unpack(&entry_path);
         check_unpack_result(unpack.map(|_unpack| true)?, path_str)?;
 
         // Sanitize permissions.
@@ -173,16 +183,7 @@ where GNUSparse | Regular => 0o644, _ => 0o755, }; - let entry_path_buf = unpack_dir.join(entry.path()?); - set_perms(&entry_path_buf, mode)?; - - let entry_path = if let Some(account_filename) = account_filename { - let stripped_path = unpack_dir.join(account_filename); // strip away "accounts" - fs::rename(&entry_path_buf, &stripped_path)?; - stripped_path - } else { - entry_path_buf - }; + set_perms(&entry_path, mode)?; // Process entry after setting permissions entry_processor(entry_path); @@ -1029,4 +1030,27 @@ mod tests { if message == "too many files in snapshot: 1000000000000" ); } + + #[test] + fn test_archive_unpack_account_path() { + let mut header = Header::new_gnu(); + header.set_path("accounts/123.456").unwrap(); + header.set_size(4); + header.set_cksum(); + let data: &[u8] = &[1, 2, 3, 4]; + + let mut archive = Builder::new(Vec::new()); + archive.append(&header, data).unwrap(); + let result = with_finalize_and_unpack(archive, |ar, tmp| { + unpack_snapshot_with_processors( + ar, + tmp, + &[tmp.join("accounts_dest")], + None, + |_, _| {}, + |path| assert_eq!(path, tmp.join("accounts_dest/123.456")), + ) + }); + assert_matches!(result, Ok(())); + } } From 184ba6cb845bef90399e13fe18e90753f46c6e0f Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Tue, 19 Mar 2024 19:44:34 -0500 Subject: [PATCH 016/153] qos service should also accumulate executed but errored units (#328) qos service should also accumulated executed but errored units --- core/src/banking_stage/consumer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index c5ed22a34278ce..e66b32c0bda898 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -772,7 +772,9 @@ impl Consumer { (0, 0), |(units, times), program_timings| { ( - units.saturating_add(program_timings.accumulated_units), + units + .saturating_add(program_timings.accumulated_units) + .saturating_add(program_timings.total_errored_units), times.saturating_add(program_timings.accumulated_us), ) }, From 261b3e9ee78f3edb7e8d02debfab233689f87814 Mon Sep 17 00:00:00 2001 From: Jon C Date: Wed, 20 Mar 2024 13:21:00 +0100 Subject: [PATCH 017/153] CI: Add windows clippy job and fix clippy errors (#330) * CI: Run clippy on windows * Update cargo-clippy-before-script.sh for Windows * Pacify clippy --- .github/scripts/cargo-clippy-before-script.sh | 4 +++ .github/workflows/cargo.yml | 2 ++ accounts-db/src/hardened_unpack.rs | 3 ++ .../src/geyser_plugin_manager.rs | 6 +++- install/src/command.rs | 10 ++---- programs/sbf/benches/bpf_loader.rs | 7 +++-- rpc/src/rpc_service.rs | 31 +++++++++---------- 7 files changed, 36 insertions(+), 27 deletions(-) diff --git a/.github/scripts/cargo-clippy-before-script.sh b/.github/scripts/cargo-clippy-before-script.sh index b9426203aa6ffc..bba03060877434 100755 --- a/.github/scripts/cargo-clippy-before-script.sh +++ b/.github/scripts/cargo-clippy-before-script.sh @@ -6,6 +6,10 @@ os_name="$1" case "$os_name" in "Windows") + vcpkg install openssl:x64-windows-static-md + vcpkg integrate install + choco install protoc + export PROTOC='C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe' ;; "macOS") brew install protobuf diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml index 3d7b1371b6578b..b52a543e2d9e01 100644 --- a/.github/workflows/cargo.yml +++ b/.github/workflows/cargo.yml @@ -31,6 +31,7 @@ jobs: matrix: os: - 
macos-latest-large + - windows-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 @@ -53,6 +54,7 @@ jobs: matrix: os: - macos-latest-large + - windows-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index ebdafe675f0512..cff22fde8ab368 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -205,6 +205,9 @@ where #[cfg(windows)] fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { let mut perm = fs::metadata(dst)?.permissions(); + // This is OK for Windows, but clippy doesn't realize we're doing this + // only on Windows. + #[allow(clippy::permissions_set_readonly_false)] perm.set_readonly(false); fs::set_permissions(dst, perm) } diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index d88814d88e9470..d5521c9ad41e19 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -451,9 +451,13 @@ mod tests { plugin: P, config_path: &'static str, ) -> (LoadedGeyserPlugin, Library, &'static str) { + #[cfg(unix)] + let library = libloading::os::unix::Library::this(); + #[cfg(windows)] + let library = libloading::os::windows::Library::this().unwrap(); ( LoadedGeyserPlugin::new(Box::new(plugin), None), - Library::from(libloading::os::unix::Library::this()), + Library::from(library), config_path, ) } diff --git a/install/src/command.rs b/install/src/command.rs index fe7617af6447e8..ad6c1ea1fe3aa7 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -333,9 +333,7 @@ pub fn string_from_winreg_value(val: &winreg::RegValue) -> Option { let words = unsafe { slice::from_raw_parts(val.bytes.as_ptr() as *const u16, val.bytes.len() / 2) }; - let mut s = if let Ok(s) = String::from_utf16(words) { - s - } else { + let Ok(mut s) = String::from_utf16(words) else { return None; }; while s.ends_with('\u{0}') { @@ -392,11 +390,9 @@ fn add_to_path(new_path: &str) -> bool { }, }; - let old_path = if let Some(s) = + let Some(old_path) = get_windows_path_var().unwrap_or_else(|err| panic!("Unable to get PATH: {}", err)) - { - s - } else { + else { return false; }; diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 47c55245000df1..1dd827bbeb197b 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -2,7 +2,10 @@ #![cfg(feature = "sbf_c")] #![allow(clippy::uninlined_format_args)] #![allow(clippy::arithmetic_side_effects)] -#![cfg_attr(not(target_arch = "x86_64"), allow(dead_code, unused_imports))] +#![cfg_attr( + any(target_os = "windows", not(target_arch = "x86_64")), + allow(dead_code, unused_imports) +)] use { solana_rbpf::memory_region::MemoryState, @@ -103,7 +106,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) { } #[bench] -#[cfg(target_arch = "x86_64")] +#[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] fn bench_program_alu(bencher: &mut Bencher) { let ns_per_s = 1000000000; let one_million = 1000000; diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 303a1e94b223b2..10580b4711c054 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -878,24 +878,21 @@ mod tests { panic!("Unexpected RequestMiddlewareAction variant"); } - #[cfg(unix)] + std::fs::remove_file(&genesis_path).unwrap(); { - std::fs::remove_file(&genesis_path).unwrap(); - { - let mut file = 
std::fs::File::create(ledger_path.path().join("wrong")).unwrap(); - file.write_all(b"wrong file").unwrap(); - } - symlink::symlink_file("wrong", &genesis_path).unwrap(); - - // File is a symbolic link => request should fail. - let action = rrm.process_file_get(DEFAULT_GENESIS_DOWNLOAD_PATH); - if let RequestMiddlewareAction::Respond { response, .. } = action { - let response = runtime.block_on(response); - let response = response.unwrap(); - assert_ne!(response.status(), 200); - } else { - panic!("Unexpected RequestMiddlewareAction variant"); - } + let mut file = std::fs::File::create(ledger_path.path().join("wrong")).unwrap(); + file.write_all(b"wrong file").unwrap(); + } + symlink::symlink_file("wrong", &genesis_path).unwrap(); + + // File is a symbolic link => request should fail. + let action = rrm.process_file_get(DEFAULT_GENESIS_DOWNLOAD_PATH); + if let RequestMiddlewareAction::Respond { response, .. } = action { + let response = runtime.block_on(response); + let response = response.unwrap(); + assert_ne!(response.status(), 200); + } else { + panic!("Unexpected RequestMiddlewareAction variant"); } } } From aba8ce5f3ef5af4c7241906ccf419e0dd4d7ff69 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 20 Mar 2024 05:39:07 -0700 Subject: [PATCH 018/153] Move code to check_program_modification_slot out of SVM (#329) * Move code to check_program_modification_slot out of SVM * add documentation for the public function --- runtime/src/bank.rs | 27 ++++++++++++--- svm/src/transaction_processor.rs | 58 +++++++++++--------------------- 2 files changed, 42 insertions(+), 43 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6d5c2345f92aca..b7329724a2558e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -99,7 +99,8 @@ use { compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{ - LoadedProgram, LoadedProgramType, LoadedPrograms, ProgramRuntimeEnvironments, + LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedPrograms, + ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, timings::{ExecuteTimingType, ExecuteTimings}, @@ -168,7 +169,8 @@ use { account_overrides::AccountOverrides, transaction_error_metrics::TransactionErrorMetrics, transaction_processor::{ - TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, + ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, + TransactionProcessingCallback, }, transaction_results::{ TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, @@ -271,7 +273,6 @@ pub struct BankRc { #[cfg(RUSTC_WITH_SPECIALIZATION)] use solana_frozen_abi::abi_example::AbiExample; -use solana_svm::transaction_processor::ExecutionRecordingConfig; #[cfg(RUSTC_WITH_SPECIALIZATION)] impl AbiExample for BankRc { @@ -550,6 +551,7 @@ impl PartialEq for Bank { loaded_programs_cache: _, epoch_reward_status: _, transaction_processor: _, + check_program_modification_slot: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. 
@@ -810,6 +812,8 @@ pub struct Bank { epoch_reward_status: EpochRewardStatus, transaction_processor: TransactionBatchProcessor, + + check_program_modification_slot: bool, } struct VoteWithStakeDelegations { @@ -996,6 +1000,7 @@ impl Bank { ))), epoch_reward_status: EpochRewardStatus::default(), transaction_processor: TransactionBatchProcessor::default(), + check_program_modification_slot: false, }; bank.transaction_processor = TransactionBatchProcessor::new( @@ -1314,6 +1319,7 @@ impl Bank { loaded_programs_cache: parent.loaded_programs_cache.clone(), epoch_reward_status: parent.epoch_reward_status.clone(), transaction_processor: TransactionBatchProcessor::default(), + check_program_modification_slot: false, }; new.transaction_processor = TransactionBatchProcessor::new( @@ -1864,6 +1870,7 @@ impl Bank { ))), epoch_reward_status: fields.epoch_reward_status, transaction_processor: TransactionBatchProcessor::default(), + check_program_modification_slot: false, }; bank.transaction_processor = TransactionBatchProcessor::new( @@ -7517,7 +7524,7 @@ impl Bank { } pub fn check_program_modification_slot(&mut self) { - self.transaction_processor.check_program_modification_slot = true; + self.check_program_modification_slot = true; } pub fn load_program( @@ -7579,6 +7586,18 @@ impl TransactionProcessingCallback for Bank { Ok(()) } } + + fn get_program_match_criteria(&self, program: &Pubkey) -> LoadedProgramMatchCriteria { + if self.check_program_modification_slot { + self.transaction_processor + .program_modification_slot(self, program) + .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { + LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) + }) + } else { + LoadedProgramMatchCriteria::NoCriteria + } + } } #[cfg(feature = "dev-context-only-utils")] diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index a566802dc12987..40ccf81561f26e 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -103,6 +103,10 @@ pub trait TransactionProcessingCallback { ) -> transaction::Result<()> { Ok(()) } + + fn get_program_match_criteria(&self, _program: &Pubkey) -> LoadedProgramMatchCriteria { + LoadedProgramMatchCriteria::NoCriteria + } } #[derive(Debug)] @@ -128,8 +132,6 @@ pub struct TransactionBatchProcessor { /// Transaction fee structure fee_structure: FeeStructure, - pub check_program_modification_slot: bool, - /// Optional config parameters that can override runtime behavior runtime_config: Arc, @@ -145,10 +147,6 @@ impl Debug for TransactionBatchProcessor { .field("epoch", &self.epoch) .field("epoch_schedule", &self.epoch_schedule) .field("fee_structure", &self.fee_structure) - .field( - "check_program_modification_slot", - &self.check_program_modification_slot, - ) .field("runtime_config", &self.runtime_config) .field("sysvar_cache", &self.sysvar_cache) .field("loaded_programs_cache", &self.loaded_programs_cache) @@ -163,7 +161,6 @@ impl Default for TransactionBatchProcessor { epoch: Epoch::default(), epoch_schedule: EpochSchedule::default(), fee_structure: FeeStructure::default(), - check_program_modification_slot: false, runtime_config: Arc::::default(), sysvar_cache: RwLock::::default(), loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( @@ -188,7 +185,6 @@ impl TransactionBatchProcessor { epoch, epoch_schedule, fee_structure, - check_program_modification_slot: false, runtime_config, sysvar_cache: RwLock::::default(), loaded_programs_cache, @@ -491,30 +487,15 @@ impl TransactionBatchProcessor { limit_to_load_programs: 
bool, ) -> LoadedProgramsForTxBatch { let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = - if self.check_program_modification_slot { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - ( - *pubkey, - ( - self.program_modification_slot(callback, pubkey) - .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { - LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) - }), - *count, - ), - ) - }) - .collect() - } else { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - (*pubkey, (LoadedProgramMatchCriteria::NoCriteria, *count)) - }) - .collect() - }; + program_accounts_map + .iter() + .map(|(pubkey, (_, count))| { + ( + *pubkey, + (callback.get_program_match_criteria(pubkey), *count), + ) + }) + .collect(); let mut loaded_programs_for_txs = None; let mut program_to_store = None; @@ -763,7 +744,11 @@ impl TransactionBatchProcessor { } } - fn program_modification_slot( + /// Find the slot in which the program was most recently modified. + /// Returns slot 0 for programs deployed with v1/v2 loaders, since programs deployed + /// with those loaders do not retain deployment slot information. + /// Returns an error if the program's account state can not be found or parsed. + pub fn program_modification_slot( &self, callbacks: &CB, pubkey: &Pubkey, @@ -1815,10 +1800,7 @@ mod tests { fn test_replenish_program_cache() { // Case 1 let mut mock_bank = MockBankCallback::default(); - let mut batch_processor = TransactionBatchProcessor:: { - check_program_modification_slot: true, - ..TransactionBatchProcessor::default() - }; + let batch_processor = TransactionBatchProcessor::::default(); batch_processor .loaded_programs_cache .write() @@ -1848,8 +1830,6 @@ mod tests { )); // Case 2 - batch_processor.check_program_modification_slot = false; - let result = batch_processor.replenish_program_cache(&mock_bank, &account_maps, true); let program1 = result.find(&key1).unwrap(); From 0f8f8cd9706f869f05153c22f3d26507b27f2c6f Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Wed, 20 Mar 2024 10:54:15 -0500 Subject: [PATCH 019/153] Revert deprecate executable feature (#309) * revert deprecate executable feature * add native loader account transfer test --------- Co-authored-by: HaoranYi --- cli/src/program.rs | 15 +- cli/tests/program.rs | 44 ++-- ledger-tool/src/program.rs | 1 - program-runtime/src/invoke_context.rs | 14 +- program-runtime/src/message_processor.rs | 16 +- program-test/src/lib.rs | 24 +-- .../address-lookup-table/src/processor.rs | 22 +- programs/bpf_loader/benches/serialization.rs | 49 +---- programs/bpf_loader/src/lib.rs | 188 ++++++------------ programs/bpf_loader/src/serialization.rs | 65 ++---- programs/bpf_loader/src/syscalls/cpi.rs | 84 +++----- programs/config/src/config_processor.rs | 2 +- programs/loader-v4/src/lib.rs | 25 ++- programs/sbf/benches/bpf_loader.rs | 10 +- programs/stake/src/stake_instruction.rs | 37 +--- programs/stake/src/stake_state.rs | 105 ++++------ programs/system/src/system_instruction.rs | 22 +- programs/system/src/system_processor.rs | 62 +++++- programs/vote/src/vote_state/mod.rs | 24 +-- programs/zk-token-proof/src/lib.rs | 14 +- runtime/src/bank.rs | 9 +- runtime/src/bank/tests.rs | 40 ++-- sdk/src/account.rs | 94 +-------- sdk/src/feature_set.rs | 5 - sdk/src/transaction_context.rs | 96 +++------ svm/src/account_loader.rs | 15 +- 26 files changed, 353 insertions(+), 729 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 099da9dbaf2438..c35871868f0f04 100644 --- a/cli/src/program.rs +++ 
b/cli/src/program.rs @@ -44,7 +44,7 @@ use { }, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::{is_executable, Account}, + account::Account, account_utils::StateMut, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, @@ -1066,15 +1066,6 @@ fn get_default_program_keypair(program_location: &Option) -> Keypair { program_keypair } -fn is_account_executable(account: &Account) -> bool { - if account.owner == bpf_loader_deprecated::id() || account.owner == bpf_loader::id() { - account.executable - } else { - let feature_set = FeatureSet::all_enabled(); - is_executable(account, &feature_set) - } -} - /// Deploy program using upgradeable loader. It also can process program upgrades #[allow(clippy::too_many_arguments)] fn process_program_deploy( @@ -1131,7 +1122,7 @@ fn process_program_deploy( .into()); } - if !is_account_executable(&account) { + if !account.executable { // Continue an initial deploy true } else if let Ok(UpgradeableLoaderState::Program { @@ -2534,7 +2525,7 @@ fn complete_partial_program_init( ) -> Result<(Vec, u64), Box> { let mut instructions: Vec = vec![]; let mut balance_needed = 0; - if is_account_executable(account) { + if account.executable { return Err("Buffer account is already executable".into()); } if account.owner != *loader_id && !system_program::check_id(&account.owner) { diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 039df1d64b8ae8..6eb281d65b9e35 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -14,11 +14,9 @@ use { solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::is_executable, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, - feature_set::FeatureSet, pubkey::Pubkey, signature::{Keypair, NullSigner, Signer}, }, @@ -102,7 +100,7 @@ fn test_cli_program_deploy_non_upgradeable() { let account0 = rpc_client.get_account(&program_id).unwrap(); assert_eq!(account0.lamports, minimum_balance_for_program); assert_eq!(account0.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&account0, &FeatureSet::all_enabled())); + assert!(account0.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id()); @@ -112,10 +110,7 @@ fn test_cli_program_deploy_non_upgradeable() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] 
@@ -143,7 +138,7 @@ fn test_cli_program_deploy_non_upgradeable() { .unwrap(); assert_eq!(account1.lamports, minimum_balance_for_program); assert_eq!(account1.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&account1, &FeatureSet::all_enabled())); + assert!(account1.executable); let (programdata_pubkey, _) = Pubkey::find_program_address( &[custom_address_keypair.pubkey().as_ref()], &bpf_loader_upgradeable::id(), @@ -154,10 +149,7 @@ fn test_cli_program_deploy_non_upgradeable() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -385,7 +377,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address( &[program_keypair.pubkey().as_ref()], &bpf_loader_upgradeable::id(), @@ -396,10 +388,7 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -433,7 +422,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -442,10 +431,7 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(program_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] 
@@ -470,7 +456,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -479,10 +465,7 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(program_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -548,7 +531,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -557,10 +540,7 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(program_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 24df2168a338bf..0b4855ccb7f756 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -540,7 +540,6 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .get_current_instruction_context() .unwrap(), true, // copy_account_data - &invoke_context.feature_set, ) .unwrap(); diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 7e930fad169627..5b2d417912256f 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -403,7 +403,7 @@ impl<'a> InvokeContext<'a> { })?; let borrowed_program_account = instruction_context .try_borrow_instruction_account(self.transaction_context, program_account_index)?; - if !borrowed_program_account.is_executable(&self.feature_set) { + if !borrowed_program_account.is_executable() { ic_msg!(self, "Account {} is not executable", callee_program_id); return Err(InstructionError::AccountNotExecutable); } @@ -802,17 +802,17 @@ mod tests { MockInstruction::NoopFail => return Err(InstructionError::GenericError), MockInstruction::ModifyOwned => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .set_data_from_slice(&[1], &invoke_context.feature_set)?, + .set_data_from_slice(&[1])?, MockInstruction::ModifyNotOwned => instruction_context .try_borrow_instruction_account(transaction_context, 1)? 
- .set_data_from_slice(&[1], &invoke_context.feature_set)?, + .set_data_from_slice(&[1])?, MockInstruction::ModifyReadonly => instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .set_data_from_slice(&[1], &invoke_context.feature_set)?, + .set_data_from_slice(&[1])?, MockInstruction::UnbalancedPush => { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(1, &invoke_context.feature_set)?; + .checked_add_lamports(1)?; let program_id = *transaction_context.get_key_of_account_at_index(3)?; let metas = vec![ AccountMeta::new_readonly( @@ -843,7 +843,7 @@ mod tests { } MockInstruction::UnbalancedPop => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(1, &invoke_context.feature_set)?, + .checked_add_lamports(1)?, MockInstruction::ConsumeComputeUnits { compute_units_to_consume, desired_result, @@ -855,7 +855,7 @@ mod tests { } MockInstruction::Resize { new_len } => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .set_data(vec![0; new_len as usize], &invoke_context.feature_set)?, + .set_data(vec![0; new_len as usize])?, } } else { return Err(InstructionError::InvalidInstructionData); diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index 507197298479d9..e307609e096501 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -221,16 +221,16 @@ mod tests { MockSystemInstruction::TransferLamports { lamports } => { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_sub_lamports(lamports, &invoke_context.feature_set)?; + .checked_sub_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(lamports, &invoke_context.feature_set)?; + .checked_add_lamports(lamports)?; Ok(()) } MockSystemInstruction::ChangeData { data } => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? 
- .set_data(vec![data], &invoke_context.feature_set)?; + .set_data(vec![data])?; Ok(()) } } @@ -444,14 +444,14 @@ mod tests { MockSystemInstruction::DoWork { lamports, data } => { let mut dup_account = instruction_context .try_borrow_instruction_account(transaction_context, 2)?; - dup_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; - to_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; - dup_account.set_data(vec![data], &invoke_context.feature_set)?; + dup_account.checked_sub_lamports(lamports)?; + to_account.checked_add_lamports(lamports)?; + dup_account.set_data(vec![data])?; drop(dup_account); let mut from_account = instruction_context .try_borrow_instruction_account(transaction_context, 0)?; - from_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; - to_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; + from_account.checked_sub_lamports(lamports)?; + to_account.checked_add_lamports(lamports)?; Ok(()) } } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 669cb15a595afb..f4fba5761d1332 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -133,7 +133,6 @@ pub fn invoke_builtin_function( .transaction_context .get_current_instruction_context()?, true, // copy_account_data // There is no VM so direct mapping can not be implemented here - &invoke_context.feature_set, )?; // Deserialize data back into instruction params @@ -164,25 +163,18 @@ pub fn invoke_builtin_function( if borrowed_account.is_writable() { if let Some(account_info) = account_info_map.get(borrowed_account.get_key()) { if borrowed_account.get_lamports() != account_info.lamports() { - borrowed_account - .set_lamports(account_info.lamports(), &invoke_context.feature_set)?; + borrowed_account.set_lamports(account_info.lamports())?; } if borrowed_account .can_data_be_resized(account_info.data_len()) .is_ok() - && borrowed_account - .can_data_be_changed(&invoke_context.feature_set) - .is_ok() + && borrowed_account.can_data_be_changed().is_ok() { - borrowed_account.set_data_from_slice( - &account_info.data.borrow(), - &invoke_context.feature_set, - )?; + borrowed_account.set_data_from_slice(&account_info.data.borrow())?; } if borrowed_account.get_owner() != account_info.owner { - borrowed_account - .set_owner(account_info.owner.as_ref(), &invoke_context.feature_set)?; + borrowed_account.set_owner(account_info.owner.as_ref())?; } } } @@ -293,17 +285,17 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { .unwrap(); if borrowed_account.get_lamports() != account_info.lamports() { borrowed_account - .set_lamports(account_info.lamports(), &invoke_context.feature_set) + .set_lamports(account_info.lamports()) .unwrap(); } let account_info_data = account_info.try_borrow_data().unwrap(); // The redundant check helps to avoid the expensive data comparison if we can match borrowed_account .can_data_be_resized(account_info_data.len()) - .and_then(|_| borrowed_account.can_data_be_changed(&invoke_context.feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { Ok(()) => borrowed_account - .set_data_from_slice(&account_info_data, &invoke_context.feature_set) + .set_data_from_slice(&account_info_data) .unwrap(), Err(err) if borrowed_account.get_data() != *account_info_data => { panic!("{err:?}"); @@ -313,7 +305,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { // Change the owner at the end so that we are allowed to change the lamports and data before if borrowed_account.get_owner() != 
account_info.owner { borrowed_account - .set_owner(account_info.owner.as_ref(), &invoke_context.feature_set) + .set_owner(account_info.owner.as_ref()) .unwrap(); } if instruction_account.is_writable { diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 643310d316bf83..4db568c71a1a20 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -162,10 +162,9 @@ impl Processor { let instruction_context = transaction_context.get_current_instruction_context()?; let mut lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - lookup_table_account.set_state( - &ProgramState::LookupTable(LookupTableMeta::new(authority_key)), - &invoke_context.feature_set, - )?; + lookup_table_account.set_state(&ProgramState::LookupTable(LookupTableMeta::new( + authority_key, + )))?; Ok(()) } @@ -214,7 +213,7 @@ impl Processor { let mut lookup_table_meta = lookup_table.meta; lookup_table_meta.authority = None; AddressLookupTable::overwrite_meta_data( - lookup_table_account.get_data_mut(&invoke_context.feature_set)?, + lookup_table_account.get_data_mut()?, lookup_table_meta, )?; @@ -306,12 +305,11 @@ impl Processor { )?; { AddressLookupTable::overwrite_meta_data( - lookup_table_account.get_data_mut(&invoke_context.feature_set)?, + lookup_table_account.get_data_mut()?, lookup_table_meta, )?; for new_address in new_addresses { - lookup_table_account - .extend_from_slice(new_address.as_ref(), &invoke_context.feature_set)?; + lookup_table_account.extend_from_slice(new_address.as_ref())?; } } drop(lookup_table_account); @@ -383,7 +381,7 @@ impl Processor { lookup_table_meta.deactivation_slot = clock.slot; AddressLookupTable::overwrite_meta_data( - lookup_table_account.get_data_mut(&invoke_context.feature_set)?, + lookup_table_account.get_data_mut()?, lookup_table_meta, )?; @@ -458,13 +456,13 @@ impl Processor { let mut recipient_account = instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - recipient_account.checked_add_lamports(withdrawn_lamports, &invoke_context.feature_set)?; + recipient_account.checked_add_lamports(withdrawn_lamports)?; drop(recipient_account); let mut lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - lookup_table_account.set_data_length(0, &invoke_context.feature_set)?; - lookup_table_account.set_lamports(0, &invoke_context.feature_set)?; + lookup_table_account.set_data_length(0)?; + lookup_table_account.set_lamports(0)?; Ok(()) } diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index abd0823b83497e..5d3c55a165e399 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -7,7 +7,6 @@ use { solana_sdk::{ account::{Account, AccountSharedData}, bpf_loader, bpf_loader_deprecated, - feature_set::FeatureSet, pubkey::Pubkey, sysvar::rent::Rent, transaction_context::{IndexOfAccount, InstructionAccount, TransactionContext}, @@ -127,13 +126,7 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -144,13 +137,7 @@ fn 
bench_serialize_unaligned_copy_account_data(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - true, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap(); }); } @@ -162,13 +149,7 @@ fn bench_serialize_aligned(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -180,13 +161,7 @@ fn bench_serialize_aligned_copy_account_data(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - true, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap(); }); } @@ -197,13 +172,7 @@ fn bench_serialize_unaligned_max_accounts(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -215,12 +184,6 @@ fn bench_serialize_aligned_max_accounts(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index a9c34fbabfc6f6..2cae8b502efdb9 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -34,8 +34,7 @@ use { clock::Slot, entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ - bpf_account_data_direct_mapping, deprecate_executable_meta_update_in_bpf_loader, - enable_bpf_loader_set_authority_checked_ix, FeatureSet, + bpf_account_data_direct_mapping, enable_bpf_loader_set_authority_checked_ix, }, instruction::{AccountMeta, InstructionError}, loader_upgradeable_instruction::UpgradeableLoaderInstruction, @@ -172,7 +171,7 @@ fn write_program_data( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let data = program.get_data_mut(&invoke_context.feature_set)?; + let data = program.get_data_mut()?; let write_offset = program_data_offset.saturating_add(bytes.len()); if data.len() < write_offset { ic_msg!( @@ -402,7 +401,7 @@ pub fn process_instruction_inner( } // Program Invocation - if !program_account.is_executable(&invoke_context.feature_set) { + if !program_account.is_executable() { ic_logger_msg!(log_collector, "Program is not executable"); return Err(Box::new(InstructionError::IncorrectProgramId)); } @@ -460,12 +459,9 @@ fn process_loader_upgradeable_instruction( instruction_context.get_index_of_instruction_account_in_transaction(1)?, )?); - buffer.set_state( - &UpgradeableLoaderState::Buffer { - authority_address: authority_key, - }, - &invoke_context.feature_set, - )?; + buffer.set_state(&UpgradeableLoaderState::Buffer { + 
authority_address: authority_key, + })?; } UpgradeableLoaderInstruction::Write { offset, bytes } => { instruction_context.check_number_of_instruction_accounts(2)?; @@ -589,8 +585,8 @@ fn process_loader_upgradeable_instruction( instruction_context.try_borrow_instruction_account(transaction_context, 3)?; let mut payer = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - payer.checked_add_lamports(buffer.get_lamports(), &invoke_context.feature_set)?; - buffer.set_lamports(0, &invoke_context.feature_set)?; + payer.checked_add_lamports(buffer.get_lamports())?; + buffer.set_lamports(0)?; } let owner_id = *program_id; @@ -644,15 +640,12 @@ fn process_loader_upgradeable_instruction( { let mut programdata = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - programdata.set_state( - &UpgradeableLoaderState::ProgramData { - slot: clock.slot, - upgrade_authority_address: authority_key, - }, - &invoke_context.feature_set, - )?; + programdata.set_state(&UpgradeableLoaderState::ProgramData { + slot: clock.slot, + upgrade_authority_address: authority_key, + })?; let dst_slice = programdata - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut( programdata_data_offset ..programdata_data_offset.saturating_add(buffer_data_len), @@ -665,30 +658,16 @@ fn process_loader_upgradeable_instruction( .get(buffer_data_offset..) .ok_or(InstructionError::AccountDataTooSmall)?; dst_slice.copy_from_slice(src_slice); - buffer.set_data_length( - UpgradeableLoaderState::size_of_buffer(0), - &invoke_context.feature_set, - )?; + buffer.set_data_length(UpgradeableLoaderState::size_of_buffer(0))?; } // Update the Program account let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - program.set_state( - &UpgradeableLoaderState::Program { - programdata_address: programdata_key, - }, - &invoke_context.feature_set, - )?; - - // Skip writing true to executable meta after bpf program deployment when - // `deprecate_executable_meta_update_in_bpf_loader` feature is activated. - if !invoke_context - .feature_set - .is_active(&deprecate_executable_meta_update_in_bpf_loader::id()) - { - program.set_executable(true)?; - } + program.set_state(&UpgradeableLoaderState::Program { + programdata_address: programdata_key, + })?; + program.set_executable(true)?; drop(program); ic_logger_msg!(log_collector, "Deployed program {:?}", new_program_id); @@ -710,7 +689,7 @@ fn process_loader_upgradeable_instruction( let program = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - if !program.is_executable(&invoke_context.feature_set) { + if !program.is_executable() { ic_logger_msg!(log_collector, "Program account not executable"); return Err(InstructionError::AccountNotExecutable); } @@ -841,15 +820,12 @@ fn process_loader_upgradeable_instruction( let mut programdata = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; { - programdata.set_state( - &UpgradeableLoaderState::ProgramData { - slot: clock.slot, - upgrade_authority_address: authority_key, - }, - &invoke_context.feature_set, - )?; + programdata.set_state(&UpgradeableLoaderState::ProgramData { + slot: clock.slot, + upgrade_authority_address: authority_key, + })?; let dst_slice = programdata - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? 
.get_mut( programdata_data_offset ..programdata_data_offset.saturating_add(buffer_data_len), @@ -864,7 +840,7 @@ fn process_loader_upgradeable_instruction( dst_slice.copy_from_slice(src_slice); } programdata - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut(programdata_data_offset.saturating_add(buffer_data_len)..) .ok_or(InstructionError::AccountDataTooSmall)? .fill(0); @@ -879,14 +855,10 @@ fn process_loader_upgradeable_instruction( .get_lamports() .saturating_add(buffer_lamports) .saturating_sub(programdata_balance_required), - &invoke_context.feature_set, - )?; - buffer.set_lamports(0, &invoke_context.feature_set)?; - programdata.set_lamports(programdata_balance_required, &invoke_context.feature_set)?; - buffer.set_data_length( - UpgradeableLoaderState::size_of_buffer(0), - &invoke_context.feature_set, )?; + buffer.set_lamports(0)?; + programdata.set_lamports(programdata_balance_required)?; + buffer.set_data_length(UpgradeableLoaderState::size_of_buffer(0))?; ic_logger_msg!(log_collector, "Upgraded program {:?}", new_program_id); } @@ -922,12 +894,9 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "Buffer authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::Buffer { - authority_address: new_authority.cloned(), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::Buffer { + authority_address: new_authority.cloned(), + })?; } UpgradeableLoaderState::ProgramData { slot, @@ -945,13 +914,10 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "Upgrade authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: new_authority.cloned(), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: new_authority.cloned(), + })?; } _ => { ic_logger_msg!(log_collector, "Account does not support authorities"); @@ -997,12 +963,9 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::Buffer { - authority_address: Some(*new_authority_key), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::Buffer { + authority_address: Some(*new_authority_key), + })?; } UpgradeableLoaderState::ProgramData { slot, @@ -1024,13 +987,10 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: Some(*new_authority_key), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: Some(*new_authority_key), + })?; } _ => { ic_logger_msg!(log_collector, "Account does not support authorities"); @@ -1055,19 +1015,13 @@ fn process_loader_upgradeable_instruction( instruction_context.try_borrow_instruction_account(transaction_context, 0)?; let close_key = *close_account.get_key(); let close_account_state = close_account.get_state()?; - close_account.set_data_length( - UpgradeableLoaderState::size_of_uninitialized(), - &invoke_context.feature_set, - )?; + 
close_account.set_data_length(UpgradeableLoaderState::size_of_uninitialized())?; match close_account_state { UpgradeableLoaderState::Uninitialized => { let mut recipient_account = instruction_context .try_borrow_instruction_account(transaction_context, 1)?; - recipient_account.checked_add_lamports( - close_account.get_lamports(), - &invoke_context.feature_set, - )?; - close_account.set_lamports(0, &invoke_context.feature_set)?; + recipient_account.checked_add_lamports(close_account.get_lamports())?; + close_account.set_lamports(0)?; ic_logger_msg!(log_collector, "Closed Uninitialized {}", close_key); } @@ -1079,7 +1033,6 @@ fn process_loader_upgradeable_instruction( transaction_context, instruction_context, &log_collector, - &invoke_context.feature_set, )?; ic_logger_msg!(log_collector, "Closed Buffer {}", close_key); @@ -1126,7 +1079,6 @@ fn process_loader_upgradeable_instruction( transaction_context, instruction_context, &log_collector, - &invoke_context.feature_set, )?; let clock = invoke_context.get_sysvar_cache().get_clock()?; invoke_context.programs_modified_by_tx.replenish( @@ -1277,7 +1229,7 @@ fn process_loader_upgradeable_instruction( let instruction_context = transaction_context.get_current_instruction_context()?; let mut programdata_account = instruction_context .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; - programdata_account.set_data_length(new_len, &invoke_context.feature_set)?; + programdata_account.set_data_length(new_len)?; let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); @@ -1298,13 +1250,10 @@ fn process_loader_upgradeable_instruction( let mut programdata_account = instruction_context .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; - programdata_account.set_state( - &UpgradeableLoaderState::ProgramData { - slot: clock_slot, - upgrade_authority_address, - }, - &invoke_context.feature_set, - )?; + programdata_account.set_state(&UpgradeableLoaderState::ProgramData { + slot: clock_slot, + upgrade_authority_address, + })?; ic_logger_msg!( log_collector, @@ -1322,7 +1271,6 @@ fn common_close_account( transaction_context: &TransactionContext, instruction_context: &InstructionContext, log_collector: &Option>>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if authority_address.is_none() { ic_logger_msg!(log_collector, "Account is immutable"); @@ -1346,9 +1294,9 @@ fn common_close_account( let mut recipient_account = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - recipient_account.checked_add_lamports(close_account.get_lamports(), feature_set)?; - close_account.set_lamports(0, feature_set)?; - close_account.set_state(&UpgradeableLoaderState::Uninitialized, feature_set)?; + recipient_account.checked_add_lamports(close_account.get_lamports())?; + close_account.set_lamports(0)?; + close_account.set_state(&UpgradeableLoaderState::Uninitialized)?; Ok(()) } @@ -1383,7 +1331,6 @@ fn execute<'a, 'b: 'a>( invoke_context.transaction_context, instruction_context, !direct_mapping, - &invoke_context.feature_set, )?; serialize_time.stop(); @@ -1464,15 +1411,13 @@ fn execute<'a, 'b: 'a>( instruction_account_index as IndexOfAccount, )?; - error = EbpfError::SyscallError(Box::new( - if account.is_executable(&invoke_context.feature_set) { - InstructionError::ExecutableDataModified - } else if account.is_writable() { - InstructionError::ExternalAccountDataModified - } else { - InstructionError::ReadonlyDataModified - }, - )); + error = 
EbpfError::SyscallError(Box::new(if account.is_executable() { + InstructionError::ExecutableDataModified + } else if account.is_writable() { + InstructionError::ExternalAccountDataModified + } else { + InstructionError::ReadonlyDataModified + })); } } } @@ -1500,7 +1445,6 @@ fn execute<'a, 'b: 'a>( copy_account_data, parameter_bytes, &invoke_context.get_syscall_context()?.accounts_metadata, - &invoke_context.feature_set, ) } @@ -1630,9 +1574,6 @@ mod tests { expected_result, Entrypoint::vm, |invoke_context| { - let mut features = FeatureSet::all_enabled(); - features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); - invoke_context.feature_set = Arc::new(features); test_utils::load_all_invoked_programs(invoke_context); }, |_invoke_context| {}, @@ -1721,9 +1662,6 @@ mod tests { Err(InstructionError::ProgramFailedToComplete), Entrypoint::vm, |invoke_context| { - let mut features = FeatureSet::all_enabled(); - features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); - invoke_context.feature_set = Arc::new(features); invoke_context.mock_set_remaining(0); test_utils::load_all_invoked_programs(invoke_context); }, @@ -2269,11 +2207,7 @@ mod tests { instruction_accounts, expected_result, Entrypoint::vm, - |invoke_context| { - let mut features = FeatureSet::all_enabled(); - features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); - invoke_context.feature_set = Arc::new(features); - }, + |_invoke_context| {}, |_invoke_context| {}, ) } diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index d4cbd09642f47c..f9cbc2e752c54d 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -11,7 +11,6 @@ use { solana_sdk::{ bpf_loader_deprecated, entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, NON_DUP_MARKER}, - feature_set::FeatureSet, instruction::InstructionError, pubkey::Pubkey, system_instruction::MAX_PERMITTED_DATA_LENGTH, @@ -94,7 +93,6 @@ impl Serializer { fn write_account( &mut self, account: &mut BorrowedAccount<'_>, - feature_set: &FeatureSet, ) -> Result { let vm_data_addr = if self.copy_account_data { let vm_data_addr = self.vaddr.saturating_add(self.buffer.len() as u64); @@ -103,7 +101,7 @@ impl Serializer { } else { self.push_region(true); let vaddr = self.vaddr; - self.push_account_data_region(account, feature_set)?; + self.push_account_data_region(account)?; vaddr }; @@ -123,7 +121,7 @@ impl Serializer { .map_err(|_| InstructionError::InvalidArgument)?; self.region_start += BPF_ALIGN_OF_U128.saturating_sub(align_offset); // put the realloc padding in its own region - self.push_region(account.can_data_be_changed(feature_set).is_ok()); + self.push_region(account.can_data_be_changed().is_ok()); } } @@ -133,13 +131,12 @@ impl Serializer { fn push_account_data_region( &mut self, account: &mut BorrowedAccount<'_>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if !account.get_data().is_empty() { - let region = match account_data_region_memory_state(account, feature_set) { + let region = match account_data_region_memory_state(account) { MemoryState::Readable => MemoryRegion::new_readonly(account.get_data(), self.vaddr), MemoryState::Writable => { - MemoryRegion::new_writable(account.get_data_mut(feature_set)?, self.vaddr) + MemoryRegion::new_writable(account.get_data_mut()?, self.vaddr) } MemoryState::Cow(index_in_transaction) => { MemoryRegion::new_cow(account.get_data(), self.vaddr, index_in_transaction) @@ -194,7 +191,6 @@ 
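
push_account_data_region above picks one of three memory-region kinds for an account's data. A simplified sketch of that decision; here `writable` stands in for can_data_be_changed().is_ok(), and MemoryState is reduced from the solana_rbpf type:

enum MemoryState { Readable, Writable, Cow(u64) }

struct Account { writable: bool, shared: bool, index_in_transaction: u64 }

fn region_state(account: &Account) -> MemoryState {
    if !account.writable {
        MemoryState::Readable // map read-only; program writes will fault
    } else if account.shared {
        // Writable but still shared with the accounts db: map copy-on-write so
        // the bytes are only duplicated if the program actually writes.
        MemoryState::Cow(account.index_in_transaction)
    } else {
        MemoryState::Writable // already unique, map the bytes directly
    }
}

fn main() {
    let account = Account { writable: true, shared: true, index_in_transaction: 3 };
    assert!(matches!(region_state(&account), MemoryState::Cow(3)));
}
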
pub fn serialize_parameters( transaction_context: &TransactionContext, instruction_context: &InstructionContext, copy_account_data: bool, - feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -243,7 +239,6 @@ pub fn serialize_parameters( instruction_context.get_instruction_data(), &program_id, copy_account_data, - feature_set, ) } else { serialize_parameters_aligned( @@ -251,7 +246,6 @@ pub fn serialize_parameters( instruction_context.get_instruction_data(), &program_id, copy_account_data, - feature_set, ) } } @@ -262,7 +256,6 @@ pub fn deserialize_parameters( copy_account_data: bool, buffer: &[u8], accounts_metadata: &[SerializedAccountMetadata], - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let is_loader_deprecated = *instruction_context .try_borrow_last_program_account(transaction_context)? @@ -276,7 +269,6 @@ pub fn deserialize_parameters( copy_account_data, buffer, account_lengths, - feature_set, ) } else { deserialize_parameters_aligned( @@ -285,7 +277,6 @@ pub fn deserialize_parameters( copy_account_data, buffer, account_lengths, - feature_set, ) } } @@ -295,7 +286,6 @@ fn serialize_parameters_unaligned( instruction_data: &[u8], program_id: &Pubkey, copy_account_data: bool, - feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -346,9 +336,9 @@ fn serialize_parameters_unaligned( let vm_key_addr = s.write_all(account.get_key().as_ref()); let vm_lamports_addr = s.write::(account.get_lamports().to_le()); s.write::((account.get_data().len() as u64).to_le()); - let vm_data_addr = s.write_account(&mut account, feature_set)?; + let vm_data_addr = s.write_account(&mut account)?; let vm_owner_addr = s.write_all(account.get_owner().as_ref()); - s.write::(account.is_executable(feature_set) as u8); + s.write::(account.is_executable() as u8); s.write::((account.get_rent_epoch()).to_le()); accounts_metadata.push(SerializedAccountMetadata { original_data_len: account.get_data().len(), @@ -374,7 +364,6 @@ pub fn deserialize_parameters_unaligned>( copy_account_data: bool, buffer: &[u8], account_lengths: I, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (instruction_account_index, pre_len) in (0..instruction_context @@ -396,7 +385,7 @@ pub fn deserialize_parameters_unaligned>( .ok_or(InstructionError::InvalidArgument)?, ); if borrowed_account.get_lamports() != lamports { - borrowed_account.set_lamports(lamports, feature_set)?; + borrowed_account.set_lamports(lamports)?; } start += size_of::() // lamports + size_of::(); // data length @@ -407,9 +396,9 @@ pub fn deserialize_parameters_unaligned>( // The redundant check helps to avoid the expensive data comparison if we can match borrowed_account .can_data_be_resized(data.len()) - .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { - Ok(()) => borrowed_account.set_data_from_slice(data, feature_set)?, + Ok(()) => borrowed_account.set_data_from_slice(data)?, Err(err) if borrowed_account.get_data() != data => return Err(err), _ => {} } @@ -428,7 +417,6 @@ fn serialize_parameters_aligned( instruction_data: &[u8], program_id: &Pubkey, copy_account_data: bool, - feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -478,13 +466,13 @@ fn serialize_parameters_aligned( s.write::(NON_DUP_MARKER); s.write::(borrowed_account.is_signer() as u8); s.write::(borrowed_account.is_writable() as u8); - s.write::(borrowed_account.is_executable(feature_set) as u8); + 
s.write::(borrowed_account.is_executable() as u8); s.write_all(&[0u8, 0, 0, 0]); let vm_key_addr = s.write_all(borrowed_account.get_key().as_ref()); let vm_owner_addr = s.write_all(borrowed_account.get_owner().as_ref()); let vm_lamports_addr = s.write::(borrowed_account.get_lamports().to_le()); s.write::((borrowed_account.get_data().len() as u64).to_le()); - let vm_data_addr = s.write_account(&mut borrowed_account, feature_set)?; + let vm_data_addr = s.write_account(&mut borrowed_account)?; s.write::((borrowed_account.get_rent_epoch()).to_le()); accounts_metadata.push(SerializedAccountMetadata { original_data_len: borrowed_account.get_data().len(), @@ -515,7 +503,6 @@ pub fn deserialize_parameters_aligned>( copy_account_data: bool, buffer: &[u8], account_lengths: I, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (instruction_account_index, pre_len) in (0..instruction_context @@ -545,7 +532,7 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?, ); if borrowed_account.get_lamports() != lamports { - borrowed_account.set_lamports(lamports, feature_set)?; + borrowed_account.set_lamports(lamports)?; } start += size_of::(); // lamports let post_len = LittleEndian::read_u64( @@ -567,9 +554,9 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?; match borrowed_account .can_data_be_resized(post_len) - .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { - Ok(()) => borrowed_account.set_data_from_slice(data, feature_set)?, + Ok(()) => borrowed_account.set_data_from_slice(data)?, Err(err) if borrowed_account.get_data() != data => return Err(err), _ => {} } @@ -583,14 +570,14 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?; match borrowed_account .can_data_be_resized(post_len) - .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { Ok(()) => { - borrowed_account.set_data_length(post_len, feature_set)?; + borrowed_account.set_data_length(post_len)?; let allocated_bytes = post_len.saturating_sub(pre_len); if allocated_bytes > 0 { borrowed_account - .get_data_mut(feature_set)? + .get_data_mut()? .get_mut(pre_len..pre_len.saturating_add(allocated_bytes)) .ok_or(InstructionError::InvalidArgument)? 
.copy_from_slice( @@ -608,18 +595,15 @@ pub fn deserialize_parameters_aligned>( start += size_of::(); // rent_epoch if borrowed_account.get_owner().to_bytes() != owner { // Change the owner at the end so that we are allowed to change the lamports and data before - borrowed_account.set_owner(owner, feature_set)?; + borrowed_account.set_owner(owner)?; } } } Ok(()) } -pub(crate) fn account_data_region_memory_state( - account: &BorrowedAccount<'_>, - feature_set: &FeatureSet, -) -> MemoryState { - if account.can_data_be_changed(feature_set).is_ok() { +pub(crate) fn account_data_region_memory_state(account: &BorrowedAccount<'_>) -> MemoryState { + if account.can_data_be_changed().is_ok() { if account.is_shared() { MemoryState::Cow(account.get_index_in_transaction() as u64) } else { @@ -744,7 +728,6 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, - &invoke_context.feature_set, ); assert_eq!( serialization_result.as_ref().err(), @@ -899,7 +882,6 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, - &invoke_context.feature_set, ) .unwrap(); @@ -938,7 +920,7 @@ mod tests { assert_eq!(account.lamports(), account_info.lamports()); assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); - assert!(account_info.executable); + assert_eq!(account.executable(), account_info.executable); assert_eq!(account.rent_epoch(), account_info.rent_epoch); assert_eq!( @@ -961,7 +943,6 @@ mod tests { copy_account_data, serialized.as_slice(), &accounts_metadata, - &invoke_context.feature_set, ) .unwrap(); for (index_in_transaction, (_key, original_account)) in @@ -992,7 +973,6 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, - &invoke_context.feature_set, ) .unwrap(); let mut serialized_regions = concat_regions(®ions); @@ -1023,7 +1003,7 @@ mod tests { assert_eq!(account.lamports(), account_info.lamports()); assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); - assert!(account_info.executable); + assert_eq!(account.executable(), account_info.executable); assert_eq!(account.rent_epoch(), account_info.rent_epoch); } @@ -1033,7 +1013,6 @@ mod tests { copy_account_data, serialized.as_slice(), &account_lengths, - &invoke_context.feature_set, ) .unwrap(); for (index_in_transaction, (_key, original_account)) in diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index b4368f2172e04f..13f9cbaf905275 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -8,7 +8,7 @@ use { memory_region::{MemoryRegion, MemoryState}, }, solana_sdk::{ - feature_set::{enable_bpf_loader_set_authority_checked_ix, FeatureSet}, + feature_set::enable_bpf_loader_set_authority_checked_ix, stable_layout::stable_instruction::StableInstruction, syscalls::{ MAX_CPI_ACCOUNT_INFOS, MAX_CPI_INSTRUCTION_ACCOUNTS, MAX_CPI_INSTRUCTION_DATA_LEN, @@ -883,7 +883,7 @@ where .transaction_context .get_key_of_account_at_index(instruction_account.index_in_transaction)?; - if callee_account.is_executable(&invoke_context.feature_set) { + if callee_account.is_executable() { // Use the known account consume_compute_meter( invoke_context, @@ -1139,7 +1139,6 @@ fn cpi_common( caller_account, &callee_account, is_loader_deprecated, - &invoke_context.feature_set, )?; } } @@ -1180,7 +1179,7 @@ fn update_callee_account( direct_mapping: bool, ) -> Result<(), Error> 
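
The deserialize paths above keep repeating one idiom: run the permission check, commit the bytes on Ok, and only surface the error if the data actually changed. A reduced sketch of that check-then-write pattern (the real code also chains can_data_be_resized, elided here):

struct Account { data: Vec<u8>, writable: bool }

impl Account {
    fn can_data_be_changed(&self) -> Result<(), &'static str> {
        if self.writable { Ok(()) } else { Err("ReadonlyDataModified") }
    }

    fn set_data_checked(&mut self, new_data: &[u8]) -> Result<(), &'static str> {
        match self.can_data_be_changed() {
            Ok(()) => { self.data = new_data.to_vec(); Ok(()) }
            // The redundant-looking guard avoids the expensive comparison
            // whenever the write is permitted anyway.
            Err(err) if self.data != new_data => Err(err),
            _ => Ok(()),
        }
    }
}

fn main() {
    let mut readonly = Account { data: vec![1, 2], writable: false };
    assert!(readonly.set_data_checked(&[1, 2]).is_ok()); // unchanged: tolerated
    assert!(readonly.set_data_checked(&[9]).is_err());   // modified: rejected
}
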
{ if callee_account.get_lamports() != *caller_account.lamports { - callee_account.set_lamports(*caller_account.lamports, &invoke_context.feature_set)?; + callee_account.set_lamports(*caller_account.lamports)?; } if direct_mapping { @@ -1188,7 +1187,7 @@ fn update_callee_account( let post_len = *caller_account.ref_to_len_in_vm.get()? as usize; match callee_account .can_data_be_resized(post_len) - .and_then(|_| callee_account.can_data_be_changed(&invoke_context.feature_set)) + .and_then(|_| callee_account.can_data_be_changed()) { Ok(()) => { let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); @@ -1196,7 +1195,7 @@ fn update_callee_account( if is_loader_deprecated && realloc_bytes_used > 0 { return Err(InstructionError::InvalidRealloc.into()); } - callee_account.set_data_length(post_len, &invoke_context.feature_set)?; + callee_account.set_data_length(post_len)?; if realloc_bytes_used > 0 { let serialized_data = translate_slice::( memory_mapping, @@ -1207,7 +1206,7 @@ fn update_callee_account( invoke_context.get_check_aligned(), )?; callee_account - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut(caller_account.original_data_len..post_len) .ok_or(SyscallError::InvalidLength)? .copy_from_slice(serialized_data); @@ -1222,10 +1221,9 @@ fn update_callee_account( // The redundant check helps to avoid the expensive data comparison if we can match callee_account .can_data_be_resized(caller_account.serialized_data.len()) - .and_then(|_| callee_account.can_data_be_changed(&invoke_context.feature_set)) + .and_then(|_| callee_account.can_data_be_changed()) { - Ok(()) => callee_account - .set_data_from_slice(caller_account.serialized_data, &invoke_context.feature_set)?, + Ok(()) => callee_account.set_data_from_slice(caller_account.serialized_data)?, Err(err) if callee_account.get_data() != caller_account.serialized_data => { return Err(Box::new(err)); } @@ -1235,7 +1233,7 @@ fn update_callee_account( // Change the owner at the end so that we are allowed to change the lamports and data before if callee_account.get_owner() != caller_account.owner { - callee_account.set_owner(caller_account.owner.as_ref(), &invoke_context.feature_set)?; + callee_account.set_owner(caller_account.owner.as_ref())?; } Ok(()) @@ -1246,7 +1244,6 @@ fn update_caller_account_perms( caller_account: &CallerAccount, callee_account: &BorrowedAccount<'_>, is_loader_deprecated: bool, - feature_set: &FeatureSet, ) -> Result<(), Error> { let CallerAccount { original_data_len, @@ -1256,10 +1253,9 @@ fn update_caller_account_perms( let data_region = account_data_region(memory_mapping, *vm_data_addr, *original_data_len)?; if let Some(region) = data_region { - region.state.set(account_data_region_memory_state( - callee_account, - feature_set, - )); + region + .state + .set(account_data_region_memory_state(callee_account)); } let realloc_region = account_realloc_region( memory_mapping, @@ -1270,7 +1266,7 @@ fn update_caller_account_perms( if let Some(region) = realloc_region { region .state - .set(if callee_account.can_data_be_changed(feature_set).is_ok() { + .set(if callee_account.can_data_be_changed().is_ok() { MemoryState::Writable } else { MemoryState::Readable @@ -1818,11 +1814,9 @@ mod tests { let mut callee_account = borrow_instruction_account!(invoke_context, 0); + callee_account.set_lamports(42).unwrap(); callee_account - .set_lamports(42, &invoke_context.feature_set) - .unwrap(); - callee_account - .set_owner(Pubkey::new_unique().as_ref(), &invoke_context.feature_set) + 
.set_owner(Pubkey::new_unique().as_ref()) .unwrap(); update_caller_account( @@ -1891,9 +1885,7 @@ mod tests { (b"foobazbad".to_vec(), MAX_PERMITTED_DATA_INCREASE - 3), ] { assert_eq!(caller_account.serialized_data, callee_account.get_data()); - callee_account - .set_data_from_slice(&new_value, &invoke_context.feature_set) - .unwrap(); + callee_account.set_data_from_slice(&new_value).unwrap(); update_caller_account( &invoke_context, @@ -1921,10 +1913,7 @@ mod tests { } callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) .unwrap(); update_caller_account( &invoke_context, @@ -1940,10 +1929,7 @@ mod tests { assert!(is_zeroed(&data_slice[data_len..])); callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE + 1, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) .unwrap(); assert_matches!( update_caller_account( @@ -1958,11 +1944,9 @@ mod tests { ); // close the account + callee_account.set_data_length(0).unwrap(); callee_account - .set_data_length(0, &invoke_context.feature_set) - .unwrap(); - callee_account - .set_owner(system_program::id().as_ref(), &invoke_context.feature_set) + .set_owner(system_program::id().as_ref()) .unwrap(); update_caller_account( &invoke_context, @@ -2031,13 +2015,9 @@ mod tests { (vec![], 0), // check lower bound ] { if change_ptr { - callee_account - .set_data(new_value, &invoke_context.feature_set) - .unwrap(); + callee_account.set_data(new_value).unwrap(); } else { - callee_account - .set_data_from_slice(&new_value, &invoke_context.feature_set) - .unwrap(); + callee_account.set_data_from_slice(&new_value).unwrap(); } update_caller_account( @@ -2107,10 +2087,7 @@ mod tests { } callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) .unwrap(); update_caller_account( &invoke_context, @@ -2128,10 +2105,7 @@ mod tests { ); callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE + 1, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) .unwrap(); assert_matches!( update_caller_account( @@ -2146,11 +2120,9 @@ mod tests { ); // close the account + callee_account.set_data_length(0).unwrap(); callee_account - .set_data_length(0, &invoke_context.feature_set) - .unwrap(); - callee_account - .set_owner(system_program::id().as_ref(), &invoke_context.feature_set) + .set_owner(system_program::id().as_ref()) .unwrap(); update_caller_account( &invoke_context, @@ -2493,9 +2465,7 @@ mod tests { // this is done when a writable account is mapped, and it ensures // through make_data_mut() that the account is made writable and resized // with enough padding to hold the realloc padding - callee_account - .get_data_mut(&invoke_context.feature_set) - .unwrap(); + callee_account.get_data_mut().unwrap(); let serialized_data = translate_slice_mut::( &memory_mapping, diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index b85715eb171391..fd4b806567180d 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -127,7 +127,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ic_msg!(invoke_context, "instruction data too large"); return 
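
These tests probe the CPI realloc window: a callee may grow an account by at most MAX_PERMITTED_DATA_INCREASE beyond its serialized length, and one byte more must fail. A sketch of that bound; the constant matches the documented solana_sdk::entrypoint value, everything else is simplified:

const MAX_PERMITTED_DATA_INCREASE: usize = 10 * 1024;

fn check_realloc(original_len: usize, post_len: usize) -> Result<(), &'static str> {
    if post_len.saturating_sub(original_len) > MAX_PERMITTED_DATA_INCREASE {
        return Err("InvalidRealloc"); // grew past the serialized padding
    }
    Ok(())
}

fn main() {
    let len = 100;
    assert!(check_realloc(len, len + MAX_PERMITTED_DATA_INCREASE).is_ok());
    assert!(check_realloc(len, len + MAX_PERMITTED_DATA_INCREASE + 1).is_err());
}
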
Err(InstructionError::InvalidInstructionData); } - config_account.get_data_mut(&invoke_context.feature_set)?[..data.len()].copy_from_slice(data); + config_account.get_data_mut()?[..data.len()].copy_from_slice(data); Ok(()) }); diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 4764b23fe65e50..9573f925085585 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -247,7 +247,7 @@ pub fn process_instruction_write( } let end_offset = (offset as usize).saturating_add(bytes.len()); program - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut( LoaderV4State::program_data_offset().saturating_add(offset as usize) ..LoaderV4State::program_data_offset().saturating_add(end_offset), @@ -325,20 +325,19 @@ pub fn process_instruction_truncate( return Err(InstructionError::InvalidArgument); } let lamports_to_receive = program.get_lamports().saturating_sub(required_lamports); - program.checked_sub_lamports(lamports_to_receive, &invoke_context.feature_set)?; - recipient.checked_add_lamports(lamports_to_receive, &invoke_context.feature_set)?; + program.checked_sub_lamports(lamports_to_receive)?; + recipient.checked_add_lamports(lamports_to_receive)?; } std::cmp::Ordering::Equal => {} } if new_size == 0 { - program.set_data_length(0, &invoke_context.feature_set)?; + program.set_data_length(0)?; } else { program.set_data_length( LoaderV4State::program_data_offset().saturating_add(new_size as usize), - &invoke_context.feature_set, )?; if is_initialization { - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; state.slot = 0; state.status = LoaderV4Status::Retracted; state.authority_address = *authority_address; @@ -432,12 +431,12 @@ pub fn process_instruction_deploy( let rent = invoke_context.get_sysvar_cache().get_rent()?; let required_lamports = rent.minimum_balance(source_program.get_data().len()); let transfer_lamports = required_lamports.saturating_sub(program.get_lamports()); - program.set_data_from_slice(source_program.get_data(), &invoke_context.feature_set)?; - source_program.set_data_length(0, &invoke_context.feature_set)?; - source_program.checked_sub_lamports(transfer_lamports, &invoke_context.feature_set)?; - program.checked_add_lamports(transfer_lamports, &invoke_context.feature_set)?; + program.set_data_from_slice(source_program.get_data())?; + source_program.set_data_length(0)?; + source_program.checked_sub_lamports(transfer_lamports)?; + program.checked_add_lamports(transfer_lamports)?; } - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; state.slot = current_slot; state.status = LoaderV4Status::Deployed; @@ -486,7 +485,7 @@ pub fn process_instruction_retract( ic_logger_msg!(log_collector, "Program is not deployed"); return Err(InstructionError::InvalidArgument); } - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; state.status = LoaderV4Status::Retracted; Ok(()) } @@ -516,7 +515,7 @@ pub fn process_instruction_transfer_authority( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; if let Some(new_authority_address) = new_authority_address { state.authority_address = 
new_authority_address; } else if matches!(state.status, LoaderV4Status::Deployed) { diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 1dd827bbeb197b..cf8670cc86151a 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -38,7 +38,7 @@ use { bpf_loader, client::SyncClient, entrypoint::SUCCESS, - feature_set::{self, FeatureSet}, + feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, message::Message, native_loader, @@ -189,15 +189,11 @@ fn bench_program_alu(bencher: &mut Bencher) { #[bench] fn bench_program_execute_noop(bencher: &mut Bencher) { let GenesisConfigInfo { - mut genesis_config, + genesis_config, mint_keypair, .. } = create_genesis_config(50); - genesis_config - .accounts - .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); - let bank = Bank::new_for_benches(&genesis_config); let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); @@ -261,7 +257,6 @@ fn bench_create_vm(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(), !direct_mapping, // copy_account_data, - &invoke_context.feature_set, ) .unwrap(); @@ -296,7 +291,6 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(), !direct_mapping, // copy_account_data - &invoke_context.feature_set, ) .unwrap(); diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index f52f978da324fe..b7ce459a541315 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -73,13 +73,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(StakeInstruction::Initialize(authorized, lockup)) => { let mut me = get_stake_account()?; let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize( - &mut me, - &authorized, - &lockup, - &rent, - &invoke_context.feature_set, - ) + initialize(&mut me, &authorized, &lockup, &rent) } Ok(StakeInstruction::Authorize(authorized_pubkey, stake_authorize)) => { let mut me = get_stake_account()?; @@ -96,7 +90,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::AuthorizeWithSeed(args)) => { @@ -118,7 +111,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| args.stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::DelegateStake) => { @@ -221,7 +213,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| None }, new_warmup_cooldown_rate_epoch(invoke_context), - &invoke_context.feature_set, ) } Ok(StakeInstruction::Deactivate) => { @@ -233,13 +224,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(StakeInstruction::SetLockup(lockup)) => { let mut me = get_stake_account()?; let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup( - &mut me, - &lockup, - &signers, - &clock, - &invoke_context.feature_set, - ) + set_lockup(&mut me, &lockup, &signers, &clock) } Ok(StakeInstruction::InitializeChecked) => { let mut me = get_stake_account()?; @@ -260,13 +245,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| }; let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - 
initialize( - &mut me, - &authorized, - &Lockup::default(), - &rent, - &invoke_context.feature_set, - ) + initialize(&mut me, &authorized, &Lockup::default(), &rent) } Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) => { let mut me = get_stake_account()?; @@ -289,7 +268,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { @@ -318,7 +296,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| args.stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { @@ -332,13 +309,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| custodian: custodian_pubkey.cloned(), }; let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup( - &mut me, - &lockup, - &signers, - &clock, - &invoke_context.feature_set, - ) + set_lockup(&mut me, &lockup, &signers, &clock) } Ok(StakeInstruction::GetMinimumDelegation) => { let feature_set = invoke_context.feature_set.as_ref(); diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 68b03c9e1429a9..f20283cfbd8582 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -149,7 +149,6 @@ pub fn initialize( authorized: &Authorized, lockup: &Lockup, rent: &Rent, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if stake_account.get_data().len() != StakeStateV2::size_of() { return Err(InstructionError::InvalidAccountData); @@ -158,14 +157,11 @@ pub fn initialize( if let StakeStateV2::Uninitialized = stake_account.get_state()? { let rent_exempt_reserve = rent.minimum_balance(stake_account.get_data().len()); if stake_account.get_lamports() >= rent_exempt_reserve { - stake_account.set_state( - &StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - authorized: *authorized, - lockup: *lockup, - }), - feature_set, - ) + stake_account.set_state(&StakeStateV2::Initialized(Meta { + rent_exempt_reserve, + authorized: *authorized, + lockup: *lockup, + })) } else { Err(InstructionError::InsufficientFunds) } @@ -184,7 +180,6 @@ pub fn authorize( stake_authorize: StakeAuthorize, clock: &Clock, custodian: Option<&Pubkey>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { match stake_account.get_state()? { StakeStateV2::Stake(mut meta, stake, stake_flags) => { @@ -194,7 +189,7 @@ pub fn authorize( stake_authorize, Some((&meta.lockup, clock, custodian)), )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } StakeStateV2::Initialized(mut meta) => { meta.authorized.authorize( @@ -203,7 +198,7 @@ pub fn authorize( stake_authorize, Some((&meta.lockup, clock, custodian)), )?; - stake_account.set_state(&StakeStateV2::Initialized(meta), feature_set) + stake_account.set_state(&StakeStateV2::Initialized(meta)) } _ => Err(InstructionError::InvalidAccountData), } @@ -221,7 +216,6 @@ pub fn authorize_with_seed( stake_authorize: StakeAuthorize, clock: &Clock, custodian: Option<&Pubkey>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut signers = HashSet::default(); if instruction_context.is_instruction_account_signer(authority_base_index)? 
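
initialize above only succeeds on an account of exactly StakeStateV2::size_of() bytes that already holds its rent-exempt reserve. A sketch of those two preconditions, with a placeholder size constant and string errors in place of InstructionError:

const STAKE_STATE_SIZE: usize = 200; // placeholder for StakeStateV2::size_of()

fn initialize(data_len: usize, lamports: u64, rent_exempt_reserve: u64) -> Result<(), &'static str> {
    if data_len != STAKE_STATE_SIZE {
        return Err("InvalidAccountData"); // wrong size: refuse outright
    }
    if lamports < rent_exempt_reserve {
        return Err("InsufficientFunds"); // must already be rent exempt
    }
    Ok(()) // the real code then stores StakeStateV2::Initialized(Meta { .. })
}

fn main() {
    assert!(initialize(STAKE_STATE_SIZE, 1_000, 900).is_ok());
    assert!(initialize(STAKE_STATE_SIZE, 800, 900).is_err());
}
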
{ @@ -242,7 +236,6 @@ pub fn authorize_with_seed( stake_authorize, clock, custodian, - feature_set, ) } @@ -280,10 +273,7 @@ pub fn delegate( &vote_state?.convert_to_current(), clock.epoch, ); - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, StakeFlags::empty()), - feature_set, - ) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) } StakeStateV2::Stake(meta, mut stake, stake_flags) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; @@ -298,7 +288,7 @@ pub fn delegate( clock, stake_history, )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } _ => Err(InstructionError::InvalidAccountData), } @@ -354,10 +344,7 @@ pub fn deactivate( if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? { meta.authorized.check(signers, StakeAuthorize::Staker)?; deactivate_stake(invoke_context, &mut stake, &mut stake_flags, clock.epoch)?; - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, stake_flags), - &invoke_context.feature_set, - ) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(InstructionError::InvalidAccountData) } @@ -368,16 +355,15 @@ pub fn set_lockup( lockup: &LockupArgs, signers: &HashSet, clock: &Clock, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { match stake_account.get_state()? { StakeStateV2::Initialized(mut meta) => { meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Initialized(meta), feature_set) + stake_account.set_state(&StakeStateV2::Initialized(meta)) } StakeStateV2::Stake(mut meta, stake, stake_flags) => { meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } _ => Err(InstructionError::InvalidAccountData), } @@ -481,17 +467,11 @@ pub fn split( let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, stake_flags), - &invoke_context.feature_set, - )?; + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))?; drop(stake_account); let mut split = instruction_context .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state( - &StakeStateV2::Stake(split_meta, split_stake, stake_flags), - &invoke_context.feature_set, - )?; + split.set_state(&StakeStateV2::Stake(split_meta, split_stake, stake_flags))?; } StakeStateV2::Initialized(meta) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; @@ -510,10 +490,7 @@ pub fn split( split_meta.rent_exempt_reserve = validated_split_info.destination_rent_exempt_reserve; let mut split = instruction_context .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state( - &StakeStateV2::Initialized(split_meta), - &invoke_context.feature_set, - )?; + split.set_state(&StakeStateV2::Initialized(split_meta))?; } StakeStateV2::Uninitialized => { let stake_pubkey = transaction_context.get_key_of_account_at_index( @@ -531,17 +508,17 @@ pub fn split( let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized, &invoke_context.feature_set)?; + 
stake_account.set_state(&StakeStateV2::Uninitialized)?; } drop(stake_account); let mut split = instruction_context.try_borrow_instruction_account(transaction_context, split_index)?; - split.checked_add_lamports(lamports, &invoke_context.feature_set)?; + split.checked_add_lamports(lamports)?; drop(split); let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; + stake_account.checked_sub_lamports(lamports)?; Ok(()) } @@ -597,16 +574,16 @@ pub fn merge( ic_msg!(invoke_context, "Merging stake accounts"); if let Some(merged_state) = stake_merge_kind.merge(invoke_context, source_merge_kind, clock)? { - stake_account.set_state(&merged_state, &invoke_context.feature_set)?; + stake_account.set_state(&merged_state)?; } // Source is about to be drained, deinitialize its state - source_account.set_state(&StakeStateV2::Uninitialized, &invoke_context.feature_set)?; + source_account.set_state(&StakeStateV2::Uninitialized)?; // Drain the source stake account let lamports = source_account.get_lamports(); - source_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; - stake_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; + source_account.checked_sub_lamports(lamports)?; + stake_account.checked_add_lamports(lamports)?; Ok(()) } @@ -698,9 +675,8 @@ pub fn redelegate( deactivate(invoke_context, stake_account, &clock, signers)?; // transfer the effective stake to the uninitialized stake account - stake_account.checked_sub_lamports(effective_stake, &invoke_context.feature_set)?; - uninitialized_stake_account - .checked_add_lamports(effective_stake, &invoke_context.feature_set)?; + stake_account.checked_sub_lamports(effective_stake)?; + uninitialized_stake_account.checked_add_lamports(effective_stake)?; // initialize and schedule `uninitialized_stake_account` for activation let sysvar_cache = invoke_context.get_sysvar_cache(); @@ -714,19 +690,16 @@ pub fn redelegate( &uninitialized_stake_meta, &invoke_context.feature_set, )?; - uninitialized_stake_account.set_state( - &StakeStateV2::Stake( - uninitialized_stake_meta, - new_stake( - stake_amount, - &vote_pubkey, - &vote_state.convert_to_current(), - clock.epoch, - ), - StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, + uninitialized_stake_account.set_state(&StakeStateV2::Stake( + uninitialized_stake_meta, + new_stake( + stake_amount, + &vote_pubkey, + &vote_state.convert_to_current(), + clock.epoch, ), - &invoke_context.feature_set, - )?; + StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, + ))?; Ok(()) } @@ -743,7 +716,6 @@ pub fn withdraw( withdraw_authority_index: IndexOfAccount, custodian_index: Option, new_rate_activation_epoch: Option, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let withdraw_authority_pubkey = transaction_context.get_key_of_account_at_index( instruction_context @@ -828,14 +800,14 @@ pub fn withdraw( // Deinitialize state upon zero balance if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized, feature_set)?; + stake_account.set_state(&StakeStateV2::Uninitialized)?; } - stake_account.checked_sub_lamports(lamports, feature_set)?; + stake_account.checked_sub_lamports(lamports)?; drop(stake_account); let mut to = instruction_context.try_borrow_instruction_account(transaction_context, to_index)?; - to.checked_add_lamports(lamports, feature_set)?; + 
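
merge above writes the merged state, deinitializes the source, and only then moves every lamport across, so a drained account is never observable as initialized. A toy sketch of that ordering:

#[derive(Debug, PartialEq)]
enum State { Uninitialized, Initialized }

struct StakeAccount { lamports: u64, state: State }

fn merge(dest: &mut StakeAccount, source: &mut StakeAccount) {
    source.state = State::Uninitialized; // deinitialize the source first
    let lamports = source.lamports;      // then drain every lamport
    source.lamports -= lamports;
    dest.lamports += lamports;
}

fn main() {
    let mut dest = StakeAccount { lamports: 10, state: State::Initialized };
    let mut source = StakeAccount { lamports: 5, state: State::Initialized };
    merge(&mut dest, &mut source);
    assert_eq!((dest.lamports, source.lamports), (15, 0));
    assert_eq!(source.state, State::Uninitialized);
}
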
to.checked_add_lamports(lamports)?;
     Ok(())
 }
@@ -883,10 +855,7 @@ pub(crate) fn deactivate_delinquent(
     // voted in the last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION`
     if eligible_for_deactivate_delinquent(&delinquent_vote_state.epoch_credits, current_epoch) {
         deactivate_stake(invoke_context, &mut stake, &mut stake_flags, current_epoch)?;
-        stake_account.set_state(
-            &StakeStateV2::Stake(meta, stake, stake_flags),
-            &invoke_context.feature_set,
-        )
+        stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))
     } else {
         Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into())
     }
diff --git a/programs/system/src/system_instruction.rs b/programs/system/src/system_instruction.rs
index 0c9daf22d4b024..95860379fb17a0 100644
--- a/programs/system/src/system_instruction.rs
+++ b/programs/system/src/system_instruction.rs
@@ -56,10 +56,7 @@ pub fn advance_nonce_account(
                 next_durable_nonce,
                 invoke_context.lamports_per_signature,
             );
-            account.set_state(
-                &Versions::new(State::Initialized(new_data)),
-                &invoke_context.feature_set,
-            )
+            account.set_state(&Versions::new(State::Initialized(new_data)))
         }
         State::Uninitialized => {
             ic_msg!(
@@ -117,10 +114,7 @@ pub fn withdraw_nonce_account(
                 );
                 return Err(SystemError::NonceBlockhashNotExpired.into());
             }
-            from.set_state(
-                &Versions::new(State::Uninitialized),
-                &invoke_context.feature_set,
-            )?;
+            from.set_state(&Versions::new(State::Uninitialized))?;
         } else {
             let min_balance = rent.minimum_balance(from.get_data().len());
             let amount = checked_add(lamports, min_balance)?;
@@ -147,11 +141,11 @@ pub fn withdraw_nonce_account(
         return Err(InstructionError::MissingRequiredSignature);
     }
 
-    from.checked_sub_lamports(lamports, &invoke_context.feature_set)?;
+    from.checked_sub_lamports(lamports)?;
     drop(from);
     let mut to = instruction_context
         .try_borrow_instruction_account(transaction_context, to_account_index)?;
-    to.checked_add_lamports(lamports, &invoke_context.feature_set)?;
+    to.checked_add_lamports(lamports)?;
 
     Ok(())
 }
@@ -190,7 +184,7 @@ pub fn initialize_nonce_account(
                 invoke_context.lamports_per_signature,
             );
             let state = State::Initialized(data);
-            account.set_state(&Versions::new(state), &invoke_context.feature_set)
+            account.set_state(&Versions::new(state))
         }
         State::Initialized(_) => {
             ic_msg!(
@@ -221,7 +215,7 @@ pub fn authorize_nonce_account(
         .get_state::<Versions>()?
.authorize(signers, *nonce_authority) { - Ok(versions) => account.set_state(&versions, &invoke_context.feature_set), + Ok(versions) => account.set_state(&versions), Err(AuthorizeNonceError::Uninitialized) => { ic_msg!( invoke_context, @@ -1002,9 +996,7 @@ mod test { let mut nonce_account = instruction_context .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) .unwrap(); - nonce_account - .checked_sub_lamports(42 * 2, &invoke_context.feature_set) - .unwrap(); + nonce_account.checked_sub_lamports(42 * 2).unwrap(); set_invoke_context_blockhash!(invoke_context, 63); let authorized = *nonce_account.get_key(); let result = diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index 2a66b388103f9a..57cd8e546f13d8 100644 --- a/programs/system/src/system_processor.rs +++ b/programs/system/src/system_processor.rs @@ -104,7 +104,7 @@ fn allocate( return Err(SystemError::InvalidAccountDataLength.into()); } - account.set_data_length(space as usize, &invoke_context.feature_set)?; + account.set_data_length(space as usize)?; Ok(()) } @@ -126,7 +126,7 @@ fn assign( return Err(InstructionError::MissingRequiredSignature); } - account.set_owner(&owner.to_bytes(), &invoke_context.feature_set) + account.set_owner(&owner.to_bytes()) } fn allocate_and_assign( @@ -203,11 +203,11 @@ fn transfer_verified( return Err(SystemError::ResultWithNegativeLamports.into()); } - from.checked_sub_lamports(lamports, &invoke_context.feature_set)?; + from.checked_sub_lamports(lamports)?; drop(from); let mut to = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to.checked_add_lamports(lamports, &invoke_context.feature_set)?; + to.checked_add_lamports(lamports)?; Ok(()) } @@ -481,9 +481,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let nonce_versions: nonce::state::Versions = nonce_account.get_state()?; match nonce_versions.upgrade() { None => Err(InstructionError::InvalidArgument), - Some(nonce_versions) => { - nonce_account.set_state(&nonce_versions, &invoke_context.feature_set) - } + Some(nonce_versions) => nonce_account.set_state(&nonce_versions), } } SystemInstruction::Allocate { space } => { @@ -2065,4 +2063,54 @@ mod tests { upgraded_nonce_account ); } + + #[test] + fn test_assign_native_loader_and_transfer() { + for size in [0, 10] { + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::new(100, size, &system_program::id()); + let accounts = process_instruction( + &bincode::serialize(&SystemInstruction::Assign { + owner: solana_sdk::native_loader::id(), + }) + .unwrap(), + vec![(pubkey, account.clone())], + vec![AccountMeta { + pubkey, + is_signer: true, + is_writable: true, + }], + Ok(()), + ); + assert_eq!(accounts[0].owner(), &solana_sdk::native_loader::id()); + assert_eq!(accounts[0].lamports(), 100); + + let pubkey2 = Pubkey::new_unique(); + let accounts = process_instruction( + &bincode::serialize(&SystemInstruction::Transfer { lamports: 50 }).unwrap(), + vec![ + ( + pubkey2, + AccountSharedData::new(100, 0, &system_program::id()), + ), + (pubkey, accounts[0].clone()), + ], + vec![ + AccountMeta { + pubkey: pubkey2, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey, + is_signer: false, + is_writable: true, + }, + ], + Ok(()), + ); + assert_eq!(accounts[1].owner(), &solana_sdk::native_loader::id()); + assert_eq!(accounts[1].lamports(), 150); + } + } } diff --git a/programs/vote/src/vote_state/mod.rs 
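
transfer_verified, exercised by the test above, debits the sender before crediting the recipient and rejects overdrafts before touching either balance. A reduced sketch; the real code works on BorrowedAccount with checked_sub_lamports and checked_add_lamports:

fn transfer(from: &mut u64, to: &mut u64, lamports: u64) -> Result<(), &'static str> {
    if lamports > *from {
        return Err("ResultWithNegativeLamports"); // fail before mutating
    }
    *from -= lamports;
    *to += lamports;
    Ok(())
}

fn main() {
    let (mut from, mut to) = (100u64, 0u64);
    assert!(transfer(&mut from, &mut to, 60).is_ok());
    assert_eq!((from, to), (40, 60));
    assert!(transfer(&mut from, &mut to, 41).is_err()); // overdraft rejected
}
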
b/programs/vote/src/vote_state/mod.rs index b95f47e8c1b9c2..f5901374d9b6d9 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -156,24 +156,22 @@ fn set_vote_account_state( && (!vote_account .is_rent_exempt_at_data_length(VoteStateVersions::vote_state_size_of(true)) || vote_account - .set_data_length(VoteStateVersions::vote_state_size_of(true), feature_set) + .set_data_length(VoteStateVersions::vote_state_size_of(true)) .is_err()) { // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be // resized for other reasons. So store the V1_14_11 version. - return vote_account.set_state( - &VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))), - feature_set, - ); + return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( + VoteState1_14_11::from(vote_state), + ))); } // Vote account is large enough to store the newest version of vote state - vote_account.set_state(&VoteStateVersions::new_current(vote_state), feature_set) + vote_account.set_state(&VoteStateVersions::new_current(vote_state)) // Else when the vote_state_add_vote_latency feature is not enabled, then the V1_14_11 version is stored } else { - vote_account.set_state( - &VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))), - feature_set, - ) + vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( + VoteState1_14_11::from(vote_state), + ))) } } @@ -1023,11 +1021,11 @@ pub fn withdraw( } } - vote_account.checked_sub_lamports(lamports, feature_set)?; + vote_account.checked_sub_lamports(lamports)?; drop(vote_account); let mut to_account = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to_account.checked_add_lamports(lamports, feature_set)?; + to_account.checked_add_lamports(lamports)?; Ok(()) } @@ -1373,7 +1371,7 @@ mod tests { // Test that when the feature is enabled, if the vote account does have sufficient lamports, the // new vote state is written out assert_eq!( - borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of()), &feature_set), + borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of()),), Ok(()) ); assert_eq!( diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 21c09b4ef123f2..ba47d13624826e 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -130,8 +130,7 @@ where return Err(InstructionError::InvalidAccountData); } - proof_context_account - .set_data_from_slice(&context_state_data, &invoke_context.feature_set)?; + proof_context_account.set_data_from_slice(&context_state_data)?; } Ok(()) @@ -173,13 +172,10 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), let mut destination_account = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - destination_account.checked_add_lamports( - proof_context_account.get_lamports(), - &invoke_context.feature_set, - )?; - proof_context_account.set_lamports(0, &invoke_context.feature_set)?; - proof_context_account.set_data_length(0, &invoke_context.feature_set)?; - proof_context_account.set_owner(system_program::id().as_ref(), &invoke_context.feature_set)?; + destination_account.checked_add_lamports(proof_context_account.get_lamports())?; + proof_context_account.set_lamports(0)?; + proof_context_account.set_data_length(0)?; + proof_context_account.set_owner(system_program::id().as_ref())?; Ok(()) } diff --git a/runtime/src/bank.rs 
b/runtime/src/bank.rs index b7329724a2558e..388c2f4a15f529 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -107,9 +107,8 @@ use { }, solana_sdk::{ account::{ - create_account_shared_data_with_fields as create_account, create_executable_meta, - from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, - WritableAccount, + create_account_shared_data_with_fields as create_account, from_account, Account, + AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, }, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, @@ -3955,12 +3954,10 @@ impl Bank { // Add a bogus executable account, which will be loaded and ignored. let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None); - // Mock account_data with executable_meta so that the account is executable. - let account_data = create_executable_meta(&owner); let account = AccountSharedData::from(Account { lamports, owner, - data: account_data.to_vec(), + data: vec![], executable: true, rent_epoch, }); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 29dbdc2e5aeacd..6960f220244998 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1051,7 +1051,6 @@ fn test_rent_exempt_executable_account() { let mut account = AccountSharedData::new(account_balance, 0, &solana_sdk::pubkey::new_rand()); account.set_executable(true); account.set_owner(bpf_loader_upgradeable::id()); - account.set_data(create_executable_meta(account.owner()).to_vec()); bank.store_account(&account_pubkey, &account); let transfer_lamports = 1; @@ -1089,10 +1088,10 @@ fn test_rent_complex() { MockInstruction::Deduction => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(1, &invoke_context.feature_set)?; + .checked_add_lamports(1)?; instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_sub_lamports(1, &invoke_context.feature_set)?; + .checked_sub_lamports(1)?; Ok(()) } } @@ -5994,16 +5993,16 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let lamports = u64::from_le_bytes(instruction_data.try_into().unwrap()); instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_sub_lamports(lamports, &invoke_context.feature_set)?; + .checked_sub_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(lamports, &invoke_context.feature_set)?; + .checked_add_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_sub_lamports(lamports, &invoke_context.feature_set)?; + .checked_sub_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? 
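
With create_executable_meta gone, the bank's placeholder for a builtin is just an executable-flagged account with empty data. A sketch of that shape, with a stand-in for solana_sdk::account::Account:

struct Account {
    lamports: u64,
    data: Vec<u8>,
    executable: bool,
}

fn bogus_builtin_account(lamports: u64) -> Account {
    Account {
        lamports,
        data: vec![],     // previously create_executable_meta(&owner).to_vec()
        executable: true, // loaded and ignored; the flag alone marks it
    }
}

fn main() {
    let account = bogus_builtin_account(1);
    assert_eq!(account.lamports, 1);
    assert!(account.executable && account.data.is_empty());
}
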
- .checked_add_lamports(lamports, &invoke_context.feature_set)?; + .checked_add_lamports(lamports)?; Ok(()) }); @@ -6473,25 +6472,26 @@ fn test_bank_hash_consistency() { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "3VqF5pMe3XABLqzUaYw2UVXfAokMJgMkrdfvneFQkHbB", + "i5hGiQ3WtEehNrvhbfPFkUdm267t18fSpujcYtkBioW", ); } + if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "B8GsaBJ9aJrQcbhTTfgNVuV4uwb4v8nKT86HUjDLvNgk", + "7NmBtNvbhoqzatJv8NgBs84qWrm4ZhpuC75DCpbqwiS" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "Eg9VRE3zUwarxWyHXhitX9wLkg1vfNeiVqVQxSif6qEC" + "A1jjuUaENeDcsSvwejFGaZ5zWmnJ77doSzqdKtfzpoFk" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "5rLmK24zyxdeb8aLn5LDEnHLDQmxRd5gWZDVJGgsFX1c" + "ApnMkFt5Bs4yDJ8S2CCPsQRL1He6vWXw6vMzAyc5i811" ); break; } @@ -6507,7 +6507,7 @@ fn test_same_program_id_uses_unique_executable_accounts() { let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context .try_borrow_program_account(transaction_context, 0)? - .set_data_length(2, &invoke_context.feature_set) + .set_data_length(2) }); let (genesis_config, mint_keypair) = create_genesis_config(50000); @@ -9477,7 +9477,7 @@ fn test_transfer_sysvar() { let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data(vec![0; 40], &invoke_context.feature_set)?; + .set_data(vec![0; 40])?; Ok(()) }); @@ -10321,10 +10321,10 @@ declare_process_instruction!(MockTransferBuiltin, 1, |invoke_context| { MockTransferInstruction::Transfer(amount) => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_sub_lamports(amount, &invoke_context.feature_set)?; + .checked_sub_lamports(amount)?; instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_add_lamports(amount, &invoke_context.feature_set)?; + .checked_add_lamports(amount)?; Ok(()) } } @@ -11034,7 +11034,7 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { // Set data length instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data_length(new_size, &invoke_context.feature_set)?; + .set_data_length(new_size)?; // set balance let current_balance = instruction_context @@ -11045,17 +11045,17 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { if diff_balance.is_positive() { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_sub_lamports(amount, &invoke_context.feature_set)?; + .checked_sub_lamports(amount)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_lamports(new_balance, &invoke_context.feature_set)?; + .set_lamports(new_balance)?; } else { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(amount, &invoke_context.feature_set)?; + .checked_add_lamports(amount)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? 
- .set_lamports(new_balance, &invoke_context.feature_set)?; + .set_lamports(new_balance)?; } Ok(()) } diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 96cdd5b90ce99b..f701e868ccf4e2 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -6,9 +6,8 @@ use { crate::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::{Epoch, INITIAL_RENT_EPOCH}, - feature_set::{deprecate_executable_meta_update_in_bpf_loader, FeatureSet}, lamports::LamportsError, - loader_v4, native_loader, + loader_v4, pubkey::Pubkey, }, serde::{ @@ -40,9 +39,6 @@ pub struct Account { /// the program that owns this account. If executable, the program that loads this account. pub owner: Pubkey, /// this account's data contains a loaded program (and is now read-only) - /// - /// When feature `deprecate_executable_meta_update_in_bpf_loader` is active, - /// `executable` is deprecated, please use `fn is_executable(&account)` instead. pub executable: bool, /// the epoch at which this account will next owe rent pub rent_epoch: Epoch, @@ -767,94 +763,6 @@ pub const PROGRAM_OWNERS: &[Pubkey] = &[ loader_v4::id(), ]; -const LOADER_V4_STATUS_BYTE_OFFSET: usize = 40; - -/// Create executable account meta data based on account's `owner`. -/// -/// This function is only used for testing and an optimization during -/// transaction loading. -/// -/// When the program account is already present in the program cache, we don't -/// need to load the full account data during transaction loading. Instead, all -/// we need is a minimal executable account meta data, which is what this -/// function returns. -pub fn create_executable_meta(owner: &Pubkey) -> &[u8] { - // For upgradable program account, only `UpgradeableLoaderState::Program` - // variant (i.e. discriminant = 2) should *executable*, which means the - // discriminant for the enum at byte offset 0 in account data is 2. - const EXECUTABLE_META_FOR_BPF_LOADER_UPGRADABLE: [u8; 1] = [2]; - - // For loader v4 program, when LoaderV4Status (byte_offset = 40 in account - // data) is set, the program is executable. - const fn get_executable_meta_for_loader_v4() -> [u8; 41] { - let mut v = [0; LOADER_V4_STATUS_BYTE_OFFSET + 1]; - v[LOADER_V4_STATUS_BYTE_OFFSET] = 1; - v - } - const EXECUTABLE_META_FOR_LOADER_V4: [u8; LOADER_V4_STATUS_BYTE_OFFSET + 1] = - get_executable_meta_for_loader_v4(); - - // For other owners, simple returns a 1 byte array would make it executable. - const DEFAULT_EXECUTABLE_META: [u8; 1] = [1]; - - if bpf_loader_upgradeable::check_id(owner) { - &EXECUTABLE_META_FOR_BPF_LOADER_UPGRADABLE - } else if loader_v4::check_id(owner) { - &EXECUTABLE_META_FOR_LOADER_V4 - } else { - &DEFAULT_EXECUTABLE_META - } -} - -/// Return true if the account program is executable. -pub fn is_executable(account: &impl ReadableAccount, feature_set: &FeatureSet) -> bool { - if !feature_set.is_active(&deprecate_executable_meta_update_in_bpf_loader::id()) { - account.executable() - } else { - // First, check if the account is empty. Empty accounts are not executable. - if account.data().is_empty() { - return false; - } - - // bpf_loader/bpf_loader_deprecated still relies on `executable` on the - // program account. When the program account is finalized, the loader will - // mark `executable` flag on the account. We can't emulate `executable` for - // these two loaders. However, when `deprecate_executable` is true, we - // should have already disabled the deployment of bpf_loader and - // bpf_loader_deprecated. 
Therefore, we can safely assume that all those - // programs are `executable`. - if bpf_loader::check_id(account.owner()) || bpf_loader_deprecated::check_id(account.owner()) - { - return true; - } - - if bpf_loader_upgradeable::check_id(account.owner()) { - // For upgradable program account, only - // `UpgradeableLoaderState::Program` variant (i.e. discriminant = 2) is - // *executable*. - return account.data()[0] == 2; - } - - if loader_v4::check_id(account.owner()) { - // LoaderV4Status (byte_offset = 40) - // return account.data()[LOADER_V4_STATUS_BYTE_OFFSET] != 0; - return false; // TODO: return false for now - } - - false - } -} - -/// Return true if the account program is a builtin program. -/// -/// This function also ensures that all valid builtin programs have non-empty -/// program data. Typically, the program data contains only the "name" for the -/// program. If, for some reason, the program account's data is empty, we should -/// exclude such a program from `builtins`. -pub fn is_builtin(account: &impl ReadableAccount) -> bool { - native_loader::check_id(account.owner()) && !account.data().is_empty() -} - #[cfg(test)] pub mod tests { use super::*; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index bb7c50f460fd81..55ce4c1253940a 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -756,10 +756,6 @@ pub mod disable_bpf_loader_instructions { solana_sdk::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn"); } -pub mod deprecate_executable_meta_update_in_bpf_loader { - solana_sdk::declare_id!("k6uR1J9VtKJnTukBV2Eo15BEy434MBg8bT6hHQgmU8v"); -} - pub mod enable_zk_proof_from_account { solana_sdk::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); } @@ -967,7 +963,6 @@ lazy_static! { (index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"), (merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for merkle root conflicts #34270"), (disable_bpf_loader_instructions::id(), "disable bpf loader management instructions #34194"), - (deprecate_executable_meta_update_in_bpf_loader::id(), "deprecate executable meta flag update in bpf loader #34194"), (enable_zk_proof_from_account::id(), "Enable zk token proof program to read proof from accounts instead of instruction data #34750"), (curve25519_restrict_msm_length::id(), "restrict curve25519 multiscalar multiplication vector lengths #34763"), (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 981f64870f6063..7df7fc96d67933 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -17,8 +17,7 @@ use { }; use { crate::{ - account::{is_builtin, is_executable, AccountSharedData, ReadableAccount}, - feature_set::FeatureSet, + account::{AccountSharedData, ReadableAccount}, instruction::InstructionError, pubkey::Pubkey, }, @@ -740,11 +739,7 @@ impl<'a> BorrowedAccount<'a> { /// Assignes the owner of this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn set_owner( - &mut self, - pubkey: &[u8], - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_owner(&mut self, pubkey: &[u8]) -> Result<(), InstructionError> { // Only the owner can assign a new owner if !self.is_owned_by_current_program() { return Err(InstructionError::ModifiedProgramId); @@ -754,7 +749,7 @@ impl<'a> BorrowedAccount<'a> { return 
Err(InstructionError::ModifiedProgramId); } // and only if the account is not executable - if self.is_executable(feature_set) { + if self.is_executable() { return Err(InstructionError::ModifiedProgramId); } // and only if the data is zero-initialized or empty @@ -778,11 +773,7 @@ impl<'a> BorrowedAccount<'a> { /// Overwrites the number of lamports of this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn set_lamports( - &mut self, - lamports: u64, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { // An account not owned by the program cannot have its balance decrease if !self.is_owned_by_current_program() && lamports < self.get_lamports() { return Err(InstructionError::ExternalAccountLamportSpend); @@ -792,7 +783,7 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ReadonlyLamportChange); } // The balance of executable accounts may not change - if self.is_executable(feature_set) { + if self.is_executable() { return Err(InstructionError::ExecutableLamportChange); } // don't touch the account if the lamports do not change @@ -806,31 +797,21 @@ impl<'a> BorrowedAccount<'a> { /// Adds lamports to this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn checked_add_lamports( - &mut self, - lamports: u64, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn checked_add_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { self.set_lamports( self.get_lamports() .checked_add(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, - feature_set, ) } /// Subtracts lamports from this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn checked_sub_lamports( - &mut self, - lamports: u64, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn checked_sub_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { self.set_lamports( self.get_lamports() .checked_sub(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, - feature_set, ) } @@ -842,11 +823,8 @@ impl<'a> BorrowedAccount<'a> { /// Returns a writable slice of the account data (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn get_data_mut( - &mut self, - feature_set: &FeatureSet, - ) -> Result<&mut [u8], InstructionError> { - self.can_data_be_changed(feature_set)?; + pub fn get_data_mut(&mut self) -> Result<&mut [u8], InstructionError> { + self.can_data_be_changed()?; self.touch()?; self.make_data_mut(); Ok(self.account.data_as_mut_slice()) @@ -871,13 +849,9 @@ impl<'a> BorrowedAccount<'a> { not(target_os = "solana"), any(test, feature = "dev-context-only-utils") ))] - pub fn set_data( - &mut self, - data: Vec, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_data(&mut self, data: Vec) -> Result<(), InstructionError> { self.can_data_be_resized(data.len())?; - self.can_data_be_changed(feature_set)?; + self.can_data_be_changed()?; self.touch()?; self.update_accounts_resize_delta(data.len())?; @@ -890,18 +864,14 @@ impl<'a> BorrowedAccount<'a> { /// Call this when you have a slice of data you do not own and want to /// replace the account data with it. 
#[cfg(not(target_os = "solana"))] - pub fn set_data_from_slice( - &mut self, - data: &[u8], - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_data_from_slice(&mut self, data: &[u8]) -> Result<(), InstructionError> { self.can_data_be_resized(data.len())?; - self.can_data_be_changed(feature_set)?; + self.can_data_be_changed()?; self.touch()?; self.update_accounts_resize_delta(data.len())?; // Calling make_data_mut() here guarantees that set_data_from_slice() // copies in places, extending the account capacity if necessary but - // never reducing it. This is required as the account might be directly + // never reducing it. This is required as the account might be directly // mapped into a MemoryRegion, and therefore reducing capacity would // leave a hole in the vm address space. After CPI or upon program // termination, the runtime will zero the extra capacity. @@ -915,13 +885,9 @@ impl<'a> BorrowedAccount<'a> { /// /// Fills it with zeros at the end if is extended or truncates at the end otherwise. #[cfg(not(target_os = "solana"))] - pub fn set_data_length( - &mut self, - new_length: usize, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_data_length(&mut self, new_length: usize) -> Result<(), InstructionError> { self.can_data_be_resized(new_length)?; - self.can_data_be_changed(feature_set)?; + self.can_data_be_changed()?; // don't touch the account if the length does not change if self.get_data().len() == new_length { return Ok(()); } @@ -934,14 +900,10 @@ impl<'a> BorrowedAccount<'a> { /// Appends all elements in a slice to the account #[cfg(not(target_os = "solana"))] - pub fn extend_from_slice( - &mut self, - data: &[u8], - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn extend_from_slice(&mut self, data: &[u8]) -> Result<(), InstructionError> { let new_len = self.get_data().len().saturating_add(data.len()); self.can_data_be_resized(new_len)?; - self.can_data_be_changed(feature_set)?; + self.can_data_be_changed()?; if data.is_empty() { return Ok(()); } @@ -995,7 +957,7 @@ impl<'a> BorrowedAccount<'a> { // about to write into it. Make the account mutable by copying it in a // buffer with MAX_PERMITTED_DATA_INCREASE capacity so that if the // transaction reallocs, we don't have to copy the whole account data a - // second time to fulfill the realloc. + // second time to fulfill the realloc. // // NOTE: The account memory region CoW code in bpf_loader::create_vm() implements the same // logic and must be kept in sync.
@@ -1014,12 +976,8 @@ impl<'a> BorrowedAccount<'a> { /// Serializes a state into the account data #[cfg(not(target_os = "solana"))] - pub fn set_state( - &mut self, - state: &T, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { - let data = self.get_data_mut(feature_set)?; + pub fn set_state(&mut self, state: &T) -> Result<(), InstructionError> { + let data = self.get_data_mut()?; let serialized_size = bincode::serialized_size(state).map_err(|_| InstructionError::GenericError)?; if serialized_size > data.len() as u64 { @@ -1040,8 +998,8 @@ impl<'a> BorrowedAccount<'a> { /// Returns whether this account is executable (transaction wide) #[inline] - pub fn is_executable(&self, feature_set: &FeatureSet) -> bool { - is_builtin(&*self.account) || is_executable(&*self.account, feature_set) + pub fn is_executable(&self) -> bool { + self.account.executable() } /// Configures whether this account is executable (transaction wide) @@ -1064,11 +1022,11 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ExecutableModified); } // one can not clear the executable flag - if self.account.executable() && !is_executable { + if self.is_executable() && !is_executable { return Err(InstructionError::ExecutableModified); } // don't touch the account if the executable flag does not change - if self.account.executable() == is_executable { + if self.is_executable() == is_executable { return Ok(()); } self.touch()?; @@ -1119,9 +1077,9 @@ impl<'a> BorrowedAccount<'a> { /// Returns an error if the account data can not be mutated by the current program #[cfg(not(target_os = "solana"))] - pub fn can_data_be_changed(&self, feature_set: &FeatureSet) -> Result<(), InstructionError> { + pub fn can_data_be_changed(&self) -> Result<(), InstructionError> { // Only non-executable accounts data can be changed - if self.is_executable(feature_set) { + if self.is_executable() { return Err(InstructionError::ExecutableDataModified); } // and only if the account is writable diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index ee06dd5fbf2198..374fc756de31da 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -11,10 +11,7 @@ use { loaded_programs::LoadedProgramsForTxBatch, }, solana_sdk::{ - account::{ - create_executable_meta, is_builtin, is_executable, Account, AccountSharedData, - ReadableAccount, WritableAccount, - }, + account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, feature_set::{ self, include_loaded_accounts_data_size_in_fee_calculation, remove_rounding_in_fee_calculation, @@ -336,7 +333,7 @@ fn load_transaction_accounts( return Err(TransactionError::ProgramAccountNotFound); } - if !(is_builtin(program_account) || is_executable(program_account, &feature_set)) { + if !program_account.executable() { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } @@ -356,8 +353,7 @@ fn load_transaction_accounts( let owner_index = accounts.len(); if let Some(owner_account) = callbacks.get_account_shared_data(owner_id) { if !native_loader::check_id(owner_account.owner()) - || !(is_builtin(&owner_account) - || is_executable(&owner_account, &feature_set)) + || !owner_account.executable() { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); @@ -423,7 +419,6 @@ fn account_shared_data_from_program( .ok_or(TransactionError::AccountNotFound)?; program_account.set_owner(**program_owner); program_account.set_executable(true); - 
program_account.set_data_from_slice(create_executable_meta(program_owner)); Ok(program_account) } @@ -887,7 +882,7 @@ mod tests { let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); account.set_owner(bpf_loader_upgradeable::id()); - account.set_data(create_executable_meta(account.owner()).to_vec()); + account.set_executable(true); accounts.push((key1, account)); let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; @@ -967,7 +962,6 @@ mod tests { account.set_executable(true); account.set_rent_epoch(1); account.set_owner(key1); - account.set_data(create_executable_meta(account.owner()).to_vec()); accounts.push((key2, account)); let instructions = vec![ @@ -1428,7 +1422,6 @@ mod tests { let mut expected = AccountSharedData::default(); expected.set_owner(other_key); expected.set_executable(true); - expected.set_data_from_slice(create_executable_meta(&other_key)); assert_eq!(result.unwrap(), expected); } From f41fb84e1543d44610988bafd4c4a06afe515ca7 Mon Sep 17 00:00:00 2001 From: Trent Nelson <490004+t-nelson@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:13:50 -0600 Subject: [PATCH 020/153] rpc-sts: add config options for stake-weighted qos (#197) * rpc-sts: plumb options for swqos config * rpc-sts: send to specific tpu peers when configured --- .../src/send_transaction_service.rs | 22 ++++++++++++--- validator/src/cli.rs | 16 +++++++++++ validator/src/main.rs | 28 +++++++++++++++---- 3 files changed, 57 insertions(+), 9 deletions(-) diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index dbdcda2f2ff905..abe53b236d2e75 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -115,6 +115,7 @@ pub struct Config { pub batch_send_rate_ms: u64, /// When the retry pool exceeds this max size, new transactions are dropped after their first broadcast attempt pub retry_pool_max_size: usize, + pub tpu_peers: Option>, } impl Default for Config { @@ -127,6 +128,7 @@ impl Default for Config { batch_size: DEFAULT_TRANSACTION_BATCH_SIZE, batch_send_rate_ms: DEFAULT_BATCH_SEND_RATE_MS, retry_pool_max_size: MAX_TRANSACTION_RETRY_POOL_SIZE, + tpu_peers: None, } } } @@ -566,12 +568,18 @@ impl SendTransactionService { stats: &SendTransactionServiceStats, ) { // Processing the transactions in batch - let addresses = Self::get_tpu_addresses_with_slots( + let mut addresses = config + .tpu_peers + .as_ref() + .map(|addrs| addrs.iter().map(|a| (a, 0)).collect::>()) + .unwrap_or_default(); + let leader_addresses = Self::get_tpu_addresses_with_slots( tpu_address, leader_info, config, connection_cache.protocol(), ); + addresses.extend(leader_addresses); let wire_transactions = transactions .iter() @@ -584,8 +592,8 @@ impl SendTransactionService { }) .collect::>(); - for address in &addresses { - Self::send_transactions(address.0, &wire_transactions, connection_cache, stats); + for (address, _) in &addresses { + Self::send_transactions(address, &wire_transactions, connection_cache, stats); } } @@ -702,14 +710,20 @@ impl SendTransactionService { let iter = wire_transactions.chunks(config.batch_size); for chunk in iter { + let mut addresses = config + .tpu_peers + .as_ref() + .map(|addrs| addrs.iter().collect::>()) + .unwrap_or_default(); let mut leader_info_provider = leader_info_provider.lock().unwrap(); let leader_info = leader_info_provider.get_leader_info(); - let addresses = Self::get_tpu_addresses( + let leader_addresses = 
Self::get_tpu_addresses( tpu_address, leader_info, config, connection_cache.protocol(), ); + addresses.extend(leader_addresses); for address in &addresses { Self::send_transactions(address, chunk, connection_cache, stats); diff --git a/validator/src/cli.rs b/validator/src/cli.rs index e9298d9c02928e..f127273c8da2f3 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1163,6 +1163,22 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .default_value(&default_args.rpc_send_transaction_retry_pool_max_size) .help("The maximum size of transactions retry pool."), ) + .arg( + Arg::with_name("rpc_send_transaction_tpu_peer") + .long("rpc-send-transaction-tpu-peer") + .takes_value(true) + .number_of_values(1) + .multiple(true) + .value_name("HOST:PORT") + .validator(solana_net_utils::is_host_port) + .help("Peer(s) to broadcast transactions to instead of the current leader") + ) + .arg( + Arg::with_name("rpc_send_transaction_also_leader") + .long("rpc-send-transaction-also-leader") + .requires("rpc_send_transaction_tpu_peer") + .help("With `--rpc-send-transaction-tpu-peer HOST:PORT`, also send to the current leader") + ) .arg( Arg::with_name("rpc_scan_and_fix_roots") .long("rpc-scan-and-fix-roots") diff --git a/validator/src/main.rs b/validator/src/main.rs index 7f3de66b457c74..cadd2759040657 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1308,6 +1308,27 @@ pub fn main() { ); exit(1); } + let rpc_send_transaction_tpu_peers = matches + .values_of("rpc_send_transaction_tpu_peer") + .map(|values| { + values + .map(solana_net_utils::parse_host_port) + .collect::, String>>() + }) + .transpose() + .unwrap_or_else(|e| { + eprintln!("failed to parse rpc send-transaction-service tpu peer address: {e}"); + exit(1); + }); + let rpc_send_transaction_also_leader = matches.is_present("rpc_send_transaction_also_leader"); + let leader_forward_count = + if rpc_send_transaction_tpu_peers.is_some() && !rpc_send_transaction_also_leader { + // rpc-sts is configured to send only to specific tpu peers. disable leader forwards + 0 + } else { + value_t_or_exit!(matches, "rpc_send_transaction_leader_forward_count", u64) + }; + let full_api = matches.is_present("full_rpc_api"); let mut validator_config = ValidatorConfig { @@ -1399,11 +1420,7 @@ pub fn main() { contact_debug_interval, send_transaction_service_config: send_transaction_service::Config { retry_rate_ms: rpc_send_retry_rate_ms, - leader_forward_count: value_t_or_exit!( - matches, - "rpc_send_transaction_leader_forward_count", - u64 - ), + leader_forward_count, default_max_retries: value_t!( matches, "rpc_send_transaction_default_max_retries", @@ -1422,6 +1439,7 @@ pub fn main() { "rpc_send_transaction_retry_pool_max_size", usize ), + tpu_peers: rpc_send_transaction_tpu_peers, }, no_poh_speed_test: matches.is_present("no_poh_speed_test"), no_os_memory_stats_reporting: matches.is_present("no_os_memory_stats_reporting"), From 0e932c7308e7ddb9c2ad33697d40ac409f034d38 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 20 Mar 2024 10:39:25 -0700 Subject: [PATCH 021/153] [TieredStorage] Refactor TieredStorage::new_readonly() code path (#195) #### Problem The TieredStorage::new_readonly() function currently has the following problems: * It opens the file without checking the magic number before checking and loading the footer. * It opens the file twice: first to load the footer, then open again by the reader. 
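To make the intended flow concrete, here is a minimal, standalone sketch of the check-on-open pattern this refactor moves to (`std`-only; `MAGIC`, `VerifiedFile`, and the error handling are illustrative placeholders, not this PR's actual definitions):

```rust
use std::{
    fs::File,
    io::{self, Read, Seek, SeekFrom},
    path::Path,
};

// Illustrative placeholder; a real file format defines its own magic value.
const MAGIC: u64 = u64::from_le_bytes(*b"AnzaTech");

/// A handle that can only be constructed if the trailing magic number matches.
struct VerifiedFile(File);

impl VerifiedFile {
    fn open(path: &Path) -> io::Result<Self> {
        let mut file = File::open(path)?;
        // The magic number occupies the last 8 bytes of the file.
        file.seek(SeekFrom::End(-(std::mem::size_of::<u64>() as i64)))?;
        let mut buf = [0u8; 8];
        file.read_exact(&mut buf)?;
        if u64::from_le_bytes(buf) != MAGIC {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "magic number mismatch",
            ));
        }
        // Callers receive an already-verified handle, so the file is
        // opened and validated exactly once.
        Ok(Self(file))
    }
}
```

Readers then take the verified handle by value instead of re-opening the path, which is the shape `HotStorageReader::new(file)` takes in the diff below.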
#### Summary of Changes This PR refactors TieredStorage::new_readonly() so that it first performs all checks inside the constructor of TieredReadableFile. The TieredReadableFile instance is then passed to the proper reader (currently HotStorageReader) when all checks are passed. #### Test Plan * Added a new test to check MagicNumberMismatch. * Existing tiered-storage tests --- accounts-db/src/tiered_storage.rs | 3 +- accounts-db/src/tiered_storage/file.rs | 86 +++++++++++++++++++--- accounts-db/src/tiered_storage/footer.rs | 24 +----- accounts-db/src/tiered_storage/hot.rs | 35 +++++---- accounts-db/src/tiered_storage/readable.rs | 6 +- 5 files changed, 105 insertions(+), 49 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index cc2776ed178cf6..70169a59428fe6 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -170,7 +170,8 @@ mod tests { use { super::*, crate::account_storage::meta::StoredMetaWriteVersion, - footer::{TieredStorageFooter, TieredStorageMagicNumber}, + file::TieredStorageMagicNumber, + footer::TieredStorageFooter, hot::HOT_FORMAT, index::IndexOffset, solana_sdk::{ diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 605e55a0b193a1..e6ea4a7c65d15d 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -1,5 +1,6 @@ use { - bytemuck::{AnyBitPattern, NoUninit}, + super::{error::TieredStorageError, TieredStorageResult}, + bytemuck::{AnyBitPattern, NoUninit, Pod, Zeroable}, std::{ fs::{File, OpenOptions}, io::{BufWriter, Read, Result as IoResult, Seek, SeekFrom, Write}, @@ -8,23 +9,37 @@ use { }, }; +/// The ending 8 bytes of a valid tiered account storage file. +pub const FILE_MAGIC_NUMBER: u64 = u64::from_le_bytes(*b"AnzaTech"); + +#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] +#[repr(C)] +pub struct TieredStorageMagicNumber(pub u64); + +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::() == 8); + +impl Default for TieredStorageMagicNumber { + fn default() -> Self { + Self(FILE_MAGIC_NUMBER) + } +} + #[derive(Debug)] pub struct TieredReadableFile(pub File); impl TieredReadableFile { - pub fn new(file_path: impl AsRef) -> Self { - Self( + pub fn new(file_path: impl AsRef) -> TieredStorageResult { + let file = Self( OpenOptions::new() .read(true) .create(false) - .open(&file_path) - .unwrap_or_else(|err| { - panic!( - "[TieredStorageError] Unable to open {} as read-only: {err}", - file_path.as_ref().display(), - ); - }), - ) + .open(&file_path)?, + ); + + file.check_magic_number()?; + + Ok(file) } pub fn new_writable(file_path: impl AsRef) -> IoResult { @@ -36,6 +51,19 @@ impl TieredReadableFile { )) } + fn check_magic_number(&self) -> TieredStorageResult<()> { + self.seek_from_end(-(std::mem::size_of::() as i64))?; + let mut magic_number = TieredStorageMagicNumber::zeroed(); + self.read_pod(&mut magic_number)?; + if magic_number != TieredStorageMagicNumber::default() { + return Err(TieredStorageError::MagicNumberMismatch( + TieredStorageMagicNumber::default().0, + magic_number.0, + )); + } + Ok(()) + } + /// Reads a value of type `T` from the file. /// /// Type T must be plain ol' data. 
@@ -127,3 +155,39 @@ impl TieredWritableFile { Ok(bytes.len()) } } + +#[cfg(test)] +mod tests { + use { + crate::tiered_storage::{ + error::TieredStorageError, + file::{TieredReadableFile, TieredWritableFile, FILE_MAGIC_NUMBER}, + }, + std::path::Path, + tempfile::TempDir, + }; + + fn generate_test_file_with_number(path: impl AsRef, number: u64) { + let mut file = TieredWritableFile::new(path).unwrap(); + file.write_pod(&number).unwrap(); + } + + #[test] + fn test_new() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_new"); + generate_test_file_with_number(&path, FILE_MAGIC_NUMBER); + assert!(TieredReadableFile::new(&path).is_ok()); + } + + #[test] + fn test_magic_number_mismatch() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_magic_number_mismatch"); + generate_test_file_with_number(&path, !FILE_MAGIC_NUMBER); + assert!(matches!( + TieredReadableFile::new(&path), + Err(TieredStorageError::MagicNumberMismatch(_, _)) + )); + } +} diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index fa885f2394ce63..89e671d121cce6 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,13 +1,13 @@ use { crate::tiered_storage::{ error::TieredStorageError, - file::{TieredReadableFile, TieredWritableFile}, + file::{TieredReadableFile, TieredStorageMagicNumber, TieredWritableFile}, index::IndexBlockFormat, mmap_utils::{get_pod, get_type}, owners::OwnersBlockFormat, TieredStorageResult, }, - bytemuck::{Pod, Zeroable}, + bytemuck::Zeroable, memmap2::Mmap, num_enum::TryFromPrimitiveError, solana_sdk::{hash::Hash, pubkey::Pubkey}, @@ -26,22 +26,6 @@ static_assertions::const_assert_eq!(mem::size_of::(), 160); /// even when the footer's format changes. pub const FOOTER_TAIL_SIZE: usize = 24; -/// The ending 8 bytes of a valid tiered account storage file. -pub const FOOTER_MAGIC_NUMBER: u64 = 0x502A2AB5; // SOLALABS -> SOLANA LABS - -#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] -#[repr(C)] -pub struct TieredStorageMagicNumber(pub u64); - -// Ensure there are no implicit padding bytes -const _: () = assert!(std::mem::size_of::() == 8); - -impl Default for TieredStorageMagicNumber { - fn default() -> Self { - Self(FOOTER_MAGIC_NUMBER) - } -} - #[repr(u16)] #[derive( Clone, @@ -133,7 +117,7 @@ pub struct TieredStorageFooter { /// The size of the footer including the magic number. pub footer_size: u64, // This field is persisted in the storage but not in this struct. - // The number should match FOOTER_MAGIC_NUMBER. + // The number should match FILE_MAGIC_NUMBER. 
// pub magic_number: u64, } @@ -186,7 +170,7 @@ impl Default for TieredStorageFooter { impl TieredStorageFooter { pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let file = TieredReadableFile::new(path); + let file = TieredReadableFile::new(path)?; Self::new_from_footer_block(&file) } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index c00dff302c9cea..1a5017535cdded 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -7,7 +7,7 @@ use { accounts_hash::AccountHash, tiered_storage::{ byte_block, - file::TieredWritableFile, + file::{TieredReadableFile, TieredWritableFile}, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter}, index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, @@ -24,7 +24,7 @@ use { account::ReadableAccount, pubkey::Pubkey, rent_collector::RENT_EXEMPT_RENT_EPOCH, stake_history::Epoch, }, - std::{borrow::Borrow, fs::OpenOptions, option::Option, path::Path}, + std::{borrow::Borrow, option::Option, path::Path}, }; pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { @@ -346,10 +346,8 @@ pub struct HotStorageReader { } impl HotStorageReader { - /// Constructs a HotStorageReader from the specified path. - pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let file = OpenOptions::new().read(true).open(path)?; - let mmap = unsafe { MmapOptions::new().map(&file)? }; + pub fn new(file: TieredReadableFile) -> TieredStorageResult { + let mmap = unsafe { MmapOptions::new().map(&file.0)? }; // Here we are copying the footer, as accessing any data in a // TieredStorage instance requires accessing its Footer. // This can help improve cache locality and reduce the overhead @@ -899,7 +897,8 @@ pub mod tests { // Reopen the same storage, and expect the persisted footer is // the same as what we have written. { - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); assert_eq!(expected_footer, *hot_storage.footer()); } } @@ -945,7 +944,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for (offset, expected_meta) in account_offsets.iter().zip(hot_account_metas.iter()) { let meta = hot_storage.get_account_meta_from_offset(*offset).unwrap(); @@ -975,7 +975,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); let offset = HotAccountOffset::new(footer.index_block_offset as usize).unwrap(); // Read from index_block_offset, which offset doesn't belong to // account blocks. 
Expect assert failure here @@ -1026,7 +1027,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for (i, index_writer_entry) in index_writer_entries.iter().enumerate() { let account_offset = hot_storage .get_account_offset(IndexOffset(i as u32)) @@ -1075,7 +1077,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for (i, address) in addresses.iter().enumerate() { assert_eq!( hot_storage @@ -1149,7 +1152,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); // First, verify whether we can find the expected owners. let mut owner_candidates = owner_addresses.clone(); @@ -1281,7 +1285,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for i in 0..NUM_ACCOUNTS { let (stored_meta, next) = hot_storage @@ -1362,10 +1367,10 @@ pub mod tests { writer.write_accounts(&storable_accounts, 0).unwrap() }; - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); let num_accounts = account_data_sizes.len(); - for i in 0..num_accounts { let (stored_meta, next) = hot_storage .get_account(IndexOffset(i as u32)) diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index e3d169d4f6d99e..15d678ffc856fc 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -3,6 +3,7 @@ use { account_storage::meta::StoredAccountMeta, accounts_file::MatchAccountOwnerError, tiered_storage::{ + file::TieredReadableFile, footer::{AccountMetaFormat, TieredStorageFooter}, hot::HotStorageReader, index::IndexOffset, @@ -22,9 +23,10 @@ pub enum TieredStorageReader { impl TieredStorageReader { /// Creates a reader for the specified tiered storage accounts file. pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let footer = TieredStorageFooter::new_from_path(&path)?; + let file = TieredReadableFile::new(&path)?; + let footer = TieredStorageFooter::new_from_footer_block(&file)?; match footer.account_meta_format { - AccountMetaFormat::Hot => Ok(Self::Hot(HotStorageReader::new_from_path(path)?)), + AccountMetaFormat::Hot => Ok(Self::Hot(HotStorageReader::new(file)?)), } } From 3038d47f1c16f5355a025b8de21a27677060eb55 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 20 Mar 2024 12:17:12 -0700 Subject: [PATCH 022/153] [TieredStorage] Store account address range (#172) #### Problem The TieredStorageFooter has the min_account_address and max_account_address fields to describe the account address range in its file. But the current implementation hasn't updated the fields yet. 
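The fix is essentially a running min/max fold over every address written into the file. A minimal, standalone sketch of that bookkeeping (generic over any ordered key; `KeyRange` and the `Option`-based empty state are illustrative choices, not this PR's types):

```rust
/// Tracks the smallest and largest key observed so far.
/// `None` means no key has been observed yet.
#[derive(Debug, Default)]
struct KeyRange<K: Ord + Copy> {
    min: Option<K>,
    max: Option<K>,
}

impl<K: Ord + Copy> KeyRange<K> {
    /// Widens the range to include `key`.
    fn update(&mut self, key: K) {
        self.min = Some(self.min.map_or(key, |m| m.min(key)));
        self.max = Some(self.max.map_or(key, |m| m.max(key)));
    }
}

fn main() {
    let mut range = KeyRange::default();
    for key in [42u64, 7, 99, 63] {
        range.update(key);
    }
    // After folding over all keys, the range brackets every one of them.
    assert_eq!((range.min, range.max), (Some(7), Some(99)));
}
```

The `AccountAddressRange` added below expresses the same idea without `Option`, by seeding `min`/`max` with all-0xFF and all-0x00 pubkey sentinels and storing references to the observed addresses.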
#### Summary of Changes This PR enables the TieredStorage to persist address range information into its footer via min_account_address and max_account_address. #### Test Plan Updated tiered-storage test to verify persisted account address range. --- accounts-db/src/tiered_storage.rs | 24 +++++- accounts-db/src/tiered_storage/hot.rs | 9 ++- accounts-db/src/tiered_storage/meta.rs | 77 +++++++++++++++++++- accounts-db/src/tiered_storage/test_utils.rs | 12 +++ 4 files changed, 117 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 70169a59428fe6..3f655896a28ed6 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -183,7 +183,7 @@ mod tests { mem::ManuallyDrop, }, tempfile::tempdir, - test_utils::{create_test_account, verify_test_account}, + test_utils::{create_test_account, verify_test_account_with_footer}, }; impl TieredStorage { @@ -368,13 +368,33 @@ mod tests { let mut index_offset = IndexOffset(0); let mut verified_accounts = HashSet::new(); + let footer = reader.footer(); + + const MIN_PUBKEY: Pubkey = Pubkey::new_from_array([0x00u8; 32]); + const MAX_PUBKEY: Pubkey = Pubkey::new_from_array([0xFFu8; 32]); + let mut min_pubkey_ref = &MAX_PUBKEY; + let mut max_pubkey_ref = &MIN_PUBKEY; + while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() { if let Some(account) = expected_accounts_map.get(stored_meta.pubkey()) { - verify_test_account(&stored_meta, *account, stored_meta.pubkey()); + verify_test_account_with_footer( + &stored_meta, + *account, + stored_meta.pubkey(), + footer, + ); verified_accounts.insert(stored_meta.pubkey()); + if *min_pubkey_ref > *stored_meta.pubkey() { + min_pubkey_ref = stored_meta.pubkey(); + } + if *max_pubkey_ref < *stored_meta.pubkey() { + max_pubkey_ref = stored_meta.pubkey(); + } } index_offset = next; } + assert_eq!(footer.min_account_address, *min_pubkey_ref); + assert_eq!(footer.max_account_address, *max_pubkey_ref); assert!(!verified_accounts.is_empty()); assert_eq!(verified_accounts.len(), expected_accounts_map.len()) } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 1a5017535cdded..c1e92e4469b269 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -10,7 +10,9 @@ use { file::{TieredReadableFile, TieredWritableFile}, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter}, index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + meta::{ + AccountAddressRange, AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta, + }, mmap_utils::{get_pod, get_slice}, owners::{OwnerOffset, OwnersBlockFormat, OwnersTable, OWNER_NO_OWNER}, StorableAccounts, StorableAccountsWithHashesAndWriteVersions, TieredStorageError, @@ -620,6 +622,7 @@ impl HotStorageWriter { let mut index = vec![]; let mut owners_table = OwnersTable::default(); let mut cursor = 0; + let mut address_range = AccountAddressRange::default(); // writing accounts blocks let len = accounts.accounts.len(); @@ -631,6 +634,7 @@ impl HotStorageWriter { address, offset: HotAccountOffset::new(cursor)?, }; + address_range.update(address); // Obtain necessary fields from the account, or default fields // for a zero-lamport account in the None case. 
@@ -691,7 +695,8 @@ impl HotStorageWriter { footer .owners_block_format .write_owners_block(&mut self.storage, &owners_table)?; - + footer.min_account_address = *address_range.min; + footer.max_account_address = *address_range.max; footer.write_footer_block(&mut self.storage)?; Ok(stored_infos) diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 2aa53e5a4de1ed..c98fe2efa8b6f6 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -4,7 +4,7 @@ use { crate::tiered_storage::owners::OwnerOffset, bytemuck::{Pod, Zeroable}, modular_bitfield::prelude::*, - solana_sdk::stake_history::Epoch, + solana_sdk::{pubkey::Pubkey, stake_history::Epoch}, }; /// The struct that handles the account meta flags. @@ -124,6 +124,38 @@ impl AccountMetaOptionalFields { } } +const MIN_ACCOUNT_ADDRESS: Pubkey = Pubkey::new_from_array([0x00u8; 32]); +const MAX_ACCOUNT_ADDRESS: Pubkey = Pubkey::new_from_array([0xFFu8; 32]); + +#[derive(Debug)] +/// A struct that maintains an address-range using its min and max fields. +pub struct AccountAddressRange<'a> { + /// The minimum address observed via update() + pub min: &'a Pubkey, + /// The maximum address observed via update() + pub max: &'a Pubkey, +} + +impl Default for AccountAddressRange<'_> { + fn default() -> Self { + Self { + min: &MAX_ACCOUNT_ADDRESS, + max: &MIN_ACCOUNT_ADDRESS, + } + } +} + +impl<'a> AccountAddressRange<'a> { + pub fn update(&mut self, address: &'a Pubkey) { + if *self.min > *address { + self.min = address; + } + if *self.max < *address { + self.max = address; + } + } +} + #[cfg(test)] pub mod tests { use super::*; @@ -221,4 +253,47 @@ pub mod tests { ); } } + + #[test] + fn test_pubkey_range_update_single() { + let address = solana_sdk::pubkey::new_rand(); + let mut address_range = AccountAddressRange::default(); + + address_range.update(&address); + // For a single update, the min and max should equal to the address + assert_eq!(*address_range.min, address); + assert_eq!(*address_range.max, address); + } + + #[test] + fn test_pubkey_range_update_multiple() { + const NUM_PUBKEYS: usize = 20; + + let mut address_range = AccountAddressRange::default(); + let mut addresses = Vec::with_capacity(NUM_PUBKEYS); + + let mut min_index = 0; + let mut max_index = 0; + + // Generate random addresses and track expected min and max indices + for i in 0..NUM_PUBKEYS { + let address = solana_sdk::pubkey::new_rand(); + addresses.push(address); + + // Update expected min and max indices + if address < addresses[min_index] { + min_index = i; + } + if address > addresses[max_index] { + max_index = i; + } + } + + addresses + .iter() + .for_each(|address| address_range.update(address)); + + assert_eq!(*address_range.min, addresses[min_index]); + assert_eq!(*address_range.max, addresses[max_index]); + } } diff --git a/accounts-db/src/tiered_storage/test_utils.rs b/accounts-db/src/tiered_storage/test_utils.rs index f44f20f77cc5dd..8916ef894cc26e 100644 --- a/accounts-db/src/tiered_storage/test_utils.rs +++ b/accounts-db/src/tiered_storage/test_utils.rs @@ -1,6 +1,7 @@ #![cfg(test)] //! 
Helper functions for TieredStorage tests use { + super::footer::TieredStorageFooter, crate::{ account_storage::meta::{StoredAccountMeta, StoredMeta}, accounts_hash::AccountHash, @@ -61,3 +62,14 @@ pub(super) fn verify_test_account( assert_eq!(stored_meta.pubkey(), address); assert_eq!(*stored_meta.hash(), AccountHash(Hash::default())); } + +pub(super) fn verify_test_account_with_footer( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + address: &Pubkey, + footer: &TieredStorageFooter, +) { + verify_test_account(stored_meta, account, address); + assert!(footer.min_account_address <= *address); + assert!(footer.max_account_address >= *address); +} From 09ae5872b3657084bb7892d7cf0293745d6f2cc4 Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 20 Mar 2024 15:26:45 -0400 Subject: [PATCH 023/153] Rename LoadedPrograms to ProgramCache for readability (#339) --- program-runtime/src/loaded_programs.rs | 60 ++++++++++---------- runtime/src/bank.rs | 78 ++++++++++++-------------- runtime/src/bank/tests.rs | 14 ++--- svm/src/transaction_processor.rs | 61 +++++++++----------- svm/tests/integration_test.rs | 20 +++---- 5 files changed, 110 insertions(+), 123 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index f6163d63cd738c..8364f7013d65d1 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -145,7 +145,7 @@ pub struct LoadedProgram { pub latest_access_slot: AtomicU64, } -/// Global cache statistics for [LoadedPrograms]. +/// Global cache statistics for [ProgramCache]. #[derive(Debug, Default)] pub struct Stats { /// a program was already in the cache @@ -568,7 +568,7 @@ struct SecondLevel { /// - allows for cooperative loading of TX batches which hit the same missing programs simultaneously. /// - enforces that all programs used in a batch are eagerly loaded ahead of execution. /// - is not persisted to disk or a snapshot, so it needs to cold start and warm up first. -pub struct LoadedPrograms { +pub struct ProgramCache { /// A two level index: /// /// The first level is for the address at which programs are deployed and the second level for the slot (and thus also fork). @@ -595,9 +595,9 @@ pub struct LoadedPrograms { pub loading_task_waiter: Arc, } -impl Debug for LoadedPrograms { +impl Debug for ProgramCache { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("LoadedPrograms") + f.debug_struct("ProgramCache") .field("root slot", &self.latest_root_slot) .field("root epoch", &self.latest_root_epoch) .field("stats", &self.stats) @@ -606,11 +606,11 @@ impl Debug for LoadedPrograms { } } -/// Local view into [LoadedPrograms] which was extracted for a specific TX batch. +/// Local view into [ProgramCache] which was extracted for a specific TX batch. /// -/// This isolation enables the global [LoadedPrograms] to continue to evolve (e.g. evictions), +/// This isolation enables the global [ProgramCache] to continue to evolve (e.g. evictions), /// while the TX batch is guaranteed it will continue to find all the programs it requires. -/// For program management instructions this also buffers them before they are merged back into the global [LoadedPrograms]. +/// For program management instructions this also buffers them before they are merged back into the global [ProgramCache]. #[derive(Clone, Debug, Default)] pub struct LoadedProgramsForTxBatch { /// Pubkey is the address of a program. 
@@ -681,7 +681,7 @@ pub enum LoadedProgramMatchCriteria { NoCriteria, } -impl LoadedPrograms { +impl ProgramCache { pub fn new(root_slot: Slot, root_epoch: Epoch) -> Self { Self { entries: HashMap::new(), @@ -734,7 +734,7 @@ impl LoadedPrograms { (LoadedProgramType::Unloaded(_), LoadedProgramType::TestLoaded(_)) => {} _ => { // Something is wrong, I can feel it ... - error!("LoadedPrograms::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry); + error!("ProgramCache::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry); debug_assert!(false, "Unexpected replacement of an entry"); self.stats.replacements.fetch_add(1, Ordering::Relaxed); return true; @@ -1146,9 +1146,9 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedProgram { } #[cfg(RUSTC_WITH_SPECIALIZATION)] -impl solana_frozen_abi::abi_example::AbiExample for LoadedPrograms { +impl solana_frozen_abi::abi_example::AbiExample for ProgramCache { fn example() -> Self { - // LoadedPrograms isn't serializable by definition. + // ProgramCache isn't serializable by definition. Self::new(Slot::default(), Epoch::default()) } } @@ -1158,7 +1158,7 @@ mod tests { use { crate::loaded_programs::{ BlockRelation, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + LoadedProgramsForTxBatch, ProgramCache, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, @@ -1178,8 +1178,8 @@ mod tests { static MOCK_ENVIRONMENT: std::sync::OnceLock = std::sync::OnceLock::::new(); - fn new_mock_cache() -> LoadedPrograms { - let mut cache = LoadedPrograms::new(0, 0); + fn new_mock_cache() -> ProgramCache { + let mut cache = ProgramCache::new(0, 0); cache.environments.program_runtime_v1 = MOCK_ENVIRONMENT .get_or_init(|| Arc::new(BuiltinProgram::new_mock())) @@ -1220,7 +1220,7 @@ mod tests { } fn set_tombstone( - cache: &mut LoadedPrograms, + cache: &mut ProgramCache, key: Pubkey, slot: Slot, reason: LoadedProgramType, @@ -1231,7 +1231,7 @@ mod tests { } fn insert_unloaded_program( - cache: &mut LoadedPrograms, + cache: &mut ProgramCache, key: Pubkey, slot: Slot, ) -> Arc { @@ -1254,7 +1254,7 @@ mod tests { unloaded } - fn num_matching_entries(cache: &LoadedPrograms, predicate: P) -> usize + fn num_matching_entries(cache: &ProgramCache, predicate: P) -> usize where P: Fn(&LoadedProgramType) -> bool, FG: ForkGraph, @@ -1302,7 +1302,7 @@ mod tests { } fn program_deploy_test_helper( - cache: &mut LoadedPrograms, + cache: &mut ProgramCache, program: Pubkey, deployment_slots: Vec, usage_counters: Vec, @@ -2574,28 +2574,28 @@ mod tests { let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::NoCriteria ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::Tombstone ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) ) @@ -2604,28 +2604,28 @@ mod 
tests { let program = new_test_loaded_program(0, 1); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::NoCriteria ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::Tombstone ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) ) @@ -2638,28 +2638,28 @@ mod tests { )); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::NoCriteria ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::Tombstone ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) ) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 388c2f4a15f529..e2ab858660361f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -99,7 +99,7 @@ use { compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{ - LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedPrograms, + LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, ProgramCache, ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, @@ -547,7 +547,7 @@ impl PartialEq for Bank { accounts_data_size_delta_off_chain: _, fee_structure: _, incremental_snapshot_persistence: _, - loaded_programs_cache: _, + program_cache: _, epoch_reward_status: _, transaction_processor: _, check_program_modification_slot: _, @@ -806,7 +806,7 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, - loaded_programs_cache: Arc>>, + program_cache: Arc>>, epoch_reward_status: EpochRewardStatus, @@ -993,7 +993,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + program_cache: Arc::new(RwLock::new(ProgramCache::new( Slot::default(), Epoch::default(), ))), @@ -1008,7 +1008,7 @@ impl Bank { bank.epoch_schedule.clone(), bank.fee_structure.clone(), bank.runtime_config.clone(), - bank.loaded_programs_cache.clone(), + bank.program_cache.clone(), ); let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; @@ -1315,7 +1315,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: parent.fee_structure.clone(), - loaded_programs_cache: parent.loaded_programs_cache.clone(), + program_cache: parent.program_cache.clone(), epoch_reward_status: parent.epoch_reward_status.clone(), transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ 
-1327,7 +1327,7 @@ impl Bank { new.epoch_schedule.clone(), new.fee_structure.clone(), new.runtime_config.clone(), - new.loaded_programs_cache.clone(), + new.program_cache.clone(), ); let (_, ancestors_time_us) = measure_us!({ @@ -1367,13 +1367,12 @@ impl Bank { .min(slots_in_epoch) .checked_div(2) .unwrap(); - let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); - if loaded_programs_cache.upcoming_environments.is_some() { - if let Some((key, program_to_recompile)) = - loaded_programs_cache.programs_to_recompile.pop() + let mut program_cache = new.program_cache.write().unwrap(); + if program_cache.upcoming_environments.is_some() { + if let Some((key, program_to_recompile)) = program_cache.programs_to_recompile.pop() { - let effective_epoch = loaded_programs_cache.latest_root_epoch.saturating_add(1); - drop(loaded_programs_cache); + let effective_epoch = program_cache.latest_root_epoch.saturating_add(1); + drop(program_cache); let recompiled = new.load_program(&key, false, effective_epoch); recompiled .tx_usage_counter @@ -1381,17 +1380,17 @@ impl Bank { recompiled .ix_usage_counter .fetch_add(program_to_recompile.ix_usage_counter.load(Relaxed), Relaxed); - let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.assign_program(key, recompiled); + let mut program_cache = new.program_cache.write().unwrap(); + program_cache.assign_program(key, recompiled); } - } else if new.epoch() != loaded_programs_cache.latest_root_epoch + } else if new.epoch() != program_cache.latest_root_epoch || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch { // Anticipate the upcoming program runtime environment for the next epoch, // so we can try to recompile loaded programs before the feature transition hits. 
- drop(loaded_programs_cache); + drop(program_cache); let (feature_set, _new_feature_activations) = new.compute_active_feature_set(true); - let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + let mut program_cache = new.program_cache.write().unwrap(); let program_runtime_environment_v1 = create_program_runtime_environment_v1( &feature_set, &new.runtime_config.compute_budget.unwrap_or_default(), @@ -1403,7 +1402,7 @@ impl Bank { &new.runtime_config.compute_budget.unwrap_or_default(), false, /* debugging_features */ ); - let mut upcoming_environments = loaded_programs_cache.environments.clone(); + let mut upcoming_environments = program_cache.environments.clone(); let changed_program_runtime_v1 = *upcoming_environments.program_runtime_v1 != program_runtime_environment_v1; let changed_program_runtime_v2 = @@ -1416,10 +1415,10 @@ impl Bank { upcoming_environments.program_runtime_v2 = Arc::new(program_runtime_environment_v2); } - loaded_programs_cache.upcoming_environments = Some(upcoming_environments); - loaded_programs_cache.programs_to_recompile = loaded_programs_cache + program_cache.upcoming_environments = Some(upcoming_environments); + program_cache.programs_to_recompile = program_cache .get_flattened_entries(changed_program_runtime_v1, changed_program_runtime_v2); - loaded_programs_cache + program_cache .programs_to_recompile .sort_by_cached_key(|(_id, program)| program.decayed_usage_counter(slot)); } @@ -1464,32 +1463,32 @@ impl Bank { ); parent - .loaded_programs_cache + .program_cache .read() .unwrap() .stats .submit(parent.slot()); - new.loaded_programs_cache.write().unwrap().stats.reset(); + new.program_cache.write().unwrap().stats.reset(); new } pub fn set_fork_graph_in_program_cache(&self, fork_graph: Arc>) { - self.loaded_programs_cache + self.program_cache .write() .unwrap() .set_fork_graph(fork_graph); } pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) { - self.loaded_programs_cache + self.program_cache .write() .unwrap() .prune(new_root_slot, new_root_epoch); } pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) { - self.loaded_programs_cache + self.program_cache .write() .unwrap() .prune_by_deployment_slot(deployment_slot); @@ -1497,7 +1496,7 @@ impl Bank { pub fn get_runtime_environments_for_slot(&self, slot: Slot) -> ProgramRuntimeEnvironments { let epoch = self.epoch_schedule.get_epoch(slot); - self.loaded_programs_cache + self.program_cache .read() .unwrap() .get_environments_for_epoch(epoch) @@ -1863,10 +1862,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( - fields.slot, - fields.epoch, - ))), + program_cache: Arc::new(RwLock::new(ProgramCache::new(fields.slot, fields.epoch))), epoch_reward_status: fields.epoch_reward_status, transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ -1878,7 +1874,7 @@ impl Bank { bank.epoch_schedule.clone(), bank.fee_structure.clone(), bank.runtime_config.clone(), - bank.loaded_programs_cache.clone(), + bank.program_cache.clone(), ); bank.finish_init( @@ -4987,7 +4983,7 @@ impl Bank { } = execution_result { if details.status.is_ok() { - let mut cache = self.loaded_programs_cache.write().unwrap(); + let mut cache = self.program_cache.write().unwrap(); cache.merge(programs_modified_by_tx); } } @@ -6013,10 +6009,10 @@ impl Bank { } } - 
let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.latest_root_slot = self.slot(); - loaded_programs_cache.latest_root_epoch = self.epoch(); - loaded_programs_cache.environments.program_runtime_v1 = Arc::new( + let mut program_cache = self.program_cache.write().unwrap(); + program_cache.latest_root_slot = self.slot(); + program_cache.latest_root_epoch = self.epoch(); + program_cache.environments.program_runtime_v1 = Arc::new( create_program_runtime_environment_v1( &self.feature_set, &self.runtime_config.compute_budget.unwrap_or_default(), @@ -6025,7 +6021,7 @@ impl Bank { ) .unwrap(), ); - loaded_programs_cache.environments.program_runtime_v2 = + program_cache.environments.program_runtime_v2 = Arc::new(create_program_runtime_environment_v2( &self.runtime_config.compute_budget.unwrap_or_default(), false, /* debugging_features */ @@ -7094,7 +7090,7 @@ impl Bank { debug!("Adding program {} under {:?}", name, program_id); self.add_builtin_account(name.as_str(), &program_id, false); self.builtin_programs.insert(program_id); - self.loaded_programs_cache + self.program_cache .write() .unwrap() .assign_program(program_id, Arc::new(builtin)); @@ -7399,7 +7395,7 @@ impl Bank { self.store_account(new_address, &AccountSharedData::default()); // Unload a program from the bank's cache - self.loaded_programs_cache + self.program_cache .write() .unwrap() .remove_programs([*old_address].into_iter()); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 6960f220244998..f104c8ee2b963d 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11910,14 +11910,14 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 16); let current_env = bank - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(0) .program_runtime_v1 .clone(); let upcoming_env = bank - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(1) @@ -11926,9 +11926,8 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { // Advance the bank to recompile the program. 
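Before following the recompilation step below, it helps to see how the cache can answer get_environments_for_epoch differently for epoch 0 and epoch 1, as the assertions above expect. A rough sketch of one plausible selection rule, with made-up stand-in types (Environments, Cache) in place of the real ProgramRuntimeEnvironments and ProgramCache, so details may differ from the actual implementation:

    use std::sync::Arc;

    type Epoch = u64;

    struct Environments {
        version: &'static str,
    }

    struct Cache {
        latest_root_epoch: Epoch,
        environments: Arc<Environments>,
        upcoming_environments: Option<Arc<Environments>>,
    }

    impl Cache {
        // Epochs other than the current root epoch see the staged upcoming
        // environments, if any; everything else sees the current ones.
        fn environments_for_epoch(&self, epoch: Epoch) -> Arc<Environments> {
            if epoch != self.latest_root_epoch {
                if let Some(upcoming) = &self.upcoming_environments {
                    return upcoming.clone();
                }
            }
            self.environments.clone()
        }
    }

    fn main() {
        let cache = Cache {
            latest_root_epoch: 0,
            environments: Arc::new(Environments { version: "current" }),
            upcoming_environments: Some(Arc::new(Environments { version: "upcoming" })),
        };
        assert_eq!(cache.environments_for_epoch(0).version, "current");
        assert_eq!(cache.environments_for_epoch(1).version, "upcoming");
    }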
{ - let loaded_programs_cache = bank.loaded_programs_cache.read().unwrap(); - let slot_versions = - loaded_programs_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); + let program_cache = bank.program_cache.read().unwrap(); + let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 1); assert!(Arc::ptr_eq( slot_versions[0].program.get_environment().unwrap(), @@ -11938,9 +11937,8 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); { - let loaded_programs_cache = bank.loaded_programs_cache.read().unwrap(); - let slot_versions = - loaded_programs_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); + let program_cache = bank.program_cache.read().unwrap(); + let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 2); assert!(Arc::ptr_eq( slot_versions[0].program.get_environment().unwrap(), diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 40ccf81561f26e..f28820c41cceed 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -17,7 +17,7 @@ use { compute_budget::ComputeBudget, loaded_programs::{ ForkGraph, LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + LoadedProgramType, LoadedProgramsForTxBatch, ProgramCache, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, @@ -137,7 +137,8 @@ pub struct TransactionBatchProcessor { pub sysvar_cache: RwLock, - pub loaded_programs_cache: Arc>>, + /// Programs required for transaction batch processing + pub program_cache: Arc>>, } impl Debug for TransactionBatchProcessor { @@ -149,7 +150,7 @@ impl Debug for TransactionBatchProcessor { .field("fee_structure", &self.fee_structure) .field("runtime_config", &self.runtime_config) .field("sysvar_cache", &self.sysvar_cache) - .field("loaded_programs_cache", &self.loaded_programs_cache) + .field("program_cache", &self.program_cache) .finish() } } @@ -163,7 +164,7 @@ impl Default for TransactionBatchProcessor { fee_structure: FeeStructure::default(), runtime_config: Arc::::default(), sysvar_cache: RwLock::::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + program_cache: Arc::new(RwLock::new(ProgramCache::new( Slot::default(), Epoch::default(), ))), @@ -178,7 +179,7 @@ impl TransactionBatchProcessor { epoch_schedule: EpochSchedule, fee_structure: FeeStructure, runtime_config: Arc, - loaded_programs_cache: Arc>>, + program_cache: Arc>>, ) -> Self { Self { slot, @@ -187,7 +188,7 @@ impl TransactionBatchProcessor { fee_structure, runtime_config, sysvar_cache: RwLock::::default(), - loaded_programs_cache, + program_cache, } } @@ -308,7 +309,7 @@ impl TransactionBatchProcessor { execution_time.stop(); const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; - self.loaded_programs_cache + self.program_cache .write() .unwrap() .evict_using_2s_random_selection( @@ -374,8 +375,8 @@ impl TransactionBatchProcessor { result } - /// Load program with a specific pubkey from loaded programs - /// cache, and update the program's access slot as a side-effect. + /// Load program with a specific pubkey from program cache, and + /// update the program's access slot as a side-effect. 
pub fn load_program_with_pubkey( &self, callbacks: &CB, @@ -383,8 +384,8 @@ impl TransactionBatchProcessor { reload: bool, effective_epoch: Epoch, ) -> Arc { - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); + let program_cache = self.program_cache.read().unwrap(); + let environments = program_cache.get_environments_for_epoch(effective_epoch); let mut load_program_metrics = LoadProgramMetrics { program_id: pubkey.to_string(), ..LoadProgramMetrics::default() @@ -463,10 +464,10 @@ impl TransactionBatchProcessor { load_program_metrics.submit_datapoint(&mut timings); if !Arc::ptr_eq( &environments.program_runtime_v1, - &loaded_programs_cache.environments.program_runtime_v1, + &program_cache.environments.program_runtime_v1, ) || !Arc::ptr_eq( &environments.program_runtime_v2, - &loaded_programs_cache.environments.program_runtime_v2, + &program_cache.environments.program_runtime_v2, ) { // There can be two entries per program when the environment changes. // One for the old environment before the epoch boundary and one for the new environment after the epoch boundary. @@ -502,21 +503,18 @@ impl TransactionBatchProcessor { loop { let (program_to_load, task_cookie, task_waiter) = { // Lock the global cache. - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + let mut program_cache = self.program_cache.write().unwrap(); // Initialize our local cache. let is_first_round = loaded_programs_for_txs.is_none(); if is_first_round { loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( self.slot, - loaded_programs_cache - .get_environments_for_epoch(self.epoch) - .clone(), + program_cache.get_environments_for_epoch(self.epoch).clone(), )); } // Submit our last completed loading task. if let Some((key, program)) = program_to_store.take() { - if loaded_programs_cache - .finish_cooperative_loading_task(self.slot, key, program) + if program_cache.finish_cooperative_loading_task(self.slot, key, program) && limit_to_load_programs { // This branch is taken when there is an error in assigning a program to a @@ -524,21 +522,19 @@ impl TransactionBatchProcessor { // tests purposes. let mut ret = LoadedProgramsForTxBatch::new( self.slot, - loaded_programs_cache - .get_environments_for_epoch(self.epoch) - .clone(), + program_cache.get_environments_for_epoch(self.epoch).clone(), ); ret.hit_max_limit = true; return ret; } } // Figure out which program needs to be loaded next. - let program_to_load = loaded_programs_cache.extract( + let program_to_load = program_cache.extract( &mut missing_programs, loaded_programs_for_txs.as_mut().unwrap(), is_first_round, ); - let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); + let task_waiter = Arc::clone(&program_cache.loading_task_waiter); (program_to_load, task_waiter.cookie(), task_waiter) // Unlock the global cache again. 
}; @@ -1266,7 +1262,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(20) @@ -1294,7 +1290,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(20) @@ -1367,7 +1363,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(0) @@ -1447,7 +1443,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(0) @@ -1506,7 +1502,7 @@ mod tests { let batch_processor = TransactionBatchProcessor::::default(); batch_processor - .loaded_programs_cache + .program_cache .write() .unwrap() .upcoming_environments = Some(ProgramRuntimeEnvironments::default()); @@ -1801,11 +1797,8 @@ mod tests { // Case 1 let mut mock_bank = MockBankCallback::default(); let batch_processor = TransactionBatchProcessor::::default(); - batch_processor - .loaded_programs_cache - .write() - .unwrap() - .fork_graph = Some(Arc::new(RwLock::new(TestForkGraph {}))); + batch_processor.program_cache.write().unwrap().fork_graph = + Some(Arc::new(RwLock::new(TestForkGraph {}))); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); let owner = Pubkey::new_unique(); diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 700b9c2f6a0ad1..45409a3b146848 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -7,7 +7,7 @@ use { compute_budget::ComputeBudget, invoke_context::InvokeContext, loaded_programs::{ - BlockRelation, ForkGraph, LoadedProgram, LoadedPrograms, ProgramRuntimeEnvironments, + BlockRelation, ForkGraph, LoadedProgram, ProgramCache, ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, solana_rbpf::{ @@ -113,8 +113,8 @@ fn create_custom_environment<'a>() -> BuiltinProgram> { fn create_executable_environment( mock_bank: &mut MockBankCallback, -) -> (LoadedPrograms, Vec) { - let mut programs_cache = LoadedPrograms::::new(0, 20); +) -> (ProgramCache, Vec) { + let mut program_cache = ProgramCache::::new(0, 20); // We must register the bpf loader account as a loadable account, otherwise programs // won't execute. 
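Returning to the cooperative loading loop in load_and_get_programs_from_cache above: one thread loads each missing program while the others sleep on the loading task waiter and recheck after every completion. A minimal sketch of such a cookie-plus-condvar waiter, assuming only std; the real LoadingTaskWaiter in solana-program-runtime may differ in detail:

    use std::sync::{Condvar, Mutex};

    struct TaskWaiter {
        cookie: Mutex<u64>,
        condvar: Condvar,
    }

    impl TaskWaiter {
        fn new() -> Self {
            Self { cookie: Mutex::new(0), condvar: Condvar::new() }
        }

        // Snapshot the current generation before releasing the shared state.
        fn cookie(&self) -> u64 {
            *self.cookie.lock().unwrap()
        }

        // Called by whichever thread finishes a loading task.
        fn notify(&self) {
            *self.cookie.lock().unwrap() += 1;
            self.condvar.notify_all();
        }

        // Block until some task completed after the snapshot was taken.
        fn wait_for_new_task(&self, seen_cookie: u64) {
            let mut cookie = self.cookie.lock().unwrap();
            while *cookie == seen_cookie {
                cookie = self.condvar.wait(cookie).unwrap();
            }
        }
    }

    fn main() {
        let waiter = TaskWaiter::new();
        let before = waiter.cookie();
        waiter.notify();
        waiter.wait_for_new_task(before); // returns immediately: the cookie advanced
    }

Taking the cookie while still holding the global cache lock, as the loop above does, is what prevents a completion notification from slipping in unobserved between unlock and wait.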
@@ -127,7 +127,7 @@ fn create_executable_environment( .insert(bpf_loader::id(), account_data); // The bpf loader needs an executable as well - programs_cache.assign_program( + program_cache.assign_program( bpf_loader::id(), Arc::new(LoadedProgram::new_builtin( DEPLOYMENT_SLOT, @@ -136,7 +136,7 @@ fn create_executable_environment( )), ); - programs_cache.environments = ProgramRuntimeEnvironments { + program_cache.environments = ProgramRuntimeEnvironments { program_runtime_v1: Arc::new(create_custom_environment()), // We are not using program runtime v2 program_runtime_v2: Arc::new(BuiltinProgram::new_loader( @@ -145,11 +145,11 @@ fn create_executable_environment( )), }; - programs_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {}))); + program_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {}))); // Inform SVM of the registered builins let registered_built_ins = vec![bpf_loader::id()]; - (programs_cache, registered_built_ins) + (program_cache, registered_built_ins) } fn prepare_transactions( @@ -224,15 +224,15 @@ fn prepare_transactions( fn svm_integration() { let mut mock_bank = MockBankCallback::default(); let (transactions, mut check_results) = prepare_transactions(&mut mock_bank); - let (programs_cache, builtins) = create_executable_environment(&mut mock_bank); - let programs_cache = Arc::new(RwLock::new(programs_cache)); + let (program_cache, builtins) = create_executable_environment(&mut mock_bank); + let program_cache = Arc::new(RwLock::new(program_cache)); let batch_processor = TransactionBatchProcessor::::new( EXECUTION_SLOT, EXECUTION_EPOCH, EpochSchedule::default(), FeeStructure::default(), Arc::new(RuntimeConfig::default()), - programs_cache.clone(), + program_cache.clone(), ); let mut error_counter = TransactionErrorMetrics::default(); From 4a67cd495b778e2727fc33707cfe09d3aacceb9e Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 20 Mar 2024 15:07:04 -0500 Subject: [PATCH 024/153] Allow configuration of replay thread pools from CLI (#236) Bubble up the constants to the CLI that control the sizes of the following two thread pools: - The thread pool used to replay multiple forks in parallel - The thread pool used to execute transactions in parallel --- core/src/replay_stage.rs | 22 ++--- core/src/tvu.rs | 22 ++++- core/src/validator.rs | 14 ++- local-cluster/src/validator_configs.rs | 3 +- validator/src/cli.rs | 19 ++-- validator/src/cli/thread_args.rs | 115 +++++++++++++++++++++++++ validator/src/main.rs | 10 ++- 7 files changed, 180 insertions(+), 25 deletions(-) create mode 100644 validator/src/cli/thread_args.rs diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 8a29d037dedf3c..48641297f63fcc 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -51,7 +51,6 @@ use { solana_measure::measure::Measure, solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, solana_program_runtime::timings::ExecuteTimings, - solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, rpc_subscriptions::RpcSubscriptions, @@ -80,6 +79,7 @@ use { solana_vote_program::vote_state::VoteTransaction, std::{ collections::{HashMap, HashSet}, + num::NonZeroUsize, result, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -95,11 +95,9 @@ pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64; pub const MAX_UNCONFIRMED_SLOTS: usize = 5; pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1; pub const DUPLICATE_THRESHOLD: 
f64 = 1.0 - SWITCH_FORK_THRESHOLD - DUPLICATE_LIVENESS_THRESHOLD; + const MAX_VOTE_SIGNATURES: usize = 200; const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000; -// Expect this number to be small enough to minimize thread pool overhead while large enough -// to be able to replay all active forks at the same time in most cases. -const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4; const MAX_REPAIR_RETRY_LOOP_ATTEMPTS: usize = 10; #[derive(PartialEq, Eq, Debug)] @@ -291,7 +289,8 @@ pub struct ReplayStageConfig { // Stops voting until this slot has been reached. Should be used to avoid // duplicate voting which can lead to slashing. pub wait_to_vote_slot: Option, - pub replay_slots_concurrently: bool, + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, } /// Timing information for the ReplayStage main processing loop @@ -574,7 +573,8 @@ impl ReplayStage { ancestor_hashes_replay_update_sender, tower_storage, wait_to_vote_slot, - replay_slots_concurrently, + replay_forks_threads, + replay_transactions_threads, } = config; trace!("replay stage"); @@ -654,19 +654,19 @@ impl ReplayStage { ) }; // Thread pool to (maybe) replay multiple threads in parallel - let replay_mode = if replay_slots_concurrently { + let replay_mode = if replay_forks_threads.get() == 1 { + ForkReplayMode::Serial + } else { let pool = rayon::ThreadPoolBuilder::new() - .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) + .num_threads(replay_forks_threads.get()) .thread_name(|i| format!("solReplayFork{i:02}")) .build() .expect("new rayon threadpool"); ForkReplayMode::Parallel(pool) - } else { - ForkReplayMode::Serial }; // Thread pool to replay multiple transactions within one block in parallel let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() - .num_threads(get_max_thread_count()) + .num_threads(replay_transactions_threads.get()) .thread_name(|i| format!("solReplayTx{i:02}")) .build() .expect("new rayon threadpool"); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 47bc9a7905da5f..2e64fe0675891b 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -53,6 +53,7 @@ use { std::{ collections::HashSet, net::{SocketAddr, UdpSocket}, + num::NonZeroUsize, sync::{atomic::AtomicBool, Arc, RwLock}, thread::{self, JoinHandle}, }, @@ -81,7 +82,6 @@ pub struct TvuSockets { pub ancestor_hashes_requests: UdpSocket, } -#[derive(Default)] pub struct TvuConfig { pub max_ledger_shreds: Option, pub shred_version: u16, @@ -90,7 +90,22 @@ pub struct TvuConfig { // Validators which should be given priority when serving repairs pub repair_whitelist: Arc>>, pub wait_for_vote_to_start_leader: bool, - pub replay_slots_concurrently: bool, + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, +} + +impl Default for TvuConfig { + fn default() -> Self { + Self { + max_ledger_shreds: None, + shred_version: 0, + repair_validators: None, + repair_whitelist: Arc::new(RwLock::new(HashSet::default())), + wait_for_vote_to_start_leader: false, + replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + } + } } impl Tvu { @@ -265,7 +280,8 @@ impl Tvu { ancestor_hashes_replay_update_sender, tower_storage: tower_storage.clone(), wait_to_vote_slot, - replay_slots_concurrently: tvu_config.replay_slots_concurrently, + replay_forks_threads: tvu_config.replay_forks_threads, + replay_transactions_threads: tvu_config.replay_transactions_threads, }; let (voting_sender, voting_receiver) = unbounded(); diff 
--git a/core/src/validator.rs b/core/src/validator.rs index 3d2a93daecba2f..98a267aeafc71a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -74,6 +74,7 @@ use { poh_service::{self, PohService}, }, solana_program_runtime::runtime_config::RuntimeConfig, + solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ @@ -123,6 +124,7 @@ use { std::{ collections::{HashMap, HashSet}, net::SocketAddr, + num::NonZeroUsize, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -260,7 +262,6 @@ pub struct ValidatorConfig { pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, - pub replay_slots_concurrently: bool, pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit, pub block_verification_method: BlockVerificationMethod, pub block_production_method: BlockProductionMethod, @@ -268,6 +269,8 @@ pub struct ValidatorConfig { pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, pub wen_restart_proto_path: Option, pub unified_scheduler_handler_threads: Option, + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, } impl Default for ValidatorConfig { @@ -328,7 +331,6 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), - replay_slots_concurrently: false, banking_trace_dir_byte_limit: 0, block_verification_method: BlockVerificationMethod::default(), block_production_method: BlockProductionMethod::default(), @@ -336,6 +338,8 @@ impl Default for ValidatorConfig { use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, unified_scheduler_handler_threads: None, + replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), } } } @@ -346,6 +350,9 @@ impl ValidatorConfig { enforce_ulimit_nofile: false, rpc_config: JsonRpcConfig::default_for_test(), block_production_method: BlockProductionMethod::ThreadLocalMultiIterator, + replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + replay_transactions_threads: NonZeroUsize::new(get_max_thread_count()) + .expect("thread count is non-zero"), ..Self::default() } } @@ -1305,7 +1312,8 @@ impl Validator { repair_validators: config.repair_validators.clone(), repair_whitelist: config.repair_whitelist.clone(), wait_for_vote_to_start_leader, - replay_slots_concurrently: config.replay_slots_concurrently, + replay_forks_threads: config.replay_forks_threads, + replay_transactions_threads: config.replay_transactions_threads, }, &max_slots, block_metadata_notifier, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 33883bb02c1d77..45045203412a73 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -61,7 +61,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), - replay_slots_concurrently: config.replay_slots_concurrently, banking_trace_dir_byte_limit: config.banking_trace_dir_byte_limit, block_verification_method: config.block_verification_method.clone(), block_production_method: config.block_production_method.clone(), @@ -69,6 +68,8 @@ pub fn 
safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, wen_restart_proto_path: config.wen_restart_proto_path.clone(), unified_scheduler_handler_threads: config.unified_scheduler_handler_threads, + replay_forks_threads: config.replay_forks_threads, + replay_transactions_threads: config.replay_transactions_threads, } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index f127273c8da2f3..8cae6667f87a34 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -52,6 +52,9 @@ use { std::{path::PathBuf, str::FromStr}, }; +pub mod thread_args; +use thread_args::{thread_args, DefaultThreadArgs}; + const EXCLUDE_KEY: &str = "account-index-exclude-key"; const INCLUDE_KEY: &str = "account-index-include-key"; // The default minimal snapshot download speed (bytes/second) @@ -1466,11 +1469,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("BYTES") .help("Maximum number of bytes written to the program log before truncation"), ) - .arg( - Arg::with_name("replay_slots_concurrently") - .long("replay-slots-concurrently") - .help("Allow concurrent replay of slots on different forks"), - ) .arg( Arg::with_name("banking_trace_dir_byte_limit") // expose friendly alternative name to cli than internal @@ -1555,6 +1553,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { ", ), ) + .args(&thread_args(&default_args.thread_args)) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") .subcommand( @@ -2073,6 +2072,13 @@ fn deprecated_arguments() -> Vec { .long("no-rocksdb-compaction") .takes_value(false) .help("Disable manual compaction of the ledger database")); + add_arg!( + Arg::with_name("replay_slots_concurrently") + .long("replay-slots-concurrently") + .help("Allow concurrent replay of slots on different forks") + .conflicts_with("replay_forks_threads"), + replaced_by: "replay_forks_threads", + usage_warning: "Equivalent behavior to this flag would be --replay-forks-threads 4"); add_arg!(Arg::with_name("rocksdb_compaction_interval") .long("rocksdb-compaction-interval-slots") .value_name("ROCKSDB_COMPACTION_INTERVAL_SLOTS") @@ -2195,6 +2201,8 @@ pub struct DefaultArgs { pub banking_trace_dir_byte_limit: String, pub wen_restart_path: String, + + pub thread_args: DefaultThreadArgs, } impl DefaultArgs { @@ -2277,6 +2285,7 @@ impl DefaultArgs { wait_for_restart_window_max_delinquent_stake: "5".to_string(), banking_trace_dir_byte_limit: BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT.to_string(), wen_restart_path: "wen_restart_progress.proto".to_string(), + thread_args: DefaultThreadArgs::default(), } } } diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs new file mode 100644 index 00000000000000..53d8cf15d984a0 --- /dev/null +++ b/validator/src/cli/thread_args.rs @@ -0,0 +1,115 @@ +//! 
Arguments for controlling the number of threads allocated for various tasks
+
+use {
+    clap::{value_t_or_exit, Arg, ArgMatches},
+    solana_clap_utils::{hidden_unless_forced, input_validators::is_within_range},
+    solana_rayon_threadlimit::get_max_thread_count,
+    std::{num::NonZeroUsize, ops::RangeInclusive},
+};
+
+// Need this struct to provide &str whose lifetime matches that of the CLAP Arg's
+pub struct DefaultThreadArgs {
+    pub replay_forks_threads: String,
+    pub replay_transactions_threads: String,
+}
+
+impl Default for DefaultThreadArgs {
+    fn default() -> Self {
+        Self {
+            replay_forks_threads: ReplayForksThreadsArg::default().to_string(),
+            replay_transactions_threads: ReplayTransactionsThreadsArg::default().to_string(),
+        }
+    }
+}
+
+pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec<Arg<'_, 'a>> {
+    vec![
+        new_thread_arg::<ReplayForksThreadsArg>(&defaults.replay_forks_threads),
+        new_thread_arg::<ReplayTransactionsThreadsArg>(&defaults.replay_transactions_threads),
+    ]
+}
+
+fn new_thread_arg<'a, T: ThreadArg>(default: &str) -> Arg<'_, 'a> {
+    Arg::with_name(T::NAME)
+        .long(T::LONG_NAME)
+        .takes_value(true)
+        .value_name("NUMBER")
+        .default_value(default)
+        .validator(|num| is_within_range(num, T::range()))
+        .hidden(hidden_unless_forced())
+        .help(T::HELP)
+}
+
+pub struct NumThreadConfig {
+    pub replay_forks_threads: NonZeroUsize,
+    pub replay_transactions_threads: NonZeroUsize,
+}
+
+pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig {
+    NumThreadConfig {
+        replay_forks_threads: if matches.is_present("replay_slots_concurrently") {
+            NonZeroUsize::new(4).expect("4 is non-zero")
+        } else {
+            value_t_or_exit!(matches, ReplayForksThreadsArg::NAME, NonZeroUsize)
+        },
+        replay_transactions_threads: value_t_or_exit!(
+            matches,
+            ReplayTransactionsThreadsArg::NAME,
+            NonZeroUsize
+        ),
+    }
+}
+
+/// Configuration for CLAP arguments that control the number of threads for various functions
+trait ThreadArg {
+    /// The argument's name
+    const NAME: &'static str;
+    /// The argument's long name
+    const LONG_NAME: &'static str;
+    /// The argument's help message
+    const HELP: &'static str;
+
+    /// The default number of threads
+    fn default() -> usize;
+    /// The minimum allowed number of threads (inclusive)
+    fn min() -> usize {
+        1
+    }
+    /// The maximum allowed number of threads (inclusive)
+    fn max() -> usize {
+        // By default, no thread pool should scale over the number of the machine's threads
+        get_max_thread_count()
+    }
+    /// The range of allowed number of threads (inclusive on both ends)
+    fn range() -> RangeInclusive<usize> {
+        RangeInclusive::new(Self::min(), Self::max())
+    }
+}
+
+struct ReplayForksThreadsArg;
+impl ThreadArg for ReplayForksThreadsArg {
+    const NAME: &'static str = "replay_forks_threads";
+    const LONG_NAME: &'static str = "replay-forks-threads";
+    const HELP: &'static str = "Number of threads to use for replay of blocks on different forks";
+
+    fn default() -> usize {
+        // Default to single threaded fork execution
+        1
+    }
+    fn max() -> usize {
+        // Choose a value that is small enough to limit the overhead of having a large thread pool
+        // while also being large enough to allow replay of all active forks in most scenarios
+        4
+    }
+}
+
+struct ReplayTransactionsThreadsArg;
+impl ThreadArg for ReplayTransactionsThreadsArg {
+    const NAME: &'static str = "replay_transactions_threads";
+    const LONG_NAME: &'static str = "replay-transactions-threads";
+    const HELP: &'static str = "Number of threads to use for transaction replay";
+
+    fn default() -> usize {
+        get_max_thread_count()
+    }
+}
diff --git
a/validator/src/main.rs b/validator/src/main.rs index cadd2759040657..56050031975a52 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -6,7 +6,7 @@ use { admin_rpc_service, admin_rpc_service::{load_staked_nodes_overrides, StakedNodesOverrides}, bootstrap, - cli::{app, warn_for_deprecated_arguments, DefaultArgs}, + cli::{self, app, warn_for_deprecated_arguments, DefaultArgs}, dashboard::Dashboard, ledger_lockfile, lock_ledger, new_spinner_progress_bar, println_name_value, redirect_stderr_to_file, @@ -1331,6 +1331,11 @@ pub fn main() { let full_api = matches.is_present("full_rpc_api"); + let cli::thread_args::NumThreadConfig { + replay_forks_threads, + replay_transactions_threads, + } = cli::thread_args::parse_num_threads_args(&matches); + let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -1464,12 +1469,13 @@ pub fn main() { ..RuntimeConfig::default() }, staked_nodes_overrides: staked_nodes_overrides.clone(), - replay_slots_concurrently: matches.is_present("replay_slots_concurrently"), use_snapshot_archives_at_startup: value_t_or_exit!( matches, use_snapshot_archives_at_startup::cli::NAME, UseSnapshotArchivesAtStartup ), + replay_forks_threads, + replay_transactions_threads, ..ValidatorConfig::default() }; From ade90354a1ae7cb5489fcc64c74e4d0a15087b46 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Thu, 21 Mar 2024 09:14:16 +1100 Subject: [PATCH 025/153] runtime: use str::split instead of regex to parse appendvec filenames (#323) --- runtime/src/snapshot_bank_utils.rs | 3 +- .../snapshot_storage_rebuilder.rs | 30 ++++++++----------- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index ab3a76fc80945a..cfb767d11fd7ab 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -2366,7 +2366,8 @@ mod tests { fs::read_dir(path).unwrap().for_each(|entry| { let path = entry.unwrap().path(); let filename = path.file_name().unwrap(); - let (_slot, append_vec_id) = get_slot_and_append_vec_id(filename.to_str().unwrap()); + let (_slot, append_vec_id) = + get_slot_and_append_vec_id(filename.to_str().unwrap()).unwrap(); max_id = std::cmp::max(max_id, append_vec_id); }); } diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 30cbbd4afd5970..5806fcd46ccf5e 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -25,6 +25,7 @@ use { fs::File, io::{BufReader, Error as IoError}, path::{Path, PathBuf}, + str::FromStr as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, @@ -36,8 +37,6 @@ use { lazy_static! 
{ static ref VERSION_FILE_REGEX: Regex = Regex::new(r"^version$").unwrap(); static ref BANK_FIELDS_FILE_REGEX: Regex = Regex::new(r"^[0-9]+(\.pre)?$").unwrap(); - static ref STORAGE_FILE_REGEX: Regex = - Regex::new(r"^(?P[0-9]+)\.(?P[0-9]+)$").unwrap(); } /// Convenient wrapper for snapshot version and rebuilt storages @@ -268,8 +267,7 @@ impl SnapshotStorageRebuilder { /// Process an append_vec_file fn process_append_vec_file(&self, path: PathBuf) -> Result<(), SnapshotError> { let filename = path.file_name().unwrap().to_str().unwrap().to_owned(); - if let Some(SnapshotFileKind::Storage) = get_snapshot_file_kind(&filename) { - let (slot, append_vec_id) = get_slot_and_append_vec_id(&filename); + if let Ok((slot, append_vec_id)) = get_slot_and_append_vec_id(&filename) { if self.snapshot_from == SnapshotFrom::Dir { // Keep track of the highest append_vec_id in the system, so the future append_vecs // can be assigned to unique IDs. This is only needed when loading from a snapshot @@ -305,7 +303,7 @@ impl SnapshotStorageRebuilder { .iter() .map(|path| { let filename = path.file_name().unwrap().to_str().unwrap(); - let (_, old_append_vec_id) = get_slot_and_append_vec_id(filename); + let (_, old_append_vec_id) = get_slot_and_append_vec_id(filename)?; let current_len = *self .snapshot_storage_lengths .get(&slot) @@ -439,7 +437,7 @@ fn get_snapshot_file_kind(filename: &str) -> Option { Some(SnapshotFileKind::Version) } else if BANK_FIELDS_FILE_REGEX.is_match(filename) { Some(SnapshotFileKind::BankFields) - } else if STORAGE_FILE_REGEX.is_match(filename) { + } else if get_slot_and_append_vec_id(filename).is_ok() { Some(SnapshotFileKind::Storage) } else { None @@ -447,17 +445,13 @@ fn get_snapshot_file_kind(filename: &str) -> Option { } /// Get the slot and append vec id from the filename -pub(crate) fn get_slot_and_append_vec_id(filename: &str) -> (Slot, usize) { - STORAGE_FILE_REGEX - .captures(filename) - .map(|cap| { - let slot_str = cap.name("slot").map(|m| m.as_str()).unwrap(); - let id_str = cap.name("id").map(|m| m.as_str()).unwrap(); - let slot = slot_str.parse().unwrap(); - let id = id_str.parse().unwrap(); - (slot, id) - }) - .unwrap() +pub(crate) fn get_slot_and_append_vec_id(filename: &str) -> Result<(Slot, usize), SnapshotError> { + let mut parts = filename.splitn(2, '.'); + let slot = parts.next().and_then(|s| Slot::from_str(s).ok()); + let id = parts.next().and_then(|s| usize::from_str(s).ok()); + + slot.zip(id) + .ok_or_else(|| SnapshotError::InvalidAppendVecPath(PathBuf::from(filename))) } #[cfg(test)] @@ -510,7 +504,7 @@ mod tests { let expected_slot = 12345; let expected_id = 9987; let (slot, id) = - get_slot_and_append_vec_id(&AppendVec::file_name(expected_slot, expected_id)); + get_slot_and_append_vec_id(&AppendVec::file_name(expected_slot, expected_id)).unwrap(); assert_eq!(expected_slot, slot); assert_eq!(expected_id, id); } From 54575feb420cfd46680812f95eaafbfc2a69623d Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 20 Mar 2024 20:03:20 -0400 Subject: [PATCH 026/153] SVM: add a missing doc comment (#347) --- svm/src/transaction_processor.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index f28820c41cceed..fbf0a8266f415c 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -135,6 +135,9 @@ pub struct TransactionBatchProcessor { /// Optional config parameters that can override runtime behavior runtime_config: Arc, + /// SysvarCache is a collection of system 
variables that are + /// accessible from on chain programs. It is passed to SVM from + /// client code (e.g. Bank) and forwarded to the MessageProcessor. pub sysvar_cache: RwLock, /// Programs required for transaction batch processing From f194f70e68422a7c6351149d0eadb8d9797b26d8 Mon Sep 17 00:00:00 2001 From: Joe C Date: Wed, 20 Mar 2024 20:46:38 -0500 Subject: [PATCH 027/153] Runtime: Refactor builtins module (#304) * runtime: builtins: move to new bank submodule * runtime: builtins: change `feature_id` to `enable_feature_id` * runtime: builtins: add stateless builtins --- runtime/src/bank.rs | 11 +-- .../src/{builtins.rs => bank/builtins/mod.rs} | 70 +++++-------------- runtime/src/bank/builtins/prototypes.rs | 48 +++++++++++++ runtime/src/lib.rs | 1 - runtime/src/serde_snapshot.rs | 3 +- runtime/src/snapshot_bank_utils.rs | 3 +- runtime/src/snapshot_minimizer.rs | 5 +- 7 files changed, 80 insertions(+), 61 deletions(-) rename runtime/src/{builtins.rs => bank/builtins/mod.rs} (55%) create mode 100644 runtime/src/bank/builtins/prototypes.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e2ab858660361f..a4555ed60dbd79 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -42,9 +42,11 @@ use solana_sdk::recent_blockhashes_account; pub use solana_sdk::reward_type::RewardType; use { crate::{ - bank::metrics::*, + bank::{ + builtins::{BuiltinPrototype, BUILTINS}, + metrics::*, + }, bank_forks::BankForks, - builtins::{BuiltinPrototype, BUILTINS}, epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, @@ -210,6 +212,7 @@ struct VerifyAccountsHashConfig { mod address_lookup_table; pub mod bank_hash_details; mod builtin_programs; +pub mod builtins; pub mod epoch_accounts_hash_utils; mod fee_distribution; mod metrics; @@ -5994,7 +5997,7 @@ impl Bank { .iter() .chain(additional_builtins.unwrap_or(&[]).iter()) { - if builtin.feature_id.is_none() { + if builtin.enable_feature_id.is_none() { self.add_builtin( builtin.program_id, builtin.name.to_string(), @@ -7340,7 +7343,7 @@ impl Bank { new_feature_activations: &HashSet, ) { for builtin in BUILTINS.iter() { - if let Some(feature_id) = builtin.feature_id { + if let Some(feature_id) = builtin.enable_feature_id { let should_apply_action_for_feature_transition = if only_apply_transitions_for_new_features { new_feature_activations.contains(&feature_id) diff --git a/runtime/src/builtins.rs b/runtime/src/bank/builtins/mod.rs similarity index 55% rename from runtime/src/builtins.rs rename to runtime/src/bank/builtins/mod.rs index 2c7c36fa0ec415..4e8574b7f9144c 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/bank/builtins/mod.rs @@ -1,110 +1,78 @@ -use { - solana_program_runtime::invoke_context::BuiltinFunctionWithContext, - solana_sdk::{ - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, feature_set, pubkey::Pubkey, - }, -}; - -/// Transitions of built-in programs at epoch bondaries when features are activated. 
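The enable_feature_id rename in the hunks above makes the gating rule easier to read: None means the builtin is always installed, Some(id) means it is installed only once that feature activates. A small illustrative sketch of that rule with made-up FeatureId and BuiltinEntry types (the real prototypes, shown below, also carry a program id and an entrypoint):

    use std::collections::HashSet;

    type FeatureId = u32;

    struct BuiltinEntry {
        name: &'static str,
        enable_feature_id: Option<FeatureId>,
    }

    fn builtins_to_activate<'a>(
        table: &'a [BuiltinEntry],
        active_features: &HashSet<FeatureId>,
    ) -> Vec<&'a str> {
        table
            .iter()
            // None means "always on"; Some(id) means "on once the feature activates".
            .filter(|b| b.enable_feature_id.map_or(true, |id| active_features.contains(&id)))
            .map(|b| b.name)
            .collect()
    }

    fn main() {
        let table = [
            BuiltinEntry { name: "system_program", enable_feature_id: None },
            BuiltinEntry { name: "loader_v4", enable_feature_id: Some(7) },
        ];
        let active = HashSet::from([7]);
        assert_eq!(builtins_to_activate(&table, &active), ["system_program", "loader_v4"]);
    }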
-pub struct BuiltinPrototype { - pub feature_id: Option, - pub program_id: Pubkey, - pub name: &'static str, - pub entrypoint: BuiltinFunctionWithContext, -} +pub mod prototypes; -impl std::fmt::Debug for BuiltinPrototype { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut builder = f.debug_struct("BuiltinPrototype"); - builder.field("program_id", &self.program_id); - builder.field("name", &self.name); - builder.field("feature_id", &self.feature_id); - builder.finish() - } -} - -#[cfg(RUSTC_WITH_SPECIALIZATION)] -impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { - fn example() -> Self { - // BuiltinPrototype isn't serializable by definition. - solana_program_runtime::declare_process_instruction!(MockBuiltin, 0, |_invoke_context| { - // Do nothing - Ok(()) - }); - Self { - feature_id: None, - program_id: Pubkey::default(), - name: "", - entrypoint: MockBuiltin::vm, - } - } -} +pub use prototypes::{BuiltinPrototype, StatelessBuiltinPrototype}; +use solana_sdk::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, feature_set}; pub static BUILTINS: &[BuiltinPrototype] = &[ BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: solana_system_program::id(), name: "system_program", entrypoint: solana_system_program::system_processor::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: solana_vote_program::id(), name: "vote_program", entrypoint: solana_vote_program::vote_processor::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: solana_stake_program::id(), name: "stake_program", entrypoint: solana_stake_program::stake_instruction::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: solana_config_program::id(), name: "config_program", entrypoint: solana_config_program::config_processor::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: bpf_loader_deprecated::id(), name: "solana_bpf_loader_deprecated_program", entrypoint: solana_bpf_loader_program::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: bpf_loader::id(), name: "solana_bpf_loader_program", entrypoint: solana_bpf_loader_program::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: bpf_loader_upgradeable::id(), name: "solana_bpf_loader_upgradeable_program", entrypoint: solana_bpf_loader_program::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: solana_sdk::compute_budget::id(), name: "compute_budget_program", entrypoint: solana_compute_budget_program::Entrypoint::vm, }, BuiltinPrototype { - feature_id: None, + enable_feature_id: None, program_id: solana_sdk::address_lookup_table::program::id(), name: "address_lookup_table_program", entrypoint: solana_address_lookup_table_program::processor::Entrypoint::vm, }, BuiltinPrototype { - feature_id: Some(feature_set::zk_token_sdk_enabled::id()), + enable_feature_id: Some(feature_set::zk_token_sdk_enabled::id()), program_id: solana_zk_token_sdk::zk_token_proof_program::id(), name: "zk_token_proof_program", entrypoint: solana_zk_token_proof_program::Entrypoint::vm, }, BuiltinPrototype { - feature_id: Some(feature_set::enable_program_runtime_v2_and_loader_v4::id()), + enable_feature_id: Some(feature_set::enable_program_runtime_v2_and_loader_v4::id()), program_id: solana_sdk::loader_v4::id(), name: 
"loader_v4", entrypoint: solana_loader_v4_program::Entrypoint::vm, }, ]; + +pub static STATELESS_BUILTINS: &[StatelessBuiltinPrototype] = &[StatelessBuiltinPrototype { + program_id: solana_sdk::feature::id(), + name: "feature_gate_program", +}]; diff --git a/runtime/src/bank/builtins/prototypes.rs b/runtime/src/bank/builtins/prototypes.rs new file mode 100644 index 00000000000000..5d9ea505152dda --- /dev/null +++ b/runtime/src/bank/builtins/prototypes.rs @@ -0,0 +1,48 @@ +use { + solana_program_runtime::invoke_context::BuiltinFunctionWithContext, solana_sdk::pubkey::Pubkey, +}; + +/// Transitions of built-in programs at epoch boundaries when features are activated. +pub struct BuiltinPrototype { + pub enable_feature_id: Option, + pub program_id: Pubkey, + pub name: &'static str, + pub entrypoint: BuiltinFunctionWithContext, +} + +impl std::fmt::Debug for BuiltinPrototype { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut builder = f.debug_struct("BuiltinPrototype"); + builder.field("program_id", &self.program_id); + builder.field("name", &self.name); + builder.field("enable_feature_id", &self.enable_feature_id); + builder.finish() + } +} + +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { + fn example() -> Self { + // BuiltinPrototype isn't serializable by definition. + solana_program_runtime::declare_process_instruction!(MockBuiltin, 0, |_invoke_context| { + // Do nothing + Ok(()) + }); + Self { + enable_feature_id: None, + program_id: Pubkey::default(), + name: "", + entrypoint: MockBuiltin::vm, + } + } +} + +/// Transitions of stateless built-in programs at epoch boundaries when +/// features are activated. +/// These are built-in programs that don't actually exist, but their address +/// is reserved. +#[derive(Debug)] +pub struct StatelessBuiltinPrototype { + pub program_id: Pubkey, + pub name: &'static str, +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index fac4169301004d..57936c2c7e6bac 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -9,7 +9,6 @@ pub mod bank; pub mod bank_client; pub mod bank_forks; pub mod bank_utils; -pub mod builtins; pub mod commitment; pub mod compute_budget_details; mod epoch_rewards_hasher; diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 8e678044e23670..f866f32577f38e 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -1,7 +1,6 @@ use { crate::{ - bank::{Bank, BankFieldsToDeserialize, BankRc}, - builtins::BuiltinPrototype, + bank::{builtins::BuiltinPrototype, Bank, BankFieldsToDeserialize, BankRc}, epoch_stakes::EpochStakes, serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{ diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index cfb767d11fd7ab..a9f613e431feaa 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1,7 +1,6 @@ use { crate::{ - bank::{Bank, BankFieldsToDeserialize, BankSlotDelta}, - builtins::BuiltinPrototype, + bank::{builtins::BuiltinPrototype, Bank, BankFieldsToDeserialize, BankSlotDelta}, serde_snapshot::{ bank_from_streams, bank_to_stream, fields_from_streams, BankIncrementalSnapshotPersistence, SerdeStyle, diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 15fe706dc0e504..ddd47c887ab7bb 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -1,7 +1,10 @@ //! 
Used to create minimal snapshots - separated here to keep accounts_db simpler
 
 use {
-    crate::{bank::Bank, builtins::BUILTINS, static_ids},
+    crate::{
+        bank::{builtins::BUILTINS, Bank},
+        static_ids,
+    },
     dashmap::DashSet,
     log::info,
     rayon::{

From 11aa06d24f22f9f464bf46edcf82d684b3c26bf2 Mon Sep 17 00:00:00 2001
From: Wen <113942165+wen-coding@users.noreply.github.com>
Date: Wed, 20 Mar 2024 19:38:46 -0700
Subject: [PATCH 028/153] wen-restart: Find heaviest fork (#183)

* Pass the final result of LastVotedForkSlots aggregation to the next
  stage and find the heaviest fork we will gossip to others.

* Change comments.

* Small fixes to address PR comments.

* Move the correctness proof to the SIMD.

* Fix a broken merge.

* Use blockstore to check the parent slot of any block in FindHeaviestFork.

* Change the error message.

* Add a special message when the first slot in the list doesn't link to the root.

---
 wen-restart/proto/wen_restart.proto           |  13 +
 .../src/last_voted_fork_slots_aggregate.rs    |  23 +-
 wen-restart/src/wen_restart.rs                | 511 +++++++++++++++---
 3 files changed, 476 insertions(+), 71 deletions(-)

diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto
index b25c2f17764bfd..98ea511a84f38c 100644
--- a/wen-restart/proto/wen_restart.proto
+++ b/wen-restart/proto/wen_restart.proto
@@ -20,10 +20,23 @@ message LastVotedForkSlotsRecord {
 
 message LastVotedForkSlotsAggregateRecord {
   map<string, LastVotedForkSlotsRecord> received = 1;
+  optional LastVotedForkSlotsAggregateFinal final_result = 2;
+}
+
+message LastVotedForkSlotsAggregateFinal {
+  map<uint64, uint64> slots_stake_map = 1;
+  uint64 total_active_stake = 2;
+}
+
+message HeaviestFork {
+  uint64 slot = 1;
+  string bankhash = 2;
+  uint64 total_active_stake = 3;
 }
 
 message WenRestartProgress {
   State state = 1;
   optional LastVotedForkSlotsRecord my_last_voted_fork_slots = 2;
   optional LastVotedForkSlotsAggregateRecord last_voted_fork_slots_aggregate = 3;
+  optional HeaviestFork my_heaviest_fork = 4;
 }
\ No newline at end of file
diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs
index 96127c1a9fc2a3..451767ace4a892 100644
--- a/wen-restart/src/last_voted_fork_slots_aggregate.rs
+++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs
@@ -22,6 +22,12 @@ pub struct LastVotedForkSlotsAggregate {
     slots_to_repair: HashSet<Slot>,
 }
 
+#[derive(Clone, Debug, PartialEq)]
+pub struct LastVotedForkSlotsFinalResult {
+    pub slots_stake_map: HashMap<Slot, u64>,
+    pub total_active_stake: u64,
+}
+
 impl LastVotedForkSlotsAggregate {
     pub(crate) fn new(
         root_slot: Slot,
@@ -35,7 +41,7 @@ impl LastVotedForkSlotsAggregate {
         active_peers.insert(*my_pubkey);
         let mut slots_stake_map = HashMap::new();
         for slot in last_voted_fork_slots {
-            if slot > &root_slot {
+            if slot >= &root_slot {
                 slots_stake_map.insert(*slot, sender_stake);
             }
         }
@@ -137,6 +143,21 @@ impl LastVotedForkSlotsAggregate {
     pub(crate) fn slots_to_repair_iter(&self) -> impl Iterator<Item = &Slot> {
         self.slots_to_repair.iter()
     }
+
+    // TODO(wen): use better epoch stake and add a test later.
+ fn total_active_stake(&self) -> u64 { + self.active_peers.iter().fold(0, |sum: u64, pubkey| { + sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey)) + }) + } + + pub(crate) fn get_final_result(self) -> LastVotedForkSlotsFinalResult { + let total_active_stake = self.total_active_stake(); + LastVotedForkSlotsFinalResult { + slots_stake_map: self.slots_stake_map, + total_active_stake, + } + } } #[cfg(test)] diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index fe28270a4e9dc5..17a909df7243ab 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -2,10 +2,13 @@ use { crate::{ - last_voted_fork_slots_aggregate::LastVotedForkSlotsAggregate, + last_voted_fork_slots_aggregate::{ + LastVotedForkSlotsAggregate, LastVotedForkSlotsFinalResult, + }, solana::wen_restart_proto::{ - self, LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsRecord, - State as RestartState, WenRestartProgress, + self, HeaviestFork, LastVotedForkSlotsAggregateFinal, + LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsRecord, State as RestartState, + WenRestartProgress, }, }, anyhow::Result, @@ -37,9 +40,15 @@ use { // If >42% of the validators have this block, repair this block locally. const REPAIR_THRESHOLD: f64 = 0.42; +// When counting Heaviest Fork, only count those with no less than +// 67% - 5% - (100% - active_stake) = active_stake - 38% stake. +const HEAVIEST_FORK_THRESHOLD_DELTA: f64 = 0.38; #[derive(Debug, PartialEq)] pub enum WenRestartError { + BlockNotFound(Slot), + BlockNotLinkedToExpectedParent(Slot, Option, Slot), + ChildStakeLargerThanParent(Slot, u64, Slot, u64), Exiting, InvalidLastVoteType(VoteTransaction), MalformedLastVotedForkSlotsProtobuf(Option), @@ -50,6 +59,28 @@ pub enum WenRestartError { impl std::fmt::Display for WenRestartError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { + WenRestartError::BlockNotFound(slot) => { + write!(f, "Block not found: {}", slot) + } + WenRestartError::BlockNotLinkedToExpectedParent(slot, parent, expected_parent) => { + write!( + f, + "Block {} is not linked to expected parent {} but to {:?}", + slot, expected_parent, parent + ) + } + WenRestartError::ChildStakeLargerThanParent( + slot, + child_stake, + parent, + parent_stake, + ) => { + write!( + f, + "Block {} has more stake {} than its parent {} with stake {}", + slot, child_stake, parent, parent_stake + ) + } WenRestartError::Exiting => write!(f, "Exiting"), WenRestartError::InvalidLastVoteType(vote) => { write!(f, "Invalid last vote type: {:?}", vote) @@ -80,6 +111,11 @@ pub(crate) enum WenRestartProgressInternalState { }, LastVotedForkSlots { last_voted_fork_slots: Vec, + aggregate_final_result: Option, + }, + FindHeaviestFork { + aggregate_final_result: LastVotedForkSlotsFinalResult, + my_heaviest_fork: Option, }, Done, } @@ -108,7 +144,7 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( wen_restart_repair_slots: Arc>>, exit: Arc, progress: &mut WenRestartProgress, -) -> Result<()> { +) -> Result { let root_bank; { root_bank = bank_forks.read().unwrap().root_bank().clone(); @@ -132,6 +168,7 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( } else { progress.last_voted_fork_slots_aggregate = Some(LastVotedForkSlotsAggregateRecord { received: HashMap::new(), + final_result: None, }); } let mut cursor = solana_gossip::crds::Cursor::default(); @@ -198,7 +235,64 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( sleep(Duration::from_millis(time_left)); } } - Ok(()) + 
Ok(last_voted_fork_slots_aggregate.get_final_result())
 }
 
+// Verify that all blocks with at least (active_stake_percent - 38%) of the stake form a
+// single chain from the root, and use the highest slot in the blocks as the heaviest fork.
+// Please see SIMD 46 "gossip current heaviest fork" for the correctness proof.
+pub(crate) fn find_heaviest_fork(
+    aggregate_final_result: LastVotedForkSlotsFinalResult,
+    bank_forks: Arc<RwLock<BankForks>>,
+    blockstore: Arc<Blockstore>,
+    exit: Arc<AtomicBool>,
+) -> Result<(Slot, Hash)> {
+    // Because everything else is stopped, it's okay to grab a big lock on bank_forks.
+    let my_bank_forks = bank_forks.read().unwrap();
+    let root_bank = my_bank_forks.root_bank().clone();
+    let root_slot = root_bank.slot();
+    // TODO: Should use better epoch_stakes later.
+    let epoch_stake = root_bank.epoch_stakes(root_bank.epoch()).unwrap();
+    let total_stake = epoch_stake.total_stake();
+    let stake_threshold = aggregate_final_result
+        .total_active_stake
+        .saturating_sub((HEAVIEST_FORK_THRESHOLD_DELTA * total_stake as f64) as u64);
+    let mut slots = aggregate_final_result
+        .slots_stake_map
+        .iter()
+        .filter(|(slot, stake)| **slot > root_slot && **stake > stake_threshold)
+        .map(|(slot, _)| *slot)
+        .collect::<Vec<Slot>>();
+    slots.sort();
+    let mut expected_parent = root_slot;
+    for slot in slots {
+        if exit.load(Ordering::Relaxed) {
+            return Err(WenRestartError::Exiting.into());
+        }
+        if let Ok(Some(block_meta)) = blockstore.meta(slot) {
+            if block_meta.parent_slot != Some(expected_parent) {
+                if expected_parent == root_slot {
+                    error!("First block {} in the repair list is not linked to local root {}; this could mean our root is too old",
+                        slot, root_slot);
+                } else {
+                    error!(
+                        "Block {} in blockstore is not linked to expected parent from Wen Restart {} but to block {:?}",
+                        slot, expected_parent, block_meta.parent_slot
+                    );
+                }
+                return Err(WenRestartError::BlockNotLinkedToExpectedParent(
+                    slot,
+                    block_meta.parent_slot,
+                    expected_parent,
+                )
+                .into());
+            }
+            expected_parent = slot;
+        } else {
+            return Err(WenRestartError::BlockNotFound(slot).into());
+        }
+    }
+    Ok((expected_parent, Hash::default()))
+}
+
 pub fn wait_for_wen_restart(
@@ -214,30 +308,74 @@
     let (mut state, mut progress) =
         initialize(wen_restart_path, last_vote.clone(), blockstore.clone())?;
     loop {
-        match &state {
+        state = match state {
             WenRestartProgressInternalState::Init {
                 last_voted_fork_slots,
                 last_vote_bankhash,
             } => {
                 progress.my_last_voted_fork_slots = Some(send_restart_last_voted_fork_slots(
                     cluster_info.clone(),
+                    &last_voted_fork_slots,
+                    last_vote_bankhash,
+                )?);
+                WenRestartProgressInternalState::Init {
                     last_voted_fork_slots,
-                    *last_vote_bankhash,
-                )?)
+ last_vote_bankhash, + } } WenRestartProgressInternalState::LastVotedForkSlots { last_voted_fork_slots, - } => aggregate_restart_last_voted_fork_slots( - wen_restart_path, - wait_for_supermajority_threshold_percent, - cluster_info.clone(), - last_voted_fork_slots, - bank_forks.clone(), - blockstore.clone(), - wen_restart_repair_slots.clone().unwrap(), - exit.clone(), - &mut progress, - )?, + aggregate_final_result, + } => { + let final_result = match aggregate_final_result { + Some(result) => result, + None => aggregate_restart_last_voted_fork_slots( + wen_restart_path, + wait_for_supermajority_threshold_percent, + cluster_info.clone(), + &last_voted_fork_slots, + bank_forks.clone(), + blockstore.clone(), + wen_restart_repair_slots.clone().unwrap(), + exit.clone(), + &mut progress, + )?, + }; + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots, + aggregate_final_result: Some(final_result), + } + } + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result, + my_heaviest_fork, + } => { + let heaviest_fork = match my_heaviest_fork { + Some(heaviest_fork) => heaviest_fork, + None => { + let total_active_stake = aggregate_final_result.total_active_stake; + let (slot, bankhash) = find_heaviest_fork( + aggregate_final_result.clone(), + bank_forks.clone(), + blockstore.clone(), + exit.clone(), + )?; + info!( + "Heaviest fork found: slot: {}, bankhash: {}", + slot, bankhash + ); + HeaviestFork { + slot, + bankhash: bankhash.to_string(), + total_active_stake, + } + } + }; + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result, + my_heaviest_fork: Some(heaviest_fork), + } + } WenRestartProgressInternalState::Done => return Ok(()), }; state = increment_and_write_wen_restart_records(wen_restart_path, state, &mut progress)?; @@ -257,13 +395,42 @@ pub(crate) fn increment_and_write_wen_restart_records( progress.set_state(RestartState::LastVotedForkSlots); WenRestartProgressInternalState::LastVotedForkSlots { last_voted_fork_slots, + aggregate_final_result: None, } } WenRestartProgressInternalState::LastVotedForkSlots { last_voted_fork_slots: _, + aggregate_final_result, } => { - progress.set_state(RestartState::Done); - WenRestartProgressInternalState::Done + if let Some(aggregate_final_result) = aggregate_final_result { + progress.set_state(RestartState::HeaviestFork); + if let Some(aggregate_record) = progress.last_voted_fork_slots_aggregate.as_mut() { + aggregate_record.final_result = Some(LastVotedForkSlotsAggregateFinal { + slots_stake_map: aggregate_final_result.slots_stake_map.clone(), + total_active_stake: aggregate_final_result.total_active_stake, + }); + } + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result, + my_heaviest_fork: None, + } + } else { + return Err( + WenRestartError::UnexpectedState(RestartState::LastVotedForkSlots).into(), + ); + } + } + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result: _, + my_heaviest_fork, + } => { + if let Some(my_heaviest_fork) = my_heaviest_fork { + progress.set_state(RestartState::Done); + progress.my_heaviest_fork = Some(my_heaviest_fork.clone()); + WenRestartProgressInternalState::Done + } else { + return Err(WenRestartError::UnexpectedState(RestartState::HeaviestFork).into()); + } } WenRestartProgressInternalState::Done => { return Err(WenRestartError::UnexpectedState(RestartState::Done).into()) @@ -289,8 +456,7 @@ pub(crate) fn initialize( ); let progress = WenRestartProgress { state: RestartState::Init.into(), - 
my_last_voted_fork_slots: None, - last_voted_fork_slots_aggregate: None, + ..Default::default() }; write_wen_restart_records(records_path, &progress)?; progress @@ -346,6 +512,17 @@ pub(crate) fn initialize( Ok(( WenRestartProgressInternalState::LastVotedForkSlots { last_voted_fork_slots: record.last_voted_fork_slots.clone(), + aggregate_final_result: progress + .last_voted_fork_slots_aggregate + .as_ref() + .and_then(|r| { + r.final_result.as_ref().map(|result| { + LastVotedForkSlotsFinalResult { + slots_stake_map: result.slots_stake_map.clone(), + total_active_stake: result.total_active_stake, + } + }) + }), }, progress, )) @@ -353,6 +530,24 @@ pub(crate) fn initialize( Err(WenRestartError::MalformedLastVotedForkSlotsProtobuf(None).into()) } } + RestartState::HeaviestFork => Ok(( + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result: progress + .last_voted_fork_slots_aggregate + .as_ref() + .and_then(|r| { + r.final_result + .as_ref() + .map(|result| LastVotedForkSlotsFinalResult { + slots_stake_map: result.slots_stake_map.clone(), + total_active_stake: result.total_active_stake, + }) + }) + .unwrap(), + my_heaviest_fork: progress.my_heaviest_fork.clone(), + }, + progress, + )), _ => Err(WenRestartError::UnexpectedState(progress.state()).into()), } } @@ -380,7 +575,7 @@ pub(crate) fn write_wen_restart_records( #[cfg(test)] mod tests { use { - crate::wen_restart::*, + crate::wen_restart::{tests::wen_restart_proto::LastVotedForkSlotsAggregateFinal, *}, assert_matches::assert_matches, solana_gossip::{ cluster_info::ClusterInfo, @@ -419,7 +614,7 @@ mod tests { fn push_restart_last_voted_fork_slots( cluster_info: Arc, node: &LegacyContactInfo, - expected_slots_to_repair: &[Slot], + last_voted_fork_slots: &[Slot], last_vote_hash: &Hash, node_keypair: &Keypair, wallclock: u64, @@ -427,7 +622,7 @@ mod tests { let slots = RestartLastVotedForkSlots::new( *node.pubkey(), wallclock, - expected_slots_to_repair, + last_voted_fork_slots, *last_vote_hash, SHRED_VERSION, ) @@ -523,8 +718,7 @@ mod tests { let start = timestamp(); let mut progress = WenRestartProgress { state: RestartState::Init.into(), - my_last_voted_fork_slots: None, - last_voted_fork_slots_aggregate: None, + ..Default::default() }; loop { if let Ok(new_progress) = read_wen_restart_records(&wen_restart_proto_path) { @@ -540,6 +734,14 @@ mod tests { } } if timestamp().saturating_sub(start) > WAIT_FOR_THREAD_TIMEOUT { + assert_eq!( + progress.my_last_voted_fork_slots, + expected_progress.my_last_voted_fork_slots + ); + assert_eq!( + progress.last_voted_fork_slots_aggregate, + expected_progress.last_voted_fork_slots_aggregate + ); panic!( "wait_on_expected_progress_with_timeout failed to get expected progress {:?} expected {:?}", &progress, @@ -586,6 +788,7 @@ mod tests { #[test] fn test_wen_restart_normal_flow() { + solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let wen_restart_repair_slots = Some(Arc::new(RwLock::new(Vec::new()))); let test_state = wen_restart_test_init(&ledger_path); @@ -616,6 +819,9 @@ mod tests { let mut rng = rand::thread_rng(); let mut expected_messages = HashMap::new(); // Skip the first 2 validators, because 0 is myself, we only need 8 more to reach > 80%. 
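+        // With ten equally weighted validators (presumably 100 stake each), my node plus the
+        // eight pushed below is 900 of 1_000 stake active, clearing the 80% exit threshold;
+        // the slots to repair are only seen by the eight others, hence the 800 stake below.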
+ let mut last_voted_fork_slots_from_others = test_state.last_voted_fork_slots.clone(); + last_voted_fork_slots_from_others.reverse(); + last_voted_fork_slots_from_others.append(&mut expected_slots_to_repair.clone()); for keypairs in test_state.validator_voting_keypairs.iter().skip(2) { let node_pubkey = keypairs.node_keypair.pubkey(); let node = LegacyContactInfo::new_rand(&mut rng, Some(node_pubkey)); @@ -624,7 +830,7 @@ mod tests { push_restart_last_voted_fork_slots( test_state.cluster_info.clone(), &node, - &expected_slots_to_repair, + &last_voted_fork_slots_from_others, &last_vote_hash, &keypairs.node_keypair, now, @@ -632,7 +838,7 @@ mod tests { expected_messages.insert( node_pubkey.to_string(), LastVotedForkSlotsRecord { - last_voted_fork_slots: expected_slots_to_repair.clone(), + last_voted_fork_slots: last_voted_fork_slots_from_others.clone(), last_vote_bankhash: last_vote_hash.to_string(), shred_version: SHRED_VERSION as u32, wallclock: now, @@ -654,6 +860,14 @@ mod tests { .as_ref() .unwrap() .wallclock; + let mut expected_slots_stake_map: HashMap = test_state + .last_voted_fork_slots + .iter() + .map(|slot| (*slot, 900)) + .collect(); + expected_slots_stake_map.extend(expected_slots_to_repair.iter().map(|slot| (*slot, 800))); + let expected_heaviest_fork_slot = last_vote_slot + 2; + let expected_heaviest_fork_bankhash = Hash::default(); assert_eq!( progress, WenRestartProgress { @@ -665,10 +879,19 @@ mod tests { wallclock: progress_start_time, }), last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { - received: expected_messages + received: expected_messages, + final_result: Some(LastVotedForkSlotsAggregateFinal { + slots_stake_map: expected_slots_stake_map, + total_active_stake: 900, + }), + }), + my_heaviest_fork: Some(HeaviestFork { + slot: expected_heaviest_fork_slot, + bankhash: expected_heaviest_fork_bankhash.to_string(), + total_active_stake: 900 }), } - ) + ); } fn change_proto_file_readonly(wen_restart_proto_path: &PathBuf, readonly: bool) { @@ -734,8 +957,7 @@ mod tests { assert_eq!(bankhash, last_vote_bankhash); assert_eq!(progress, WenRestartProgress { state: RestartState::Init.into(), - my_last_voted_fork_slots: None, - last_voted_fork_slots_aggregate: None, + ..Default::default() }); } ); @@ -743,8 +965,7 @@ mod tests { &test_state.wen_restart_proto_path, &WenRestartProgress { state: RestartState::LastVotedForkSlots.into(), - my_last_voted_fork_slots: None, - last_voted_fork_slots_aggregate: None, + ..Default::default() }, ); assert_eq!( @@ -762,8 +983,7 @@ mod tests { &test_state.wen_restart_proto_path, &WenRestartProgress { state: RestartState::WaitingForSupermajority.into(), - my_last_voted_fork_slots: None, - last_voted_fork_slots_aggregate: None, + ..Default::default() }, ); assert_eq!( @@ -785,8 +1005,7 @@ mod tests { let test_state = wen_restart_test_init(&ledger_path); let progress = wen_restart_proto::WenRestartProgress { state: RestartState::Init.into(), - my_last_voted_fork_slots: None, - last_voted_fork_slots_aggregate: None, + ..Default::default() }; let original_progress = progress.clone(); assert_eq!( @@ -816,7 +1035,9 @@ mod tests { }), last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { received: HashMap::new(), + final_result: None, }), + ..Default::default() }, ); } @@ -827,8 +1048,7 @@ mod tests { let test_state = wen_restart_test_init(&ledger_path); let progress = wen_restart_proto::WenRestartProgress { state: RestartState::Init.into(), - my_last_voted_fork_slots: None, - 
last_voted_fork_slots_aggregate: None, + ..Default::default() }; assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress).is_ok()); change_proto_file_readonly(&test_state.wen_restart_proto_path, true); @@ -857,13 +1077,15 @@ mod tests { }), last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { received: HashMap::new(), + final_result: None, }), + ..Default::default() }, ); } #[test] - fn test_wen_restart_aggregate_last_voted_fork_failures() { + fn test_wen_restart_aggregate_last_voted_fork_stop_and_restart() { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let test_state = wen_restart_test_init(&ledger_path); @@ -881,8 +1103,10 @@ mod tests { wallclock: start_time, }), last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { - received: HashMap::new() + received: HashMap::new(), + final_result: None, }), + ..Default::default() } ) .is_ok()); @@ -890,6 +1114,9 @@ mod tests { let mut expected_messages = HashMap::new(); let expected_slots_to_repair: Vec = (last_vote_slot + 1..last_vote_slot + 3).collect(); + let mut last_voted_fork_slots_from_others = test_state.last_voted_fork_slots.clone(); + last_voted_fork_slots_from_others.reverse(); + last_voted_fork_slots_from_others.append(&mut expected_slots_to_repair.clone()); // Skip the first 2 validators, because 0 is myself, we need 8 so it hits 80%. assert_eq!(test_state.validator_voting_keypairs.len(), 10); let progress = WenRestartProgress { @@ -900,7 +1127,7 @@ mod tests { shred_version: SHRED_VERSION as u32, wallclock: start_time, }), - last_voted_fork_slots_aggregate: None, + ..Default::default() }; for keypairs in test_state.validator_voting_keypairs.iter().skip(2) { let wen_restart_proto_path_clone = test_state.wen_restart_proto_path.clone(); @@ -934,7 +1161,7 @@ mod tests { push_restart_last_voted_fork_slots( test_state.cluster_info.clone(), &node, - &expected_slots_to_repair, + &last_voted_fork_slots_from_others, &last_vote_hash, &keypairs.node_keypair, now, @@ -942,13 +1169,12 @@ mod tests { expected_messages.insert( node_pubkey.to_string(), LastVotedForkSlotsRecord { - last_voted_fork_slots: expected_slots_to_repair.clone(), + last_voted_fork_slots: last_voted_fork_slots_from_others.clone(), last_vote_bankhash: last_vote_hash.to_string(), shred_version: SHRED_VERSION as u32, wallclock: now, }, ); - // Wait for the newly pushed message to be in written proto file. 
wait_on_expected_progress_with_timeout( test_state.wen_restart_proto_path.clone(), WenRestartProgress { @@ -961,7 +1187,9 @@ mod tests { }), last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { received: expected_messages.clone(), + final_result: None, }), + ..Default::default() }, ); exit.store(true, Ordering::Relaxed); @@ -980,7 +1208,7 @@ mod tests { test_state, last_vote_bankhash, WenRestartProgress { - state: RestartState::Done.into(), + state: RestartState::LastVotedForkSlots.into(), my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { last_voted_fork_slots, last_vote_bankhash: last_vote_bankhash.to_string(), @@ -989,7 +1217,9 @@ mod tests { }), last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { received: expected_messages, + final_result: None, }), + ..Default::default() }, ); } @@ -1001,56 +1231,197 @@ mod tests { let mut wen_restart_proto_path = my_dir.path().to_path_buf(); wen_restart_proto_path.push("wen_restart_status.proto"); let last_vote_bankhash = Hash::new_unique(); - let mut state = WenRestartProgressInternalState::Init { - last_voted_fork_slots: vec![0, 1], - last_vote_bankhash, - }; let my_last_voted_fork_slots = Some(LastVotedForkSlotsRecord { last_voted_fork_slots: vec![0, 1], last_vote_bankhash: last_vote_bankhash.to_string(), shred_version: 0, wallclock: 0, }); - let mut progress = WenRestartProgress { - state: RestartState::Init.into(), - my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), - last_voted_fork_slots_aggregate: None, - }; - for (expected_state, expected_progress) in [ + let last_voted_fork_slots_aggregate = Some(LastVotedForkSlotsAggregateRecord { + received: HashMap::new(), + final_result: Some(LastVotedForkSlotsAggregateFinal { + slots_stake_map: vec![(0, 900), (1, 800)].into_iter().collect(), + total_active_stake: 900, + }), + }); + let expected_slots_stake_map: HashMap = + vec![(0, 900), (1, 800)].into_iter().collect(); + for (entrance_state, exit_state, entrance_progress, exit_progress) in [ ( + WenRestartProgressInternalState::Init { + last_voted_fork_slots: vec![0, 1], + last_vote_bankhash, + }, WenRestartProgressInternalState::LastVotedForkSlots { last_voted_fork_slots: vec![0, 1], + aggregate_final_result: None, + }, + WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + ..Default::default() + }, + WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + ..Default::default() + }, + ), + ( + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots: vec![0, 1], + aggregate_final_result: Some(LastVotedForkSlotsFinalResult { + slots_stake_map: expected_slots_stake_map.clone(), + total_active_stake: 900, + }), + }, + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result: LastVotedForkSlotsFinalResult { + slots_stake_map: expected_slots_stake_map.clone(), + total_active_stake: 900, + }, + my_heaviest_fork: None, }, WenRestartProgress { state: RestartState::LastVotedForkSlots.into(), my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), - last_voted_fork_slots_aggregate: None, + last_voted_fork_slots_aggregate: last_voted_fork_slots_aggregate.clone(), + ..Default::default() + }, + WenRestartProgress { + state: RestartState::HeaviestFork.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + last_voted_fork_slots_aggregate: last_voted_fork_slots_aggregate.clone(), + 
..Default::default() }, ), ( + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result: LastVotedForkSlotsFinalResult { + slots_stake_map: expected_slots_stake_map, + total_active_stake: 900, + }, + my_heaviest_fork: Some(HeaviestFork { + slot: 1, + bankhash: Hash::default().to_string(), + total_active_stake: 900, + }), + }, WenRestartProgressInternalState::Done, + WenRestartProgress { + state: RestartState::HeaviestFork.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + last_voted_fork_slots_aggregate: last_voted_fork_slots_aggregate.clone(), + ..Default::default() + }, WenRestartProgress { state: RestartState::Done.into(), - my_last_voted_fork_slots, - last_voted_fork_slots_aggregate: None, + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + last_voted_fork_slots_aggregate: last_voted_fork_slots_aggregate.clone(), + my_heaviest_fork: Some(HeaviestFork { + slot: 1, + bankhash: Hash::default().to_string(), + total_active_stake: 900, + }), }, ), ] { - state = increment_and_write_wen_restart_records( + let mut progress = entrance_progress; + let state = increment_and_write_wen_restart_records( &wen_restart_proto_path, - state, + entrance_state, &mut progress, ) .unwrap(); - assert_eq!(&state, &expected_state); - assert_eq!(&progress, &expected_progress); + assert_eq!(&state, &exit_state); + assert_eq!(&progress, &exit_progress); } + let mut progress = WenRestartProgress { + state: RestartState::Done.into(), + my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), + last_voted_fork_slots_aggregate: last_voted_fork_slots_aggregate.clone(), + ..Default::default() + }; assert_eq!( - increment_and_write_wen_restart_records(&wen_restart_proto_path, state, &mut progress) - .unwrap_err() - .downcast::() - .unwrap(), + increment_and_write_wen_restart_records( + &wen_restart_proto_path, + WenRestartProgressInternalState::Done, + &mut progress + ) + .unwrap_err() + .downcast::() + .unwrap(), WenRestartError::UnexpectedState(RestartState::Done), ); } + + #[test] + fn test_find_heaviest_fork_failures() { + solana_logger::setup(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let exit = Arc::new(AtomicBool::new(false)); + let test_state = wen_restart_test_init(&ledger_path); + let last_vote_slot = test_state.last_voted_fork_slots[0]; + let slot_with_no_block = last_vote_slot + 5; + // This fails because corresponding block is not found, which is wrong, we should have + // repaired all eligible blocks when we exit LastVotedForkSlots state. + assert_eq!( + find_heaviest_fork( + LastVotedForkSlotsFinalResult { + slots_stake_map: vec![(0, 900), (slot_with_no_block, 800)] + .into_iter() + .collect(), + total_active_stake: 900, + }, + test_state.bank_forks.clone(), + test_state.blockstore.clone(), + exit.clone(), + ) + .unwrap_err() + .downcast::() + .unwrap(), + WenRestartError::BlockNotFound(slot_with_no_block), + ); + // The following fails because we expect to see the first slot in slots_stake_map doesn't chain to local root. 
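+        // Expected: BlockNotLinkedToExpectedParent(last_vote_slot, Some(last_vote_slot - 1), 0),
+        // i.e. the blockstore records last_vote_slot - 1 as the parent, while the only
+        // acceptable parent for the first eligible slot is the local root (slot 0 here).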
+        assert_eq!(
+            find_heaviest_fork(
+                LastVotedForkSlotsFinalResult {
+                    slots_stake_map: vec![(last_vote_slot, 900)].into_iter().collect(),
+                    total_active_stake: 900,
+                },
+                test_state.bank_forks.clone(),
+                test_state.blockstore.clone(),
+                exit.clone(),
+            )
+            .unwrap_err()
+            .downcast::<WenRestartError>()
+            .unwrap(),
+            WenRestartError::BlockNotLinkedToExpectedParent(
+                last_vote_slot,
+                Some(last_vote_slot - 1),
+                0
+            ),
+        );
+        // The following fails because we expect some slot in slots_stake_map not to chain to the
+        // one before it.
+        assert_eq!(
+            find_heaviest_fork(
+                LastVotedForkSlotsFinalResult {
+                    slots_stake_map: vec![(1, 900), (last_vote_slot, 900)].into_iter().collect(),
+                    total_active_stake: 900,
+                },
+                test_state.bank_forks.clone(),
+                test_state.blockstore.clone(),
+                exit.clone(),
+            )
+            .unwrap_err()
+            .downcast::<WenRestartError>()
+            .unwrap(),
+            WenRestartError::BlockNotLinkedToExpectedParent(
+                last_vote_slot,
+                Some(last_vote_slot - 1),
+                1
+            ),
+        );
+    }
 }

From 981881544c0634705d745f793783442d14fa14fe Mon Sep 17 00:00:00 2001
From: Alessandro Decina
Date: Thu, 21 Mar 2024 14:28:23 +1100
Subject: [PATCH 029/153] runtime: do fewer syscalls in remap_append_vec_file
 (#336)

* runtime: do fewer syscalls in remap_append_vec_file

Use renameat2(src, dest, NOREPLACE) as an atomic version of if
statx(dest).is_err() { rename(src, dest) }.

We have high inode contention during storage rebuild and this saves 1 fs
syscall for each appendvec.

* Address review feedback
---
 Cargo.lock                          |  1 +
 programs/sbf/Cargo.lock             |  1 +
 runtime/Cargo.toml                  |  1 +
 runtime/src/serde_snapshot.rs       | 89 ++++++++++++++++++++++++-----
 runtime/src/serde_snapshot/tests.rs | 63 +++++++++++++++++++-
 5 files changed, 137 insertions(+), 18 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 06d28868c2bcff..2fb61646b042b1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6997,6 +6997,7 @@ dependencies = [
  "index_list",
  "itertools",
  "lazy_static",
+ "libc",
  "libsecp256k1",
  "log",
  "lru",
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 3204776825622b..c211c81696541a 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -5695,6 +5695,7 @@ dependencies = [
  "index_list",
  "itertools",
  "lazy_static",
+ "libc",
  "log",
  "lru",
  "lz4",
diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml
index 02553d4215909d..49451aa02eed26 100644
--- a/runtime/Cargo.toml
+++ b/runtime/Cargo.toml
@@ -28,6 +28,7 @@ im = { workspace = true, features = ["rayon", "serde"] }
 index_list = { workspace = true }
 itertools = { workspace = true }
 lazy_static = { workspace = true }
+libc = { workspace = true }
 log = { workspace = true }
 lru = { workspace = true }
 lz4 = { workspace = true }
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index f866f32577f38e..998fa82e2326d1 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -1,3 +1,5 @@
+#[cfg(target_os = "linux")]
+use std::ffi::{CStr, CString};
 use {
     crate::{
         bank::{builtins::BuiltinPrototype, Bank, BankFieldsToDeserialize, BankRc},
@@ -655,30 +657,55 @@ pub(crate) fn reconstruct_single_storage(
     )))
 }
 
-fn remap_append_vec_file(
+// Remap the AppendVec ID to handle any duplicate IDs that may have previously existed
+// due to full snapshots and incremental snapshots generated from different
+// nodes
+pub(crate) fn remap_append_vec_file(
     slot: Slot,
     old_append_vec_id: SerializedAppendVecId,
     append_vec_path: &Path,
     next_append_vec_id: &AtomicAppendVecId,
     num_collisions: &AtomicUsize,
 ) -> io::Result<(AppendVecId, PathBuf)> {
-    // Remap the AppendVec ID to handle any duplicate IDs that may previously existed
-    // due to full snapshots and incremental snapshots generated from different nodes
+    #[cfg(target_os = "linux")]
+    let append_vec_path_cstr = cstring_from_path(append_vec_path)?;
+
+    let mut remapped_append_vec_path = append_vec_path.to_path_buf();
+
+    // Break out of the loop in the following situations:
+    // 1. The new ID is the same as the original ID. This means we do not need to
+    //    rename the file, since the ID is the "correct" one already.
+    // 2. There is not a file already at the new path. This means it is safe to
+    //    rename the file to this new path.
     let (remapped_append_vec_id, remapped_append_vec_path) = loop {
         let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);
+
+        // this can only happen in the first iteration of the loop
+        if old_append_vec_id == remapped_append_vec_id as SerializedAppendVecId {
+            break (remapped_append_vec_id, remapped_append_vec_path);
+        }
+
         let remapped_file_name = AccountsFile::file_name(slot, remapped_append_vec_id);
-        let remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name);
-
-        // Break out of the loop in the following situations:
-        // 1. The new ID is the same as the original ID. This means we do not need to
-        //    rename the file, since the ID is the "correct" one already.
-        // 2. There is not a file already at the new path. This means it is safe to
-        //    rename the file to this new path.
-        // **DEVELOPER NOTE:** Keep this check last so that it can short-circuit if
-        //    possible.
-        if old_append_vec_id == remapped_append_vec_id as SerializedAppendVecId
-            || std::fs::metadata(&remapped_append_vec_path).is_err()
+        remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name);
+
+        #[cfg(target_os = "linux")]
         {
+            let remapped_append_vec_path_cstr = cstring_from_path(&remapped_append_vec_path)?;
+
+            // On linux we use renameat2(NO_REPLACE) instead of IF metadata(path).is_err() THEN
+            // rename() in order to save a statx() syscall.
+            match rename_no_replace(&append_vec_path_cstr, &remapped_append_vec_path_cstr) {
+                // If the file was successfully renamed, break out of the loop
+                Ok(_) => break (remapped_append_vec_id, remapped_append_vec_path),
+                // If there's already a file at the new path, continue so we try
+                // the next ID
+                Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {}
+                Err(e) => return Err(e),
+            }
+        }
+
+        #[cfg(not(target_os = "linux"))]
+        if std::fs::metadata(&remapped_append_vec_path).is_err() {
             break (remapped_append_vec_id, remapped_append_vec_path);
         }
 
@@ -686,7 +713,10 @@ fn remap_append_vec_file(
         // and try again.
         num_collisions.fetch_add(1, Ordering::Relaxed);
     };
-    // Only rename the file if the new ID is actually different from the original.
+
+    // Only rename the file if the new ID is actually different from the original. In the target_os
+    // = linux case, we have already renamed if necessary.
+    #[cfg(not(target_os = "linux"))]
     if old_append_vec_id != remapped_append_vec_id as SerializedAppendVecId {
         std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
     }
@@ -953,3 +983,32 @@ where
         ReconstructedAccountsDbInfo { accounts_data_len },
     ))
 }
+
+// Rename `src` to `dest` only if `dest` doesn't already exist.
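+//
+// This is the atomic version of `if metadata(dest).is_err() { rename(src, dest) }`: the
+// kernel performs the existence check and the rename in a single renameat2(RENAME_NOREPLACE)
+// call, so no other thread can create `dest` between the check and the rename, and the
+// separate statx() syscall is avoided entirely.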
+#[cfg(target_os = "linux")] +fn rename_no_replace(src: &CStr, dest: &CStr) -> io::Result<()> { + let ret = unsafe { + libc::renameat2( + libc::AT_FDCWD, + src.as_ptr() as *const _, + libc::AT_FDCWD, + dest.as_ptr() as *const _, + libc::RENAME_NOREPLACE, + ) + }; + if ret == -1 { + return Err(io::Error::last_os_error()); + } + + Ok(()) +} + +#[cfg(target_os = "linux")] +fn cstring_from_path(path: &Path) -> io::Result { + // It is better to allocate here than use the stack. Jemalloc is going to give us a chunk of a + // preallocated small arena anyway. Instead if we used the stack since PATH_MAX=4096 it would + // result in LLVM inserting a stack probe, see + // https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html. + CString::new(path.as_os_str().as_encoded_bytes()) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e)) +} diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 510069c92662fc..2e5393a3a5bf49 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -3,8 +3,8 @@ mod serde_snapshot_tests { use { crate::{ serde_snapshot::{ - newer, reconstruct_accountsdb_from_fields, SerdeStyle, SerializableAccountsDb, - SnapshotAccountsDbFields, TypeContext, + newer, reconstruct_accountsdb_from_fields, remap_append_vec_file, SerdeStyle, + SerializableAccountsDb, SnapshotAccountsDbFields, TypeContext, }, snapshot_utils::{get_storages_to_serialize, StorageAndNextAppendVecId}, }, @@ -34,12 +34,17 @@ mod serde_snapshot_tests { rent_collector::RentCollector, }, std::{ + fs::File, io::{BufReader, Cursor, Read, Write}, ops::RangeFull, path::{Path, PathBuf}, - sync::{atomic::Ordering, Arc}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }, tempfile::TempDir, + test_case::test_case, }; fn linear_ancestors(end_slot: u64) -> Ancestors { @@ -845,4 +850,56 @@ mod serde_snapshot_tests { ); } } + + // no remap needed + #[test_case(456, 456, 456, 0, |_| {})] + // remap from 456 to 457, no collisions + #[test_case(456, 457, 457, 0, |_| {})] + // attempt to remap from 456 to 457, but there's a collision, so we get 458 + #[test_case(456, 457, 458, 1, |tmp| { + File::create(tmp.join("123.457")).unwrap(); + })] + fn test_remap_append_vec_file( + old_id: usize, + next_id: usize, + expected_remapped_id: usize, + expected_collisions: usize, + become_ungovernable: impl FnOnce(&Path), + ) { + let tmp = tempfile::tempdir().unwrap(); + let old_path = tmp.path().join(format!("123.{old_id}")); + let expected_remapped_path = tmp.path().join(format!("123.{expected_remapped_id}")); + File::create(&old_path).unwrap(); + + become_ungovernable(tmp.path()); + + let next_append_vec_id = AtomicAppendVecId::new(next_id as u32); + let num_collisions = AtomicUsize::new(0); + let (remapped_id, remapped_path) = + remap_append_vec_file(123, old_id, &old_path, &next_append_vec_id, &num_collisions) + .unwrap(); + assert_eq!(remapped_id as usize, expected_remapped_id); + assert_eq!(&remapped_path, &expected_remapped_path); + assert_eq!(num_collisions.load(Ordering::Relaxed), expected_collisions); + } + + #[test] + #[should_panic(expected = "No such file or directory")] + fn test_remap_append_vec_file_error() { + let tmp = tempfile::tempdir().unwrap(); + let original_path = tmp.path().join("123.456"); + + // In remap_append_vec() we want to handle EEXIST (collisions), but we want to return all + // other errors + let next_append_vec_id = AtomicAppendVecId::new(457); + let num_collisions = AtomicUsize::new(0); + 
remap_append_vec_file( + 123, + 456, + &original_path, + &next_append_vec_id, + &num_collisions, + ) + .unwrap(); + } } From 1e08e90498cfd284f192db4370b612bec3de4687 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Wed, 20 Mar 2024 23:33:35 -0500 Subject: [PATCH 030/153] Add functions to collect executed transactions fee in details; (#178) * Add functions to collect executed transactions fee in details; * remove unnecessary derive attributes * change function name from add to accumulate; remove collector_fee_details from PartialEq * add AbiExample * add test * share function to withdraw errored transaction * more tests --- runtime/src/bank.rs | 117 ++++++++++++++++++++++++++++++---- runtime/src/bank/tests.rs | 129 ++++++++++++++++++++++++++++++++++++++ sdk/src/fee.rs | 17 +++++ 3 files changed, 252 insertions(+), 11 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index a4555ed60dbd79..702711231ea139 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -126,7 +126,7 @@ use { self, include_loaded_accounts_data_size_in_fee_calculation, remove_rounding_in_fee_calculation, FeatureSet, }, - fee::FeeStructure, + fee::{FeeDetails, FeeStructure}, fee_calculator::{FeeCalculator, FeeRateGovernor}, genesis_config::{ClusterType, GenesisConfig}, hard_forks::HardForks, @@ -259,6 +259,23 @@ impl AddAssign for SquashTiming { } } +#[derive(AbiExample, Debug, Default, PartialEq)] +pub(crate) struct CollectorFeeDetails { + pub transaction_fee: u64, + pub priority_fee: u64, +} + +impl CollectorFeeDetails { + pub(crate) fn accumulate(&mut self, fee_details: &FeeDetails) { + self.transaction_fee = self + .transaction_fee + .saturating_add(fee_details.transaction_fee()); + self.priority_fee = self + .priority_fee + .saturating_add(fee_details.prioritization_fee()); + } +} + #[derive(Debug)] pub struct BankRc { /// where all the Accounts are stored @@ -554,6 +571,7 @@ impl PartialEq for Bank { epoch_reward_status: _, transaction_processor: _, check_program_modification_slot: _, + collector_fee_details: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. 
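For intuition, a minimal sketch of how `CollectorFeeDetails::accumulate` above is meant to
compose with `FeeDetails::accumulate` (added to sdk/src/fee.rs later in this patch); the
names `fee_details_per_tx` and `collector` are illustrative, not part of the diff:

    // Fold each executed transaction's FeeDetails into a batch-level total; the
    // saturating_add calls inside accumulate() keep the u64 running sums from overflowing.
    let mut batch_total = FeeDetails::default();
    for fee_details in fee_details_per_tx.iter() {
        batch_total.accumulate(fee_details);
    }
    // Then fold the batch total into the bank-wide CollectorFeeDetails once, taking the
    // RwLock a single time per batch rather than once per transaction.
    collector.write().unwrap().accumulate(&batch_total);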
@@ -816,6 +834,9 @@ pub struct Bank { transaction_processor: TransactionBatchProcessor, check_program_modification_slot: bool, + + /// Collected fee details + collector_fee_details: RwLock, } struct VoteWithStakeDelegations { @@ -1003,6 +1024,7 @@ impl Bank { epoch_reward_status: EpochRewardStatus::default(), transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, + collector_fee_details: RwLock::new(CollectorFeeDetails::default()), }; bank.transaction_processor = TransactionBatchProcessor::new( @@ -1322,6 +1344,7 @@ impl Bank { epoch_reward_status: parent.epoch_reward_status.clone(), transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, + collector_fee_details: RwLock::new(CollectorFeeDetails::default()), }; new.transaction_processor = TransactionBatchProcessor::new( @@ -1869,6 +1892,8 @@ impl Bank { epoch_reward_status: fields.epoch_reward_status, transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, + // collector_fee_details is not serialized to snapshot + collector_fee_details: RwLock::new(CollectorFeeDetails::default()), }; bank.transaction_processor = TransactionBatchProcessor::new( @@ -4886,16 +4911,12 @@ impl Bank { lamports_per_signature, ); - // In case of instruction error, even though no accounts - // were stored we still need to charge the payer the - // fee. - // - //...except nonce accounts, which already have their - // post-load, fee deducted, pre-execute account state - // stored - if execution_status.is_err() && !is_nonce { - self.withdraw(tx.message().fee_payer(), fee)?; - } + self.check_execution_status_and_charge_fee( + tx.message(), + execution_status, + is_nonce, + fee, + )?; fees += fee; Ok(()) @@ -4906,6 +4927,80 @@ impl Bank { results } + // Note: this function is not yet used; next PR will call it behind a feature gate + #[allow(dead_code)] + fn filter_program_errors_and_collect_fee_details( + &self, + txs: &[SanitizedTransaction], + execution_results: &[TransactionExecutionResult], + ) -> Vec> { + let mut accumulated_fee_details = FeeDetails::default(); + + let results = txs + .iter() + .zip(execution_results) + .map(|(tx, execution_result)| { + let (execution_status, durable_nonce_fee) = match &execution_result { + TransactionExecutionResult::Executed { details, .. 
} => { + Ok((&details.status, details.durable_nonce_fee.as_ref())) + } + TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), + }?; + let is_nonce = durable_nonce_fee.is_some(); + + let message = tx.message(); + let fee_details = self.fee_structure.calculate_fee_details( + message, + &process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default() + .into(), + self.feature_set + .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + ); + + self.check_execution_status_and_charge_fee( + message, + execution_status, + is_nonce, + fee_details.total_fee( + self.feature_set + .is_active(&remove_rounding_in_fee_calculation::id()), + ), + )?; + + accumulated_fee_details.accumulate(&fee_details); + Ok(()) + }) + .collect(); + + self.collector_fee_details + .write() + .unwrap() + .accumulate(&accumulated_fee_details); + results + } + + fn check_execution_status_and_charge_fee( + &self, + message: &SanitizedMessage, + execution_status: &transaction::Result<()>, + is_nonce: bool, + fee: u64, + ) -> Result<()> { + // In case of instruction error, even though no accounts + // were stored we still need to charge the payer the + // fee. + // + //...except nonce accounts, which already have their + // post-load, fee deducted, pre-execute account state + // stored + if execution_status.is_err() && !is_nonce { + self.withdraw(message.fee_payer(), fee)?; + } + + Ok(()) + } + /// `committed_transactions_count` is the number of transactions out of `sanitized_txs` /// that was executed. Of those, `committed_transactions_count`, /// `committed_with_failure_result_count` is the number of executed transactions that returned diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index f104c8ee2b963d..ea2354ef8e3586 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13792,3 +13792,132 @@ fn test_failed_simulation_compute_units() { let simulation = bank.simulate_transaction(&sanitized, false); assert_eq!(expected_consumed_units, simulation.units_consumed); } + +#[test] +fn test_filter_program_errors_and_collect_fee_details() { + // TX | EXECUTION RESULT | is nonce | COLLECT | ADDITIONAL | COLLECT + // | | | (TX_FEE, PRIO_FEE) | WITHDRAW FROM PAYER | RESULT + // ------------------------------------------------------------------------------------------------------ + // tx1 | not executed | n/a | (0 , 0) | 0 | Original Err + // tx2 | executed and no error | n/a | (5_000, 1_000) | 0 | Ok + // tx3 | executed has error | true | (5_000, 1_000) | 0 | Ok + // tx4 | executed has error | false | (5_000, 1_000) | 6_000 | Ok + // tx5 | executed error, + // payer insufficient fund | false | (0 , 0) | 0 | InsufficientFundsForFee + // + let initial_payer_balance = 7_000; + let additional_payer_withdraw = 6_000; + let expected_collected_fee_details = CollectorFeeDetails { + transaction_fee: 15_000, + priority_fee: 3_000, + }; + let expected_collect_results = vec![ + Err(TransactionError::AccountNotFound), + Ok(()), + Ok(()), + Ok(()), + Err(TransactionError::InsufficientFundsForFee), + ]; + + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config_with_leader(initial_payer_balance, &Pubkey::new_unique(), 3); + genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); + let bank = Bank::new_for_tests(&genesis_config); + + let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[ + system_instruction::transfer(&mint_keypair.pubkey(), &Pubkey::new_unique(), 2), + ComputeBudgetInstruction::set_compute_unit_limit(1_000), + ComputeBudgetInstruction::set_compute_unit_price(1_000_000), + ], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + genesis_config.hash(), + )); + let txs = vec![tx.clone(), tx.clone(), tx.clone(), tx.clone(), tx]; + + let results = vec![ + TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound), + new_execution_result(Ok(()), None), + new_execution_result( + Err(TransactionError::InstructionError( + 1, + SystemError::ResultWithNegativeLamports.into(), + )), + Some(&NonceFull::default()), + ), + new_execution_result( + Err(TransactionError::InstructionError( + 1, + SystemError::ResultWithNegativeLamports.into(), + )), + None, + ), + new_execution_result(Err(TransactionError::AccountNotFound), None), + ]; + + let results = bank.filter_program_errors_and_collect_fee_details(&txs, &results); + + assert_eq!( + expected_collected_fee_details, + *bank.collector_fee_details.read().unwrap() + ); + assert_eq!( + initial_payer_balance - additional_payer_withdraw, + bank.get_balance(&mint_keypair.pubkey()) + ); + assert_eq!(expected_collect_results, results); +} + +#[test] +fn test_check_execution_status_and_charge_fee() { + let fee = 5000; + let initial_balance = fee - 1000; + let tx_error = + TransactionError::InstructionError(0, InstructionError::MissingRequiredSignature); + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config_with_leader(initial_balance, &Pubkey::new_unique(), 3); + genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); + let bank = Bank::new_for_tests(&genesis_config); + let message = new_sanitized_message(Message::new( + &[system_instruction::transfer( + &mint_keypair.pubkey(), + &Pubkey::new_unique(), + 1, + )], + Some(&mint_keypair.pubkey()), + )); + + [Ok(()), Err(tx_error)] + .iter() + .flat_map(|result| [true, false].iter().map(move |is_nonce| (result, is_nonce))) + .for_each(|(result, is_nonce)| { + if result.is_err() && !is_nonce { + assert_eq!( + Err(TransactionError::InsufficientFundsForFee), + bank.check_execution_status_and_charge_fee(&message, result, *is_nonce, fee) + ); + assert_eq!(initial_balance, bank.get_balance(&mint_keypair.pubkey())); + + let small_fee = 1; + assert!(bank + .check_execution_status_and_charge_fee(&message, result, *is_nonce, small_fee) + .is_ok()); + assert_eq!( + initial_balance - small_fee, + bank.get_balance(&mint_keypair.pubkey()) + ); + } else { + assert!(bank + .check_execution_status_and_charge_fee(&message, result, *is_nonce, fee) + .is_ok()); + assert_eq!(initial_balance, bank.get_balance(&mint_keypair.pubkey())); + } + }); +} diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index 27ba852ca201dd..a493bb383ed602 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -47,6 +47,23 @@ impl FeeDetails { (total_fee as f64).round() as u64 } } + + pub fn accumulate(&mut self, fee_details: &FeeDetails) { + self.transaction_fee = self + .transaction_fee + .saturating_add(fee_details.transaction_fee); + self.prioritization_fee = self + .prioritization_fee + .saturating_add(fee_details.prioritization_fee) + } + + pub fn transaction_fee(&self) -> u64 { + self.transaction_fee + } + + pub fn prioritization_fee(&self) -> u64 { + self.prioritization_fee + } } pub const ACCOUNT_DATA_COST_PAGE_SIZE: u64 = 32_u64.saturating_mul(1024); From b2f4fb306edfe6f253af978fe06dda68b6e86dc2 Mon Sep 17 00:00:00 2001 From: Jon C Date: Thu, 21 Mar 2024 14:35:09 +0100 Subject: [PATCH 031/153] client: Start resending sooner during `send_and_confirm_transactions_in_parallel` (#348) client: Confirm sooner during send_and_confirm_in_parallel --- ...nd_and_confirm_transactions_in_parallel.rs | 108 +++++++++--------- 1 file changed, 57 insertions(+), 51 deletions(-) diff --git a/client/src/send_and_confirm_transactions_in_parallel.rs b/client/src/send_and_confirm_transactions_in_parallel.rs index f97761cba14fde..43196d05a8a519 100644 --- a/client/src/send_and_confirm_transactions_in_parallel.rs +++ b/client/src/send_and_confirm_transactions_in_parallel.rs @@ -5,7 +5,7 @@ use { }, bincode::serialize, dashmap::DashMap, - futures_util::future::{join_all, TryFutureExt}, + futures_util::future::{join_all, FutureExt}, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc_client::spinner::{self, SendTransactionProgress}, solana_rpc_client_api::{ @@ -188,9 +188,7 @@ async fn send_transaction_with_rpc_fallback( serialized_transaction: Vec, context: &SendingContext, index: usize, - counter: usize, ) -> Result<()> { - tokio::time::sleep(SEND_INTERVAL.saturating_mul(counter as u32)).await; let send_over_rpc = if let Some(tpu_client) = tpu_client { !tpu_client .send_wire_transaction(serialized_transaction.clone()) @@ -261,44 +259,42 @@ async fn sign_all_messages_and_send( .expect("Transaction should be signable"); let serialized_transaction = serialize(&transaction).expect("Transaction should serialize"); let signature = 
transaction.signatures[0]; - futures.push( + futures.push(async move { + tokio::time::sleep(SEND_INTERVAL.saturating_mul(counter as u32)).await; + // send to confirm the transaction + context.unconfirmed_transaction_map.insert( + signature, + TransactionData { + index: *index, + serialized_transaction: serialized_transaction.clone(), + last_valid_block_height: blockhashdata.last_valid_block_height, + message: message.clone(), + }, + ); + if let Some(progress_bar) = progress_bar { + let progress = progress_from_context_and_block_height( + context, + blockhashdata.last_valid_block_height, + ); + progress.set_message_for_confirmed_transactions( + progress_bar, + &format!( + "Sending {}/{} transactions", + counter + 1, + current_transaction_count, + ), + ); + } send_transaction_with_rpc_fallback( rpc_client, tpu_client, transaction, - serialized_transaction.clone(), + serialized_transaction, context, *index, - counter, ) - .and_then(move |_| async move { - // send to confirm the transaction - context.unconfirmed_transaction_map.insert( - signature, - TransactionData { - index: *index, - serialized_transaction, - last_valid_block_height: blockhashdata.last_valid_block_height, - message: message.clone(), - }, - ); - if let Some(progress_bar) = progress_bar { - let progress = progress_from_context_and_block_height( - context, - blockhashdata.last_valid_block_height, - ); - progress.set_message_for_confirmed_transactions( - progress_bar, - &format!( - "Sending {}/{} transactions", - counter + 1, - current_transaction_count, - ), - ); - } - Ok(()) - }), - ); + .await + }); } // collect to convert Vec> to Result> join_all(futures).await.into_iter().collect::>()?; @@ -477,23 +473,33 @@ pub async fn send_and_confirm_transactions_in_parallel( // clear the map so that we can start resending unconfirmed_transasction_map.clear(); - sign_all_messages_and_send( - &progress_bar, - &rpc_client, - &tpu_client, - messages_with_index, - signers, - &context, - ) - .await?; - - // wait until all the transactions are confirmed or expired - confirm_transactions_till_block_height_and_resend_unexpired_transaction_over_tpu( - &progress_bar, - &tpu_client, - &context, - ) - .await; + let futures = [ + sign_all_messages_and_send( + &progress_bar, + &rpc_client, + &tpu_client, + messages_with_index, + signers, + &context, + ) + .boxed_local(), + async { + // Give the signing and sending a head start before trying to + // confirm and resend + tokio::time::sleep(TPU_RESEND_REFRESH_RATE).await; + confirm_transactions_till_block_height_and_resend_unexpired_transaction_over_tpu( + &progress_bar, + &tpu_client, + &context, + ) + .await; + // Infallible, but required to have the same return type as + // `sign_all_messages_and_send` + Ok(()) + } + .boxed_local(), + ]; + join_all(futures).await.into_iter().collect::>()?; if unconfirmed_transasction_map.is_empty() { break; From 792d7454d9cf8f35a29130d5eb047905b668503c Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Thu, 21 Mar 2024 09:25:54 -0700 Subject: [PATCH 032/153] switch to `solana-tpu-client` from `solana_client::tpu_client` for `bench-tps`, `dos/`, `LocalCluster`, `gossip/` (#310) * switch over to solana-tpu-client for bench-tps, dos, gossip, local-cluster * put TpuClientWrapper back in solana_client --- Cargo.lock | 1 + bench-tps/Cargo.toml | 1 + bench-tps/src/bench_tps_client/tpu_client.rs | 2 +- bench-tps/src/main.rs | 6 ++---- bench-tps/tests/bench_tps.rs | 17 ++++++++++++++--- client/src/tpu_client.rs | 6 ++---- dos/src/main.rs | 8 ++++++-- 
gossip/src/gossip_service.rs | 5 ++--- local-cluster/src/cluster.rs | 6 +++++- local-cluster/src/local_cluster.rs | 10 ++++------ 10 files changed, 38 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2fb61646b042b1..6ef0fffe4a4477 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5581,6 +5581,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-net-utils", + "solana-quic-client", "solana-rpc", "solana-rpc-client", "solana-rpc-client-api", diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 2c7060175f0a8c..3693f70e4ed9b8 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -28,6 +28,7 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-net-utils = { workspace = true } +solana-quic-client = { workspace = true } solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } diff --git a/bench-tps/src/bench_tps_client/tpu_client.rs b/bench-tps/src/bench_tps_client/tpu_client.rs index 6c053271ad3eec..fbdae6ce02e32b 100644 --- a/bench-tps/src/bench_tps_client/tpu_client.rs +++ b/bench-tps/src/bench_tps_client/tpu_client.rs @@ -1,6 +1,5 @@ use { crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, - solana_client::tpu_client::TpuClient, solana_connection_cache::connection_cache::{ ConnectionManager, ConnectionPool, NewConnectionConfig, }, @@ -10,6 +9,7 @@ use { message::Message, pubkey::Pubkey, signature::Signature, slot_history::Slot, transaction::Transaction, }, + solana_tpu_client::tpu_client::TpuClient, solana_transaction_status::UiConfirmedBlock, }; diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index d3def39ed4d383..a5da5a515703c5 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -8,10 +8,7 @@ use { keypairs::get_keypairs, send_batch::{generate_durable_nonce_accounts, generate_keypairs}, }, - solana_client::{ - connection_cache::ConnectionCache, - tpu_client::{TpuClient, TpuClientConfig}, - }, + solana_client::connection_cache::ConnectionCache, solana_genesis::Base64Account, solana_rpc_client::rpc_client::RpcClient, solana_sdk::{ @@ -22,6 +19,7 @@ use { system_program, }, solana_streamer::streamer::StakedNodes, + solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig}, std::{ collections::HashMap, fs::File, diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index bfff1f7e1250c4..857ae4cd2f7e20 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -7,7 +7,7 @@ use { cli::{Config, InstructionPaddingConfig}, send_batch::generate_durable_nonce_accounts, }, - solana_client::tpu_client::{TpuClient, TpuClientConfig}, + solana_connection_cache::connection_cache::NewConnectionConfig, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_local_cluster::{ @@ -15,6 +15,7 @@ use { local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, + solana_quic_client::{QuicConfig, QuicConnectionManager}, solana_rpc::rpc::JsonRpcConfig, solana_rpc_client::rpc_client::RpcClient, solana_sdk::{ @@ -26,6 +27,7 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidatorGenesis, + solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig}, std::{sync::Arc, time::Duration}, }; @@ -124,8 +126,17 @@ fn test_bench_tps_test_validator(config: Config) { CommitmentConfig::processed(), )); let websocket_url = test_validator.rpc_pubsub_url(); - let 
client = - Arc::new(TpuClient::new(rpc_client, &websocket_url, TpuClientConfig::default()).unwrap()); + + let client = Arc::new( + TpuClient::new( + "tpu_client_quic_bench_tps", + rpc_client, + &websocket_url, + TpuClientConfig::default(), + QuicConnectionManager::new_with_connection_config(QuicConfig::new().unwrap()), + ) + .expect("Should build Quic Tpu Client."), + ); let lamports_per_account = 1000; diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index 555d3aad88bcb1..893761637ce64d 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -21,11 +21,9 @@ pub use { solana_tpu_client::tpu_client::{TpuClientConfig, DEFAULT_FANOUT_SLOTS, MAX_FANOUT_SLOTS}, }; -pub type QuicTpuClient = TpuClient; - pub enum TpuClientWrapper { - Quic(TpuClient), - Udp(TpuClient), + Quic(BackendTpuClient), + Udp(BackendTpuClient), } /// Client which sends transactions directly to the current leader's TPU port over UDP. diff --git a/dos/src/main.rs b/dos/src/main.rs index 15874a86973f9c..5b46b7b6496719 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -795,6 +795,7 @@ fn main() { DEFAULT_TPU_CONNECTION_POOL_SIZE, ), }; + let client = get_client(&validators, Arc::new(connection_cache)); (gossip_nodes, Some(client)) } else { @@ -818,7 +819,6 @@ fn main() { pub mod test { use { super::*, - solana_client::tpu_client::QuicTpuClient, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_gossip::contact_info::LegacyContactInfo, @@ -827,8 +827,10 @@ pub mod test { local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, + solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc::rpc::JsonRpcConfig, solana_sdk::timing::timestamp, + solana_tpu_client::tpu_client::TpuClient, }; const TEST_SEND_BATCH_SIZE: usize = 1; @@ -836,7 +838,9 @@ pub mod test { // thin wrapper for the run_dos function // to avoid specifying everywhere generic parameters fn run_dos_no_client(nodes: &[ContactInfo], iterations: usize, params: DosClientParameters) { - run_dos::(nodes, iterations, None, params); + run_dos::>( + nodes, iterations, None, params, + ); } #[test] diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 0bc258306edb32..d1c726051e6558 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -5,9 +5,7 @@ use { crossbeam_channel::{unbounded, Sender}, rand::{thread_rng, Rng}, solana_client::{ - connection_cache::ConnectionCache, - rpc_client::RpcClient, - tpu_client::{TpuClient, TpuClientConfig, TpuClientWrapper}, + connection_cache::ConnectionCache, rpc_client::RpcClient, tpu_client::TpuClientWrapper, }, solana_perf::recycler::Recycler, solana_runtime::bank_forks::BankForks, @@ -19,6 +17,7 @@ use { socket::SocketAddrSpace, streamer::{self, StreamerReceiveStats}, }, + solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig}, std::{ collections::HashSet, net::{SocketAddr, TcpListener, UdpSocket}, diff --git a/local-cluster/src/cluster.rs b/local-cluster/src/cluster.rs index 425f65c48e14c5..5fab8df55205df 100644 --- a/local-cluster/src/cluster.rs +++ b/local-cluster/src/cluster.rs @@ -1,13 +1,17 @@ use { - solana_client::{thin_client::ThinClient, tpu_client::QuicTpuClient}, + solana_client::thin_client::ThinClient, solana_core::validator::{Validator, ValidatorConfig}, solana_gossip::{cluster_info::Node, contact_info::ContactInfo}, solana_ledger::shred::Shred, + solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, 
solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Keypair}, solana_streamer::socket::SocketAddrSpace, + solana_tpu_client::tpu_client::TpuClient, std::{io::Result, path::PathBuf, sync::Arc}, }; +pub type QuicTpuClient = TpuClient; + pub struct ValidatorInfo { pub keypair: Arc, pub voting_keypair: Arc, diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 400f4f73f78c26..d06c001bcc7ed1 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -1,6 +1,6 @@ use { crate::{ - cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo}, + cluster::{Cluster, ClusterValidatorInfo, QuicTpuClient, ValidatorInfo}, cluster_tests, validator_configs::*, }, @@ -8,10 +8,7 @@ use { log::*, solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, solana_client::{ - connection_cache::ConnectionCache, - rpc_client::RpcClient, - thin_client::ThinClient, - tpu_client::{QuicTpuClient, TpuClient, TpuClientConfig}, + connection_cache::ConnectionCache, rpc_client::RpcClient, thin_client::ThinClient, }, solana_core::{ consensus::tower_storage::FileTowerStorage, @@ -52,7 +49,8 @@ use { solana_stake_program::stake_state, solana_streamer::socket::SocketAddrSpace, solana_tpu_client::tpu_client::{ - DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC, + TpuClient, TpuClientConfig, DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, + DEFAULT_TPU_USE_QUIC, }, solana_vote_program::{ vote_instruction, From dff99d07402663c1fec3cc45bb493ad022ae1ca7 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 21 Mar 2024 11:03:55 -0600 Subject: [PATCH 033/153] Cli stake-split: adjust transfer amount if recipient has lamports (#266) * Remove incorrect check * Move to closure * Use match statement instead * Adjust rent_exempt_reserve by existing balance * Only transfer lamports if rent_exempt_reserve needs are greater than 0 * Rename variable for clarity * Add minimum-delegation check * Bump test split amount to meet arbitrary mock minimum-delegation amount --- cli/src/cli.rs | 2 +- cli/src/stake.rs | 79 ++++++++++++++++++++++++++++-------------------- 2 files changed, 47 insertions(+), 34 deletions(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 99a0de0a719c69..31e9612fc10f3e 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -2254,7 +2254,7 @@ mod tests { memo: None, split_stake_account: 1, seed: None, - lamports: 30, + lamports: 200_000_000, fee_payer: 0, compute_unit_price: None, rent_exempt_reserve: None, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index ac08fd3425dc65..5d9cbd540e2c29 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -38,13 +38,14 @@ use { }, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::from_account, + account::{from_account, Account}, account_utils::StateMut, clock::{Clock, UnixTimestamp, SECONDS_PER_DAY}, commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, feature_set, message::Message, + native_token::Sol, pubkey::Pubkey, stake::{ self, @@ -1980,40 +1981,49 @@ pub fn process_split_stake( }; let rent_exempt_reserve = if !sign_only { - if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) { - if stake_account.owner == stake::program::id() { - return Err(CliError::BadParameter(format!( + let stake_minimum_delegation = rpc_client.get_stake_minimum_delegation()?; + if lamports < stake_minimum_delegation { + let lamports = Sol(lamports); + let stake_minimum_delegation = 
Sol(stake_minimum_delegation); + return Err(CliError::BadParameter(format!( + "need at least {stake_minimum_delegation} for minimum stake delegation, \ + provided: {lamports}" + )) + .into()); + } + + let check_stake_account = |account: Account| -> Result { + match account.owner { + owner if owner == stake::program::id() => Err(CliError::BadParameter(format!( "Stake account {split_stake_account_address} already exists" - )) - .into()); - } else if stake_account.owner == system_program::id() { - if !stake_account.data.is_empty() { - return Err(CliError::BadParameter(format!( - "Account {split_stake_account_address} has data and cannot be used to split stake" - )) - .into()); + ))), + owner if owner == system_program::id() => { + if !account.data.is_empty() { + Err(CliError::BadParameter(format!( + "Account {split_stake_account_address} has data and cannot be used to split stake" + ))) + } else { + // if `stake_account`'s owner is the system_program and its data is + // empty, `stake_account` is allowed to receive the stake split + Ok(account.lamports) + } } - // if `stake_account`'s owner is the system_program and its data is - // empty, `stake_account` is allowed to receive the stake split - } else { - return Err(CliError::BadParameter(format!( + _ => Err(CliError::BadParameter(format!( "Account {split_stake_account_address} already exists and cannot be used to split stake" - )) - .into()); + ))) } - } + }; + let current_balance = + if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) { + check_stake_account(stake_account)? + } else { + 0 + }; - let minimum_balance = + let rent_exempt_reserve = rpc_client.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of())?; - if lamports < minimum_balance { - return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for stake account to be rent exempt, \ - provided lamports: {lamports}" - )) - .into()); - } - minimum_balance + rent_exempt_reserve.saturating_sub(current_balance) } else { rent_exempt_reserve .cloned() @@ -2022,11 +2032,14 @@ pub fn process_split_stake( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; - let mut ixs = vec![system_instruction::transfer( - &fee_payer.pubkey(), - &split_stake_account_address, - rent_exempt_reserve, - )]; + let mut ixs = vec![]; + if rent_exempt_reserve > 0 { + ixs.push(system_instruction::transfer( + &fee_payer.pubkey(), + &split_stake_account_address, + rent_exempt_reserve, + )); + } if let Some(seed) = split_stake_account_seed { ixs.append( &mut stake_instruction::split_with_seed( From 4d838d5af9d811cbac07d135858310885330358d Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:12:32 -0300 Subject: [PATCH 034/153] Add `hello-solana` example source files to SVM folder (#361) --- Cargo.toml | 2 +- svm/src/transaction_processor.rs | 37 ++++++------------ .../example-programs/hello-solana/Cargo.toml | 12 ++++++ .../hello-solana}/hello_solana_program.so | Bin .../example-programs/hello-solana/src/lib.rs | 15 +++++++ svm/tests/integration_test.rs | 4 +- 6 files changed, 43 insertions(+), 27 deletions(-) create mode 100644 svm/tests/example-programs/hello-solana/Cargo.toml rename svm/tests/{ => example-programs/hello-solana}/hello_solana_program.so (100%) create mode 100644 svm/tests/example-programs/hello-solana/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index c4802c1ba0fd5e..07881e624c83f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,7 
+121,7 @@ members = [ "zk-token-sdk", ] -exclude = ["programs/sbf"] +exclude = ["programs/sbf", "svm/tests/example-programs"] # This prevents a Travis CI error when building for Windows. resolver = "2" diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index fbf0a8266f415c..db5a8ac92c2ded 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -1195,19 +1195,26 @@ mod tests { } } - #[test] - fn test_load_program_from_bytes() { + fn load_test_program() -> Vec { let mut dir = env::current_dir().unwrap(); dir.push("tests"); + dir.push("example-programs"); + dir.push("hello-solana"); dir.push("hello_solana_program.so"); let mut file = File::open(dir.clone()).expect("file not found"); let metadata = fs::metadata(dir).expect("Unable to read metadata"); let mut buffer = vec![0; metadata.len() as usize]; file.read_exact(&mut buffer).expect("Buffer overflow"); + buffer + } + + #[test] + fn test_load_program_from_bytes() { + let buffer = load_test_program(); let mut metrics = LoadProgramMetrics::default(); let loader = bpf_loader_upgradeable::id(); - let size = metadata.len() as usize; + let size = buffer.len(); let slot = 2; let environment = ProgramRuntimeEnvironment::new(BuiltinProgram::new_mock()); @@ -1303,13 +1310,7 @@ mod tests { ); assert_eq!(result, Arc::new(loaded_program)); - let mut dir = env::current_dir().unwrap(); - dir.push("tests"); - dir.push("hello_solana_program.so"); - let mut file = File::open(dir.clone()).expect("file not found"); - let metadata = fs::metadata(dir).expect("Unable to read metadata"); - let mut buffer = vec![0; metadata.len() as usize]; - file.read_exact(&mut buffer).expect("buffer overflow"); + let buffer = load_test_program(); account_data.set_data(buffer); mock_bank @@ -1376,13 +1377,7 @@ mod tests { ); assert_eq!(result, Arc::new(loaded_program)); - let mut dir = env::current_dir().unwrap(); - dir.push("tests"); - dir.push("hello_solana_program.so"); - let mut file = File::open(dir.clone()).expect("file not found"); - let metadata = fs::metadata(dir).expect("Unable to read metadata"); - let mut buffer = vec![0; metadata.len() as usize]; - file.read_exact(&mut buffer).expect("buffer overflow"); + let mut buffer = load_test_program(); let mut header = bincode::serialize(&state).unwrap(); let mut complement = vec![ 0; @@ -1461,13 +1456,7 @@ mod tests { vec![0; std::cmp::max(0, LoaderV4State::program_data_offset() - header.len())]; header.append(&mut complement); - let mut dir = env::current_dir().unwrap(); - dir.push("tests"); - dir.push("hello_solana_program.so"); - let mut file = File::open(dir.clone()).expect("file not found"); - let metadata = fs::metadata(dir).expect("Unable to read metadata"); - let mut buffer = vec![0; metadata.len() as usize]; - file.read_exact(&mut buffer).expect("buffer overflow"); + let mut buffer = load_test_program(); header.append(&mut buffer); account_data.set_data(header); diff --git a/svm/tests/example-programs/hello-solana/Cargo.toml b/svm/tests/example-programs/hello-solana/Cargo.toml new file mode 100644 index 00000000000000..09995d8c6d8d2c --- /dev/null +++ b/svm/tests/example-programs/hello-solana/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "hello-solana-program" +version = "2.0.0" +edition = "2021" + +[dependencies] +solana-program = { path = "../../../../sdk/program", version = "=2.0.0" } + +[lib] +crate-type = ["cdylib", "rlib"] + +[workspace] \ No newline at end of file diff --git a/svm/tests/hello_solana_program.so 
b/svm/tests/example-programs/hello-solana/hello_solana_program.so similarity index 100% rename from svm/tests/hello_solana_program.so rename to svm/tests/example-programs/hello-solana/hello_solana_program.so diff --git a/svm/tests/example-programs/hello-solana/src/lib.rs b/svm/tests/example-programs/hello-solana/src/lib.rs new file mode 100644 index 00000000000000..249179a6617b5d --- /dev/null +++ b/svm/tests/example-programs/hello-solana/src/lib.rs @@ -0,0 +1,15 @@ +use solana_program::{ + account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, pubkey::Pubkey, +}; + +entrypoint!(process_instruction); + +fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + msg!("Hello, Solana!"); + + Ok(()) +} diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 45409a3b146848..1b8d1c08ccdf9e 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -188,8 +188,8 @@ fn prepare_transactions( // Loading the program file let mut dir = env::current_dir().unwrap(); dir.push("tests"); - // File compiled from - // https://github.com/solana-developers/program-examples/blob/feb82f254a4633ce2107d06060f2d0558dc987f5/basics/hello-solana/native/program/src/lib.rs + dir.push("example-programs"); + dir.push("hello-solana"); dir.push("hello_solana_program.so"); let mut file = File::open(dir.clone()).expect("file not found"); let metadata = fs::metadata(dir).expect("Unable to read metadata"); From 8b66a670b7998154c3e796f081326004a541219e Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 21 Mar 2024 15:09:26 -0400 Subject: [PATCH 035/153] Removes AccountsFile::is_recyclable() (#359) --- accounts-db/src/accounts_file.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 97c761616e7ce3..885380cb7814d6 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -103,12 +103,6 @@ impl AccountsFile { } } - pub fn is_recyclable(&self) -> bool { - match self { - Self::AppendVec(_) => true, - } - } - pub fn file_name(slot: Slot, id: impl std::fmt::Display) -> String { format!("{slot}.{id}") } From 5f1693224ebd48c5c4b40b78486650f50e2cd4a1 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 21 Mar 2024 13:47:16 -0700 Subject: [PATCH 036/153] Discard packets statically known to fail (#370) * Discard packets statically known to fail * add test --- .../immutable_deserialized_packet.rs | 52 ++++++++++++++++++- core/src/banking_stage/packet_deserializer.rs | 13 ++++- core/src/banking_stage/packet_receiver.rs | 1 + .../scheduler_controller.rs | 2 +- 4 files changed, 64 insertions(+), 4 deletions(-) diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index 26ede7045d3480..8e31f9cd462473 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -1,4 +1,5 @@ use { + solana_cost_model::block_cost_limits::BUILT_IN_INSTRUCTION_COSTS, solana_perf::packet::Packet, solana_runtime::compute_budget_details::{ComputeBudgetDetails, GetComputeBudgetDetails}, solana_sdk::{ @@ -6,6 +7,7 @@ use { hash::Hash, message::Message, sanitize::SanitizeError, + saturating_add_assign, short_vec::decode_shortu16_len, signature::Signature, transaction::{ @@ -98,6 +100,22 @@ impl ImmutableDeserializedPacket { self.compute_budget_details.clone() } + /// Returns true if 
the transaction's compute unit limit is at least as + /// large as the sum of the static builtins' costs. + /// This is a simple sanity check so the leader can discard transactions + /// which are statically known to exceed the compute budget, and will + /// result in no useful state-change. + pub fn compute_unit_limit_above_static_builtins(&self) -> bool { + let mut static_builtin_cost_sum: u64 = 0; + for (program_id, _) in self.transaction.get_message().program_instructions_iter() { + if let Some(ix_cost) = BUILT_IN_INSTRUCTION_COSTS.get(program_id) { + saturating_add_assign!(static_builtin_cost_sum, *ix_cost); + } + } + + self.compute_unit_limit() >= static_builtin_cost_sum + } + // This function deserializes packets into transactions, computes the blake3 hash of transaction // messages, and verifies secp256k1 instructions. pub fn build_sanitized_transaction( @@ -150,7 +168,10 @@ fn packet_message(packet: &Packet) -> Result<&[u8], DeserializedPacketError> { mod tests { use { super::*, - solana_sdk::{signature::Keypair, system_transaction}, + solana_sdk::{ + compute_budget, instruction::Instruction, pubkey::Pubkey, signature::Keypair, + signer::Signer, system_instruction, system_transaction, transaction::Transaction, + }, }; #[test] @@ -166,4 +187,33 @@ mod tests { assert!(deserialized_packet.is_ok()); } + + #[test] + fn compute_unit_limit_above_static_builtins() { + // Cases: + // 1. compute_unit_limit under static builtins + // 2. compute_unit_limit equal to static builtins + // 3. compute_unit_limit above static builtins + for (cu_limit, expectation) in [(250, false), (300, true), (350, true)] { + let keypair = Keypair::new(); + let bpf_program_id = Pubkey::new_unique(); + let ixs = vec![ + system_instruction::transfer(&keypair.pubkey(), &Pubkey::new_unique(), 1), + compute_budget::ComputeBudgetInstruction::set_compute_unit_limit(cu_limit), + Instruction::new_with_bytes(bpf_program_id, &[], vec![]), // non-builtin - not counted in filter + ]; + let tx = Transaction::new_signed_with_payer( + &ixs, + Some(&keypair.pubkey()), + &[&keypair], + Hash::new_unique(), + ); + let packet = Packet::from_data(None, tx).unwrap(); + let deserialized_packet = ImmutableDeserializedPacket::new(packet).unwrap(); + assert_eq!( + deserialized_packet.compute_unit_limit_above_static_builtins(), + expectation + ); + } + } } diff --git a/core/src/banking_stage/packet_deserializer.rs b/core/src/banking_stage/packet_deserializer.rs index a405b626568482..1d1079eaf97fcd 100644 --- a/core/src/banking_stage/packet_deserializer.rs +++ b/core/src/banking_stage/packet_deserializer.rs @@ -50,6 +50,7 @@ impl PacketDeserializer { &self, recv_timeout: Duration, capacity: usize, + packet_filter: impl Fn(&ImmutableDeserializedPacket) -> bool, ) -> Result { let (packet_count, packet_batches) = self.receive_until(recv_timeout, capacity)?; @@ -62,6 +63,7 @@ impl PacketDeserializer { packet_count, &packet_batches, round_compute_unit_price_enabled, + &packet_filter, )) } @@ -71,6 +73,7 @@ impl PacketDeserializer { packet_count: usize, banking_batches: &[BankingPacketBatch], round_compute_unit_price_enabled: bool, + packet_filter: &impl Fn(&ImmutableDeserializedPacket) -> bool, ) -> ReceivePacketResults { let mut passed_sigverify_count: usize = 0; let mut failed_sigverify_count: usize = 0; @@ -88,6 +91,7 @@ impl PacketDeserializer { packet_batch, &packet_indexes, round_compute_unit_price_enabled, + packet_filter, )); } @@ -158,13 +162,16 @@ impl PacketDeserializer { packet_batch: &'a PacketBatch, packet_indexes: &'a [usize], 
round_compute_unit_price_enabled: bool, + packet_filter: &'a (impl Fn(&ImmutableDeserializedPacket) -> bool + 'a), ) -> impl Iterator + 'a { packet_indexes.iter().filter_map(move |packet_index| { let mut packet_clone = packet_batch[*packet_index].clone(); packet_clone .meta_mut() .set_round_compute_unit_price(round_compute_unit_price_enabled); - ImmutableDeserializedPacket::new(packet_clone).ok() + ImmutableDeserializedPacket::new(packet_clone) + .ok() + .filter(packet_filter) }) } } @@ -186,7 +193,7 @@ mod tests { #[test] fn test_deserialize_and_collect_packets_empty() { - let results = PacketDeserializer::deserialize_and_collect_packets(0, &[], false); + let results = PacketDeserializer::deserialize_and_collect_packets(0, &[], false, &|_| true); assert_eq!(results.deserialized_packets.len(), 0); assert!(results.new_tracer_stats_option.is_none()); assert_eq!(results.passed_sigverify_count, 0); @@ -204,6 +211,7 @@ mod tests { packet_count, &[BankingPacketBatch::new((packet_batches, None))], false, + &|_| true, ); assert_eq!(results.deserialized_packets.len(), 2); assert!(results.new_tracer_stats_option.is_none()); @@ -223,6 +231,7 @@ mod tests { packet_count, &[BankingPacketBatch::new((packet_batches, None))], false, + &|_| true, ); assert_eq!(results.deserialized_packets.len(), 1); assert!(results.new_tracer_stats_option.is_none()); diff --git a/core/src/banking_stage/packet_receiver.rs b/core/src/banking_stage/packet_receiver.rs index a566ef7cf3e4c1..bbb753967f20ce 100644 --- a/core/src/banking_stage/packet_receiver.rs +++ b/core/src/banking_stage/packet_receiver.rs @@ -49,6 +49,7 @@ impl PacketReceiver { .receive_packets( recv_timeout, unprocessed_transaction_storage.max_receive_size(), + |packet| packet.compute_unit_limit_above_static_builtins(), ) // Consumes results if Ok, otherwise we keep the Err .map(|receive_packet_results| { diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 12e8f7bf8bf0bf..0b10f613e64cd6 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -322,7 +322,7 @@ impl SchedulerController { let (received_packet_results, receive_time_us) = measure_us!(self .packet_receiver - .receive_packets(recv_timeout, remaining_queue_capacity)); + .receive_packets(recv_timeout, remaining_queue_capacity, |_| true)); self.timing_metrics.update(|timing_metrics| { saturating_add_assign!(timing_metrics.receive_time_us, receive_time_us); From e963f87da96fc07f05c8d36bf48be0ec0cd10e2c Mon Sep 17 00:00:00 2001 From: carllin Date: Thu, 21 Mar 2024 17:54:17 -0400 Subject: [PATCH 037/153] Evict oldest vote on vote refresh after restart (#327) --- gossip/src/cluster_info.rs | 164 ++++++++++++++++++++++++++++--------- 1 file changed, 124 insertions(+), 40 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 471d768a101051..783f8a067d7614 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1045,6 +1045,31 @@ impl ClusterInfo { } } + fn find_vote_index_to_evict(&self, should_evict_vote: impl Fn(&Vote) -> bool) -> u8 { + let self_pubkey = self.id(); + let mut num_crds_votes = 0; + let vote_index = { + let gossip_crds = + self.time_gossip_read_lock("gossip_read_push_vote", &self.stats.push_vote_read); + (0..MAX_LOCKOUT_HISTORY as u8) + .filter_map(|ix| { + let vote = CrdsValueLabel::Vote(ix, self_pubkey); + let vote: &CrdsData = 
gossip_crds.get(&vote)?; + num_crds_votes += 1; + match &vote { + CrdsData::Vote(_, vote) if should_evict_vote(vote) => { + Some((vote.wallclock, ix)) + } + CrdsData::Vote(_, _) => None, + _ => panic!("this should not happen!"), + } + }) + .min() // Boot the oldest evicted vote by wallclock. + .map(|(_ /*wallclock*/, ix)| ix) + }; + vote_index.unwrap_or(num_crds_votes) + } + pub fn push_vote(&self, tower: &[Slot], vote: Transaction) { debug_assert!(tower.iter().tuple_windows().all(|(a, b)| a < b)); // Find a crds vote which is evicted from the tower, and recycle its @@ -1057,8 +1082,7 @@ impl ClusterInfo { // gossip. // TODO: When there are more than one vote evicted from the tower, only // one crds vote is overwritten here. Decide what to do with the rest. - let mut num_crds_votes = 0; - let self_pubkey = self.id(); + // Returns true if the tower does not contain the vote.slot. let should_evict_vote = |vote: &Vote| -> bool { match vote.slot() { @@ -1069,26 +1093,7 @@ impl ClusterInfo { } } }; - let vote_index = { - let gossip_crds = - self.time_gossip_read_lock("gossip_read_push_vote", &self.stats.push_vote_read); - (0..MAX_LOCKOUT_HISTORY as u8) - .filter_map(|ix| { - let vote = CrdsValueLabel::Vote(ix, self_pubkey); - let vote: &CrdsData = gossip_crds.get(&vote)?; - num_crds_votes += 1; - match &vote { - CrdsData::Vote(_, vote) if should_evict_vote(vote) => { - Some((vote.wallclock, ix)) - } - CrdsData::Vote(_, _) => None, - _ => panic!("this should not happen!"), - } - }) - .min() // Boot the oldest evicted vote by wallclock. - .map(|(_ /*wallclock*/, ix)| ix) - }; - let vote_index = vote_index.unwrap_or(num_crds_votes); + let vote_index = self.find_vote_index_to_evict(should_evict_vote); if (vote_index as usize) >= MAX_LOCKOUT_HISTORY { let (_, vote, hash, _) = vote_parser::parse_vote_transaction(&vote).unwrap(); panic!( @@ -1102,7 +1107,7 @@ impl ClusterInfo { self.push_vote_at_index(vote, vote_index); } - pub fn refresh_vote(&self, vote: Transaction, vote_slot: Slot) { + pub fn refresh_vote(&self, refresh_vote: Transaction, refresh_vote_slot: Slot) { let vote_index = { let self_pubkey = self.id(); let gossip_crds = @@ -1116,7 +1121,7 @@ impl ClusterInfo { panic!("this should not happen!"); }; match prev_vote.slot() { - Some(prev_vote_slot) => prev_vote_slot == vote_slot, + Some(prev_vote_slot) => prev_vote_slot == refresh_vote_slot, None => { error!("crds vote with no slots!"); false @@ -1125,13 +1130,27 @@ impl ClusterInfo { }) }; - // If you don't see a vote with the same slot yet, this means you probably - // restarted, and need to wait for your oldest vote to propagate back to you. - // // We don't write to an arbitrary index, because it may replace one of this validator's // existing votes on the network. 
if let Some(vote_index) = vote_index { - self.push_vote_at_index(vote, vote_index); + self.push_vote_at_index(refresh_vote, vote_index); + } else { + // If you don't see a vote with the same slot yet, this means you probably + // restarted, and need to repush and evict the oldest vote. + let should_evict_vote = |vote: &Vote| -> bool { + vote.slot() + .map(|slot| refresh_vote_slot > slot) + .unwrap_or(true) + }; + let vote_index = self.find_vote_index_to_evict(should_evict_vote); + if (vote_index as usize) >= MAX_LOCKOUT_HISTORY { + warn!( + "trying to refresh slot {} but all votes in gossip table are for newer slots", + refresh_vote_slot, + ); + return; + } + self.push_vote_at_index(refresh_vote, vote_index); } } @@ -3673,6 +3692,77 @@ mod tests { .unwrap(); } + #[test] + fn test_refresh_vote_eviction() { + let keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0); + let cluster_info = ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified); + + // Push MAX_LOCKOUT_HISTORY votes into gossip, one for each slot between + // [lowest_vote_slot, lowest_vote_slot + MAX_LOCKOUT_HISTORY) + let lowest_vote_slot = 1; + let max_vote_slot = lowest_vote_slot + MAX_LOCKOUT_HISTORY as Slot; + let mut first_vote = None; + let mut prev_votes = vec![]; + for slot in 1..max_vote_slot { + prev_votes.push(slot); + let unrefresh_vote = Vote::new(vec![slot], Hash::new_unique()); + let vote_ix = vote_instruction::vote( + &Pubkey::new_unique(), // vote_pubkey + &Pubkey::new_unique(), // authorized_voter_pubkey + unrefresh_vote, + ); + let vote_tx = Transaction::new_with_payer( + &[vote_ix], // instructions + None, // payer + ); + if first_vote.is_none() { + first_vote = Some(vote_tx.clone()); + } + cluster_info.push_vote(&prev_votes, vote_tx); + } + + let initial_votes = cluster_info.get_votes(&mut Cursor::default()); + assert_eq!(initial_votes.len(), MAX_LOCKOUT_HISTORY); + + // Trying to refresh a vote older than all votes in gossip should do nothing + let refresh_slot = lowest_vote_slot - 1; + let refresh_vote = Vote::new(vec![refresh_slot], Hash::new_unique()); + let refresh_ix = vote_instruction::vote( + &Pubkey::new_unique(), // vote_pubkey + &Pubkey::new_unique(), // authorized_voter_pubkey + refresh_vote.clone(), + ); + let refresh_tx = Transaction::new_with_payer( + &[refresh_ix], // instructions + None, // payer + ); + cluster_info.refresh_vote(refresh_tx.clone(), refresh_slot); + let current_votes = cluster_info.get_votes(&mut Cursor::default()); + assert_eq!(initial_votes, current_votes); + assert!(!current_votes.contains(&refresh_tx)); + + // Refreshing a vote newer than all votes in gossip should evict the oldest vote + let refresh_slot = max_vote_slot + 1; + let refresh_vote = Vote::new(vec![refresh_slot], Hash::new_unique()); + let refresh_ix = vote_instruction::vote( + &Pubkey::new_unique(), // vote_pubkey + &Pubkey::new_unique(), // authorized_voter_pubkey + refresh_vote.clone(), + ); + let refresh_tx = Transaction::new_with_payer( + &[refresh_ix], // instructions + None, // payer + ); + cluster_info.refresh_vote(refresh_tx.clone(), refresh_slot); + + // This should evict the oldest vote (the first one pushed), since its slot is less than refresh_slot + let votes = cluster_info.get_votes(&mut Cursor::default()); + assert_eq!(votes.len(), MAX_LOCKOUT_HISTORY); + assert!(votes.contains(&refresh_tx)); + assert!(!votes.contains(&first_vote.unwrap())); + } + #[test] fn test_refresh_vote() { let keypair = Arc::new(Keypair::new()); @@ -3697,8 +3787,9 
@@ mod tests { let votes = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![unrefresh_tx.clone()]); - // Now construct vote for the slot to be refreshed later - let refresh_slot = 7; + // Now construct vote for the slot to be refreshed later. Has to be less than the `unrefresh_slot`, + // otherwise it will evict that slot + let refresh_slot = unrefresh_slot - 1; let refresh_tower = vec![1, 3, unrefresh_slot, refresh_slot]; let refresh_vote = Vote::new(refresh_tower.clone(), Hash::new_unique()); let refresh_ix = vote_instruction::vote( @@ -3712,19 +3803,12 @@ mod tests { ); // Trying to refresh vote when it doesn't yet exist in gossip - // shouldn't add the vote + // should add the vote without eviction if there is room in the gossip table. cluster_info.refresh_vote(refresh_tx.clone(), refresh_slot); - let votes = cluster_info.get_votes(&mut cursor); - assert_eq!(votes, vec![]); - let votes = cluster_info.get_votes(&mut Cursor::default()); - assert_eq!(votes.len(), 1); - assert!(votes.contains(&unrefresh_tx)); - - // Push the new vote for `refresh_slot` - cluster_info.push_vote(&refresh_tower, refresh_tx.clone()); // Should be two votes in gossip - let votes = cluster_info.get_votes(&mut Cursor::default()); + cursor = Cursor::default(); + let votes = cluster_info.get_votes(&mut cursor); assert_eq!(votes.len(), 2); assert!(votes.contains(&unrefresh_tx)); assert!(votes.contains(&refresh_tx)); From c7cdf238f0613766914d3210ac57c6f63d64b529 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Fri, 22 Mar 2024 07:52:42 +0900 Subject: [PATCH 038/153] [clap-v3-utils] Remove deprecated functions (#313) * add `deprecated` feature to produce warnings on use of deprecated functions * replace `multiple_occurrences` with arg actions * replace `possible_values` with `PossibleValueParser` * deprecated `value_of` and `values_of` * deprecate `unix_timestamp_from_rfc3339_datetime` * deprecate `cluster_type_of` * deprecate `commitment_of` * deprecate `keypair_of`, `keypairs_of`, `pubkey_of`, and `pubkeys_of` functions * replace deprecated functions from `try_keypair_of`, `try_keypairs_of`, `try_pubkey_of`, and `try_pubkeys_of` * deprecate `pubkeys_sigs_of` * allow deprecated on tests * remove `deprecation` feature from clap-v3-utils * re-export `pubkeys_sigs_of` * add helper `extract_keypair` to dedupe `try_keypair_of` and `try_keypairs_of` * remove unwraps and expects * bump deprecation version --- clap-v3-utils/src/input_parsers/mod.rs | 137 +++++++++++++++++++++- clap-v3-utils/src/input_parsers/signer.rs | 77 +++++++++--- clap-v3-utils/src/keygen/mnemonic.rs | 8 +- clap-v3-utils/src/keypair.rs | 1 + clap-v3-utils/src/offline.rs | 4 +- 5 files changed, 202 insertions(+), 25 deletions(-) diff --git a/clap-v3-utils/src/input_parsers/mod.rs b/clap-v3-utils/src/input_parsers/mod.rs index d96af9516b9e5d..5aa269274253d0 100644 --- a/clap-v3-utils/src/input_parsers/mod.rs +++ b/clap-v3-utils/src/input_parsers/mod.rs @@ -21,12 +21,18 @@ pub mod signer; since = "1.17.0", note = "Please use the functions in `solana_clap_v3_utils::input_parsers::signer` directly instead" )] +#[allow(deprecated)] pub use signer::{ pubkey_of_signer, pubkeys_of_multiple_signers, pubkeys_sigs_of, resolve_signer, signer_of, STDOUT_OUTFILE_TOKEN, }; // Return parsed values from matches at `name` +#[deprecated( + since = "2.0.0", + note = "Please use the functions `ArgMatches::get_many` or `ArgMatches::try_get_many` instead" +)] +#[allow(deprecated)] pub fn values_of(matches: &ArgMatches, name: &str) -> Option> where T: 
std::str::FromStr, @@ -38,6 +44,11 @@ where } // Return a parsed value from matches at `name` +#[deprecated( + since = "2.0.0", + note = "Please use the functions `ArgMatches::get_one` or `ArgMatches::try_get_one` instead" +)] +#[allow(deprecated)] pub fn value_of(matches: &ArgMatches, name: &str) -> Option where T: std::str::FromStr, @@ -48,6 +59,11 @@ where .and_then(|value| value.parse::().ok()) } +#[deprecated( + since = "2.0.0", + note = "Please use `ArgMatches::get_one::(...)` instead" +)] +#[allow(deprecated)] pub fn unix_timestamp_from_rfc3339_datetime( matches: &ArgMatches, name: &str, @@ -63,14 +79,25 @@ pub fn unix_timestamp_from_rfc3339_datetime( since = "1.17.0", note = "please use `Amount::parse_decimal` and `Amount::sol_to_lamport` instead" )] +#[allow(deprecated)] pub fn lamports_of_sol(matches: &ArgMatches, name: &str) -> Option { value_of(matches, name).map(sol_to_lamports) } +#[deprecated( + since = "2.0.0", + note = "Please use `ArgMatches::get_one::(...)` instead" +)] +#[allow(deprecated)] pub fn cluster_type_of(matches: &ArgMatches, name: &str) -> Option { value_of(matches, name) } +#[deprecated( + since = "2.0.0", + note = "Please use `ArgMatches::get_one::(...)` instead" +)] +#[allow(deprecated)] pub fn commitment_of(matches: &ArgMatches, name: &str) -> Option { matches .value_of(name) @@ -246,6 +273,11 @@ pub fn parse_derived_address_seed(arg: &str) -> Result { } // Return the keypair for an argument with filename `name` or None if not present. +#[deprecated( + since = "2.0.0", + note = "Please use `input_parsers::signer::try_keypair_of` instead" +)] +#[allow(deprecated)] pub fn keypair_of(matches: &ArgMatches, name: &str) -> Option { if let Some(value) = matches.value_of(name) { if value == ASK_KEYWORD { @@ -259,6 +291,11 @@ pub fn keypair_of(matches: &ArgMatches, name: &str) -> Option { } } +#[deprecated( + since = "2.0.0", + note = "Please use `input_parsers::signer::try_keypairs_of` instead" +)] +#[allow(deprecated)] pub fn keypairs_of(matches: &ArgMatches, name: &str) -> Option> { matches.values_of(name).map(|values| { values @@ -276,10 +313,20 @@ pub fn keypairs_of(matches: &ArgMatches, name: &str) -> Option> { // Return a pubkey for an argument that can itself be parsed into a pubkey, // or is a filename that can be read as a keypair +#[deprecated( + since = "2.0.0", + note = "Please use `input_parsers::signer::try_pubkey_of` instead" +)] +#[allow(deprecated)] pub fn pubkey_of(matches: &ArgMatches, name: &str) -> Option { value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey())) } +#[deprecated( + since = "2.0.0", + note = "Please use `input_parsers::signer::try_pubkeys_of` instead" +)] +#[allow(deprecated)] pub fn pubkeys_of(matches: &ArgMatches, name: &str) -> Option> { matches.values_of(name).map(|values| { values @@ -294,12 +341,13 @@ pub fn pubkeys_of(matches: &ArgMatches, name: &str) -> Option> { }) } +#[allow(deprecated)] #[cfg(test)] mod tests { use { super::*, - clap::{Arg, Command}, - solana_sdk::{hash::Hash, pubkey::Pubkey}, + clap::{Arg, ArgAction, Command}, + solana_sdk::{commitment_config::CommitmentLevel, hash::Hash, pubkey::Pubkey}, }; fn app<'ab>() -> Command<'ab> { @@ -308,7 +356,7 @@ mod tests { Arg::new("multiple") .long("multiple") .takes_value(true) - .multiple_occurrences(true) + .action(ArgAction::Append) .multiple_values(true), ) .arg(Arg::new("single").takes_value(true).long("single")) @@ -545,4 +593,87 @@ mod tests { } } } + + #[test] + fn test_unix_timestamp_from_rfc3339_datetime() { + let 
command = Command::new("test").arg( + Arg::new("timestamp") + .long("timestamp") + .takes_value(true) + .value_parser(clap::value_parser!(UnixTimestamp)), + ); + + // success case + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--timestamp", "1234"]) + .unwrap(); + assert_eq!( + *matches.get_one::("timestamp").unwrap(), + 1234, + ); + + // validation fails + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--timestamp", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } + + #[test] + fn test_cluster_type() { + let command = Command::new("test").arg( + Arg::new("cluster") + .long("cluster") + .takes_value(true) + .value_parser(clap::value_parser!(ClusterType)), + ); + + // success case + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--cluster", "testnet"]) + .unwrap(); + assert_eq!( + *matches.get_one::("cluster").unwrap(), + ClusterType::Testnet + ); + + // validation fails + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--cluster", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } + + #[test] + fn test_commitment_config() { + let command = Command::new("test").arg( + Arg::new("commitment") + .long("commitment") + .takes_value(true) + .value_parser(clap::value_parser!(CommitmentConfig)), + ); + + // success case + let matches = command + .clone() + .try_get_matches_from(vec!["test", "--commitment", "finalized"]) + .unwrap(); + assert_eq!( + *matches.get_one::("commitment").unwrap(), + CommitmentConfig { + commitment: CommitmentLevel::Finalized + }, + ); + + // validation fails + let matches_error = command + .clone() + .try_get_matches_from(vec!["test", "--commitment", "this_is_an_invalid_arg"]) + .unwrap_err(); + assert_eq!(matches_error.kind, clap::error::ErrorKind::ValueValidation); + } } diff --git a/clap-v3-utils/src/input_parsers/signer.rs b/clap-v3-utils/src/input_parsers/signer.rs index d71a37b888646a..0580799a6bf675 100644 --- a/clap-v3-utils/src/input_parsers/signer.rs +++ b/clap-v3-utils/src/input_parsers/signer.rs @@ -1,7 +1,7 @@ use { - crate::{ - input_parsers::{keypair_of, keypairs_of, pubkey_of, pubkeys_of}, - keypair::{pubkey_from_path, resolve_signer_from_path, signer_from_path, ASK_KEYWORD}, + crate::keypair::{ + keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path, + ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG, }, clap::{builder::ValueParser, ArgMatches}, solana_remote_wallet::{ @@ -11,7 +11,7 @@ use { solana_sdk::{ derivation_path::{DerivationPath, DerivationPathError}, pubkey::Pubkey, - signature::{Keypair, Signature, Signer}, + signature::{read_keypair_file, Keypair, Signature, Signer}, }, std::{error, rc::Rc, str::FromStr}, thiserror::Error, @@ -236,16 +236,35 @@ pub fn try_keypair_of( matches: &ArgMatches, name: &str, ) -> Result, Box> { - matches.try_contains_id(name)?; - Ok(keypair_of(matches, name)) + if let Some(value) = matches.try_get_one::(name)? 
{ + extract_keypair(matches, name, value) + } else { + Ok(None) + } } pub fn try_keypairs_of( matches: &ArgMatches, name: &str, ) -> Result>, Box> { - matches.try_contains_id(name)?; - Ok(keypairs_of(matches, name)) + Ok(matches.try_get_many::(name)?.map(|values| { + values + .filter_map(|value| extract_keypair(matches, name, value).ok().flatten()) + .collect() + })) +} + +fn extract_keypair( + matches: &ArgMatches, + name: &str, + path: &str, +) -> Result, Box> { + if path == ASK_KEYWORD { + let skip_validation = matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; + keypair_from_seed_phrase(name, skip_validation, true, None, true).map(Some) + } else { + read_keypair_file(path).map(Some) + } } // Return a `Result` wrapped pubkey for an argument that can itself be parsed into a pubkey, @@ -254,19 +273,31 @@ pub fn try_pubkey_of( matches: &ArgMatches, name: &str, ) -> Result, Box> { - matches.try_contains_id(name)?; - Ok(pubkey_of(matches, name)) + if let Some(pubkey) = matches.try_get_one::(name)? { + Ok(Some(*pubkey)) + } else { + Ok(try_keypair_of(matches, name)?.map(|keypair| keypair.pubkey())) + } } pub fn try_pubkeys_of( matches: &ArgMatches, name: &str, ) -> Result>, Box> { - matches.try_contains_id(name)?; - Ok(pubkeys_of(matches, name)) + if let Some(pubkey_strings) = matches.try_get_many::(name)? { + let mut pubkeys = Vec::with_capacity(pubkey_strings.len()); + for pubkey_string in pubkey_strings { + pubkeys.push(pubkey_string.parse::()?); + } + Ok(Some(pubkeys)) + } else { + Ok(None) + } } // Return pubkey/signature pairs for a string of the form pubkey=signature +#[deprecated(since = "2.0.0", note = "Please use `try_pubkeys_sigs_of` instead")] +#[allow(deprecated)] pub fn pubkeys_sigs_of(matches: &ArgMatches, name: &str) -> Option> { matches.values_of(name).map(|values| { values @@ -286,8 +317,20 @@ pub fn try_pubkeys_sigs_of( matches: &ArgMatches, name: &str, ) -> Result>, Box> { - matches.try_contains_id(name)?; - Ok(pubkeys_sigs_of(matches, name)) + if let Some(pubkey_signer_strings) = matches.try_get_many::(name)? 
{ + let mut pubkey_sig_pairs = Vec::with_capacity(pubkey_signer_strings.len()); + for pubkey_signer_string in pubkey_signer_strings { + let (pubkey_string, sig_string) = pubkey_signer_string + .split_once('=') + .ok_or("failed to parse `pubkey=signature` pair")?; + let pubkey = Pubkey::from_str(pubkey_string)?; + let sig = Signature::from_str(sig_string)?; + pubkey_sig_pairs.push((pubkey, sig)); + } + Ok(Some(pubkey_sig_pairs)) + } else { + Ok(None) + } } // Return a signer from matches at `name` @@ -376,12 +419,14 @@ impl FromStr for PubkeySignature { } } +#[allow(deprecated)] #[cfg(test)] mod tests { use { super::*, + crate::input_parsers::{keypair_of, pubkey_of, pubkeys_of}, assert_matches::assert_matches, - clap::{Arg, Command}, + clap::{Arg, ArgAction, Command}, solana_remote_wallet::locator::Manufacturer, solana_sdk::signature::write_keypair_file, std::fs, @@ -512,7 +557,7 @@ mod tests { Arg::new("multiple") .long("multiple") .takes_value(true) - .multiple_occurrences(true) + .action(ArgAction::Append) .multiple_values(true), ) .arg(Arg::new("single").takes_value(true).long("single")) diff --git a/clap-v3-utils/src/keygen/mnemonic.rs b/clap-v3-utils/src/keygen/mnemonic.rs index 5813e535098504..0bcc20a58bee85 100644 --- a/clap-v3-utils/src/keygen/mnemonic.rs +++ b/clap-v3-utils/src/keygen/mnemonic.rs @@ -1,7 +1,7 @@ use { crate::{keypair::prompt_passphrase, ArgConstant}, bip39::Language, - clap::{Arg, ArgMatches}, + clap::{builder::PossibleValuesParser, Arg, ArgMatches}, std::error, }; @@ -28,7 +28,7 @@ pub const NO_PASSPHRASE_ARG: ArgConstant<'static> = ArgConstant { pub fn word_count_arg<'a>() -> Arg<'a> { Arg::new(WORD_COUNT_ARG.name) .long(WORD_COUNT_ARG.long) - .possible_values(["12", "15", "18", "21", "24"]) + .value_parser(PossibleValuesParser::new(["12", "15", "18", "21", "24"])) .default_value("12") .value_name("NUMBER") .takes_value(true) @@ -38,7 +38,7 @@ pub fn language_arg<'a>() -> Arg<'a> { Arg::new(LANGUAGE_ARG.name) .long(LANGUAGE_ARG.long) - .possible_values([ + .value_parser(PossibleValuesParser::new([ "english", "chinese-simplified", "chinese-traditional", "japanese", "spanish", "korean", "french", "italian", - ]) + ])) .default_value("english") .value_name("LANGUAGE") .takes_value(true) diff --git a/clap-v3-utils/src/keypair.rs b/clap-v3-utils/src/keypair.rs index 7e41b3c82fbbb3..c140f9573ba38d 100644 --- a/clap-v3-utils/src/keypair.rs +++ b/clap-v3-utils/src/keypair.rs @@ -1257,6 +1257,7 @@ mod tests { } #[test] + #[allow(deprecated)] fn signer_from_path_with_file() -> Result<(), Box<dyn error::Error>> { let dir = TempDir::new()?; let dir = dir.path(); diff --git a/clap-v3-utils/src/offline.rs b/clap-v3-utils/src/offline.rs index cfd71e2a6b23e5..57cb4f35defa6f 100644 --- a/clap-v3-utils/src/offline.rs +++ b/clap-v3-utils/src/offline.rs @@ -1,6 +1,6 @@ use { crate::{input_parsers::signer::PubkeySignature, ArgConstant}, - clap::{value_parser, Arg, Command}, + clap::{value_parser, Arg, ArgAction, Command}, solana_sdk::hash::Hash, }; @@ -52,7 +52,7 @@ fn signer_arg<'a>() -> Arg<'a> { .value_name("PUBKEY=SIGNATURE") .value_parser(value_parser!(PubkeySignature)) .requires(BLOCKHASH_ARG.name) - .multiple_occurrences(true) + .action(ArgAction::Append) .multiple_values(false) .help(SIGNER_ARG.help) } From cbd0369da1dd9ae1f199264afb7505b21a01370e Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 21 Mar 2024 21:27:03 -0400 Subject: [PATCH 039/153] Uses AppendVecId in AccountsFile::file_name() (#372) --- 
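Note: `AccountsFile::file_name()` previously accepted any `impl std::fmt::Display` for the id; pinning the parameter to the concrete `AppendVecId` rejects unintended argument types at compile time while producing the same `{slot}.{id}` name. A minimal sketch of the effect, assuming `AppendVecId` is a plain integer alias as in accounts_db:

type AppendVecId = usize; // assumption: mirrors the accounts_db alias

fn file_name(slot: u64, id: AppendVecId) -> String {
    // Same formatting as before; only the accepted argument type narrows,
    // so e.g. file_name(10, "not-an-id") no longer compiles.
    format!("{slot}.{id}")
}

fn main() {
    assert_eq!(file_name(10, 3), "10.3");
}
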
accounts-db/src/accounts_file.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 885380cb7814d6..4f373333ae7450 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -3,6 +3,7 @@ use { account_storage::meta::{ StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, }, + accounts_db::AppendVecId, accounts_hash::AccountHash, append_vec::{AppendVec, AppendVecError}, storable_accounts::StorableAccounts, @@ -103,7 +104,7 @@ impl AccountsFile { } } - pub fn file_name(slot: Slot, id: impl std::fmt::Display) -> String { + pub fn file_name(slot: Slot, id: AppendVecId) -> String { format!("{slot}.{id}") } From 0906b8996c55d7022f694d883486357f3e913067 Mon Sep 17 00:00:00 2001 From: blake <572337+bartenbach@users.noreply.github.com> Date: Fri, 22 Mar 2024 00:11:36 -0500 Subject: [PATCH 040/153] Health check slot distance (#335) Changed validator health check slot distance to 128 to be consistent --- CHANGELOG.md | 1 + validator/src/cli.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2898a3aab0d3c..09d411471323d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ Release channels have their own copy of this changelog: * Changes * `central-scheduler` as default option for `--block-production-method` (#34891) * `solana-rpc-client-api`: `RpcFilterError` depends on `base64` version 0.22, so users may need to upgrade to `base64` version 0.22 + * Changed default value for `--health-check-slot-distance` from 150 to 128 ## [1.18.0] * Changes diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 8cae6667f87a34..9d041877e3054a 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -28,7 +28,7 @@ use { solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}, solana_rayon_threadlimit::get_thread_count, solana_rpc::{rpc::MAX_REQUEST_BODY_SIZE, rpc_pubsub_service::PubSubConfig}, - solana_rpc_client_api::request::MAX_MULTIPLE_ACCOUNTS, + solana_rpc_client_api::request::{DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_MULTIPLE_ACCOUNTS}, solana_runtime::{ snapshot_bank_utils::{ DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, @@ -2216,7 +2216,7 @@ impl DefaultArgs { maximum_local_snapshot_age: "2500".to_string(), genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string(), rpc_max_multiple_accounts: MAX_MULTIPLE_ACCOUNTS.to_string(), - health_check_slot_distance: "150".to_string(), + health_check_slot_distance: DELINQUENT_VALIDATOR_SLOT_DISTANCE.to_string(), tower_storage: "file".to_string(), etcd_domain_name: "localhost".to_string(), rpc_pubsub_max_active_subscriptions: PubSubConfig::default() From 8f830c418c75718fa78f76ea0a7a4fe0eaef2cb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 22 Mar 2024 07:58:47 +0100 Subject: [PATCH 041/153] Rekey - alt_bn128 and poseidon_syscall (#319) Adds simplify_alt_bn128_syscall_error_codes. 
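For context, once the feature id is declared and registered below, runtime code can branch on it through the standard `FeatureSet` API. A minimal sketch, not part of this patch; the helper and the concrete error-code values are hypothetical:

use solana_sdk::feature_set::{simplify_alt_bn128_syscall_error_codes, FeatureSet};

// Hypothetical helper: collapse the detailed alt_bn128 error codes into a
// single generic code once the feature is active, per SIMD-0129.
fn alt_bn128_error_code(feature_set: &FeatureSet, raw_error: u64) -> u64 {
    if feature_set.is_active(&simplify_alt_bn128_syscall_error_codes::id()) {
        1 // simplified: every failure maps to one code
    } else {
        raw_error // legacy behavior: pass the detailed code through
    }
}
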
--- sdk/src/feature_set.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 55ce4c1253940a..bbd68729fad10e 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -553,6 +553,11 @@ pub mod enable_bpf_loader_set_authority_checked_ix { pub mod enable_alt_bn128_syscall { solana_sdk::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); } + +pub mod simplify_alt_bn128_syscall_error_codes { + solana_sdk::declare_id!("JDn5q3GBeqzvUa7z67BbmVHVdE3EbUAjvFep3weR3jxX"); +} + pub mod enable_alt_bn128_compression_syscall { solana_sdk::declare_id!("EJJewYSddEEtSZHiqugnvhQHiWyZKjkFDQASd7oKSagn"); } @@ -912,6 +917,7 @@ lazy_static! { (check_syscall_outputs_do_not_overlap::id(), "check syscall outputs do_not overlap #28600"), (enable_bpf_loader_set_authority_checked_ix::id(), "enable bpf upgradeable loader SetAuthorityChecked instruction #28424"), (enable_alt_bn128_syscall::id(), "add alt_bn128 syscalls #27961"), + (simplify_alt_bn128_syscall_error_codes::id(), "simplify alt_bn128 syscall error codes SIMD-0129"), (enable_program_redeployment_cooldown::id(), "enable program redeployment cooldown #29135"), (commission_updates_only_allowed_in_first_half_of_epoch::id(), "validator commission updates are only allowed in the first half of an epoch #29362"), (enable_turbine_fanout_experiments::id(), "enable turbine fanout experiments #29393"), From 36c66f5111044187bbfcb952e642b08c151db278 Mon Sep 17 00:00:00 2001 From: Jon C Date: Fri, 22 Mar 2024 12:10:00 +0100 Subject: [PATCH 042/153] client: Timeout resends during `send_and_confirm_in_parallel` (#358) * client: Timeout resends during `send_and_confirm_in_parallel` * Clarify constant --- ...nd_and_confirm_transactions_in_parallel.rs | 54 +++++++++++-------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/client/src/send_and_confirm_transactions_in_parallel.rs b/client/src/send_and_confirm_transactions_in_parallel.rs index 43196d05a8a519..976539c4a48b5d 100644 --- a/client/src/send_and_confirm_transactions_in_parallel.rs +++ b/client/src/send_and_confirm_transactions_in_parallel.rs @@ -31,7 +31,7 @@ use { tokio::{sync::RwLock, task::JoinHandle, time::Instant}, }; -const BLOCKHASH_REFRESH_RATE: Duration = Duration::from_secs(10); +const BLOCKHASH_REFRESH_RATE: Duration = Duration::from_secs(5); const TPU_RESEND_REFRESH_RATE: Duration = Duration::from_secs(2); const SEND_INTERVAL: Duration = Duration::from_millis(10); type QuicTpuClient = TpuClient; @@ -326,21 +326,20 @@ async fn confirm_transactions_till_block_height_and_resend_unexpired_transaction ); } + if let Some(progress_bar) = progress_bar { + let progress = progress_from_context_and_block_height(context, max_valid_block_height); + progress.set_message_for_confirmed_transactions( + progress_bar, + "Checking transaction status...", + ); + } + // wait till all transactions are confirmed or we have surpassed max processing age for the last sent transaction while !unconfirmed_transaction_map.is_empty() && current_block_height.load(Ordering::Relaxed) <= max_valid_block_height { let block_height = current_block_height.load(Ordering::Relaxed); - if let Some(progress_bar) = progress_bar { - let progress = - progress_from_context_and_block_height(context, max_valid_block_height); - progress.set_message_for_confirmed_transactions( - progress_bar, - "Checking transaction status...", - ); - } - if let Some(tpu_client) = tpu_client { let instant = Instant::now(); // retry sending transaction only over TPU port @@ -349,10 
+348,29 @@ async fn confirm_transactions_till_block_height_and_resend_unexpired_transaction .iter() .filter(|x| block_height < x.last_valid_block_height) .map(|x| x.serialized_transaction.clone()) - .collect(); - let _ = tpu_client - .try_send_wire_transaction_batch(txs_to_resend_over_tpu) - .await; + .collect::>(); + let num_txs_to_resend = txs_to_resend_over_tpu.len(); + // This is a "reasonable" constant for how long it should + // take to fan the transactions out, taken from + // `solana_tpu_client::nonblocking::tpu_client::send_wire_transaction_futures` + const SEND_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5); + let message = if tokio::time::timeout( + SEND_TIMEOUT_INTERVAL, + tpu_client.try_send_wire_transaction_batch(txs_to_resend_over_tpu), + ) + .await + .is_err() + { + format!("Timed out resending {num_txs_to_resend} transactions...") + } else { + format!("Resent {num_txs_to_resend} transactions...") + }; + + if let Some(progress_bar) = progress_bar { + let progress = + progress_from_context_and_block_height(context, max_valid_block_height); + progress.set_message_for_confirmed_transactions(progress_bar, &message); + } let elapsed = instant.elapsed(); if elapsed < TPU_RESEND_REFRESH_RATE { @@ -370,14 +388,6 @@ async fn confirm_transactions_till_block_height_and_resend_unexpired_transaction max_valid_block_height = max_valid_block_height_in_remaining_transaction; } } - - if let Some(progress_bar) = progress_bar { - let progress = progress_from_context_and_block_height(context, max_valid_block_height); - progress.set_message_for_confirmed_transactions( - progress_bar, - "Checking transaction status...", - ); - } } } From 91b1ee3df6a6ec850659735bf516cf8191c51cfc Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 22 Mar 2024 07:43:28 -0700 Subject: [PATCH 043/153] Fix: deploy program on last slot of epoch during environment change (#101) * Fix: deploy program on last slot of epoch during environment change * solana-runtime: deploy at last epoch slot test * disable deployment of sol_alloc_free * Move tx-batch-constructor to its own function * use new_from_cache --------- Co-authored-by: Alessandro Decina --- ledger-tool/src/program.rs | 10 ++- program-runtime/src/invoke_context.rs | 35 +++++++- program-runtime/src/loaded_programs.rs | 93 +++++++++++++++----- programs/bpf_loader/src/lib.rs | 30 +++++-- programs/bpf_loader/src/syscalls/mod.rs | 18 ++++ programs/loader-v4/src/lib.rs | 14 +-- runtime/src/bank.rs | 19 +++- runtime/src/bank/tests.rs | 112 ++++++++++++++++++++++++ svm/src/transaction_processor.rs | 12 ++- 9 files changed, 296 insertions(+), 47 deletions(-) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 0b4855ccb7f756..d4ecb0d4694b76 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -21,11 +21,12 @@ use { }, solana_runtime::bank::Bank, solana_sdk::{ - account::AccountSharedData, + account::{create_account_shared_data_for_test, AccountSharedData}, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, pubkey::Pubkey, slot_history::Slot, + sysvar, transaction_context::{IndexOfAccount, InstructionAccount}, }, std::{ @@ -510,13 +511,16 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { program_id, // ID of the loaded program. 
It can modify accounts with the same owner key AccountSharedData::new(0, 0, &loader_id), )); + transaction_accounts.push(( + sysvar::epoch_schedule::id(), + create_account_shared_data_for_test(bank.epoch_schedule()), + )); let interpreted = matches.value_of("mode").unwrap() != "jit"; with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); // Adding `DELAY_VISIBILITY_SLOT_OFFSET` to slots to accommodate for delay visibility of the program - let slot = bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET; let mut loaded_programs = - LoadedProgramsForTxBatch::new(slot, bank.get_runtime_environments_for_slot(slot)); + bank.new_program_cache_for_tx_batch_for_slot(bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET); for key in cached_account_keys { loaded_programs.replenish(key, bank.load_program(&key, false, bank.epoch())); debug!("Loaded program {}", key); diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 5b2d417912256f..8259c2ed2bcc7a 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -2,7 +2,9 @@ use { crate::{ compute_budget::ComputeBudget, ic_msg, - loaded_programs::{LoadedProgram, LoadedProgramType, LoadedProgramsForTxBatch}, + loaded_programs::{ + LoadedProgram, LoadedProgramType, LoadedProgramsForTxBatch, ProgramRuntimeEnvironments, + }, log_collector::LogCollector, stable_log, sysvar_cache::SysvarCache, @@ -17,8 +19,10 @@ use { vm::{Config, ContextObject, EbpfVm}, }, solana_sdk::{ - account::AccountSharedData, + account::{create_account_shared_data_for_test, AccountSharedData}, bpf_loader_deprecated, + clock::Slot, + epoch_schedule::EpochSchedule, feature_set::FeatureSet, hash::Hash, instruction::{AccountMeta, InstructionError}, @@ -26,6 +30,7 @@ use { pubkey::Pubkey, saturating_add_assign, stable_layout::stable_instruction::StableInstruction, + sysvar, transaction_context::{ IndexOfAccount, InstructionAccount, TransactionAccount, TransactionContext, }, @@ -209,6 +214,17 @@ impl<'a> InvokeContext<'a> { .or_else(|| self.programs_loaded_for_tx_batch.find(pubkey)) } + pub fn get_environments_for_slot( + &self, + effective_slot: Slot, + ) -> Result<&ProgramRuntimeEnvironments, InstructionError> { + let epoch_schedule = self.sysvar_cache.get_epoch_schedule()?; + let epoch = epoch_schedule.get_epoch(effective_slot); + Ok(self + .programs_loaded_for_tx_batch + .get_environments_for_epoch(epoch)) + } + /// Push a stack frame onto the invocation stack pub fn push(&mut self) -> Result<(), InstructionError> { let instruction_context = self @@ -713,6 +729,18 @@ pub fn mock_process_instruction>, slot: Slot, pub environments: ProgramRuntimeEnvironments, + /// Anticipated replacement for `environments` at the next epoch. + /// + /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch). + /// More precisely, it starts with the recompilation phase a few hundred slots before the epoch boundary, + /// and it ends with the first rerooting after the epoch boundary. + /// Needed when a program is deployed at the last slot of an epoch, becomes effective in the next epoch. + /// So needs to be compiled with the environment for the next epoch. 
+ pub upcoming_environments: Option, + /// The epoch of the last rerooting + pub latest_root_epoch: Epoch, pub hit_max_limit: bool, } impl LoadedProgramsForTxBatch { - pub fn new(slot: Slot, environments: ProgramRuntimeEnvironments) -> Self { + pub fn new( + slot: Slot, + environments: ProgramRuntimeEnvironments, + upcoming_environments: Option, + latest_root_epoch: Epoch, + ) -> Self { Self { entries: HashMap::new(), slot, environments, + upcoming_environments, + latest_root_epoch, hit_max_limit: false, } } + pub fn new_from_cache( + slot: Slot, + epoch: Epoch, + cache: &ProgramCache, + ) -> Self { + Self { + entries: HashMap::new(), + slot, + environments: cache.get_environments_for_epoch(epoch).clone(), + upcoming_environments: cache.get_upcoming_environments_for_epoch(epoch), + latest_root_epoch: cache.latest_root_epoch, + hit_max_limit: false, + } + } + + /// Returns the current environments depending on the given epoch + pub fn get_environments_for_epoch(&self, epoch: Epoch) -> &ProgramRuntimeEnvironments { + if epoch != self.latest_root_epoch { + if let Some(upcoming_environments) = self.upcoming_environments.as_ref() { + return upcoming_environments; + } + } + &self.environments + } + /// Refill the cache with a single entry. It's typically called during transaction loading, and /// transaction processing (for program management instructions). /// It replaces the existing entry (if any) with the provided entry. The return value contains @@ -710,6 +752,17 @@ impl ProgramCache { &self.environments } + /// Returns the upcoming environments depending on the given epoch + pub fn get_upcoming_environments_for_epoch( + &self, + epoch: Epoch, + ) -> Option { + if epoch == self.latest_root_epoch { + return self.upcoming_environments.clone(); + } + None + } + /// Insert a single entry. It's typically called during transaction loading, /// when the cache doesn't contain the entry corresponding to program `key`. 
pub fn assign_program(&mut self, key: Pubkey, entry: Arc) -> bool { @@ -2057,7 +2110,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 3)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 20, 22)); @@ -2073,7 +2126,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 15)); @@ -2096,7 +2149,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(18, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(18, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 18)); @@ -2114,7 +2167,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 23)); @@ -2132,7 +2185,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(11, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(11, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 11)); @@ -2170,7 +2223,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); // Since the fork was pruned, we should not find the entry deployed at slot 20. 
@@ -2187,7 +2240,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 27)); @@ -2219,7 +2272,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 23)); @@ -2274,7 +2327,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 12)); @@ -2294,7 +2347,7 @@ mod tests { ), (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program2, 11, 12)); @@ -2360,7 +2413,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 19)); @@ -2374,7 +2427,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 27)); @@ -2388,7 +2441,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 20, 22)); @@ -2469,7 +2522,7 @@ mod tests { cache.prune(10, 0); let mut missing = vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))]; - let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); // The cache should have the program deployed at slot 0 @@ -2513,7 +2566,7 @@ mod tests { (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut 
extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); @@ -2523,7 +2576,7 @@ mod tests { (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 5, 6)); @@ -2537,7 +2590,7 @@ mod tests { (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); @@ -2547,7 +2600,7 @@ mod tests { (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 6)); @@ -2561,7 +2614,7 @@ mod tests { (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; - let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone(), None, 0); cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 2cae8b502efdb9..41af14aab8121d 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -51,7 +51,7 @@ use { rc::Rc, sync::{atomic::Ordering, Arc}, }, - syscalls::create_program_runtime_environment_v1, + syscalls::{create_program_runtime_environment_v1, morph_into_deployment_environment_v1}, }; pub const DEFAULT_LOADER_COMPUTE_UNITS: u64 = 570; @@ -106,11 +106,16 @@ macro_rules! deploy_program { $account_size:expr, $slot:expr, $drop:expr, $new_programdata:expr $(,)?) => {{ let mut load_program_metrics = LoadProgramMetrics::default(); let mut register_syscalls_time = Measure::start("register_syscalls_time"); - let deployment_program_runtime_environment = create_program_runtime_environment_v1( - &$invoke_context.feature_set, - $invoke_context.get_compute_budget(), - true, /* deployment */ - false, /* debugging_features */ + let deployment_slot: Slot = $slot; + let environments = $invoke_context.get_environments_for_slot( + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET) + ).map_err(|e| { + // This will never fail since the epoch schedule is already configured. 
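// DELAY_VISIBILITY_SLOT_OFFSET is one slot, so the environment is resolved
// for the slot where the program first becomes invocable rather than the slot
// it is deployed in. A sketch of the boundary case this fixes, assuming the
// usual epoch length of 432_000 slots:
//
//     let deployment_slot: Slot = 431_999; // last slot of the epoch
//     let effective_slot = deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET);
//     // effective_slot == 432_000, the first slot of the next epoch, so the
//     // next epoch's (upcoming) runtime environment must verify the ELF.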
+                ic_msg!($invoke_context, "Failed to get runtime environment: {}", e);
+                InstructionError::ProgramEnvironmentSetupFailure
+            })?;
+        let deployment_program_runtime_environment = morph_into_deployment_environment_v1(
+            environments.program_runtime_v1.clone(),
         ).map_err(|e| {
             ic_msg!($invoke_context, "Failed to register syscalls: {}", e);
             InstructionError::ProgramEnvironmentSetupFailure
         })?;
@@ -143,7 +148,7 @@ macro_rules! deploy_program {
             $loader_key,
             $account_size,
             $slot,
-            $invoke_context.programs_modified_by_tx.environments.program_runtime_v1.clone(),
+            environments.program_runtime_v1.clone(),
             true,
         )?;
         if let Some(old_entry) = $invoke_context.find_program_in_cache(&$program_id) {
@@ -1536,6 +1541,7 @@ mod tests {
         },
         account_utils::StateMut,
         clock::Clock,
+        epoch_schedule::EpochSchedule,
         instruction::{AccountMeta, InstructionError},
         pubkey::Pubkey,
         rent::Rent,
@@ -3723,7 +3729,10 @@ mod tests {

     #[test]
     fn test_program_usage_count_on_upgrade() {
-        let transaction_accounts = vec![];
+        let transaction_accounts = vec![(
+            sysvar::epoch_schedule::id(),
+            create_account_for_test(&EpochSchedule::default()),
+        )];
         with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts);
         let program_id = Pubkey::new_unique();
         let env = Arc::new(BuiltinProgram::new_mock());
@@ -3763,7 +3772,10 @@ mod tests {

     #[test]
     fn test_program_usage_count_on_non_upgrade() {
-        let transaction_accounts = vec![];
+        let transaction_accounts = vec![(
+            sysvar::epoch_schedule::id(),
+            create_account_for_test(&EpochSchedule::default()),
+        )];
         with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts);
         let program_id = Pubkey::new_unique();
         let env = Arc::new(BuiltinProgram::new_mock());
diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index d9c66f24e503ed..4a166fa1cf9996 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -239,6 +239,24 @@ macro_rules! register_feature_gated_function {
     };
 }

+pub fn morph_into_deployment_environment_v1<'a>(
+    from: Arc<BuiltinProgram<InvokeContext<'a>>>,
+) -> Result<BuiltinProgram<InvokeContext<'a>>, Error> {
+    let mut config = *from.get_config();
+    config.reject_broken_elfs = true;
+
+    let mut result = FunctionRegistry::<BuiltinFunction<InvokeContext<'a>>>::default();
+
+    for (key, (name, value)) in from.get_function_registry().iter() {
+        // Deployment of programs with sol_alloc_free is disabled. So do not register the syscall.
+        if name != *b"sol_alloc_free_" {
+            result.register_function(key, name, value)?;
+        }
+    }
+
+    Ok(BuiltinProgram::new_loader(config, result))
+}
+
 pub fn create_program_runtime_environment_v1<'a>(
     feature_set: &FeatureSet,
     compute_budget: &ComputeBudget,
diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs
index 9573f925085585..6a9026f25708d0 100644
--- a/programs/loader-v4/src/lib.rs
+++ b/programs/loader-v4/src/lib.rs
@@ -405,17 +405,21 @@ pub fn process_instruction_deploy(

     let deployment_slot = state.slot;
     let effective_slot = deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET);
+    let environments = invoke_context
+        .get_environments_for_slot(effective_slot)
+        .map_err(|err| {
+            // This will never fail since the epoch schedule is already configured.
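// Morphing (rather than rebuilding) the environment keeps deployment
// verification aligned with the execution config for that slot: the config is
// copied, `reject_broken_elfs` is switched on, and every registered syscall
// except the deprecated `sol_alloc_free_` is carried over. A hypothetical
// call, assuming `env` is the `program_runtime_v1` for the effective slot:
//
//     let deploy_env = morph_into_deployment_environment_v1(env.clone())?;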
+ ic_logger_msg!(log_collector, "Failed to get runtime environment {}", err); + InstructionError::InvalidArgument + })?; + let mut load_program_metrics = LoadProgramMetrics { program_id: buffer.get_key().to_string(), ..LoadProgramMetrics::default() }; let executor = LoadedProgram::new( &loader_v4::id(), - invoke_context - .programs_modified_by_tx - .environments - .program_runtime_v2 - .clone(), + environments.program_runtime_v2.clone(), deployment_slot, effective_slot, programdata, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 702711231ea139..7b1697ddb13ed9 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -33,10 +33,6 @@ //! It offers a high-level API that signs transactions //! on behalf of the caller, and a low-level API for when they have //! already been signed and verified. -#[cfg(feature = "dev-context-only-utils")] -use solana_accounts_db::accounts_db::{ - ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, -}; #[allow(deprecated)] use solana_sdk::recent_blockhashes_account; pub use solana_sdk::reward_type::RewardType; @@ -199,6 +195,13 @@ use { time::{Duration, Instant}, }, }; +#[cfg(feature = "dev-context-only-utils")] +use { + solana_accounts_db::accounts_db::{ + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, + }, + solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, +}; /// params to `verify_accounts_hash` struct VerifyAccountsHashConfig { @@ -7881,6 +7884,14 @@ impl Bank { pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false) } + + pub fn new_program_cache_for_tx_batch_for_slot(&self, slot: Slot) -> LoadedProgramsForTxBatch { + LoadedProgramsForTxBatch::new_from_cache( + slot, + self.epoch_schedule.get_epoch(slot), + &self.program_cache.read().unwrap(), + ) + } } /// Compute how much an account has changed size. 
This function is useful when the data size delta diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index ea2354ef8e3586..edeeb621966f12 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13921,3 +13921,115 @@ fn test_check_execution_status_and_charge_fee() { } }); } +#[test] +fn test_deploy_last_epoch_slot() { + solana_logger::setup(); + + // Bank Setup + let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config + .accounts + .remove(&feature_set::reject_callx_r10::id()); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.activate_feature(&feature_set::reject_callx_r10::id()); + + // go to the last slot in the epoch + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let slots_in_epoch = bank.epoch_schedule().get_slots_in_epoch(0); + let bank = new_bank_from_parent_with_bank_forks( + &bank_forks, + bank, + &Pubkey::default(), + slots_in_epoch - 1, + ); + eprintln!("now at slot {} epoch {}", bank.slot(), bank.epoch()); + + // deploy a program + let payer_keypair = Keypair::new(); + let program_keypair = Keypair::new(); + let mut file = File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so").unwrap(); + let mut elf = Vec::new(); + file.read_to_end(&mut elf).unwrap(); + let min_program_balance = + bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); + let min_buffer_balance = bank + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_buffer(elf.len())); + let min_programdata_balance = bank.get_minimum_balance_for_rent_exemption( + UpgradeableLoaderState::size_of_programdata(elf.len()), + ); + let buffer_address = Pubkey::new_unique(); + let (programdata_address, _) = Pubkey::find_program_address( + &[program_keypair.pubkey().as_ref()], + &bpf_loader_upgradeable::id(), + ); + let upgrade_authority_keypair = Keypair::new(); + + let buffer_account = { + let mut account = AccountSharedData::new( + min_buffer_balance, + UpgradeableLoaderState::size_of_buffer(elf.len()), + &bpf_loader_upgradeable::id(), + ); + account + .set_state(&UpgradeableLoaderState::Buffer { + authority_address: Some(upgrade_authority_keypair.pubkey()), + }) + .unwrap(); + account + .data_as_mut_slice() + .get_mut(UpgradeableLoaderState::size_of_buffer_metadata()..) 
+ .unwrap() + .copy_from_slice(&elf); + account + }; + + let payer_base_balance = LAMPORTS_PER_SOL; + let deploy_fees = { + let fee_calculator = genesis_config.fee_rate_governor.create_fee_calculator(); + 3 * fee_calculator.lamports_per_signature + }; + let min_payer_balance = min_program_balance + .saturating_add(min_programdata_balance) + .saturating_sub(min_buffer_balance.saturating_add(deploy_fees)); + bank.store_account( + &payer_keypair.pubkey(), + &AccountSharedData::new( + payer_base_balance.saturating_add(min_payer_balance), + 0, + &system_program::id(), + ), + ); + bank.store_account(&buffer_address, &buffer_account); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); + let message = Message::new( + &bpf_loader_upgradeable::deploy_with_max_program_len( + &payer_keypair.pubkey(), + &program_keypair.pubkey(), + &buffer_address, + &upgrade_authority_keypair.pubkey(), + min_program_balance, + elf.len(), + ) + .unwrap(), + Some(&payer_keypair.pubkey()), + ); + let signers = &[&payer_keypair, &program_keypair, &upgrade_authority_keypair]; + let transaction = Transaction::new(signers, message.clone(), bank.last_blockhash()); + let ret = bank.process_transaction(&transaction); + assert!(ret.is_ok(), "ret: {:?}", ret); + goto_end_of_slot(bank.clone()); + + // go to the first slot in the new epoch + let bank = + new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), slots_in_epoch); + eprintln!("now at slot {} epoch {}", bank.slot(), bank.epoch()); + + let instruction = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let binding = mint_keypair.insecure_clone(); + let signers = vec![&binding]; + let transaction = Transaction::new(&signers, message, bank.last_blockhash()); + let result_with_feature_enabled = bank.process_transaction(&transaction); + assert_eq!(result_with_feature_enabled, Ok(())); +} diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index db5a8ac92c2ded..b1673cef0b1b11 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -510,9 +510,10 @@ impl TransactionBatchProcessor { // Initialize our local cache. let is_first_round = loaded_programs_for_txs.is_none(); if is_first_round { - loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( + loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new_from_cache( self.slot, - program_cache.get_environments_for_epoch(self.epoch).clone(), + self.epoch, + &program_cache, )); } // Submit our last completed loading task. @@ -523,9 +524,10 @@ impl TransactionBatchProcessor { // This branch is taken when there is an error in assigning a program to a // cache slot. It is not possible to mock this error for SVM unit // tests purposes. 
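// On the first load round the batch-local cache is now seeded directly from
// the global cache. A hypothetical expansion of what `new_from_cache` saves
// the caller from writing (assuming `program_cache` is the locked global
// cache):
//
//     let local = LoadedProgramsForTxBatch::new(
//         self.slot,
//         program_cache.get_environments_for_epoch(self.epoch).clone(),
//         program_cache.get_upcoming_environments_for_epoch(self.epoch),
//         program_cache.latest_root_epoch,
//     );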
- let mut ret = LoadedProgramsForTxBatch::new( + let mut ret = LoadedProgramsForTxBatch::new_from_cache( self.slot, - program_cache.get_environments_for_epoch(self.epoch).clone(), + self.epoch, + &program_cache, ); ret.hit_max_limit = true; return ret; @@ -630,6 +632,8 @@ impl TransactionBatchProcessor { let mut programs_modified_by_tx = LoadedProgramsForTxBatch::new( self.slot, programs_loaded_for_tx_batch.environments.clone(), + programs_loaded_for_tx_batch.upcoming_environments.clone(), + programs_loaded_for_tx_batch.latest_root_epoch, ); let mut process_message_time = Measure::start("process_message_time"); let process_result = MessageProcessor::process_message( From 84639441c065a2fdcefb6ec05254483eeedc63e3 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 22 Mar 2024 09:12:21 -0700 Subject: [PATCH 044/153] Support --block-production-method in banking-bench (#269) --- banking-bench/src/main.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 041df5354f9e0f..f970a0002d11d1 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -9,6 +9,7 @@ use { solana_core::{ banking_stage::BankingStage, banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT}, + validator::BlockProductionMethod, }, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -279,6 +280,14 @@ fn main() { .takes_value(true) .help("Number of batches to send in each iteration"), ) + .arg( + Arg::with_name("block_production_method") + .long("block-production-method") + .value_name("METHOD") + .takes_value(true) + .possible_values(BlockProductionMethod::cli_names()) + .help(BlockProductionMethod::cli_message()), + ) .arg( Arg::new("num_banking_threads") .long("num-banking-threads") @@ -306,6 +315,9 @@ fn main() { ) .get_matches(); + let block_production_method = matches + .value_of_t::("block_production_method") + .unwrap_or_default(); let num_banking_threads = matches .value_of_t::("num_banking_threads") .unwrap_or_else(|_| BankingStage::num_threads()); @@ -448,7 +460,8 @@ fn main() { DEFAULT_TPU_CONNECTION_POOL_SIZE, ), }; - let banking_stage = BankingStage::new_thread_local_multi_iterator( + let banking_stage = BankingStage::new_num_threads( + block_production_method, &cluster_info, &poh_recorder, non_vote_receiver, From f799c9ff675376af57cba4a7ad3fcd1a30ac4508 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 22 Mar 2024 11:24:49 -0500 Subject: [PATCH 045/153] Runtime: Expose builtin program IDs to crate (#318) * runtime: bank: rename `builtin_programs` to `builtin_program_ids` * runtime: snapshot minimizer: use builtin IDs from bank --- runtime/src/bank.rs | 24 ++++++++++++++---------- runtime/src/bank/metrics.rs | 8 ++++++-- runtime/src/snapshot_minimizer.rs | 14 +++++++------- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7b1697ddb13ed9..29218efcdc7c69 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -551,7 +551,7 @@ impl PartialEq for Bank { epoch_stakes, is_delta, // TODO: Confirm if all these fields are intentionally ignored! 
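// The destructuring below is how `PartialEq for Bank` keeps the field list
// exhaustive; `builtin_program_ids` stays in the ignored set because it is
// derived state, repopulated through `add_builtin` rather than compared
// across banks. A sketch of the membership question callers actually ask,
// assuming `bank` and `key` bindings:
//
//     let is_builtin = bank.get_builtin_program_ids().contains(&key);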
-            builtin_programs: _,
+            builtin_program_ids: _,
             runtime_config: _,
             rewards: _,
             cluster_type: _,
@@ -779,7 +779,7 @@ pub struct Bank {
     /// stream for the slot == self.slot
     is_delta: AtomicBool,

-    builtin_programs: HashSet<Pubkey>,
+    builtin_program_ids: HashSet<Pubkey>,

     /// Optional config parameters that can override runtime behavior
     pub(crate) runtime_config: Arc<RuntimeConfig>,
@@ -1001,7 +1001,7 @@ impl Bank {
             stakes_cache: StakesCache::default(),
             epoch_stakes: HashMap::<Epoch, EpochStakes>::default(),
             is_delta: AtomicBool::default(),
-            builtin_programs: HashSet::<Pubkey>::default(),
+            builtin_program_ids: HashSet::<Pubkey>::default(),
             runtime_config: Arc::<RuntimeConfig>::default(),
             rewards: RwLock::<Vec<(Pubkey, RewardInfo)>>::default(),
             cluster_type: Option::<ClusterType>::default(),
@@ -1258,8 +1258,8 @@ impl Bank {

         let (epoch_stakes, epoch_stakes_time_us) = measure_us!(parent.epoch_stakes.clone());

-        let (builtin_programs, builtin_programs_time_us) =
-            measure_us!(parent.builtin_programs.clone());
+        let (builtin_program_ids, builtin_program_ids_time_us) =
+            measure_us!(parent.builtin_program_ids.clone());

         let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) =
             measure_us!(parent.rewards_pool_pubkeys.clone());
@@ -1315,7 +1315,7 @@ impl Bank {
             ancestors: Ancestors::default(),
             hash: RwLock::new(Hash::default()),
             is_delta: AtomicBool::new(false),
-            builtin_programs,
+            builtin_program_ids,
             tick_height: AtomicU64::new(parent.tick_height.load(Relaxed)),
             signature_count: AtomicU64::new(0),
             runtime_config: parent.runtime_config.clone(),
@@ -1477,7 +1477,7 @@ impl Bank {
                 blockhash_queue_time_us,
                 stakes_cache_time_us,
                 epoch_stakes_time_us,
-                builtin_programs_time_us,
+                builtin_program_ids_time_us,
                 rewards_pool_pubkeys_time_us,
                 executor_cache_time_us: 0,
                 transaction_debug_keys_time_us,
@@ -1872,7 +1872,7 @@ impl Bank {
             stakes_cache: StakesCache::new(stakes),
             epoch_stakes: fields.epoch_stakes,
             is_delta: AtomicBool::new(fields.is_delta),
-            builtin_programs: HashSet::<Pubkey>::default(),
+            builtin_program_ids: HashSet::<Pubkey>::default(),
             runtime_config,
             rewards: RwLock::new(vec![]),
             cluster_type: Some(genesis_config.cluster_type),
@@ -4681,7 +4681,7 @@ impl Bank {
             recording_config,
             timings,
             account_overrides,
-            self.builtin_programs.iter(),
+            self.builtin_program_ids.iter(),
             log_messages_bytes_limit,
             limit_to_load_programs,
         );
@@ -6162,6 +6162,10 @@ impl Bank {
         }
     }

+    pub(crate) fn get_builtin_program_ids(&self) -> &HashSet<Pubkey> {
+        &self.builtin_program_ids
+    }
+
     // Hi! leaky abstraction here....
     // try to use get_account_with_fixed_root() if it's called ONLY from on-chain runtime account
     // processing. That alternative fn provides more safety.
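// With the accessor above, in-crate consumers can drop their dependency on
// the static BUILTINS table and read the ids the bank actually registered,
// e.g. a snapshot-minimizer-style pass (assuming a `minimized_account_set`
// binding):
//
//     for program_id in bank.get_builtin_program_ids() {
//         minimized_account_set.insert(*program_id);
//     }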
@@ -7190,7 +7194,7 @@ impl Bank { pub fn add_builtin(&mut self, program_id: Pubkey, name: String, builtin: LoadedProgram) { debug!("Adding program {} under {:?}", name, program_id); self.add_builtin_account(name.as_str(), &program_id, false); - self.builtin_programs.insert(program_id); + self.builtin_program_ids.insert(program_id); self.program_cache .write() .unwrap() diff --git a/runtime/src/bank/metrics.rs b/runtime/src/bank/metrics.rs index fd2c19473931d8..4193339bce6cc1 100644 --- a/runtime/src/bank/metrics.rs +++ b/runtime/src/bank/metrics.rs @@ -31,7 +31,7 @@ pub(crate) struct NewBankTimings { pub(crate) blockhash_queue_time_us: u64, pub(crate) stakes_cache_time_us: u64, pub(crate) epoch_stakes_time_us: u64, - pub(crate) builtin_programs_time_us: u64, + pub(crate) builtin_program_ids_time_us: u64, pub(crate) rewards_pool_pubkeys_time_us: u64, pub(crate) executor_cache_time_us: u64, pub(crate) transaction_debug_keys_time_us: u64, @@ -125,7 +125,11 @@ pub(crate) fn report_new_bank_metrics( ("blockhash_queue_us", timings.blockhash_queue_time_us, i64), ("stakes_cache_us", timings.stakes_cache_time_us, i64), ("epoch_stakes_time_us", timings.epoch_stakes_time_us, i64), - ("builtin_programs_us", timings.builtin_programs_time_us, i64), + ( + "builtin_programs_us", + timings.builtin_program_ids_time_us, + i64 + ), ( "rewards_pool_pubkeys_us", timings.rewards_pool_pubkeys_time_us, diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index ddd47c887ab7bb..009444b962ed48 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -1,10 +1,7 @@ //! Used to create minimal snapshots - separated here to keep accounts_db simpler use { - crate::{ - bank::{builtins::BUILTINS, Bank}, - static_ids, - }, + crate::{bank::Bank, static_ids}, dashmap::DashSet, log::info, rayon::{ @@ -116,9 +113,12 @@ impl<'a> SnapshotMinimizer<'a> { /// Used to get builtin accounts in `minimize` fn get_builtins(&self) { - BUILTINS.iter().for_each(|e| { - self.minimized_account_set.insert(e.program_id); - }); + self.bank + .get_builtin_program_ids() + .iter() + .for_each(|program_id| { + self.minimized_account_set.insert(*program_id); + }); } /// Used to get static runtime accounts in `minimize` From 62d49f123a2c24daa161913dd7f361dbbcf613af Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 22 Mar 2024 11:25:10 -0500 Subject: [PATCH 046/153] Runtime: Core BPF Migration: Struct for loading and checking builtin program accounts (#331) * runtime: core_bpf_migration: add builtin config * runtime: core_bpf_migration: add builtin config tests * some renaming feedback --- .../bank/builtins/core_bpf_migration/error.rs | 18 ++ .../bank/builtins/core_bpf_migration/mod.rs | 8 + .../core_bpf_migration/target_builtin.rs | 251 ++++++++++++++++++ runtime/src/bank/builtins/mod.rs | 1 + 4 files changed, 278 insertions(+) create mode 100644 runtime/src/bank/builtins/core_bpf_migration/error.rs create mode 100644 runtime/src/bank/builtins/core_bpf_migration/mod.rs create mode 100644 runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs diff --git a/runtime/src/bank/builtins/core_bpf_migration/error.rs b/runtime/src/bank/builtins/core_bpf_migration/error.rs new file mode 100644 index 00000000000000..e55469e0211207 --- /dev/null +++ b/runtime/src/bank/builtins/core_bpf_migration/error.rs @@ -0,0 +1,18 @@ +use {solana_sdk::pubkey::Pubkey, thiserror::Error}; + +/// Errors returned by a Core BPF migration. 
+#[derive(Debug, Error, PartialEq)]
+pub enum CoreBpfMigrationError {
+    /// Account not found
+    #[error("Account not found: {0:?}")]
+    AccountNotFound(Pubkey),
+    /// Account exists
+    #[error("Account exists: {0:?}")]
+    AccountExists(Pubkey),
+    /// Incorrect account owner
+    #[error("Incorrect account owner for {0:?}")]
+    IncorrectOwner(Pubkey),
+    /// Program has a data account
+    #[error("Data account exists for program {0:?}")]
+    ProgramHasDataAccount(Pubkey),
+}
diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs
new file mode 100644
index 00000000000000..6a09df8dd13136
--- /dev/null
+++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs
@@ -0,0 +1,8 @@
+#![allow(dead_code)] // Removed in later commit
+pub(crate) mod error;
+mod target_builtin;
+
+pub(crate) enum CoreBpfMigrationTargetType {
+    Builtin,
+    Stateless,
+}
diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs
new file mode 100644
index 00000000000000..0166e3d9ea0a7e
--- /dev/null
+++ b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs
@@ -0,0 +1,251 @@
+use {
+    super::{error::CoreBpfMigrationError, CoreBpfMigrationTargetType},
+    crate::bank::Bank,
+    solana_sdk::{
+        account::{AccountSharedData, ReadableAccount},
+        bpf_loader_upgradeable::get_program_data_address,
+        native_loader::ID as NATIVE_LOADER_ID,
+        pubkey::Pubkey,
+    },
+};
+
+/// Used to validate a built-in program's account before migrating to Core BPF.
+#[derive(Debug)]
+pub(crate) struct TargetProgramBuiltin {
+    pub program_address: Pubkey,
+    pub program_account: AccountSharedData,
+    pub program_data_address: Pubkey,
+    pub total_data_size: usize,
+}
+
+impl TargetProgramBuiltin {
+    /// Create a new migration configuration for a built-in program.
+    pub(crate) fn new_checked(
+        bank: &Bank,
+        program_address: &Pubkey,
+        migration_target: &CoreBpfMigrationTargetType,
+    ) -> Result<Self, CoreBpfMigrationError> {
+        let program_account = match migration_target {
+            CoreBpfMigrationTargetType::Builtin => {
+                // The program account should exist.
+                let program_account = bank
+                    .get_account_with_fixed_root(program_address)
+                    .ok_or(CoreBpfMigrationError::AccountNotFound(*program_address))?;
+
+                // The program account should be owned by the native loader.
+                if program_account.owner() != &NATIVE_LOADER_ID {
+                    return Err(CoreBpfMigrationError::IncorrectOwner(*program_address));
+                }
+
+                program_account
+            }
+            CoreBpfMigrationTargetType::Stateless => {
+                // The program account should _not_ exist.
+                if bank.get_account_with_fixed_root(program_address).is_some() {
+                    return Err(CoreBpfMigrationError::AccountExists(*program_address));
+                }
+
+                AccountSharedData::default()
+            }
+        };
+
+        let program_data_address = get_program_data_address(program_address);
+
+        // The program data account should not exist.
+        if bank
+            .get_account_with_fixed_root(&program_data_address)
+            .is_some()
+        {
+            return Err(CoreBpfMigrationError::ProgramHasDataAccount(
+                *program_address,
+            ));
+        }
+
+        // The total data size is the size of the program account's data.
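// new_checked enforces a small validation matrix before any migration runs:
// a Builtin target must exist and be owned by the native loader, a Stateless
// target must not exist at all, and in both cases the derived program-data
// address must be vacant. A hypothetical caller, propagating the typed error:
//
//     let target = TargetProgramBuiltin::new_checked(
//         &bank,
//         &solana_config_program::id(),
//         &CoreBpfMigrationTargetType::Builtin,
//     )?;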
+ let total_data_size = program_account.data().len(); + + Ok(Self { + program_address: *program_address, + program_account, + program_data_address, + total_data_size, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::bank::{tests::create_simple_test_bank, ApplyFeatureActivationsCaller}, + solana_sdk::{ + account::Account, + bpf_loader_upgradeable::{UpgradeableLoaderState, ID as BPF_LOADER_UPGRADEABLE_ID}, + feature, feature_set, + }, + test_case::test_case, + }; + + fn store_account( + bank: &Bank, + address: &Pubkey, + data: &T, + executable: bool, + owner: &Pubkey, + ) { + let data = bincode::serialize(data).unwrap(); + let data_len = data.len(); + let lamports = bank.get_minimum_balance_for_rent_exemption(data_len); + let account = AccountSharedData::from(Account { + data, + executable, + lamports, + owner: *owner, + ..Account::default() + }); + bank.store_account_and_update_capitalization(address, &account); + } + + #[test_case(solana_sdk::address_lookup_table::program::id(), None)] + #[test_case(solana_sdk::bpf_loader::id(), None)] + #[test_case(solana_sdk::bpf_loader_deprecated::id(), None)] + #[test_case(solana_sdk::bpf_loader_upgradeable::id(), None)] + #[test_case(solana_sdk::compute_budget::id(), None)] + #[test_case(solana_config_program::id(), None)] + #[test_case(solana_stake_program::id(), None)] + #[test_case(solana_system_program::id(), None)] + #[test_case(solana_vote_program::id(), None)] + #[test_case( + solana_sdk::loader_v4::id(), + Some(feature_set::enable_program_runtime_v2_and_loader_v4::id()) + )] + #[test_case( + solana_zk_token_sdk::zk_token_proof_program::id(), + Some(feature_set::zk_token_sdk_enabled::id()) + )] + fn test_target_program_builtin(program_address: Pubkey, activation_feature: Option) { + let migration_target = CoreBpfMigrationTargetType::Builtin; + let mut bank = create_simple_test_bank(0); + + if let Some(feature_id) = activation_feature { + // Activate the feature to enable the built-in program + bank.store_account( + &feature_id, + &feature::create_account( + &feature::Feature { activated_at: None }, + bank.get_minimum_balance_for_rent_exemption(feature::Feature::size_of()), + ), + ); + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + } + + let program_account = bank.get_account_with_fixed_root(&program_address).unwrap(); + let program_data_address = get_program_data_address(&program_address); + + // Success + let builtin_config = + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); + assert_eq!(builtin_config.program_address, program_address); + assert_eq!(builtin_config.program_account, program_account); + assert_eq!(builtin_config.program_data_address, program_data_address); + assert_eq!(builtin_config.total_data_size, program_account.data().len()); + + // Fail if the program account is not owned by the native loader + store_account( + &bank, + &program_address, + &String::from("some built-in program"), + true, + &Pubkey::new_unique(), // Not the native loader + ); + assert_eq!( + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) + .unwrap_err(), + CoreBpfMigrationError::IncorrectOwner(program_address) + ); + + // Fail if the program data account exists + store_account( + &bank, + &program_address, + &program_account.data(), + program_account.executable(), + program_account.owner(), + ); + store_account( + &bank, + &program_data_address, + &UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address: 
Some(Pubkey::new_unique()), + }, + false, + &BPF_LOADER_UPGRADEABLE_ID, + ); + assert_eq!( + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) + .unwrap_err(), + CoreBpfMigrationError::ProgramHasDataAccount(program_address) + ); + + // Fail if the program account does not exist + bank.store_account_and_update_capitalization( + &program_address, + &AccountSharedData::default(), + ); + assert_eq!( + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) + .unwrap_err(), + CoreBpfMigrationError::AccountNotFound(program_address) + ); + } + + #[test_case(solana_sdk::feature::id())] + #[test_case(solana_sdk::native_loader::id())] + fn test_target_program_stateless_builtin(program_address: Pubkey) { + let migration_target = CoreBpfMigrationTargetType::Stateless; + let bank = create_simple_test_bank(0); + + let program_account = AccountSharedData::default(); + let program_data_address = get_program_data_address(&program_address); + + // Success + let builtin_config = + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); + assert_eq!(builtin_config.program_address, program_address); + assert_eq!(builtin_config.program_account, program_account); + assert_eq!(builtin_config.program_data_address, program_data_address); + assert_eq!(builtin_config.total_data_size, program_account.data().len()); + + // Fail if the program data account exists + store_account( + &bank, + &program_data_address, + &UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address: Some(Pubkey::new_unique()), + }, + false, + &BPF_LOADER_UPGRADEABLE_ID, + ); + assert_eq!( + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) + .unwrap_err(), + CoreBpfMigrationError::ProgramHasDataAccount(program_address) + ); + + // Fail if the program account exists + store_account( + &bank, + &program_address, + &String::from("some built-in program"), + true, + &NATIVE_LOADER_ID, + ); + assert_eq!( + TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) + .unwrap_err(), + CoreBpfMigrationError::AccountExists(program_address) + ); + } +} diff --git a/runtime/src/bank/builtins/mod.rs b/runtime/src/bank/builtins/mod.rs index 4e8574b7f9144c..d9f9f573144c7a 100644 --- a/runtime/src/bank/builtins/mod.rs +++ b/runtime/src/bank/builtins/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod core_bpf_migration; pub mod prototypes; pub use prototypes::{BuiltinPrototype, StatelessBuiltinPrototype}; From 2ee606da4fd42c8aa35e8588b65fb1bdf47810bb Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sat, 23 Mar 2024 00:48:52 +0800 Subject: [PATCH 047/153] Add `--with-compute-unit-price` to cli program deploy commands (#364) * add set compute units arg for program deploy * update master changes * remove duplicates * fixes and tests * remove extra lines * feedback * Use simulation to determine compute units consumed * feedback --------- Co-authored-by: NagaprasadVr --- Cargo.lock | 1 + cli/Cargo.toml | 1 + cli/src/program.rs | 359 ++++++++++++++++++++++++++---------- cli/tests/program.rs | 226 ++++++++++++++++++++++- transaction-dos/src/main.rs | 1 + 5 files changed, 485 insertions(+), 103 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ef0fffe4a4477..6a2abab7f46afc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5781,6 +5781,7 @@ dependencies = [ "solana-program-runtime", "solana-pubsub-client", "solana-remote-wallet", + "solana-rpc", "solana-rpc-client", "solana-rpc-client-api", "solana-rpc-client-nonce-utils", diff --git 
a/cli/Cargo.toml b/cli/Cargo.toml index b9170ac79ab07c..b5e444ba81a00b 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -56,6 +56,7 @@ tiny-bip39 = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } +solana-rpc = { workspace = true } solana-streamer = { workspace = true } solana-test-validator = { workspace = true } tempfile = { workspace = true } diff --git a/cli/src/program.rs b/cli/src/program.rs index c35871868f0f04..0aec785fa445ea 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -13,6 +13,7 @@ use { solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_clap_utils::{ self, + compute_unit_price::compute_unit_price_arg, fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, hidden_unless_forced, input_parsers::*, @@ -28,12 +29,16 @@ use { }, solana_client::{ connection_cache::ConnectionCache, + rpc_config::RpcSimulateTransactionConfig, send_and_confirm_transactions_in_parallel::{ send_and_confirm_transactions_in_parallel_blocking, SendAndConfirmConfig, }, tpu_client::{TpuClient, TpuClientConfig}, }, - solana_program_runtime::{compute_budget::ComputeBudget, invoke_context::InvokeContext}, + solana_program_runtime::{ + compute_budget::ComputeBudget, compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT, + invoke_context::InvokeContext, + }, solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, @@ -46,8 +51,10 @@ use { solana_sdk::{ account::Account, account_utils::StateMut, + borsh1::try_from_slice_unchecked, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + compute_budget::{self, ComputeBudgetInstruction}, feature_set::FeatureSet, instruction::{Instruction, InstructionError}, loader_instruction, @@ -90,6 +97,7 @@ pub enum ProgramCliCommand { max_len: Option, allow_excessive_balance: bool, skip_fee_check: bool, + compute_unit_price: Option, }, Upgrade { fee_payer_signer_index: SignerIndex, @@ -108,6 +116,7 @@ pub enum ProgramCliCommand { buffer_authority_signer_index: SignerIndex, max_len: Option, skip_fee_check: bool, + compute_unit_price: Option, }, SetBufferAuthority { buffer_pubkey: Pubkey, @@ -236,7 +245,8 @@ impl ProgramSubCommands for App<'_, '_> { "Use the designated program id even if the account already \ holds a large balance of SOL", ), - ), + ) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("upgrade") @@ -308,7 +318,8 @@ impl ProgramSubCommands for App<'_, '_> { "Maximum length of the upgradeable program \ [default: the length of the original deployed program]", ), - ), + ) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("set-buffer-authority") @@ -601,6 +612,8 @@ pub fn parse_program_subcommand( let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + let compute_unit_price = value_of(matches, "compute_unit_price"); + CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location, @@ -616,6 +629,7 @@ pub fn parse_program_subcommand( max_len, allow_excessive_balance: matches.is_present("allow_excessive_balance"), skip_fee_check, + compute_unit_price, }), signers: signer_info.signers, } @@ -687,6 +701,8 @@ pub fn parse_program_subcommand( let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + let compute_unit_price = value_of(matches, "compute_unit_price"); + CliCommandInfo { command: 
CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: matches.value_of("program_location").unwrap().to_string(), @@ -698,6 +714,7 @@ pub fn parse_program_subcommand( .unwrap(), max_len, skip_fee_check, + compute_unit_price, }), signers: signer_info.signers, } @@ -899,6 +916,7 @@ pub fn process_program_subcommand( max_len, allow_excessive_balance, skip_fee_check, + compute_unit_price, } => process_program_deploy( rpc_client, config, @@ -913,6 +931,7 @@ pub fn process_program_subcommand( *max_len, *allow_excessive_balance, *skip_fee_check, + *compute_unit_price, ), ProgramCliCommand::Upgrade { fee_payer_signer_index, @@ -941,6 +960,7 @@ pub fn process_program_subcommand( buffer_authority_signer_index, max_len, skip_fee_check, + compute_unit_price, } => process_write_buffer( rpc_client, config, @@ -951,6 +971,7 @@ pub fn process_program_subcommand( *buffer_authority_signer_index, *max_len, *skip_fee_check, + *compute_unit_price, ), ProgramCliCommand::SetBufferAuthority { buffer_pubkey, @@ -1082,6 +1103,7 @@ fn process_program_deploy( max_len: Option, allow_excessive_balance: bool, skip_fee_check: bool, + compute_unit_price: Option, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; @@ -1221,6 +1243,7 @@ fn process_program_deploy( upgrade_authority_signer, allow_excessive_balance, skip_fee_check, + compute_unit_price, ) } else { do_process_program_upgrade( @@ -1235,6 +1258,7 @@ fn process_program_deploy( &buffer_pubkey, buffer_signer, skip_fee_check, + compute_unit_price, ) }; if result.is_ok() && is_final { @@ -1372,6 +1396,7 @@ fn process_program_upgrade( } } +#[allow(clippy::too_many_arguments)] fn process_write_buffer( rpc_client: Arc, config: &CliConfig, @@ -1382,6 +1407,7 @@ fn process_write_buffer( buffer_authority_signer_index: SignerIndex, max_len: Option, skip_fee_check: bool, + compute_unit_price: Option, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let buffer_authority = config.signers[buffer_authority_signer_index]; @@ -1447,6 +1473,7 @@ fn process_write_buffer( buffer_authority, true, skip_fee_check, + compute_unit_price, ); if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() { report_ephemeral_mnemonic(words, mnemonic); @@ -2200,11 +2227,12 @@ fn do_process_program_write_and_deploy( buffer_authority_signer: &dyn Signer, allow_excessive_balance: bool, skip_fee_check: bool, + compute_unit_price: Option, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; // Initialize buffer account or complete if already partially initialized - let (initial_instructions, balance_needed, buffer_program_data) = if let Some(mut account) = + let (mut initial_instructions, balance_needed, buffer_program_data) = if let Some(mut account) = rpc_client .get_account_with_commitment(buffer_pubkey, config.commitment)? 
.value @@ -2251,7 +2279,9 @@ fn do_process_program_write_and_deploy( vec![0; program_len], ) }; + let initial_message = if !initial_instructions.is_empty() { + set_compute_budget_ixs_if_needed(&mut initial_instructions, compute_unit_price); Some(Message::new_with_blockhash( &initial_instructions, Some(&fee_payer_signer.pubkey()), @@ -2273,7 +2303,10 @@ fn do_process_program_write_and_deploy( } else { loader_instruction::write(buffer_pubkey, loader_id, offset, bytes) }; - Message::new_with_blockhash(&[instruction], Some(&fee_payer_signer.pubkey()), &blockhash) + + let mut instructions = vec![instruction]; + set_compute_budget_ixs_if_needed(&mut instructions, compute_unit_price); + Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) }; let mut write_messages = vec![]; @@ -2288,26 +2321,23 @@ fn do_process_program_write_and_deploy( // Create and add final message let final_message = if let Some(program_signers) = program_signers { let message = if loader_id == &bpf_loader_upgradeable::id() { - Message::new_with_blockhash( - &bpf_loader_upgradeable::deploy_with_max_program_len( - &fee_payer_signer.pubkey(), - &program_signers[0].pubkey(), - buffer_pubkey, - &program_signers[1].pubkey(), - rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_program(), - )?, - program_data_max_len, + let mut instructions = bpf_loader_upgradeable::deploy_with_max_program_len( + &fee_payer_signer.pubkey(), + &program_signers[0].pubkey(), + buffer_pubkey, + &program_signers[1].pubkey(), + rpc_client.get_minimum_balance_for_rent_exemption( + UpgradeableLoaderState::size_of_program(), )?, - Some(&fee_payer_signer.pubkey()), - &blockhash, - ) + program_data_max_len, + )?; + + set_compute_budget_ixs_if_needed(&mut instructions, compute_unit_price); + Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) } else { - Message::new_with_blockhash( - &[loader_instruction::finalize(buffer_pubkey, loader_id)], - Some(&fee_payer_signer.pubkey()), - &blockhash, - ) + let mut instructions = vec![loader_instruction::finalize(buffer_pubkey, loader_id)]; + set_compute_budget_ixs_if_needed(&mut instructions, compute_unit_price); + Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) }; Some(message) } else { @@ -2365,93 +2395,97 @@ fn do_process_program_upgrade( buffer_pubkey: &Pubkey, buffer_signer: Option<&dyn Signer>, skip_fee_check: bool, + compute_unit_price: Option, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; - let (initial_message, write_messages, balance_needed) = - if let Some(buffer_signer) = buffer_signer { - // Check Buffer account to see if partial initialization has occurred - let (initial_instructions, balance_needed, buffer_program_data) = - if let Some(mut account) = rpc_client - .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? - .value - { - let (ixs, balance_needed) = complete_partial_program_init( - &bpf_loader_upgradeable::id(), + let (initial_message, write_messages, balance_needed) = if let Some(buffer_signer) = + buffer_signer + { + // Check Buffer account to see if partial initialization has occurred + let (mut initial_instructions, balance_needed, buffer_program_data) = + if let Some(mut account) = rpc_client + .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? 
+ .value + { + let (ixs, balance_needed) = complete_partial_program_init( + &bpf_loader_upgradeable::id(), + &fee_payer_signer.pubkey(), + &buffer_signer.pubkey(), + &account, + UpgradeableLoaderState::size_of_buffer(program_len), + min_rent_exempt_program_data_balance, + true, + )?; + let buffer_program_data = account + .data + .split_off(UpgradeableLoaderState::size_of_buffer_metadata()); + (ixs, balance_needed, buffer_program_data) + } else { + ( + bpf_loader_upgradeable::create_buffer( &fee_payer_signer.pubkey(), - &buffer_signer.pubkey(), - &account, - UpgradeableLoaderState::size_of_buffer(program_len), - min_rent_exempt_program_data_balance, - true, - )?; - let buffer_program_data = account - .data - .split_off(UpgradeableLoaderState::size_of_buffer_metadata()); - (ixs, balance_needed, buffer_program_data) - } else { - ( - bpf_loader_upgradeable::create_buffer( - &fee_payer_signer.pubkey(), - buffer_pubkey, - &upgrade_authority.pubkey(), - min_rent_exempt_program_data_balance, - program_len, - )?, + buffer_pubkey, + &upgrade_authority.pubkey(), min_rent_exempt_program_data_balance, - vec![0; program_len], - ) - }; - - let initial_message = if !initial_instructions.is_empty() { - Some(Message::new_with_blockhash( - &initial_instructions, - Some(&fee_payer_signer.pubkey()), - &blockhash, - )) - } else { - None - }; - - let buffer_signer_pubkey = buffer_signer.pubkey(); - let upgrade_authority_pubkey = upgrade_authority.pubkey(); - let create_msg = |offset: u32, bytes: Vec| { - let instruction = bpf_loader_upgradeable::write( - &buffer_signer_pubkey, - &upgrade_authority_pubkey, - offset, - bytes, - ); - Message::new_with_blockhash( - &[instruction], - Some(&fee_payer_signer.pubkey()), - &blockhash, + program_len, + )?, + min_rent_exempt_program_data_balance, + vec![0; program_len], ) }; - // Create and add write messages - let mut write_messages = vec![]; - let chunk_size = calculate_max_chunk_size(&create_msg); - for (chunk, i) in program_data.chunks(chunk_size).zip(0..) { - let offset = i * chunk_size; - if chunk != &buffer_program_data[offset..offset + chunk.len()] { - write_messages.push(create_msg(offset as u32, chunk.to_vec())); - } - } - - (initial_message, write_messages, balance_needed) + let initial_message = if !initial_instructions.is_empty() { + set_compute_budget_ixs_if_needed(&mut initial_instructions, compute_unit_price); + Some(Message::new_with_blockhash( + &initial_instructions, + Some(&fee_payer_signer.pubkey()), + &blockhash, + )) } else { - (None, vec![], 0) + None + }; + + let buffer_signer_pubkey = buffer_signer.pubkey(); + let upgrade_authority_pubkey = upgrade_authority.pubkey(); + let create_msg = |offset: u32, bytes: Vec| { + let mut instructions = vec![bpf_loader_upgradeable::write( + &buffer_signer_pubkey, + &upgrade_authority_pubkey, + offset, + bytes, + )]; + + set_compute_budget_ixs_if_needed(&mut instructions, compute_unit_price); + Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) }; + // Create and add write messages + let mut write_messages = vec![]; + let chunk_size = calculate_max_chunk_size(&create_msg); + for (chunk, i) in program_data.chunks(chunk_size).zip(0..) 
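// This chunk loop is resume-friendly: it only queues a chunk whose bytes
// differ from what the buffer account already holds, so an interrupted deploy
// resumes roughly where it left off instead of re-sending the whole program.
// As a hypothetical illustration, a 900_000-byte program split into
// ~1_000-byte chunks costs ~900 write messages on a fresh deploy, but only
// the missing tail after a retry.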
{
+            let offset = i * chunk_size;
+            if chunk != &buffer_program_data[offset..offset + chunk.len()] {
+                write_messages.push(create_msg(offset as u32, chunk.to_vec()));
+            }
+        }
+
+        (initial_message, write_messages, balance_needed)
+    } else {
+        (None, vec![], 0)
+    };

     // Create and add final message
+    let mut final_instructions = vec![bpf_loader_upgradeable::upgrade(
+        program_id,
+        buffer_pubkey,
+        &upgrade_authority.pubkey(),
+        &fee_payer_signer.pubkey(),
+    )];
+
+    set_compute_budget_ixs_if_needed(&mut final_instructions, compute_unit_price);
     let final_message = Message::new_with_blockhash(
-        &[bpf_loader_upgradeable::upgrade(
-            program_id,
-            buffer_pubkey,
-            &upgrade_authority.pubkey(),
-            &fee_payer_signer.pubkey(),
-        )],
+        &final_instructions,
         Some(&fee_payer_signer.pubkey()),
         &blockhash,
     );
@@ -2598,6 +2632,70 @@ fn check_payer(
     Ok(())
 }

+// This enum is equivalent to an Option but was added to self-document
+// the ok variants and has the benefit of not forcing the caller to use
+// the result if they don't care about it.
+enum UpdateComputeUnitLimitResult {
+    UpdatedInstructionIndex(usize),
+    NoInstructionFound,
+}
+
+// Returns the index of the compute unit limit instruction
+fn simulate_and_update_compute_unit_limit(
+    rpc_client: &RpcClient,
+    transaction: &mut Transaction,
+) -> Result<UpdateComputeUnitLimitResult, Box<dyn std::error::Error>> {
+    let Some(compute_unit_limit_ix_index) = transaction
+        .message
+        .instructions
+        .iter()
+        .enumerate()
+        .find_map(|(ix_index, instruction)| {
+            let ix_program_id = transaction.message.program_id(ix_index)?;
+            if ix_program_id != &compute_budget::id() {
+                return None;
+            }
+
+            matches!(
+                try_from_slice_unchecked(&instruction.data),
+                Ok(ComputeBudgetInstruction::SetComputeUnitLimit(_))
+            )
+            .then_some(ix_index)
+        })
+    else {
+        return Ok(UpdateComputeUnitLimitResult::NoInstructionFound);
+    };
+
+    let simulate_result = rpc_client
+        .simulate_transaction_with_config(
+            transaction,
+            RpcSimulateTransactionConfig {
+                replace_recent_blockhash: true,
+                commitment: Some(rpc_client.commitment()),
+                ..RpcSimulateTransactionConfig::default()
+            },
+        )?
+        .value;
+
+    // Bail if the simulated transaction failed
+    if let Some(err) = simulate_result.err {
+        return Err(err.into());
+    }
+
+    let units_consumed = simulate_result
+        .units_consumed
+        .expect("compute units unavailable");
+
+    // Overwrite the compute unit limit instruction with the actual units consumed
+    let compute_unit_limit = u32::try_from(units_consumed)?;
+    transaction.message.instructions[compute_unit_limit_ix_index].data =
+        ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit).data;
+
+    Ok(UpdateComputeUnitLimitResult::UpdatedInstructionIndex(
+        compute_unit_limit_ix_index,
+    ))
+}
+
 fn send_deploy_messages(
     rpc_client: Arc<RpcClient>,
     config: &CliConfig,
@@ -2612,9 +2710,12 @@
     if let Some(message) = initial_message {
         if let Some(initial_signer) = initial_signer {
             trace!("Preparing the required accounts");
-            let blockhash = rpc_client.get_latest_blockhash()?;

             let mut initial_transaction = Transaction::new_unsigned(message.clone());
+            simulate_and_update_compute_unit_limit(&rpc_client, &mut initial_transaction)?;
+
+            let blockhash = rpc_client.get_latest_blockhash()?;
+
             // Most of the initial_transaction combinations require both the fee-payer and new program
             // account to sign the transaction. One (transfer) only requires the fee-payer signature.
// This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an @@ -2635,6 +2736,29 @@ fn send_deploy_messages( if !write_messages.is_empty() { if let Some(write_signer) = write_signer { trace!("Writing program data"); + + // Simulate the first write message to get the number of compute units + // consumed and then reuse that value as the compute unit limit for all + // write messages. + let mut write_messages = write_messages.to_vec(); + { + let mut transaction = Transaction::new_unsigned(write_messages[0].clone()); + if let UpdateComputeUnitLimitResult::UpdatedInstructionIndex(ix_index) = + simulate_and_update_compute_unit_limit(&rpc_client, &mut transaction)? + { + for msg in &mut write_messages { + // Write messages are all assumed to be identical except + // the program data being written. But just in case that + // assumption is broken, assert that we are only ever + // changing the instruction data for a compute budget + // instruction. + assert_eq!(msg.program_id(ix_index), Some(&compute_budget::id())); + msg.instructions[ix_index].data = + transaction.message.instructions[ix_index].data.clone(); + } + } + } + let connection_cache = if config.use_quic { ConnectionCache::new_quic("connection_cache_cli_program_quic", 1) } else { @@ -2648,7 +2772,7 @@ fn send_deploy_messages( cache, )? .send_and_confirm_messages_with_spinner( - write_messages, + &write_messages, &[fee_payer_signer, write_signer], ), ConnectionCache::Quic(cache) => { @@ -2666,7 +2790,7 @@ fn send_deploy_messages( send_and_confirm_transactions_in_parallel_blocking( rpc_client.clone(), Some(tpu_client), - write_messages, + &write_messages, &[fee_payer_signer, write_signer], SendAndConfirmConfig { resign_txs_count: Some(5), @@ -2694,9 +2818,11 @@ fn send_deploy_messages( if let Some(message) = final_message { if let Some(final_signers) = final_signers { trace!("Deploying program"); - let blockhash = rpc_client.get_latest_blockhash()?; let mut final_tx = Transaction::new_unsigned(message.clone()); + simulate_and_update_compute_unit_limit(&rpc_client, &mut final_tx)?; + + let blockhash = rpc_client.get_latest_blockhash()?; let mut signers = final_signers.to_vec(); signers.push(fee_payer_signer); final_tx.try_sign(&signers, blockhash)?; @@ -2740,6 +2866,24 @@ fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) { eprintln!("[BUFFER_ACCOUNT_ADDRESS] argument to `solana program close`.\n{divider}"); } +fn set_compute_budget_ixs_if_needed(ixs: &mut Vec, compute_unit_price: Option) { + let Some(compute_unit_price) = compute_unit_price else { + return; + }; + + // Default to the max compute unit limit because later transactions will be + // simulated to get the exact compute units consumed. 
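// Two-phase budgeting: the instructions inserted here request the maximum
// limit because the true cost is unknown until simulation, and the simulation
// itself must run under a limit at least as high as the eventual cost.
// `simulate_and_update_compute_unit_limit` then shrinks the limit to the
// units actually consumed, e.g. (hypothetical numbers):
//
//     // requested up front: set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT)
//     // simulation reports units_consumed == 2_720
//     // rewritten in place: set_compute_unit_limit(2_720)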
+ ixs.insert( + 0, + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ); + + ixs.insert( + 0, + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), + ); +} + #[cfg(test)] mod tests { use { @@ -2798,6 +2942,7 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -2826,6 +2971,7 @@ mod tests { max_len: Some(42), allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -2856,6 +3002,7 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -2888,6 +3035,7 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -2919,6 +3067,7 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -2953,6 +3102,7 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -2983,6 +3133,7 @@ mod tests { max_len: None, skip_fee_check: false, allow_excessive_balance: false, + compute_unit_price: None }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3017,6 +3168,7 @@ mod tests { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, + compute_unit_price: None }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3042,6 +3194,7 @@ mod tests { buffer_authority_signer_index: 0, max_len: Some(42), skip_fee_check: false, + compute_unit_price: None }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3070,6 +3223,7 @@ mod tests { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, + compute_unit_price: None }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3101,6 +3255,7 @@ mod tests { buffer_authority_signer_index: 1, max_len: None, skip_fee_check: false, + compute_unit_price: None }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3137,6 +3292,7 @@ mod tests { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, + compute_unit_price: None }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3695,6 +3851,7 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, + compute_unit_price: None, }), signers: vec![&default_keypair], output_format: OutputFormat::JsonCompact, diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 6eb281d65b9e35..240a01567a409e 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -3,6 +3,7 @@ #![allow(clippy::items_after_test_module)] use { + assert_matches::assert_matches, serde_json::Value, solana_cli::{ cli::{process_command, CliCommand, CliConfig}, @@ -10,18 +11,29 @@ use { test_utils::wait_n_slots, }, solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, + solana_client::{ + rpc_client::GetConfirmedSignaturesForAddress2Config, rpc_config::RpcTransactionConfig, + }, solana_faucet::faucet::run_local_faucet, + solana_rpc::rpc::JsonRpcConfig, 
solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ account_utils::StateMut, + borsh1::try_from_slice_unchecked, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, + compute_budget::{self, ComputeBudgetInstruction}, + fee_calculator::FeeRateGovernor, pubkey::Pubkey, - signature::{Keypair, NullSigner, Signer}, + rent::Rent, + signature::{Keypair, NullSigner, Signature, Signer}, + system_program, + transaction::Transaction, }, solana_streamer::socket::SocketAddrSpace, - solana_test_validator::TestValidator, + solana_test_validator::{TestValidator, TestValidatorGenesis}, + solana_transaction_status::UiTransactionEncoding, std::{ env, fs::File, @@ -85,6 +97,7 @@ fn test_cli_program_deploy_non_upgradeable() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -131,6 +144,7 @@ fn test_cli_program_deploy_non_upgradeable() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); let account1 = rpc_client @@ -186,6 +200,7 @@ fn test_cli_program_deploy_non_upgradeable() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); let err = process_command(&config).unwrap_err(); assert_eq!( @@ -209,6 +224,7 @@ fn test_cli_program_deploy_non_upgradeable() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap_err(); } @@ -270,6 +286,7 @@ fn test_cli_program_deploy_no_authority() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -297,6 +314,7 @@ fn test_cli_program_deploy_no_authority() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap_err(); } @@ -359,6 +377,7 @@ fn test_cli_program_deploy_with_authority() { is_final: false, max_len: Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -408,6 +427,7 @@ fn test_cli_program_deploy_with_authority() { is_final: false, max_len: Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -451,6 +471,7 @@ fn test_cli_program_deploy_with_authority() { is_final: false, max_len: Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); @@ -526,6 +547,7 @@ fn test_cli_program_deploy_with_authority() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); @@ -605,6 +627,7 @@ fn test_cli_program_deploy_with_authority() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap_err(); @@ -622,6 +645,7 @@ fn test_cli_program_deploy_with_authority() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); let response = process_command(&config); let json: Value = 
serde_json::from_str(&response.unwrap()).unwrap(); @@ -726,6 +750,7 @@ fn test_cli_program_close_program() { is_final: false, max_len: Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -836,6 +861,7 @@ fn test_cli_program_extend_program() { is_final: false, max_len: None, // Use None to check that it defaults to the max length skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -883,6 +909,7 @@ fn test_cli_program_extend_program() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap_err(); @@ -915,6 +942,7 @@ fn test_cli_program_extend_program() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); } @@ -979,6 +1007,7 @@ fn test_cli_program_write_buffer() { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -1015,6 +1044,7 @@ fn test_cli_program_write_buffer() { buffer_authority_signer_index: 0, max_len: Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1078,6 +1108,7 @@ fn test_cli_program_write_buffer() { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1117,6 +1148,7 @@ fn test_cli_program_write_buffer() { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1192,6 +1224,7 @@ fn test_cli_program_write_buffer() { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -1234,6 +1267,7 @@ fn test_cli_program_write_buffer() { buffer_authority_signer_index: 0, max_len: None, //Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); config.signers = vec![&keypair, &buffer_keypair]; @@ -1249,6 +1283,7 @@ fn test_cli_program_write_buffer() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let error = process_command(&config).unwrap_err(); @@ -1308,6 +1343,7 @@ fn test_cli_program_set_buffer_authority() { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1360,6 +1396,7 @@ fn test_cli_program_set_buffer_authority() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap_err(); @@ -1405,6 +1442,7 @@ fn test_cli_program_set_buffer_authority() { is_final: false, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; 
process_command(&config).unwrap(); @@ -1461,6 +1499,7 @@ fn test_cli_program_mismatch_buffer_authority() { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1485,6 +1524,7 @@ fn test_cli_program_mismatch_buffer_authority() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap_err(); @@ -1502,6 +1542,7 @@ fn test_cli_program_mismatch_buffer_authority() { is_final: true, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); } @@ -1585,6 +1626,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: is_final: false, max_len: Some(max_program_data_len), // allows for larger program size with future upgrades skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1752,6 +1794,7 @@ fn test_cli_program_show() { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); @@ -1813,6 +1856,7 @@ fn test_cli_program_show() { is_final: false, max_len: Some(max_len), skip_fee_check: false, + compute_unit_price: None, }); config.output_format = OutputFormat::JsonCompact; let min_slot = rpc_client.get_slot().unwrap(); @@ -1941,6 +1985,7 @@ fn test_cli_program_dump() { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(&config).unwrap(); @@ -1984,6 +2029,7 @@ fn create_buffer_with_offline_authority<'a>( buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, + compute_unit_price: None, }); process_command(config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_signer.pubkey()).unwrap(); @@ -2009,3 +2055,179 @@ fn create_buffer_with_offline_authority<'a>( panic!("not a buffer account"); } } + +#[allow(clippy::assertions_on_constants)] +fn cli_program_deploy_with_args(compute_unit_price: Option) { + let mut noop_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + noop_path.push("tests"); + noop_path.push("fixtures"); + noop_path.push("noop"); + noop_path.set_extension("so"); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = TestValidatorGenesis::default() + .fee_rate_governor(FeeRateGovernor::new(0, 0)) + .rent(Rent { + lamports_per_byte_year: 1, + exemption_threshold: 1.0, + ..Rent::default() + }) + .rpc_config(JsonRpcConfig { + enable_rpc_transaction_history: true, + faucet_addr: Some(faucet_addr), + ..JsonRpcConfig::default_for_test() + }) + .start_with_mint_address(mint_pubkey, SocketAddrSpace::Unspecified) + .expect("validator start failed"); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::confirmed()); + + let mut file = File::open(noop_path.to_str().unwrap()).unwrap(); + let mut program_data = Vec::new(); + file.read_to_end(&mut program_data).unwrap(); + let max_len = program_data.len(); + let minimum_balance_for_programdata = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_programdata( + max_len, + )) + .unwrap(); + let minimum_balance_for_program = rpc_client + 
.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) + .unwrap(); + let upgrade_authority = Keypair::new(); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_programdata + minimum_balance_for_program, + }; + process_command(&config).unwrap(); + + // Deploy the upgradeable program with specified program_id + let program_keypair = Keypair::new(); + config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: Some(2), + program_pubkey: Some(program_keypair.pubkey()), + buffer_signer_index: None, + buffer_pubkey: None, + allow_excessive_balance: false, + upgrade_authority_signer_index: 1, + is_final: false, + max_len: Some(max_len), + skip_fee_check: false, + compute_unit_price, + }); + config.output_format = OutputFormat::JsonCompact; + let response = process_command(&config); + let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); + let program_pubkey_str = json + .as_object() + .unwrap() + .get("programId") + .unwrap() + .as_str() + .unwrap(); + assert_eq!( + program_keypair.pubkey(), + Pubkey::from_str(program_pubkey_str).unwrap() + ); + let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); + assert_eq!(program_account.lamports, minimum_balance_for_program); + assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); + assert!(program_account.executable); + let signature_statuses = rpc_client + .get_signatures_for_address_with_config( + &keypair.pubkey(), + GetConfirmedSignaturesForAddress2Config { + commitment: Some(CommitmentConfig::confirmed()), + ..GetConfirmedSignaturesForAddress2Config::default() + }, + ) + .unwrap(); + let signatures: Vec<_> = signature_statuses + .into_iter() + .rev() + .map(|status| Signature::from_str(&status.signature).unwrap()) + .collect(); + + fn fetch_and_decode_transaction(rpc_client: &RpcClient, signature: &Signature) -> Transaction { + rpc_client + .get_transaction_with_config( + signature, + RpcTransactionConfig { + encoding: Some(UiTransactionEncoding::Base64), + commitment: Some(CommitmentConfig::confirmed()), + ..RpcTransactionConfig::default() + }, + ) + .unwrap() + .transaction + .transaction + .decode() + .unwrap() + .into_legacy_transaction() + .unwrap() + } + + assert!(signatures.len() >= 4); + let initial_tx = fetch_and_decode_transaction(&rpc_client, &signatures[1]); + let write_tx = fetch_and_decode_transaction(&rpc_client, &signatures[2]); + let final_tx = fetch_and_decode_transaction(&rpc_client, signatures.last().unwrap()); + + if let Some(compute_unit_price) = compute_unit_price { + for tx in [&initial_tx, &write_tx, &final_tx] { + for i in [0, 1] { + assert_eq!( + tx.message.instructions[i].program_id(&tx.message.account_keys), + &compute_budget::id() + ); + } + + assert_matches!( + try_from_slice_unchecked(&tx.message.instructions[0].data), + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(price)) if price == compute_unit_price + ); + } + + assert_matches!( + try_from_slice_unchecked(&initial_tx.message.instructions[1].data), + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(2820)) + ); + assert_matches!( + 
try_from_slice_unchecked(&write_tx.message.instructions[1].data), + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(2670)) + ); + assert_matches!( + try_from_slice_unchecked(&final_tx.message.instructions[1].data), + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(2970)) + ); + } else { + assert_eq!( + initial_tx.message.instructions[0].program_id(&initial_tx.message.account_keys), + &system_program::id() + ); + assert_eq!( + write_tx.message.instructions[0].program_id(&write_tx.message.account_keys), + &bpf_loader_upgradeable::id() + ); + assert_eq!( + final_tx.message.instructions[0].program_id(&final_tx.message.account_keys), + &system_program::id() + ); + } +} + +#[test] +fn test_cli_program_deploy_with_compute_unit_price() { + cli_program_deploy_with_args(Some(1000)); + cli_program_deploy_with_args(None); +} diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index dedbcdab27ef79..3cf835c578382e 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -247,6 +247,7 @@ fn run_transactions_dos( upgrade_authority_signer_index: 0, is_final: true, max_len: None, + compute_unit_price: None, skip_fee_check: true, // skip_fee_check }); From 5cacca99b4bee72cdc43e0efe5d85e72a6f7485c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 22 Mar 2024 12:56:54 -0400 Subject: [PATCH 048/153] Fixes help text formatting (#385) --- validator/src/cli.rs | 59 ++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 9d041877e3054a..7fc525477ef41e 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -233,7 +233,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(false) .help( "Enable historical transaction info over JSON RPC, including the \ - 'getConfirmedBlock' API. This will cause an increase in disk usage and IOPS", + 'getConfirmedBlock' API. This will cause an increase in disk usage and IOPS", ), ) .arg( @@ -675,7 +675,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .possible_values(&["none", "lz4", "snappy", "zlib"]) .default_value(&default_args.rocksdb_ledger_compression) .help( - "The compression algorithm that is used to compress transaction status data. \ + "The compression algorithm that is used to compress transaction status data. \ Turning on compression can save ~10% of the ledger size.", ), ) @@ -762,9 +762,9 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .hidden(hidden_unless_forced()) .long("no-wait-for-vote-to-start-leader") .help( - "If the validator starts up with no ledger, it will wait to start block - production until it sees a vote land in a rooted slot. This prevents - double signing. Turn off to risk double signing a block.", + "If the validator starts up with no ledger, it will wait to start block \ + production until it sees a vote land in a rooted slot. This prevents \ + double signing. Turn off to risk double signing a block.", ), ) .arg( @@ -841,7 +841,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .multiple(true) .takes_value(true) .help( - "A list of validators to gossip with. If specified, gossip will not \ + "A list of validators to gossip with. If specified, gossip will not \ push/pull from from validators outside this set. 
[default: all validators]", ), ) @@ -1532,25 +1532,24 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required(false) .conflicts_with("wait_for_supermajority") .help( - "When specified, the validator will enter Wen Restart mode which - pauses normal activity. Validators in this mode will gossip their last - vote to reach consensus on a safe restart slot and repair all blocks - on the selected fork. The safe slot will be a descendant of the latest - optimistically confirmed slot to ensure we do not roll back any - optimistically confirmed slots. - - The progress in this mode will be saved in the file location provided. - If consensus is reached, the validator will automatically exit and then - execute wait_for_supermajority logic so the cluster will resume execution. - The progress file will be kept around for future debugging. - - After the cluster resumes normal operation, the validator arguments can - be adjusted to remove --wen_restart and update expected_shred_version to - the new shred_version agreed on in the consensus. - - If wen_restart fails, refer to the progress file (in proto3 format) for - further debugging. - ", + "When specified, the validator will enter Wen Restart mode which \ + pauses normal activity. Validators in this mode will gossip their last \ + vote to reach consensus on a safe restart slot and repair all blocks \ + on the selected fork. The safe slot will be a descendant of the latest \ + optimistically confirmed slot to ensure we do not roll back any \ + optimistically confirmed slots. \ + \n\n\ + The progress in this mode will be saved in the file location provided. \ + If consensus is reached, the validator will automatically exit and then \ + execute wait_for_supermajority logic so the cluster will resume execution. \ + The progress file will be kept around for future debugging. \ + \n\n\ + After the cluster resumes normal operation, the validator arguments can \ + be adjusted to remove --wen_restart and update expected_shred_version to \ + the new shred_version agreed on in the consensus. \ + \n\n\ + If wen_restart fails, refer to the progress file (in proto3 format) for \ + further debugging.", ), ) .args(&thread_args(&default_args.thread_args)) @@ -2042,8 +2041,8 @@ fn deprecated_arguments() -> Vec { .help("Enable incremental snapshots") .long_help( "Enable incremental snapshots by setting this flag. When enabled, \ - --snapshot-interval-slots will set the incremental snapshot interval. To set the - full snapshot interval, use --full-snapshot-interval-slots.", + --snapshot-interval-slots will set the incremental snapshot interval. To set the \ + full snapshot interval, use --full-snapshot-interval-slots.", )); add_arg!(Arg::with_name("minimal_rpc_api") .long("minimal-rpc-api") @@ -2054,8 +2053,8 @@ fn deprecated_arguments() -> Vec { .long("no-accounts-db-index-hashing") .help( "This is obsolete. See --accounts-db-index-hashing. \ - Disables the use of the index in hash calculation in \ - AccountsHashVerifier/Accounts Background Service.", + Disables the use of the index in hash calculation in \ + AccountsHashVerifier/Accounts Background Service.", ), usage_warning: "The accounts hash is only calculated without using the index.", ); @@ -2364,7 +2363,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .validator(is_pubkey) .takes_value(true) .help( - "Address of the mint account that will receive tokens created at genesis. 
If \ + "Address of the mint account that will receive tokens created at genesis. If \ the ledger already exists then this parameter is silently ignored \ [default: client keypair]", ), From 24fe473b466fa24e267b042ba2544580479c099c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 22 Mar 2024 13:48:46 -0400 Subject: [PATCH 049/153] clippy: Automated fixes for Rust 1.77.0 (#390) --- accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/accounts_hash.rs | 2 +- accounts-db/src/accounts_index.rs | 2 +- accounts-db/src/accounts_index/in_mem_accounts_index.rs | 4 ++-- core/src/repair/repair_service.rs | 2 +- entry/src/entry.rs | 4 ++-- program-test/src/lib.rs | 2 +- rpc-client-nonce-utils/src/nonblocking/mod.rs | 2 +- rpc/src/rpc_subscriptions.rs | 2 +- sdk/program/src/message/account_keys.rs | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 34bcdedd2c5499..d900f0a2a5cc4c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -12102,7 +12102,7 @@ pub mod tests { db.print_accounts_stats("pre"); let slots: HashSet = vec![1].into_iter().collect(); - let purge_keys = vec![(key1, slots)]; + let purge_keys = [(key1, slots)]; let _ = db.purge_keys_exact(purge_keys.iter()); let account2 = AccountSharedData::new(3, 0, &key); diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index cb75369d52d182..06ce9b49f2cd25 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1609,7 +1609,7 @@ mod tests { #[test] fn test_accountsdb_de_dup_accounts_zero_chunks() { - let vec = vec![vec![CalculateHashIntermediate { + let vec = [vec![CalculateHashIntermediate { lamports: 1, hash: AccountHash(Hash::default()), pubkey: Pubkey::default(), diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index bd57e0803846fc..04426251f79c2e 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -120,7 +120,7 @@ impl ScanConfig { /// use existing 'abort' if available, otherwise allocate one pub fn recreate_with_abort(&self) -> Self { ScanConfig { - abort: Some(self.abort.as_ref().map(Arc::clone).unwrap_or_default()), + abort: Some(self.abort.clone().unwrap_or_default()), collect_all_unsorted: self.collect_all_unsorted, } } diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 3df05ee5a28127..f3318470effba3 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -175,7 +175,7 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex RT { // SAFETY: Since we're passing the entry Arc clone to `callback`, we must // also add the entry to the in-mem cache. - self.get_internal(pubkey, |entry| (true, callback(entry.map(Arc::clone)))) + self.get_internal(pubkey, |entry| (true, callback(entry.cloned()))) } /// lookup 'pubkey' in index (in_mem or disk). 
diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 9431d7a256a22c..9a293f6a8ba746 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -872,7 +872,7 @@ impl RepairService { ServeRepair::repair_proto_to_bytes(&request_proto, &identity_keypair).unwrap(); // Prepare packet batch to send - let reqs = vec![(packet_buf, address)]; + let reqs = [(packet_buf, address)]; // Send packet batch match batch_send(repair_socket, &reqs[..]) { diff --git a/entry/src/entry.rs b/entry/src/entry.rs index af3fdca9518e83..46aad401dec9b0 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -1104,7 +1104,7 @@ mod tests { let tx1 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, zero); // Verify entry with 2 transactions - let mut e0 = vec![Entry::new(&zero, 0, vec![tx0, tx1])]; + let mut e0 = [Entry::new(&zero, 0, vec![tx0, tx1])]; assert!(e0.verify(&zero)); // Clear signature of the first transaction, see that it does not verify @@ -1124,7 +1124,7 @@ mod tests { assert!(!e0.verify(&zero)); // Pass an entry with no transactions - let e0 = vec![Entry::new(&zero, 0, vec![])]; + let e0 = [Entry::new(&zero, 0, vec![])]; assert!(e0.verify(&zero)); } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index f4fba5761d1332..8b786aa7962694 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -87,7 +87,7 @@ pub enum ProgramTestError { } thread_local! { - static INVOKE_CONTEXT: RefCell> = RefCell::new(None); + static INVOKE_CONTEXT: RefCell> = const { RefCell::new(None) }; } fn set_invoke_context(new: &mut InvokeContext) { INVOKE_CONTEXT diff --git a/rpc-client-nonce-utils/src/nonblocking/mod.rs b/rpc-client-nonce-utils/src/nonblocking/mod.rs index fceeacf6fc0d87..19ec9b6193df7c 100644 --- a/rpc-client-nonce-utils/src/nonblocking/mod.rs +++ b/rpc-client-nonce-utils/src/nonblocking/mod.rs @@ -228,7 +228,7 @@ pub fn data_from_account>( account: &T, ) -> Result { account_identity_ok(account)?; - state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone())) + state_from_account(account).and_then(|ref s| data_from_state(s).cloned()) } /// Get the nonce data from its [`State`] value. diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 39d746c48049de..f3594c67cc7a72 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -266,7 +266,7 @@ struct RpcNotifier { } thread_local! { - static RPC_NOTIFIER_BUF: RefCell> = RefCell::new(Vec::new()); + static RPC_NOTIFIER_BUF: RefCell> = const { RefCell::new(Vec::new()) }; } #[derive(Debug, Serialize)] diff --git a/sdk/program/src/message/account_keys.rs b/sdk/program/src/message/account_keys.rs index f0ab7deeef0987..6f80c3c68e6186 100644 --- a/sdk/program/src/message/account_keys.rs +++ b/sdk/program/src/message/account_keys.rs @@ -171,7 +171,7 @@ mod tests { }; let account_keys = AccountKeys::new(&static_keys, Some(&dynamic_keys)); - let expected_segments = vec![ + let expected_segments = [ vec![keys[0], keys[1], keys[2]], vec![keys[3], keys[4]], vec![keys[5]], From 977b1b836f109c7b4d565eff4dd614cdab1334fb Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 22 Mar 2024 11:25:30 -0700 Subject: [PATCH 050/153] Rename AppendVecId to AccountsFileId (#383) #### Problem The current AppendVecId actually refers to an accounts file id. #### Summary of Changes Rename AppendVecId to AccountsFileId. 
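At its core this is a pure alias rename; the underlying representations are unchanged, as the accounts_db diff below shows:

    // Before
    pub type AtomicAppendVecId = AtomicU32;
    pub type AppendVecId = u32;

    // After
    pub type AtomicAccountsFileId = AtomicU32;
    pub type AccountsFileId = u32;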
#### Test Plan Build --- accounts-db/src/account_info.rs | 10 ++-- accounts-db/src/account_storage.rs | 8 ++-- accounts-db/src/accounts_db.rs | 47 ++++++++++--------- accounts-db/src/accounts_file.rs | 4 +- accounts-db/src/sorted_storages.rs | 6 +-- .../persistent-account-storage.md | 6 +-- runtime/src/bank/serde_snapshot.rs | 10 ++-- runtime/src/serde_snapshot.rs | 36 +++++++------- runtime/src/serde_snapshot/storage.rs | 12 ++--- runtime/src/serde_snapshot/tests.rs | 18 +++---- runtime/src/snapshot_bank_utils.rs | 14 +++--- runtime/src/snapshot_utils.rs | 14 +++--- .../snapshot_storage_rebuilder.rs | 27 ++++++----- 13 files changed, 108 insertions(+), 104 deletions(-) diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs index 67c02282fa1702..cbec32be6499ef 100644 --- a/accounts-db/src/account_info.rs +++ b/accounts-db/src/account_info.rs @@ -4,7 +4,7 @@ //! Note that AccountInfo is saved to disk buckets during runtime, but disk buckets are recreated at startup. use { crate::{ - accounts_db::AppendVecId, + accounts_db::AccountsFileId, accounts_file::ALIGN_BOUNDARY_OFFSET, accounts_index::{IsCached, ZeroLamport}, }, @@ -21,7 +21,7 @@ pub type StoredSize = u32; /// specify where account data is located #[derive(Debug, PartialEq, Eq)] pub enum StorageLocation { - AppendVec(AppendVecId, Offset), + AppendVec(AccountsFileId, Offset), Cached, } @@ -85,7 +85,7 @@ pub struct PackedOffsetAndFlags { #[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] pub struct AccountInfo { /// index identifying the append storage - store_id: AppendVecId, + store_id: AccountsFileId, account_offset_and_flags: AccountOffsetAndFlags, } @@ -121,7 +121,7 @@ impl IsCached for StorageLocation { } /// We have to have SOME value for store_id when we are cached -const CACHE_VIRTUAL_STORAGE_ID: AppendVecId = AppendVecId::MAX; +const CACHE_VIRTUAL_STORAGE_ID: AccountsFileId = AccountsFileId::MAX; impl AccountInfo { pub fn new(storage_location: StorageLocation, lamports: u64) -> Self { @@ -160,7 +160,7 @@ impl AccountInfo { (offset / ALIGN_BOUNDARY_OFFSET) as OffsetReduced } - pub fn store_id(&self) -> AppendVecId { + pub fn store_id(&self) -> AccountsFileId { // if the account is in a cached store, the store_id is meaningless assert!(!self.is_cached()); self.store_id diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs index e7a33b711d23ca..e1fb4f092ad623 100644 --- a/accounts-db/src/account_storage.rs +++ b/accounts-db/src/account_storage.rs @@ -1,7 +1,7 @@ //! Manage the map of slot -> append vec use { - crate::accounts_db::{AccountStorageEntry, AppendVecId}, + crate::accounts_db::{AccountStorageEntry, AccountsFileId}, dashmap::DashMap, solana_sdk::clock::Slot, std::sync::Arc, @@ -15,7 +15,7 @@ pub struct AccountStorageReference { pub storage: Arc, /// id can be read from 'storage', but it is an atomic read. 
/// id will never change while a storage is held, so we store it separately here for faster runtime lookup in 'get_account_storage_entry' - pub id: AppendVecId, + pub id: AccountsFileId, } pub type AccountStorageMap = DashMap; @@ -50,7 +50,7 @@ impl AccountStorage { pub(crate) fn get_account_storage_entry( &self, slot: Slot, - store_id: AppendVecId, + store_id: AccountsFileId, ) -> Option> { let lookup_in_map = || { self.map @@ -343,7 +343,7 @@ pub(crate) mod tests { } impl AccountStorage { - fn get_test_storage_with_id(&self, id: AppendVecId) -> Arc { + fn get_test_storage_with_id(&self, id: AccountsFileId) -> Arc { let slot = 0; // add a map store let common_store_path = Path::new(""); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index d900f0a2a5cc4c..d172fd4c48cb40 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -637,7 +637,7 @@ struct StorageSizeAndCount { /// number of accounts in the storage including both alive and dead accounts pub count: usize, } -type StorageSizeAndCountMap = DashMap; +type StorageSizeAndCountMap = DashMap; impl GenerateIndexTimings { pub fn report(&self, startup_stats: &StartupStats) { @@ -764,8 +764,8 @@ impl<'a> MultiThreadProgress<'a> { } /// An offset into the AccountsDb::storage vector -pub type AtomicAppendVecId = AtomicU32; -pub type AppendVecId = u32; +pub type AtomicAccountsFileId = AtomicU32; +pub type AccountsFileId = u32; type AccountSlots = HashMap>; type SlotOffsets = HashMap>; @@ -1005,7 +1005,7 @@ struct CleanKeyTimings { /// Persistent storage structure holding the accounts #[derive(Debug)] pub struct AccountStorageEntry { - pub(crate) id: AppendVecId, + pub(crate) id: AccountsFileId, pub(crate) slot: Slot, @@ -1031,7 +1031,7 @@ pub struct AccountStorageEntry { } impl AccountStorageEntry { - pub fn new(path: &Path, slot: Slot, id: AppendVecId, file_size: u64) -> Self { + pub fn new(path: &Path, slot: Slot, id: AccountsFileId, file_size: u64) -> Self { let tail = AccountsFile::file_name(slot, id); let path = Path::new(path).join(tail); let accounts = AccountsFile::AppendVec(AppendVec::new(&path, true, file_size as usize)); @@ -1048,7 +1048,7 @@ impl AccountStorageEntry { pub fn new_existing( slot: Slot, - id: AppendVecId, + id: AccountsFileId, accounts: AccountsFile, num_accounts: usize, ) -> Self { @@ -1115,7 +1115,7 @@ impl AccountStorageEntry { self.slot } - pub fn append_vec_id(&self) -> AppendVecId { + pub fn append_vec_id(&self) -> AccountsFileId { self.id } @@ -1297,7 +1297,7 @@ pub struct AccountsDb { read_only_accounts_cache: ReadOnlyAccountsCache, /// distribute the accounts across storage lists - pub next_id: AtomicAppendVecId, + pub next_id: AtomicAccountsFileId, /// Set of shrinkable stores organized by map of slot to append_vec_id pub shrink_candidate_slots: Mutex, @@ -2336,7 +2336,7 @@ impl AccountsDb { READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, ), uncleaned_pubkeys: DashMap::new(), - next_id: AtomicAppendVecId::new(0), + next_id: AtomicAccountsFileId::new(0), shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()), write_cache_limit_bytes: None, write_version: AtomicU64::new(0), @@ -2504,9 +2504,12 @@ impl AccountsDb { self.base_working_path.clone() } - fn next_id(&self) -> AppendVecId { + fn next_id(&self) -> AccountsFileId { let next_id = self.next_id.fetch_add(1, Ordering::AcqRel); - assert!(next_id != AppendVecId::MAX, "We've run out of storage ids!"); + assert!( + next_id != AccountsFileId::MAX, + "We've run out of storage ids!" 
+ ); next_id } @@ -6322,9 +6325,9 @@ impl AccountsDb { /// This runs prior to the storages being put in AccountsDb.storage pub fn combine_multiple_slots_into_one_at_startup( path: &Path, - id: AppendVecId, + id: AccountsFileId, slot: Slot, - slot_stores: &HashMap>, + slot_stores: &HashMap>, ) -> Arc { let size = slot_stores.values().map(|storage| storage.capacity()).sum(); let storage = AccountStorageEntry::new(path, slot, id, size); @@ -8641,7 +8644,7 @@ impl AccountsDb { &self, storage: &Arc, slot: Slot, - store_id: AppendVecId, + store_id: AccountsFileId, rent_collector: &RentCollector, storage_info: &StorageSizeAndCountMap, ) -> SlotIndexGenerationInfo { @@ -9609,7 +9612,7 @@ pub mod tests { impl CurrentAncientAppendVec { /// note this requires that 'slot_and_append_vec' is Some - fn append_vec_id(&self) -> AppendVecId { + fn append_vec_id(&self) -> AccountsFileId { self.append_vec().append_vec_id() } } @@ -10782,7 +10785,7 @@ pub mod tests { write_version: StoredMetaWriteVersion, slot: Slot, pubkey: &Pubkey, - id: AppendVecId, + id: AccountsFileId, mark_alive: bool, account_data_size: Option, fill_percentage: u64, @@ -10808,7 +10811,7 @@ pub mod tests { write_version: StoredMetaWriteVersion, slot: Slot, pubkey: &Pubkey, - id: AppendVecId, + id: AccountsFileId, mark_alive: bool, account_data_size: Option, ) -> Arc { @@ -13313,7 +13316,7 @@ pub mod tests { AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // set 'next' id to the max possible value - db.next_id.store(AppendVecId::MAX, Ordering::Release); + db.next_id.store(AccountsFileId::MAX, Ordering::Release); let slots = 3; let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::>(); // write unique keys to successive slots @@ -13340,7 +13343,7 @@ pub mod tests { AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // set 'next' id to the max possible value - db.next_id.store(AppendVecId::MAX, Ordering::Release); + db.next_id.store(AccountsFileId::MAX, Ordering::Release); let slots = 3; let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::>(); // write unique keys to successive slots @@ -13350,7 +13353,7 @@ pub mod tests { db.calculate_accounts_delta_hash(slot); db.add_root_and_flush_write_cache(slot); // reset next_id to what it was previously to cause us to re-use the same id - db.next_id.store(AppendVecId::MAX, Ordering::Release); + db.next_id.store(AccountsFileId::MAX, Ordering::Release); }); let ancestors = Ancestors::default(); keys.iter().for_each(|key| { @@ -17236,7 +17239,7 @@ pub mod tests { .max() .unwrap_or(999); for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) { - let id = starting_id + (i as AppendVecId); + let id = starting_id + (i as AccountsFileId); let pubkey1 = solana_sdk::pubkey::new_rand(); let storage = sample_storage_with_entries_id_fill_percentage( tf, @@ -17285,7 +17288,7 @@ pub mod tests { .max() .unwrap_or(999); for i in 0..num_slots { - let id = starting_id + (i as AppendVecId); + let id = starting_id + (i as AccountsFileId); let pubkey1 = solana_sdk::pubkey::new_rand(); let storage = sample_storage_with_entries_id( tf, diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 4f373333ae7450..6a795f7238fc64 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -3,7 +3,7 @@ use { account_storage::meta::{ StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, }, - accounts_db::AppendVecId, + accounts_db::AccountsFileId, 
accounts_hash::AccountHash, append_vec::{AppendVec, AppendVecError}, storable_accounts::StorableAccounts, @@ -104,7 +104,7 @@ impl AccountsFile { } } - pub fn file_name(slot: Slot, id: AppendVecId) -> String { + pub fn file_name(slot: Slot, id: AccountsFileId) -> String { format!("{slot}.{id}") } diff --git a/accounts-db/src/sorted_storages.rs b/accounts-db/src/sorted_storages.rs index 26741b321f7a9e..72cf084a0896f7 100644 --- a/accounts-db/src/sorted_storages.rs +++ b/accounts-db/src/sorted_storages.rs @@ -195,7 +195,7 @@ mod tests { use { super::*, crate::{ - accounts_db::{AccountStorageEntry, AppendVecId}, + accounts_db::{AccountStorageEntry, AccountsFileId}, accounts_file::AccountsFile, append_vec::AppendVec, }, @@ -297,7 +297,7 @@ mod tests { assert!( (slot != 2 && slot != 4) ^ storage - .map(|storage| storage.append_vec_id() == (slot as AppendVecId)) + .map(|storage| storage.append_vec_id() == (slot as AccountsFileId)) .unwrap_or(false), "slot: {slot}, storage: {storage:?}" ); @@ -440,7 +440,7 @@ mod tests { ); } - fn create_sample_store(id: AppendVecId) -> Arc { + fn create_sample_store(id: AccountsFileId) -> Arc { let tf = crate::append_vec::test_utils::get_append_vec_path("create_sample_store"); let (_temp_dirs, paths) = crate::accounts_db::get_temp_accounts_paths(1).unwrap(); let size: usize = 123; diff --git a/docs/src/implemented-proposals/persistent-account-storage.md b/docs/src/implemented-proposals/persistent-account-storage.md index 85367e5f5ab482..038c37dc7c559c 100644 --- a/docs/src/implemented-proposals/persistent-account-storage.md +++ b/docs/src/implemented-proposals/persistent-account-storage.md @@ -19,11 +19,11 @@ The underlying memory for an AppendVec is a memory-mapped file. Memory-mapped fi The account index is designed to support a single index for all the currently forked Accounts. ```text -type AppendVecId = usize; +type AccountsFileId = usize; type Fork = u64; -struct AccountMap(Hashmap); +struct AccountMap(Hashmap); type AccountIndex = HashMap; ``` @@ -39,7 +39,7 @@ The index is a map of account Pubkeys to a map of Forks and the location of the pub fn load_slow(&self, id: Fork, pubkey: &Pubkey) -> Option<&Account> ``` -The read is satisfied by pointing to a memory-mapped location in the `AppendVecId` at the stored offset. A reference can be returned without a copy. +The read is satisfied by pointing to a memory-mapped location in the `AccountsFileId` at the stored offset. A reference can be returned without a copy. 
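To make the read path concrete, here is a toy, self-contained model of the structures described above (simplified types, no bounds or liveness checks; the real implementation lives in accounts-db):

    use std::collections::HashMap;

    type AccountsFileId = usize;
    type Fork = u64;
    type Pubkey = [u8; 32];

    // Stand-in for the memory-mapped AppendVecs: file id -> mapped bytes.
    struct Storages(HashMap<AccountsFileId, Vec<u8>>);

    // Pubkey -> per-fork (file id, offset), mirroring AccountMap above.
    struct AccountMap(HashMap<Fork, (AccountsFileId, usize)>);
    type AccountIndex = HashMap<Pubkey, AccountMap>;

    // Resolve (fork, pubkey) through the index, then return a slice into
    // the mapped file: a reference into existing memory, not a copy.
    fn load_slow<'a>(
        storages: &'a Storages,
        index: &AccountIndex,
        fork: Fork,
        pubkey: &Pubkey,
    ) -> Option<&'a [u8]> {
        let (file_id, offset) = *index.get(pubkey)?.0.get(&fork)?;
        storages.0.get(&file_id).map(|data| &data[offset..])
    }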
### Root Forks diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index f5b1653e8d6311..372baec2e4aee0 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -14,7 +14,7 @@ mod tests { snapshot_bank_utils, snapshot_utils::{ self, create_tmp_accounts_dir_for_tests, get_storages_to_serialize, ArchiveFormat, - StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, + StorageAndNextAccountsFileId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, }, status_cache::StatusCache, }, @@ -23,7 +23,7 @@ mod tests { account_storage::{AccountStorageMap, AccountStorageReference}, accounts_db::{ get_temp_accounts_paths, AccountShrinkThreshold, AccountStorageEntry, AccountsDb, - AtomicAppendVecId, + AtomicAccountsFileId, }, accounts_file::{AccountsFile, AccountsFileError}, accounts_hash::{AccountsDeltaHash, AccountsHash}, @@ -53,7 +53,7 @@ mod tests { fn copy_append_vecs>( accounts_db: &AccountsDb, output_dir: P, - ) -> Result { + ) -> Result { let storage_entries = accounts_db.get_snapshot_storages(RangeFull).0; let storage: AccountStorageMap = AccountStorageMap::with_capacity(storage_entries.len()); let mut next_append_vec_id = 0; @@ -84,9 +84,9 @@ mod tests { ); } - Ok(StorageAndNextAppendVecId { + Ok(StorageAndNextAccountsFileId { storage, - next_append_vec_id: AtomicAppendVecId::new(next_append_vec_id + 1), + next_append_vec_id: AtomicAccountsFileId::new(next_append_vec_id + 1), }) } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 998fa82e2326d1..744a8ace27e0bd 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -6,7 +6,7 @@ use { epoch_stakes::EpochStakes, serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{ - self, SnapshotError, StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, + self, SnapshotError, StorageAndNextAccountsFileId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, }, stakes::Stakes, }, @@ -17,8 +17,8 @@ use { account_storage::meta::StoredMetaWriteVersion, accounts::Accounts, accounts_db::{ - AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, AppendVecId, - AtomicAppendVecId, BankHashStats, IndexGenerationInfo, + AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, + AccountsFileId, AtomicAccountsFileId, BankHashStats, IndexGenerationInfo, }, accounts_file::AccountsFile, accounts_hash::AccountsHash, @@ -64,7 +64,7 @@ pub(crate) use { solana_accounts_db::accounts_hash::{ SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, }, - storage::SerializedAppendVecId, + storage::SerializedAccountsFileId, }; #[derive(Copy, Clone, Eq, PartialEq)] @@ -286,7 +286,7 @@ pub(crate) fn compare_two_serialized_banks( /// Get snapshot storage lengths from accounts_db_fields pub(crate) fn snapshot_storage_lengths_from_fields( accounts_db_fields: &AccountsDbFields, -) -> HashMap> { +) -> HashMap> { let AccountsDbFields(snapshot_storage, ..) 
= &accounts_db_fields; snapshot_storage .iter() @@ -353,7 +353,7 @@ pub(crate) fn bank_from_streams( serde_style: SerdeStyle, snapshot_streams: &mut SnapshotStreams, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, @@ -582,7 +582,7 @@ fn reconstruct_bank_from_fields( genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, debug_keys: Option>>, additional_builtins: Option<&[BuiltinPrototype]>, account_secondary_indexes: AccountSecondaryIndexes, @@ -646,7 +646,7 @@ pub(crate) fn reconstruct_single_storage( slot: &Slot, append_vec_path: &Path, current_len: usize, - append_vec_id: AppendVecId, + append_vec_id: AccountsFileId, ) -> Result, SnapshotError> { let (accounts_file, num_accounts) = AccountsFile::new_from_file(append_vec_path, current_len)?; Ok(Arc::new(AccountStorageEntry::new_existing( @@ -662,11 +662,11 @@ pub(crate) fn reconstruct_single_storage( // nodes pub(crate) fn remap_append_vec_file( slot: Slot, - old_append_vec_id: SerializedAppendVecId, + old_append_vec_id: SerializedAccountsFileId, append_vec_path: &Path, - next_append_vec_id: &AtomicAppendVecId, + next_append_vec_id: &AtomicAccountsFileId, num_collisions: &AtomicUsize, -) -> io::Result<(AppendVecId, PathBuf)> { +) -> io::Result<(AccountsFileId, PathBuf)> { #[cfg(target_os = "linux")] let append_vec_path_cstr = cstring_from_path(append_vec_path)?; @@ -681,7 +681,7 @@ pub(crate) fn remap_append_vec_file( let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel); // this can only happen in the first iteration of the loop - if old_append_vec_id == remapped_append_vec_id as SerializedAppendVecId { + if old_append_vec_id == remapped_append_vec_id as SerializedAccountsFileId { break (remapped_append_vec_id, remapped_append_vec_path); } @@ -717,7 +717,7 @@ pub(crate) fn remap_append_vec_file( // Only rename the file if the new ID is actually different from the original. In the target_os // = linux case, we have already renamed if necessary. 
#[cfg(not(target_os = "linux"))] - if old_append_vec_id != remapped_append_vec_id as SerializedAppendVecId { + if old_append_vec_id != remapped_append_vec_id as SerializedAccountsFileId { std::fs::rename(append_vec_path, &remapped_append_vec_path)?; } @@ -726,10 +726,10 @@ pub(crate) fn remap_append_vec_file( pub(crate) fn remap_and_reconstruct_single_storage( slot: Slot, - old_append_vec_id: SerializedAppendVecId, + old_append_vec_id: SerializedAccountsFileId, current_len: usize, append_vec_path: &Path, - next_append_vec_id: &AtomicAppendVecId, + next_append_vec_id: &AtomicAccountsFileId, num_collisions: &AtomicUsize, ) -> Result, SnapshotError> { let (remapped_append_vec_id, remapped_append_vec_path) = remap_append_vec_file( @@ -758,7 +758,7 @@ struct ReconstructedAccountsDbInfo { fn reconstruct_accountsdb_from_fields( snapshot_accounts_db_fields: SnapshotAccountsDbFields, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, genesis_config: &GenesisConfig, account_secondary_indexes: AccountSecondaryIndexes, limit_load_slot_count_from_snapshot: Option, @@ -905,7 +905,7 @@ where .unwrap_or_else(|err| panic!("Failed to create directory {}: {}", path.display(), err)); } - let StorageAndNextAppendVecId { + let StorageAndNextAccountsFileId { storage, next_append_vec_id, } = storage_and_next_append_vec_id; @@ -918,7 +918,7 @@ where let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire); let max_append_vec_id = next_append_vec_id - 1; assert!( - max_append_vec_id <= AppendVecId::MAX / 2, + max_append_vec_id <= AccountsFileId::MAX / 2, "Storage id {max_append_vec_id} larger than allowed max" ); diff --git a/runtime/src/serde_snapshot/storage.rs b/runtime/src/serde_snapshot/storage.rs index 1f9beab6466cb4..da4bf1e4920f98 100644 --- a/runtime/src/serde_snapshot/storage.rs +++ b/runtime/src/serde_snapshot/storage.rs @@ -3,23 +3,23 @@ use { solana_accounts_db::accounts_db::AccountStorageEntry, }; -/// The serialized AppendVecId type is fixed as usize -pub(crate) type SerializedAppendVecId = usize; +/// The serialized AccountsFileId type is fixed as usize +pub(crate) type SerializedAccountsFileId = usize; // Serializable version of AccountStorageEntry for snapshot format #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct SerializableAccountStorageEntry { - id: SerializedAppendVecId, + id: SerializedAccountsFileId, accounts_current_len: usize, } pub(super) trait SerializableStorage { - fn id(&self) -> SerializedAppendVecId; + fn id(&self) -> SerializedAccountsFileId; fn current_len(&self) -> usize; } impl SerializableStorage for SerializableAccountStorageEntry { - fn id(&self) -> SerializedAppendVecId { + fn id(&self) -> SerializedAccountsFileId { self.id } fn current_len(&self) -> usize { @@ -30,7 +30,7 @@ impl SerializableStorage for SerializableAccountStorageEntry { impl From<&AccountStorageEntry> for SerializableAccountStorageEntry { fn from(rhs: &AccountStorageEntry) -> Self { Self { - id: rhs.append_vec_id() as SerializedAppendVecId, + id: rhs.append_vec_id() as SerializedAccountsFileId, accounts_current_len: rhs.accounts.len(), } } diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 2e5393a3a5bf49..2ac97392b45e42 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -6,7 +6,7 @@ mod serde_snapshot_tests { newer, reconstruct_accountsdb_from_fields, 
remap_append_vec_file, SerdeStyle, SerializableAccountsDb, SnapshotAccountsDbFields, TypeContext, }, - snapshot_utils::{get_storages_to_serialize, StorageAndNextAppendVecId}, + snapshot_utils::{get_storages_to_serialize, StorageAndNextAccountsFileId}, }, bincode::{serialize_into, Error}, log::info, @@ -16,7 +16,7 @@ mod serde_snapshot_tests { accounts::Accounts, accounts_db::{ get_temp_accounts_paths, test_utils::create_test_accounts, AccountShrinkThreshold, - AccountStorageEntry, AccountsDb, AtomicAppendVecId, + AccountStorageEntry, AccountsDb, AtomicAccountsFileId, VerifyAccountsHashAndLamportsConfig, }, accounts_file::{AccountsFile, AccountsFileError}, @@ -58,7 +58,7 @@ mod serde_snapshot_tests { fn context_accountsdb_from_stream<'a, C, R>( stream: &mut BufReader, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, ) -> Result where C: TypeContext<'a>, @@ -96,7 +96,7 @@ mod serde_snapshot_tests { serde_style: SerdeStyle, stream: &mut BufReader, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, ) -> Result where R: Read, @@ -137,7 +137,7 @@ mod serde_snapshot_tests { fn copy_append_vecs>( accounts_db: &AccountsDb, output_dir: P, - ) -> Result { + ) -> Result { let storage_entries = accounts_db.get_snapshot_storages(RangeFull).0; let storage: AccountStorageMap = AccountStorageMap::with_capacity(storage_entries.len()); let mut next_append_vec_id = 0; @@ -168,9 +168,9 @@ mod serde_snapshot_tests { ); } - Ok(StorageAndNextAppendVecId { + Ok(StorageAndNextAccountsFileId { storage, - next_append_vec_id: AtomicAppendVecId::new(next_append_vec_id + 1), + next_append_vec_id: AtomicAccountsFileId::new(next_append_vec_id + 1), }) } @@ -873,7 +873,7 @@ mod serde_snapshot_tests { become_ungovernable(tmp.path()); - let next_append_vec_id = AtomicAppendVecId::new(next_id as u32); + let next_append_vec_id = AtomicAccountsFileId::new(next_id as u32); let num_collisions = AtomicUsize::new(0); let (remapped_id, remapped_path) = remap_append_vec_file(123, old_id, &old_path, &next_append_vec_id, &num_collisions) @@ -891,7 +891,7 @@ mod serde_snapshot_tests { // In remap_append_vec() we want to handle EEXIST (collisions), but we want to return all // other errors - let next_append_vec_id = AtomicAppendVecId::new(457); + let next_append_vec_id = AtomicAccountsFileId::new(457); let num_collisions = AtomicUsize::new(0); remap_append_vec_file( 123, diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index a9f613e431feaa..03a26d46986ddf 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -18,7 +18,7 @@ use { rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, verify_and_unarchive_snapshots, verify_unpacked_snapshots_dir_and_version, AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotKind, SnapshotError, - SnapshotRootPaths, SnapshotVersion, StorageAndNextAppendVecId, + SnapshotRootPaths, SnapshotVersion, StorageAndNextAccountsFileId, UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, }, status_cache, @@ -27,7 +27,7 @@ use { log::*, solana_accounts_db::{ accounts_db::{ - AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, AtomicAppendVecId, + AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId, CalcAccountsHashDataSource, }, accounts_hash::AccountsHash, @@ -308,7 +308,7 
@@ pub fn bank_from_snapshot_archives( storage.extend(incremental_snapshot_storages); } - let storage_and_next_append_vec_id = StorageAndNextAppendVecId { + let storage_and_next_append_vec_id = StorageAndNextAccountsFileId { storage, next_append_vec_id, }; @@ -501,7 +501,7 @@ pub fn bank_from_snapshot_dir( delete_contents_of_path(path); } - let next_append_vec_id = Arc::new(AtomicAppendVecId::new(0)); + let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0)); let (storage, measure_rebuild_storages) = measure!( rebuild_storages_from_snapshot_dir( @@ -515,7 +515,7 @@ pub fn bank_from_snapshot_dir( let next_append_vec_id = Arc::try_unwrap(next_append_vec_id).expect("this is the only strong reference"); - let storage_and_next_append_vec_id = StorageAndNextAppendVecId { + let storage_and_next_append_vec_id = StorageAndNextAccountsFileId { storage, next_append_vec_id, }; @@ -685,7 +685,7 @@ fn rebuild_bank_from_unarchived_snapshots( &UnpackedSnapshotsDirAndVersion, >, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, @@ -781,7 +781,7 @@ fn rebuild_bank_from_unarchived_snapshots( fn rebuild_bank_from_snapshot( bank_snapshot: &BankSnapshotInfo, account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAppendVecId, + storage_and_next_append_vec_id: StorageAndNextAccountsFileId, genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 77aab8f0fee1c2..10f715c2597b56 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -18,7 +18,7 @@ use { regex::Regex, solana_accounts_db::{ account_storage::AccountStorageMap, - accounts_db::{AccountStorageEntry, AtomicAppendVecId}, + accounts_db::{AccountStorageEntry, AtomicAccountsFileId}, accounts_file::AccountsFileError, append_vec::AppendVec, hardened_unpack::{self, ParallelSelector, UnpackError}, @@ -281,9 +281,9 @@ pub struct UnpackedSnapshotsDirAndVersion { /// Helper type for passing around account storage map and next append vec id /// for reconstructing accounts from a snapshot -pub(crate) struct StorageAndNextAppendVecId { +pub(crate) struct StorageAndNextAccountsFileId { pub storage: AccountStorageMap, - pub next_append_vec_id: AtomicAppendVecId, + pub next_append_vec_id: AtomicAccountsFileId, } #[derive(Error, Debug)] @@ -1228,7 +1228,7 @@ pub fn verify_and_unarchive_snapshots( ) -> Result<( UnarchivedSnapshot, Option, - AtomicAppendVecId, + AtomicAccountsFileId, )> { check_are_snapshots_compatible( full_snapshot_archive_info, @@ -1237,7 +1237,7 @@ pub fn verify_and_unarchive_snapshots( let parallel_divisions = (num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT); - let next_append_vec_id = Arc::new(AtomicAppendVecId::new(0)); + let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0)); let unarchived_full_snapshot = unarchive_snapshot( &bank_snapshots_dir, TMP_SNAPSHOT_ARCHIVE_PREFIX, @@ -1384,7 +1384,7 @@ fn unarchive_snapshot( account_paths: &[PathBuf], archive_format: ArchiveFormat, parallel_divisions: usize, - next_append_vec_id: Arc, + next_append_vec_id: Arc, ) -> Result { let unpack_dir = tempfile::Builder::new() .prefix(unpacked_snapshots_dir_prefix) @@ -1459,7 +1459,7 @@ fn streaming_snapshot_dir_files( pub fn rebuild_storages_from_snapshot_dir( snapshot_info: &BankSnapshotInfo, 
account_paths: &[PathBuf], - next_append_vec_id: Arc, + next_append_vec_id: Arc, ) -> Result { let bank_snapshot_dir = &snapshot_info.snapshot_dir; let accounts_hardlinks = bank_snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 5806fcd46ccf5e..a1ef80ee92325e 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -4,7 +4,7 @@ use { super::{snapshot_version_from_file, SnapshotError, SnapshotFrom, SnapshotVersion}, crate::serde_snapshot::{ self, reconstruct_single_storage, remap_and_reconstruct_single_storage, - snapshot_storage_lengths_from_fields, SerdeStyle, SerializedAppendVecId, + snapshot_storage_lengths_from_fields, SerdeStyle, SerializedAccountsFileId, }, crossbeam_channel::{select, unbounded, Receiver, Sender}, dashmap::DashMap, @@ -16,7 +16,7 @@ use { regex::Regex, solana_accounts_db::{ account_storage::{AccountStorageMap, AccountStorageReference}, - accounts_db::{AccountStorageEntry, AccountsDb, AppendVecId, AtomicAppendVecId}, + accounts_db::{AccountStorageEntry, AccountsDb, AccountsFileId, AtomicAccountsFileId}, append_vec::AppendVec, }, solana_sdk::clock::Slot, @@ -55,16 +55,16 @@ pub(crate) struct SnapshotStorageRebuilder { /// Number of threads to rebuild with num_threads: usize, /// Snapshot storage lengths - from the snapshot file - snapshot_storage_lengths: HashMap>, + snapshot_storage_lengths: HashMap>, /// Container for storing snapshot file paths storage_paths: DashMap>>, /// Container for storing rebuilt snapshot storages storage: AccountStorageMap, /// Tracks next append_vec_id - next_append_vec_id: Arc, + next_append_vec_id: Arc, /// Tracker for number of processed slots processed_slot_count: AtomicUsize, - /// Tracks the number of collisions in AppendVecId + /// Tracks the number of collisions in AccountsFileId num_collisions: AtomicUsize, /// Rebuild from the snapshot files or archives snapshot_from: SnapshotFrom, @@ -75,7 +75,7 @@ impl SnapshotStorageRebuilder { pub(crate) fn rebuild_storage( file_receiver: Receiver, num_threads: usize, - next_append_vec_id: Arc, + next_append_vec_id: Arc, snapshot_from: SnapshotFrom, ) -> Result { let (snapshot_version_path, snapshot_file_path, append_vec_files) = @@ -109,7 +109,7 @@ impl SnapshotStorageRebuilder { fn new( file_receiver: Receiver, num_threads: usize, - next_append_vec_id: Arc, + next_append_vec_id: Arc, snapshot_storage_lengths: HashMap>, snapshot_from: SnapshotFrom, ) -> Self { @@ -199,7 +199,7 @@ impl SnapshotStorageRebuilder { fn spawn_rebuilder_threads( file_receiver: Receiver, num_threads: usize, - next_append_vec_id: Arc, + next_append_vec_id: Arc, snapshot_storage_lengths: HashMap>, append_vec_files: Vec, snapshot_from: SnapshotFrom, @@ -274,7 +274,7 @@ impl SnapshotStorageRebuilder { // dir. When loading from a snapshot archive, the max of the appendvec IDs is // updated in remap_append_vec_file(), which is not in the from_dir route. 
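             // fetch_max(id + 1) keeps next_append_vec_id strictly greater than
             // every id seen so far, so ids allocated later cannot collide with
             // ids recovered from the snapshot directory.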
self.next_append_vec_id - .fetch_max((append_vec_id + 1) as AppendVecId, Ordering::Relaxed); + .fetch_max((append_vec_id + 1) as AccountsFileId, Ordering::Relaxed); } let slot_storage_count = self.insert_storage_file(&slot, path); if slot_storage_count == self.snapshot_storage_lengths.get(&slot).unwrap().len() { @@ -324,13 +324,14 @@ impl SnapshotStorageRebuilder { &slot, path.as_path(), current_len, - old_append_vec_id as AppendVecId, + old_append_vec_id as AccountsFileId, )?, }; Ok((storage_entry.append_vec_id(), storage_entry)) }) - .collect::>, SnapshotError>>()?; + .collect::>, SnapshotError>>( + )?; let storage = if slot_stores.len() > 1 { let remapped_append_vec_folder = lock.first().unwrap().parent().unwrap(); @@ -365,10 +366,10 @@ impl SnapshotStorageRebuilder { /// increment `next_append_vec_id` until there is no file in `parent_folder` with this id and slot /// return the id fn get_unique_append_vec_id( - next_append_vec_id: &Arc, + next_append_vec_id: &Arc, parent_folder: &Path, slot: Slot, - ) -> AppendVecId { + ) -> AccountsFileId { loop { let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel); let remapped_file_name = AppendVec::file_name(slot, remapped_append_vec_id); From fe16e84806fa860d4b14bad83bbdad566c304944 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:45:59 -0300 Subject: [PATCH 051/153] Include simple transfer example in SVM (#388) --- .../simple-transfer/Cargo.toml | 12 ++ .../simple_transfer_program.so | Bin 0 -> 67320 bytes .../simple-transfer/src/lib.rs | 26 +++ svm/tests/integration_test.rs | 153 ++++++++++++++++-- 4 files changed, 177 insertions(+), 14 deletions(-) create mode 100644 svm/tests/example-programs/simple-transfer/Cargo.toml create mode 100755 svm/tests/example-programs/simple-transfer/simple_transfer_program.so create mode 100644 svm/tests/example-programs/simple-transfer/src/lib.rs diff --git a/svm/tests/example-programs/simple-transfer/Cargo.toml b/svm/tests/example-programs/simple-transfer/Cargo.toml new file mode 100644 index 00000000000000..9ccbf60aa8b8f7 --- /dev/null +++ b/svm/tests/example-programs/simple-transfer/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "simple-transfer-program" +version = "2.0.0" +edition = "2021" + +[dependencies] +solana-program = { path = "../../../../sdk/program", version = "=2.0.0" } + +[lib] +crate-type = ["cdylib", "rlib"] + +[workspace] \ No newline at end of file diff --git a/svm/tests/example-programs/simple-transfer/simple_transfer_program.so b/svm/tests/example-programs/simple-transfer/simple_transfer_program.so new file mode 100755 index 0000000000000000000000000000000000000000..5132b38cdc1b3dcfab8714c9aca50be5a4f08ff6 GIT binary patch literal 67320 zcmeIb3w%_^bw7UZKJ?-tfxRHh##)(|jJ+W7kc9jaU~K$|k&V}~u^qHXYmfoGRv51L zpG3lrjhz?RPD0avQiQQh>@+568{8z#s?EbBzcwaq9yCoGl3yOAX%qTL!Y_%f|M{LX z=kDE+1lXX#* zO-{sVaY~&M#*GqA++^GF?yn00jhzaCfnG#^hrS`O9q)-ry2inB5z0FGQ^~jE-IbEA zv6c%_GU{tGrqhO90J!7>L>CR-yOc zof6;QE^!TV9VbbpP%i3xO6WhalheIDgBtAqYzd-1e^$4mUE)+x zx;JdF7bbH{TP1;v6AaKjyfwi_6}4qKiLJxSU8J&9TEPY zG5siJg6BS#H=G0tN!AAH$5zuHlqUJ6KUOoEQ%v}re!IZQsOiVmrktN|${jTQ_>Aet z|ML1xKR#pn@xQqK?1GcrFCEQ4khtoE^wY5PU$*L$fu;YlRi_yaj^`7viT~0<4oj>= zI;@!H`U>iTD0zGcfa?&G*wH`95?JB9U*zp&(D9x)E&A}EuFpRgx)Y4&sPj|Kho7YZ zU%e#)RiYV#@*U~1u<)N-Drvb@5?334MaRbx5(h97s{|h~0{kKNMViX3e|@Jk*q}+Q<5lHS=pw z|3^rlvu2(Joj3}ZkTvso5FhiK&ga4Oos=G&7xxg1cKnzed2iU*#{)9|s{gC)0(?2) zDI8pX{2>We({rKToh-U!NIA$*?F;u15HV-j94!zT@)<)r%1Ux`#-Iqs}DG 
z)O7&aEAF@4K~8px3HdbS9H95oIP@y1S*HT}0Q$co`Zk|A!zcU-=x?^!)c3ThkJ3?J zwz>8cQea>9{7CZfnQgA~)4vnoBimf*r}q}%S8#f^s)S?cCt94Xa(oygCtDR~IQI-{ zp>Yss@6kX#kRR}2-@X<|M|<~<)87Ax9>gp?_|=R;J-B3MUcUd4dP=N%{?V!@7N|%2 zvrp~kY!;WJej@x(n6kvuS)380bvc5Ln^*|mD8bXjdO@DlC8UQ`Dbi<1y4t5U$|Mv= z?J)dbnm$KS4kkS@n*>iG=9Y8pZ5ow%YqJCLlMdq(=Z`2quws5FQ)$M zLqbpS(XUq#p7MirHi?VJ5lSL|IpwQdJZXlb_{jgZRX%Q&uMd>ZHBu&pT&;->y;unu zD+=BU!>=nH;H?yXlOf5E6<@%3%74zZAAFQiJu!!rl!D4T`+$`9?YohG)S*on3c8NY zJ;b1s8;}_EE@XNl=cqH8Gh%RDD2!MCuc_l=#*RgKmx#ulsXzOOv`goYC~=6O^I~q0 z!90FZ9v?YbBPZ<#oj3jUFX<8V%wPWkpaA{C*Q6@~_fkSPM>>zNiJFJ}#C$1`o6pJ4 zTr!xX>dYU;_Y94E~< z$UY!)u;q&Rgh*-_oK^E%Dd$oK1{DRiNd>nOFGg6xT6i7A}Ue4nvINxrw=wQZ(Ix_HL;aO}sV&NK|XI>9~ z&sub83Sj<@{g7GWkEm16aV&fx&l5qu)>!4yzNm9POxopW1!!EBbr(eqCnK(|!&5dr{s!r+M{t=*!&}UCd~Z zLq$Li;QQnB=i7B-FLZt3U5D1w_$|V$Jl1|@@0W3Tgp`=|_;ImExfP7=ln+U)e!F)- z?5_9pKDKMw*+SPlAat_E-_UWD)qOHUKey7*-6e5$cAURt&#UEF>nj(vak8rfPuqv} zP{I6>f_6y%H3Y_QZj+RoPj;=2zH0l9(06XsA7cE>c?%4D z*ua+>_$332JiXx&hO_6zP5w~>OTT5Gqy|vPcA(W1XE>t$(cds$$9b1=Lf^3XjhXWn zF|2+=c7f`{amI(AORV$03@CcT(!NAJQZc^%nDK1=>NEB>+bncqv@Z+9#B?q90WPom z4!(S(`<-%e_Z)AYw8QIQboRSJ0e&}PT~7i=@WgC7*#?1=k@24k$zCvG(^lR4a*LyW_WzTDWE zzLYfB=huu+<+so5=P17zA8{fP9CyF>D(&Z4;|=-11AkwkE=COh#2;T3(jS27zmu&H zJ;+M3zurJQ*C4-4#!k;BBNA?6EiG@D};Grhq0UvyQ ziV~DR&+O~#x*m84$|v^kz_0cT^n>#$9l?8A#&dSQX&;pbhL30HWfusZcRS0o! zXQ>`GOTIo&h2Dbi>;kbPSofhl*_rr6%#o8DlDK?@#E|2ioNx5)gK^F%-)Hiv_y!`} zMltV?m~v|0Jz0j4eGvWh4v0S8J6!}E+{c1mX4gwTb2&Xt z1{s{iiYJL*SX{<_Vbqc5k7JJ`&OePbM(Xzn$`f^xFxR(+M&z?y>SWbG|4r%A?)E@+|~=m(T|V zod+<_6DrU9NRNde1)+~v_Ybz!EMdZ`-x!}LhyE#{@$Nk>{Sb2&Fdphh&~;AsahdQB?h|~k!L*MG;ag(*2F_4_HeFn0^x&_J9-L-9(EWqCB_+&9aNJ=X z?At4167@qamt*`Hxp@1f-QIJ;SCD`3F(LoPkF4%zHXY?ZQL}~<)Gm~p_US%7d*)fEC`&7=)jFB_+U-`^6aWVE^(ABowbsWG>BH!jS zv)7a-K9R5el`NX<#2Me}wMAG<&7aWL0?Y4lv>_csOd`vi}xSaFAo{cmkgetd2kqc>t_*R$Nm>f5=Gr@siw zc>+0fv=dVl&endFKgKQeBr*L?=G)d&dpxP0+Vb=o!~o4Dtvpw`9tyU{=;uWBtK6_| zz|5-STlvv>VZ!k?1gS_lbcB|R75&%|+F+uyixqW5OVE&V3R zZT%VKrt8t*yo9O2mmev0a9(}ma{JhuCbz5KB)KhlopQtY|Ie4(JKr?9&3u#OHu(&4 z3+fsC5S7>2{R&xVO=#yP^eZZyIMt)}%=ZVEwo@vlFK*>{uKUID?RPC_`}_XtU+B67 z_U>oDC40Ba;B`uzw|k%F1ofYNJ3Gej_`wBQjgkq*f1R<}3A^wOwyy-g-XRUuT%NNL2q9$HQi-9pgd#?YZvo8y*kuKI3>W`;KqPdWJvk*Ycf0Cu`OZYA<2`p)s~x-uOJw;lzpFn=udk;x`k|1NJ3%xu|L@h;#?qm zsqmrCtUCj^>OKpw~w0%>Yp_)N1cVQq9OKtG#;J%&q(LCbEcC5 zoiTju9k={=cKog&AHJWo|BU5>e*Uc}@0a_3#@tJoX#Tiy+bjShcFh2PWNy8`f;xdYH z?)T`A^65CQaxws8UVnu)5)cy^etwH_uI)woR4SwSh<`&g6$ZVMZ{_J{*PC<7l#Y;H zFXx!`oHELRzK+A}jbbOV&Bsjp#2?Y;*0X zQqK|KT*4)kqw?41QP~CZe8k%?_0K1qI@I%w@PT_q$4MS~PAK?%3moY=i>$Fb{(dRB zjj%M?`(=wMfzP6W&vzu-;0+ zn>Cv{nu4y^m0xeA5cJGBl6z;%`4K&T>lw%X-q}JQ%C^>Q_r91IT3wMr+x{ZuXFt? 
znC<*M=f%QzEc6+nYo7P(yqMMd7sh{&GFK#LbMIQu^?SqngimwsU&o93U-~@Wds_5U z=U0rU=`2s-!!z~g>lHnW^3!Ch*XK{G`}op2#(iCvwc1dsf*0=5->NppYNe zgVLVnou*y8MLxHhacu7Cc(PYeo0LC=Rn-i`l<&Oek1Kf!Y` z;8X1k@HPeN1ziNy=UFpnFzh`obo}#V4|B3Z_s&IL?=<6MQ2JNTvwM$91Nt7T;R1b+ z#5vA%nD|La&t?Qq&lhLILMOY<&^>7ScNG`s^N^R&S2UYrl0U|8GV~MS=Q+_+&j`?a zpq2~bTmhOGDm9BF?-K2u4KIbnnD71nV%*h75N{Y!msL9Tplj=X+pkCzcb-hkkF&lr7KFM91gBYKftZ}fis2=kSF{HVm{oIv*e;{xA!LSkdT zeS5F>jfSNj*oDI+A5ZToKRw9hyn~|8-ZQ-nqrEIge!3y@!8u{i-1q7*<45mNdBeN8 zoSt(-`60#^{$n9hE(-Ihf6z~Q9!BQ@;Qy8m`grD?ynkP0SlS!J`xW8oxK;gEJ>`B1 z=yA&MnGTimxt4UOoNPIrs|d}Ja}}XwV9ViqhWt(Luf#$r){kI1O$}7`t&EipO=dfm z4#geZjERL7J5xAb$~QZL<|WHl4rqio-NBYfEL6uggMxS%fqH*B7Fxv5 z;bWnC)`TD)_>VeQ(x=ctJHh8Pii3FIJL)vjC-Upr{s!|Qx2UtkL+MID)6 zgXtK*sq;jyVqtiXG!7d$BK%AFOAsG7Zlcb-G3n^Ps55&^IxIufiH}Lg$kp?I)2C`-FA9p^^$e0z`%`Xy@a5e3uHQoT16Oou+}J;Pu+`XS2NR#-2Fgx>!N z;>&pi8lV({>39^Aqy0*k|(b|=fY zcI#wZyY)P-QBL%w#xE!EZKl4NGQQmek8j|M9$o0;3w*P|pCRL%jo7-dhm-4kez(YIE zr@FK~-d@9xp1a;FjAqUGZEvs0MbB^RdmQu}H~R}z!Ip|RbLs1QxoZ&r}#zLD)F;2W<8L7aG&@~PjfNe7s37d;61{f zfK!r>dx6e3qRcuR!e_fQ^s*zc#=z=l0Hcu)!opnD$~ z5XJkC+LCpu5*y(8e= z6lj;uD?5ltoakU)fe6#?Ot!0OcMgx6w7X37k@65??o%e#dl-8eK0=@1C+h5BI8FQb zumowhhV59|U2N?A-IkqyjPX%EMt@OfH^XUn3j4`vnipVUV(uNz#T>f~N1a`ahjPe| zvbT-;F5q#Qrg;VSCg$F1<_nkMs3YfyQBLg9gY*sM>iEQW%zX=cLNWIiK93DM4T~6c z1{e?J1kNy=b{Cm>??%EweXg;y-9En9-EBTT{JEHW9ou!_1J?N%^abAJ<5SlW{z@}H zyFA}SosFCieB?)+6vHw18Xk8sx5dn3a*sIb;8Zx-9r_b{csIi__bM|lUTx+{m-{Q~ z+`;+4#|Vnj8-EaD?iH*rG51QI3Bdm)oZrg%z!!Ke!!dU`&ucNa+4vPM_fOQh(Z`1! zjXF0N{6_XrG5^k|6wt5bd{{%_U*J^?$4Eb6_hW8@@k3nJ->75!Anel4#KDY& z`T8P`UDnemy$cZm{6lZmUy70bVaUZue=(1PADJh`9s*zBdio~iW!_xm!_ZrQUZ(Ff z3<);#GJS_BQhy!v<}i%b3q9zS-bX;b%#$c3`S48qd6~Y47{;iO{4m32UZ(FdZ<~3U z;F+9n=4JXGXBeqczJ_6pPJzWwL8}E8KSK-?_?t(6QPx(;M@ypI`vRlC)L#caEU@l( z0S0IKJPt7WN&R)es7dWAVDYb9`X=--<*Ge|-9wJTdcWR&k3t(EK~8D6n(?6@2s-~@ z-vIuEzn?$M*}VUP{rqfI?J1@H#b*+Iy0 z!q@ggj(876rtFx^+s{%a1@Ohsby5QBej4A~3DkrATJQ@x3#q=~dv*Bz0j5041=|n& zc~nP^+KT}A?F2_zODO%2q+|b!`4|#7$j4An4#Wid0d&4iWpbUSJf%~TzmEm^1?8lE zB%Ytx{5?Qv${)%-7_5)l1AW(eP`*9Tzu==gP#*A408<+g>~*B}OUM0xIi??iatyYA zXOJIiC+q|2`83*pkgMVM@cbq9ul6_E@v&h4Q9Gz8-=B6Y{_h}up^x(^KU4K%;VVG* z$DnKa@Ab&_rhpu<9)Mj%JscB%Ac=Nle_43n5$W)+frtE$Q37IOgP)gW?%Te#!}!Y> zhu!hac0zSJI?@^Sn~~nro9;(?YM`q}{bzlio_+8Xw^Q#e1?3nV4-@GF^q|F(V*>33 z^Ay1Ep+NifeqJo}XI!tIHwpTI10;v2^QWAT@?SS}&~Cu51YmtH2FjBfIC_6L?UtR< z4@7%{&ktj6j!KhWQ2Lsno)KNDlE&I+I6vd2p7ik{&lv5?U>*Se zRTjR`$4Uoyx9~WJ|4Xbg{wFMc#BX@i;I|q-@HX}bK>wgcU;1On;NR-R>08Xbo&7-I z&#~kqeo@xouQPt&9V|D-x9E#Jc_v89#76>of50wAw5B z*k$nVGJfDa>|X%?Cl){AS9BQsyNw_CR`xf5-)-TGUv)3zBmZG5U;L)K{QP);KlQg! z?sOnMKMrp5%QaeZkbb$v&tExizWCdqKVs3Bc3$U~f6?Mw=&$ti>#clgm-ySDzt^HK zdfDukKVapH9ya*-6@h%+7YE(nx9TM~g2u-pzg(%+ZsF@PKR;&WOTC!2WPD7q=!<^M z_VcT(e6btVe*SukzQ|4dZ1A79^2xjt|1|#=K+0nm$pCd3Q%pPSoy& zR1fffVdk+pCI)?^(_=}F0qgz;Z zpQz`tfE;w+U+3HZKfZ?wmf_8K4^_L7H~L-Qmjn5C2hvskIClcq3jQNvJvRavq0OY9 z_5EM-rGTy@f0y6?1kQO?x$kxo{*jCII&*U|lY?ceMSsaN}7&kK1g3|$((s87#p zpna2Q+~?^_zw3JlgY=Q3^!e2w9N8U>|Su>W7s4mrP-ok0vx$m;V3L(hI+ zp0+1jCFdYuKaU0Mp1$YamxFolpYCG-KWN9Y=6(6WbE{Za!rq+-(1m;+qBPY5eLsuZ zM?HUs{EJDBKHhw8mj67&>o@ukx|B0K?Pn}DDJWf>*8`^4#K}(i8i|n(Nnw0LA9D{& zdNL&M=kWR~IU%O{W$0plg-G%K7?jukNW4=74H!Y6?_%E%0p$?3JuLI~hQ$K65k4@I zA*?#-ED4sU{)nxpd`fnLM|^8-@bf?INRq3>3;zIFzHENt;je5Nj+Bw zyjKEraJ~mY&)q2hQRkmVk$!~!P+s*y+nYZ(bCU7&JzPP3I6?K;^Q+#E10PS4jFaq* zQ~lJj|56V_qCPZvF#Cll;7)#uMsMj zdC;8#(HD4338!-uL2{mvZ_Zr{F#)U8C~vnL=Ohq$$(CH}#0r?~e~zF8S}_ z(f7%Ep5e1k3_$f>sqd;(;ruh+Ilbq>r(fRiY0CTU>X3BscP;pn<+PLipwtVyf}nf? zZ!fjWPS@v>Q5uy&d@Yxo$7CFB7wEi7OBp@4nB?6f8tbG7ErIcVF7sXOw}m!7b-v3! 
zF7v0iv6eHvRGj17(;V^hY0QJ*<5x6vlI-o1&XB%9(yEuib9g@{e)YU$ZcylG`(QtB z`^T?*4ymU%b=78Tq`#2W*{{`A7p1>R0@0#In;X2EluZF&^0l7P7IF@^e&-T&wc1U9gH{Fy_kz# zu5+x12!U+ZCg8Qji$ri6^OTqyu0vW`55``ycv| zCBY}z8t3%_=@*iS@q0%IdhV^`0azE2&mhGaGY{(XvjTZi(U>dy&gax#V%UFfb}sUy zhHbItVT>=>CGd!#&lNU^8{_PtJ_2cN|JoWx*@Kf#y47t9IzGfCkU-Ug8>6ryGPxh@;Smwh%!biv$Kc{b( z!g3#_?>2?SzwTSCu>1}e`U@}6msFDbIQ@>MX@4<36*v<<-*U9Zg5MatiY)b}679STv+xLvb<1 zsQ(C+!%kab`ZoH+xZA|>|BRpi89z{j%!2U}}1)A8gfvzRg{0KSK8_ZDNXx($eqJA<4TaKYgD5y(gPS-b;6AN^HFnBebO z`17VdjvFzh0lbFe_5K z^xl@?Tj{Aj>-q+ABUYU)=J%KEeI>MGFsR37-x2AB>oZxG1?6^}G4PJAm^= zsE>ZXRrKN@xTX1-(oYe-zDEOi2wA-^X6`d%AE}^T^L~51cdYN95ArdB{xkLje0Z?N4XXKDBMh}ZGj{amo!joR*$+&+Duh(15m_e|^Wm(uTTGM+aL zf+c-3`hA4z64{YDBi&nTs|Y_F8>_oe3=)#VWE$42bH$%rruxIahG%- zCM@z>sXh?Ke1niI9pX6gzy^+WJnQ>GbUxMntZHLtFpki&#L_FJoS8Rq9*|VgX`>kH zKRx$WJ%`bpSOmGEcZhRwe3FmdDzQ;l+TFRD=YKplfjkno%X-8A{l0p^$2j8gZS*T~ z{zaUD_QCR&w=+ocg_MLh@D00w*c(30a!#BdL$>1;37p%^Alu_I3V;y5Xy$#a<3?Y5 z?KPxR^ji6y_~tmzQM;+xqZ>?lk6?scm&7k#AaVI!68rcjf9+$E&h~L|a1anhgBwh_ zSgAQ*_kiGe=6>j`Rr5KadX=P?uap?$8)0HOmlw!c-*b>yC+j)#cci?jZ`Mq+pQCh5 zzn0(5Xind?62~G%XQ4Zd5w%=ywUjHrRbt(rhdy!t&m@CILG?5704XwssPjDcpRKP) zB-`n$VKh27TiCBLx0w0U-x~)$>hYgfsJ+@L<#oKw>NC$R&2tMqHy5M*HU$59t2Itx zf4}fep@mP_Us6H#PY~PVH%a?V2-+_1px6uTSM@uzztyhHTC;=Yq5DR^%f*D&NPEPe=VBJs0jhECWyRUavf3uDt)kZ(pscZ-&J1pCOmNl(ZMm zPXH&~3?pQl_pyBXHi}|-T~aQ!cPW3-eU3KMuh|>-Fn)IZZjO^-sZjSLcs>sJt1*`t zpn&wJ1L@EcXudwb0}M=WZ=60U*7u8CEAr9#K-;6gGn91J3%-_nLiq9K7`g7^Y+oO> ze9~EW#&Y2IBNo5lPxB|Mvm;e{?2>0+4y(neHZ$BHr|sZ zQvN9!FW&n_&Z#|hoZk0)O&n)FBzCF8{BC_ml|27vfAFN#iBI(dv*vf~v+AEV9~C__ z>$jkvDE^iB5&HZ_^#T6e4{75n$)`SP{0&Q8>X%N`uPFBqTn@X8q<`kU2mbozfT2V3 zA0atuziYqhy-j@%?D=+5_(Z*jtbU^;zu#0JwEr^mWISdb5P9d?rG9^$oAIsVTE}nF zypI8PUBIJNWsJybtm7GT~+p`WUeaokk9`#Y!o`q}z53{KZP~Df1sNy}md=#~U#6vh0}u z{+9!$UT3-Bo8L{d*UwS@b(4HO{`{f!^pEYAj|ccy`>OIyntNI*m(1hFKJ>?hkBra@ zuDi}?AI!ShUq=r}KX}`Xedw)Yx_vv0eR%No*#~$>A?c09e7nT~b?S|OZZk%$dzv?AFTf?A} zy;bCyT|tB?C|&gfgXh%h0(`vD=k`H;KSuKn+E@D%;RT;)-^b~F^N1;X6zf*_0ht?R zTmg;~oW1dsnMY4btm}27KiLP5N&15$9D7YDMdg|gGn_1e6iGkorTsB?i@*mY-!tcB zHjz+aPj!7+F6fSTDWf|W2asr#SEoczf8R^z$tQ$wJvXI#mgG~fgx?|k;iZIL@O%m8 zJGE7*Jt+eGiE)R8akix)X&rR zFL?CQ7-Z;q0{AxyqknXNNc|r5e|`Cw-?`KG$aqUdKG>%?e814b=e@T>`)U3q`k?E- zFG0*5`I&|Oq&Fnv!h1-@#l-x*%=|0esPjdJUq64JE1+ZZ7j-_(`M!Rc_wOY7&>F}| zp4V(5y~a4p<8ij5@5%PgGx}rvoH6}u*7y1GB^{#nPv7GJ7&WV%%e6>7{_~-`%=_~` zB>pYlBZL5Zn4nnw`qZN`FX%j|ej)I1BtTY2MSK4>@TL+u3IbQ|H$tpO!&?NO|uOk$2()+#pJq zae(;>Ik93*zlQ6pHhw_gV;HR@Cv)#u_xIDKa*`%ix{333e@*vgqfRHMLw<3TZCX*&(@4L%oDvdM527EiS*|U$l+n4pO=Gd0D4csI#RtUj%Z&;`o}ZB1E+eX z{=!7_INEa=kx+f`J}P>Z*BkLqgZoKbuUQ9S-wQ$aJ$2tk{p9jG&gOFsRF!|OTJ^Sk zkK}s?MV|Q0ZvP!X?38rdkMe(K(Epv?v3iRcEa`lW`BuHteMiK^=4cF; zpm$!}z#<3OJ#d(xU&S6qonJBCSm{*WxAFHY&AI6O`d8>oq!&r&&zO!k{1V5RO4DBR z`@||A$O9qj9AP{i7dwc3?oVnDV%#HV*rey>ZtVS#jDMA%w>u89zFtG$ST3c7`lNEn zKSvdMdAS^9zEm!GJkx$-SGW93{28aL z9VV9XJB{}JkV@ljh$H{^JbgPt_y{^bVO^sz;-s;+I&b;*N$Ht&s=mil=|<_c9m*@8 z9y*4C&L8-#AB45HOB{*Y|Pj^Ci9KUOrpOV_YHV{%Qsuds4QBDw?h>ATA(uWB{ z{wdBEhJ*QJ`qAHD9#!7`dgoKdQa!=4CJl%slCTc z`-u%u(BI!sy-B2iP5o>=e}G!|P*>`_h;f9V@5{CE_5C&z&!1#4DO|Nj_D!n=^nYhn z=e4vuo$WHtS0m(%AEeK-)NeZb{Q5I%ewBMaet%p07`l(9{gfZ?3FZU-A~ai{qbgta zJfibV_CuoAI*+Cvt7AIeqejnm#~IGguhLImM$&z|X2uKn!8i;0HPEaiz3&SlN~gaN zdjpk-e7iW2evG9<9ZtcwD+i39P{Ya2tc~Z!pSIVZ7mWX)^9=L|eCu;Py%(hV)n@X2 zyL^6}QV4&Vkz35PZv`i_zd=JSzy6pf>3VO%OBI&$QgKGmb1VA$29Pi4z`sPu%oGL9 zU|~-o3xi2P`5H5S30pJ|nEG_TVknMc{(hKGze?tjOqtj#{T*mpf5VJAUZ<>^ytKUk zD0of_&-;D-Fm@E<^_}=#r;YL+C9flH=*WASytKTBNy`EM9faR!p8xk95Prr!7ZAr; z`$xy0_G6O9EAwl9kNJ1yxqz)lzMmo8<3AUesK0#uF!PbxgG><#jzZR~=d?d{-LLC7 
zowqadWFE?z^~Z$zgUOhJ{*Jr1*V3cCanASoP`&w(*!?kl(C>mH_??DC(PLd%RAl`GrufNYcZ1hg|nfu~k z+wsPEPI1P4W~v84_doP`MH^L)87}FXedaUJKbL*xHq#zm@A>_CDOc~`*YXU}==ZN#FUCs$g41;#&VJ|++yC;tLN{A=m|>ka z)xQK?gzSU+IbYv1SN@o!>$$3GwP#Ns6}m=0bw6p=dCzfyzB(zV^9lTQpuTKk{hgGw zJ6HV6dclYLjrmmf+xqG$fdcyL8j5qKooy6X173_=x>rnv(A8_j?9jU?UFW+5>+=oN zk9=J>_1%tIh%PAs=&oJCpo4Q&s3N;w+86XUaiT-d`L10l`0zJR(bq&o{1?BTgU3dJ z(YI0MGx;q$=HIj%PH1HVX!g+@kEpLam?DDhY$ zL6lx9=Xw%P2m`Y&6$R4w(y0Ea-syQD$S0@=;E(;`^DY;JNEO7moX! z8NW&MoHy$H3)k<#6Pu(r(vH3Z z((bEj=!g7F&*BM%r}7fJ%&H!B>n*Xx6%8GwVvQ}nb#v9|GY+@ z(ANHge?CY_QAf@P>i$NDd48NaAogG1TNicyjpYjai1v8Ti9Lj!1s#3flBl`I$D^i- ze@H_hQFE7{4oyZmP)gL?;ipF^CFfJP>v? zzgqj#=*g_7xMH8K{?6q@<3q<)ew;|ZsXpp_G)6CF*gLsFW+(3luM*hj%k+zH4~(6G z-DZ1WV%76B{Z31c^v>7Ul|oPT7W&#^>FZt8WM7Xe1>e7iQX}@myCBXP!FJ836ttVV zocjxsOR}}5cO32_fI`qOf?l#+HTCtfz zP=DrewHp<$%Wh~p)Spi}ai$-2{+0D@>^PS5027Vl1O!b%$MbmOH~I#~ukxAXTR$Y< z3CFGcUeDRat@dZ0E~E&#%lO8)LW;jmPjNQ4E7X1s26ho zK>S!fr*=$e;8XXD$MduIlRoKrl-F}_p*Nua&_^0V1@{(yg5Tpaa@FS<7#}Z^AK;IZ z-@$h6IZL~~IzhXh`WMun>b#wfxDMtIzzF{S zQ<0o^*7Z)*5&NR|A>e<*PQag`;`DoAV*hqfg!x?eG4VS%xVH(~i1j_~eb*y})*~b? zp4UvQ>)mv*dA=vpIN2ky7v^VPo!p~Reg~BU%5&5%@UeqpN^;ijlzhEMq3?V0=Ac)p zf3BB&zdW^v%Tvtdjr=xr3SRlc5_`2`@ZNa+GJ(C(`XTQ)v2yS9f8cpa{Q$l9;2o0v zFg>peJw<=ymI?pW@03{U&zk*@oVexecaE}0Px7FD8yW?F2gwDJ+ptvNawDHg5|;du zgT#wWI`HcVRrOQPHN@z5#1K?pbw5285(e6+lHXGPCW^r~=A#jc6MHYAPlBoV2*r9X zAh!;w)IPJGh=s3YSj*|UH(}Ptn4d?0KbO&*O-5g_K1Dz|eAVYoiJHF=IS0o532L)1 z_Zlwd=(&cd^Md%Rl1%Hu1^n&o`|V~NqR|v|{9v6=|NU_m3&oL5?f(VWQ~n5p{ytS3 z$p<7hnHckHaQu1gg0JT;FkXu&={fpi{im4q-`JZCqV7(4oH8iDK0puf{Fdzy$rC}( zZv@NLak;}n?>T0h->)=C>A~~WhXEUZ z59d9P{X}r_iFqI6AN`J4_r>&gYIPl%ZSEbzx5z=-Ggc0=PoevDLB2%}6SeoHG4=Gm z!FIO)BU&f{?S!2`I}v02?56dJ$|dLr-$Dc3A75rX+V?HT>iY`?&ku9AOTD?Y#9SY> zL+QrMIpM2ji-P9F?QzP@yw#VKa_fNaXZH|!0~kTyx1{$Ab1*^_z$GxRYK8TrF?%aVj@mh_|Mmprjdngk_g=KU z>JOH;nR4e#tn+2HnMZYfrSk#$4eiDKNCdQlz|lSt8}kL&_N%sKK1DjTNcfvBbl{I%Llku0Zlm^8 z12%Gn9-(Dx4l{GdUK6Cn}^1>{47NXNvS z(ECl44#{vjQAp4|Oa$Lgx>4+)>O;0_C({qwH?fPSIDM>LJk9Cjt@m4~9l?3!*!^5J ze~dSuh+WLv-_y?p@y&C9VM&8uwUz0gyWMSP{KL{dLmTV2K}tW<{8ACH0~o&;aj-jx zqfV>PGybc0t30pP{dMmSd5#?mU&@R_AJGGPU!}aBll^@Y?@oFCt^0o%Uk=r(~9BMEylfP|q#tdA3>8g~7i0(*No|mT#B(^`11=Cs3hT#@|*yHPPD0 zh>3TKf39{;e{Wm)f;>r_qv~H{Jc7Z#HByhRJ7-mjVfW7g8a?;Sdk?%r(yl!J!neP_ zP2`ZG=P~s?gF5eNx;}S+Up-2Up#Knbyy^P|`Z`Ve^%9S@zmN)*GybWb&z?1R3a7V` z9e)RX+e9(?sV^=3>Nz<0-Jno?1EaHlAE&P`6JLn8Jt_2|mMD4eJXycPJ_3F}{UJe}6%! 
z*#$B_0e^uovJ1?3dq&E@pM39k^5wxFp4j32Fh2^U>v=EW|J{i^ez3kpR(;hBkF9U( zck|^@U#+R{vw?K7<0*6)b5UqF|tXS;>@7wrd}Fz53C zBj`Maaq-rAqDOI@(s!5``#Vz!#(JB}-+@no5$nDg^!@u$Do=5o(s!5`^Y{0O7VIaN z$AeM|>c<7kSD!zA`D$_u5?YShg#f#XCV*cgtzCvx8IPzzpJT6;Wf`5v_~t0`Ro`Iu z{^Xy2J35};5=qiCnU{k2t=}0RzxD6S2>&Yu^xKI33F<$aek}CG(G$;xd_I7O_X&>0 z8yUU$r4XOXr(XgYMf2mt*A5e-pV3a}<0)6(y;z_0j;(7-3g>o@8AklNG#4)Sg!zJC|x zojgZ*9~wvAshIT1SpCN8f^@H)Bj;T8TkWCh`3@=`r@z;)=coK}DS9sFQ&gW-PycV~ z`Mh(KyZ7Hz?xOF%dvgC3?bn=(eYtTQxtrIw^ZKsPo|B_V;q~f!P|wK((tEgQ0;iz* zKCvB_=kP}eSM*!-{dY;fzjBUpp7q~E&Z5u1dvgBpIm-D|;C+s|kCj@yRP=2QM|_SE z{z=ljM-Ki{oZ$TX94==>{e1zQ&oK{QPdQO@FBRzsZ+N`Slk&d*L-d0F&koA|PZ=-9 zFUijbm$JX1`v+(Bd#v(f{2t>MshrN__jFqQ_`522*?^FwI`{9v$CKz&#j1^s%K!wSjolE~QtVia^gAN_@SFcFjWjMj(aUIq0S zKk=;fB$>Rk#q5vhJ`l?B_0(D4a>6|$-}?eo_)hQ7|D z7_SBU*2D(_?A_03|401@wWoS+R>wQ=4ie)wUdlYr*hKW=MDJx2rz2r$r{PQOkG>Bf zXVR5E&Zj^wYHv~B2ga*Uwsrh{3E<&(V{E?0v+M5;;CXUf)c@TsyLBmvD{{9Q;0r2#F9kC*j6xO{kK*^#DWSwlD zOJE1E%+3eWAIHO5&03@Pkmo#)RFV{dTw9OZOo-}A#p0G?~{bRV!lXT zLB8h5!oCYseS&Jw?{AMkklMOEO{BWI zJJUPkJ=+K3J@>^o_H6I&%rwUbHmBoT)7|l|Ok8T4`16LI8@h>BS7&nIzNXdP9X*|0 z-J49(%ATIBR?@1?seUWz_S7a#xi+0i_jeJs2h%IMy<2())@|?Y?dcy#cdnzPo4dAk z4XjJ|ZBKW1q~EeVGq5K0K>Fr%_oji(s|f1ZMwO>;q6TfDzjZy^((x;=ir>=HosLsr z_WJvK`kC1c+j}<*^rQiApoT(t>F$AkueYbGdw^t!gs$$)K>zlRfv%ozN+D*hgLH21 z+LrEIOX8~C=nbSZ@oUqm&aFKiTP}_7*h~`3Z0+hG0mR$;Q{9`=@pN}*TqN9@?%&px z$q=z?)7@R^&J~@V{X}!c1F5d9sf}CHb@hvvEM3;n*t9&gv76i8~zpHw>yVK0oZRz{AXVRU)giLy1O;2VZ+1;N`b!-ORThaqNdiu8nQu6pKHnMDq z4U;Bp-O!!PXt`^9c66`o@7a>>Ztdz#uh>d-JH6FAyD|frww~=|J_d{jMdwE2fW*;_}cER-gRAD(-|)_klvR6c5_cp@4A7M&oXk>q`Et|rj0t? z(A~RzzMWIEBZHW-$wd#Gl>PHdoTh9sJFKFlIkYAGpfbO?)1)H5>vW!bvS65%RxNb?^(z<1J4RwumO?Aub>+0+47uPSTUs}JczM;Oc zzNvos;=0B4ix)3mvUus@Ws4gYH!f~kynIRBlKLf!>8G8RE?KsuVM*hXrX|al)-A1H zx_If5rAwDCTiUR+acR@i<;&`p)h}DTY{{~v%a$!`Sk}0#Y1#6Ix`z6O#SKdumNqPF zXlQ6`XlhvASl3wJxVUjiBjZKZqo9deCn-(`MXi%PC2$OJ*R|Po2gVTopocsAt&}rD4@cNy$VSSg^Td22y32l9^)Q?u82% z-n%iqsjC~N6UM8y9cE7JAc=SP48&7$vMv2GTadb45r^tz2r1LmnT}tII@^&@FspFt zn9d;+YkGT3)^_T8GU$tX`^hF_Ttd-`TsQ1SLeb*lP)SK?s4O}uG&NG;P76(so;U4$ zHx`;1x?svh(OD(c?i~02$d=HH;ZKA<8+s-5wb0imf1~u9p>Ku0?VgB!FZAQc$?o#}f%b>zZ}ic88SojlkSvu*KRYQ%XGs;RTuUNf2+)+|mHzQgTzA);xG(~QT zE{c>Dmln6g=SL=&HiVm_RmG9X#jQ6qE}62VcwtG|(7fAjx~ycujH-DTojPclqQ_gEHDr>kr zGIQ`xuk2hmc};2Ab*nF2Q?hQ#4aH@Hzr3z&R`~iG8pBgd%8Hg3mkl*sP<%!BqC4F3 z#Zw;o{rk308vMfJH+M{VxUOQx`<~x*{qUdeT3);$a!=8`vg^ufqv!29e0Tb$$nxT8 zEl`OMo-TR#n+r-0{CH?dxjU<9YNTZ7-H%1KM5lyHiz}XJzrJ+f%E4cjWlDNu*FAXt zJA-Su@bkLqk(9k91DHqipcwO|zz49w{vjO)VPy&_l-~)5252 zJ0cs3CP&=z$&n^%Ty4oEw+!7rc^2`~P%@QJON$5p*W9v)iySu`jTRM!ii=8$OQ)4x zH0grLRZ}XaPA-pBgr`qGuXKhxGg9eZ5Uwh|(7h;BJtH2zES#9M(5(yChZeifg+3a3 zKJr4zuS36yz8ZQhJX-pPJH2V5_E z{jHtphxh*ehrj&QSEfw6wE2qFH?-Y-&s#Tirr)#oqg3RJU;ge7et2riwAD9srUxJX z-K5?|t7VK6NDb z=U;mHThnIDy!)PC{_3^S!EJs2@ZG7^-8~o0+VHlwfBeOFeCD$=W?o!<-SxMuh5o<& z9e?u0uYcn^r~c&^{h9X-Z2!Q#g^Qkh@ks9FSHAV#54C)7U)}qv|N84+9ld4k`n!uu z$}8qC`q|IBdm690X65QVd)IB+e)P-7zIOa?e@vS~@eP;k`fg;`wIvruilz;HZ0g_( z(dyEn3&R(bxRFJXrIF&WTU=B;t*o_tdU3Ki9J#2hG+Yuc4u`_w@Z?A|JgLZ?8jIdi zd|`20aj0nKDiH^jF^?w>n&G`j0>xT9HrmQ`L6E-Pv(xvpe#(ZHmO!|Nk=l@2~sc~RMn(wifLj~BggaPrJZ{cvRH z+w+PiN27z!Rt){3*o`kJqTKgH20s_RFkC)mLIZ6K^@dLJ$;d{J!AsMUq?fHXEzll{ z#GSp-cIV#n_B+#O#;YgC+pB+m`Tolm)Wt9FdG^HRp+oJ7i+|I;(0MK1@R8B>hF9J1 zHMnIpjdP}auknScspX3*hnLq~lv?v|7Z2auvNY9t>i*$d*Y?!h_TkSC-{!oMT9^LX z@H*$)HMct_zL)&?_fu{E^n;qaj-4F7EAIU4u2b$i);qm)6gELOA42rcT{EdJR^g_p z_d+2zGTXiQ!n-FmmzKJf5x0~kn&@TWD@ztsy75LT5GkQvDlQAna+^^+QbH(Yp(-~N zT23=kBt%-_UK|R$lK@8v!971TgXSy3CfX%#akwmWv3muTolGTbi9AsaM@e6bLz9>) 
zSfWZpfG-LyC;qgSS?(G);u3MU#J$xG6;CeN=!Qxs72goLkm$K?<5ZVwj81arl)Cpt z+#+HsbU`Q*u82&buSIUTON-<1tkA{uuO;Lbm$;!xr7r2JyFGM?`#?AnDs_v(e@8+f zZi_)NR8mwHa_g$=BXtBv-P+R0p*S_t4L4CH6%03*ghKnm?i9Be6^27!YH{4p*Er$# zxb1PLh<+PA;+Dljt@PS!YNC5VDC&MNR5g8yJFnz|Nejbu)V5G)zPpMf9|}#Tb}e$3 z5Y!-5KuG;&AlyC1}h{NR|2u zcy8#Y#9I-?7lx80NVy;KpxktrI+b4B<%WJiJx?0p?xFf3ZoI6vh*I+FS&7{jH;YhTkBvgEH z>Zdlm>9`IEaiEFhWMDM{&uq`Gg1DXXf%Z%eiLuD zm5`#%l-cI{NlMiAYK*KsW-sZNRBpP#{T!8>W4<4Ki|p+Jq-`cY+)3#O$0#Or4jKcf zyHnDRQ$F?~5rXxW+#+csly90=kY8u$BVWsD%q2~J(}wZspE3_G_T4Y_ln@@mm;W{| zAHaV|@J0ASp@#PAXe*0DoxukXY}aO!K82nOX}db9T%1ce&h3;w*?dQoC781P5PYtH z#J5p?kiHEcHT(lNh35vJ$;Z2ip7tYB?E3J#GC_Vuh`zQXDBt$1&}$u( zIR3cApP=&c_72)VqokRV?6e2c-$IN|87F;Nsr=3_vV!;vDg6?I{{gzc73BXA!I}q@ zrf=o-1+bm}6~Z?vjcn%x!O9EZe@n)XKYxn;a~Ojm}#fNm1f{5yweOEg^ybB2@7sAbE4w6 zTJUBIe#wH5S?~#?j7smc1=kvV()>XS-e5grefk!B)PgI_a~Z|2wctG#eAI#~ zlZExSTJUBI-fh8$EV%Lxi=G8k3d^@! z@E!{uvEWk{9A95ZuhoJFE%>knpRnMHy9?>HSa7ce@3G*+7JSr#Pg-yZCKL+3K3VV@ z3*KzOyDj*T1s}EG6Bg{;TUdYGf?F)O-GT=#c)taYSnvr8cHU~$Z^11V+-|`~Ex5wi zag}GU1($Rd*4Jdg?H1g7Um^Yp3qEbZB{=9nLF+$FzGM)t-BJh-_7uW~r1XTiq@3i0=CFN9B8@R|n-^9L=M4_pw6%CBx`A^u?t zK5D@&ULpR71;-yO%->_dBNkklEyUmdjzaj;q=2tva2p_ZH)`tu8 zk6Q5lcNXR!x8R+R6z1=-;HF0l^G{jup52A{J0B~Ak6Uo9Srcjh9k$>T7F_bKLVA_& zEregP;SU$)pR(W+e^8j;vcC|neWnoJW5LItE6i{GXd%4w`9k=(1-HIXn19%UYd=<) zzuSTj|50K7Q43!4$A$U(EcmnqxBf{X{%#9CVZp6O3h`V1v=H9>=|XtF1s}6u=QD-) zcUthtKP$|4K353WS#Y}rm*fiZ*I0171@E-rmn`_01-E{_kp7?rk9?sp|C9wceW@^i zvjrc1xiJ5P1y_E#Fn^5&4}PUE|F8wG`OCummcJ^5ov#+cO%{CAg6m!>#6NApm498B zf7pUYEO_v53h~!`y%64M!H0fWm_K5{$1M1y1)s9uk{=b)tFYi&3$C-^H5S}z!J93( z*MfIj@E!|3WWk3m_^1V+u;7yx?EJW}y_FUmx8NoVZn5AyEx6r+cUthE1@E)q{TBR^ z1&>(pQ42n1!6z*Ev;|k3wB%vIbr#%W!L1hDZo$14JZQoDEO@^KAGY8T3qEGSCoK51 z1v@_}Y;TDL>)p~4LGyp(QsEX0U*B=8`Mp;Dpat)-;QbbS*n&qa_?QKsu;5b`?EGV4 zd-WX~%5U7tue0D53vRXGb_?#c;6V%CW5N0^1?6YN%GWcmnt#g5FZpR<`{EY7->i?y z2n}0XyUqR<;4uB|4ABP#g$FJ8)cJ+^C9y)dcSa$++k)3r73Pmv@Sckb^Y>YB-Nl9Z z$1M0%yfFXN6@_rcm4$HJf{(Qn=C4^%2v=Nd!8aAcCoDK__DfWrm1h4#;aUqm6zCuH z$6*V8$%02L_>=`Z8w&Y5-ChV+qzd6)Y*15B{&rjN&d$R8{T4iuF3c~vuMm!JDuhoB z6vC&?`pvK3tk)E7f3Oh$&TJtZe^(*g@=PIo$bwHjSD3%~qlIwg3x)703qET6b>(m5 zlZE*4PZh#DEqJr>ueJPU<6kR$%z`UFS4eM-1@E-rea1gkdL>^hEI;_ALiqH{g>d_q z3*n<*DTMDd{-e@wH~yKzdn|a+?3W`yLVtS#`U<#pRi1v7zV7=*9xlSyg#+oG1J1%u z567=5=0hdvtqc2moQ3@qQ-*{b8W;PQxof0IR9uyIx|A9}KP5d!W6vqJs z;@zg*lA7PYP`HJ@pvZKSpwHkn9xXQ+CQ^H^smMlo*a!smgEig*;RS+SK0$flUCTH9 zRFE}3Y|3i;w7foxnM?VT=+8_Kjy`wN_)wrc_|)>1CcT(q)UD<9xs=8?89a@QcopR5 o(WQa%`rJ%oeQu^=(E64C1}dv<_leS%UOG~Ou!cYLA9nr!A641M!2kdN literal 0 HcmV?d00001 diff --git a/svm/tests/example-programs/simple-transfer/src/lib.rs b/svm/tests/example-programs/simple-transfer/src/lib.rs new file mode 100644 index 00000000000000..dc82f8d83330e5 --- /dev/null +++ b/svm/tests/example-programs/simple-transfer/src/lib.rs @@ -0,0 +1,26 @@ +use solana_program::{ + account_info::{AccountInfo, next_account_info}, entrypoint, entrypoint::ProgramResult, pubkey::Pubkey, + program::invoke, system_instruction, +}; + +entrypoint!(process_instruction); + + +fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + data: &[u8] +) -> ProgramResult { + let amount = u64::from_be_bytes(data[0..8].try_into().unwrap()); + let accounts_iter = &mut accounts.iter(); + let payer = next_account_info(accounts_iter)?; + let recipient = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + invoke( + &system_instruction::transfer(payer.key, recipient.key, amount), + &[payer.clone(), recipient.clone(), system_program.clone()], + )?; + + Ok(()) 
+}
\ No newline at end of file
diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs
index 1b8d1c08ccdf9e..b414281267b6e1 100644
--- a/svm/tests/integration_test.rs
+++ b/svm/tests/integration_test.rs
@@ -2,7 +2,9 @@
 use {
     crate::mock_bank::MockBankCallback,
-    solana_bpf_loader_program::syscalls::{SyscallAbort, SyscallLog, SyscallMemcpy, SyscallMemset},
+    solana_bpf_loader_program::syscalls::{
+        SyscallAbort, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, SyscallMemset,
+    },
     solana_program_runtime::{
         compute_budget::ComputeBudget,
         invoke_context::InvokeContext,
@@ -17,7 +19,7 @@ use {
         timings::ExecuteTimings,
     },
     solana_sdk::{
-        account::{AccountSharedData, WritableAccount},
+        account::{AccountSharedData, ReadableAccount, WritableAccount},
         bpf_loader,
         clock::{Epoch, Slot},
         epoch_schedule::EpochSchedule,
@@ -48,6 +50,7 @@ use {
 mod mock_bank;
 
 const BPF_LOADER_NAME: &str = "solana_bpf_loader_program";
+const SYSTEM_PROGRAM_NAME: &str = "system_program";
 const DEPLOYMENT_SLOT: u64 = 0;
 const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot
 const DEPLOYMENT_EPOCH: u64 = 0;
@@ -108,6 +111,10 @@ fn create_custom_environment<'a>() -> BuiltinProgram> {
         .register_function_hashed(*b"sol_memset_", SyscallMemset::vm)
         .expect("Registration failed");
 
+    function_registry
+        .register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm)
+        .expect("Registration failed");
+
     BuiltinProgram::new_loader(vm_config, function_registry)
 }
 
@@ -136,6 +143,24 @@ fn create_executable_environment(
         )),
     );
 
+    // In order to perform a transfer of native tokens using the system instruction,
+    // the system program builtin must be registered.
+    let account_data = native_loader::create_loadable_account_with_fields(
+        SYSTEM_PROGRAM_NAME,
+        (5000, DEPLOYMENT_EPOCH),
+    );
+    mock_bank
+        .account_shared_data
+        .insert(solana_system_program::id(), account_data);
+    program_cache.assign_program(
+        solana_system_program::id(),
+        Arc::new(LoadedProgram::new_builtin(
+            DEPLOYMENT_SLOT,
+            SYSTEM_PROGRAM_NAME.len(),
+            solana_system_program::system_processor::Entrypoint::vm,
+        )),
+    );
+
     program_cache.environments = ProgramRuntimeEnvironments {
         program_runtime_v1: Arc::new(create_custom_environment()),
         // We are not using program runtime v2
@@ -148,10 +173,25 @@ fn create_executable_environment(
     program_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {})));
 
     // Inform SVM of the registered builins
-    let registered_built_ins = vec![bpf_loader::id()];
+    let registered_built_ins = vec![bpf_loader::id(), solana_system_program::id()];
     (program_cache, registered_built_ins)
 }
 
+fn load_program(name: String) -> Vec {
+    // Loading the program file
+    let mut dir = env::current_dir().unwrap();
+    dir.push("tests");
+    dir.push("example-programs");
+    dir.push(name.as_str());
+    let name = name.replace('-', "_");
+    dir.push(name + "_program.so");
+    let mut file = File::open(dir.clone()).expect("file not found");
+    let metadata = fs::metadata(dir).expect("Unable to read metadata");
+    let mut buffer = vec![0; metadata.len() as usize];
+    file.read_exact(&mut buffer).expect("Buffer overflow");
+    buffer
+}
+
 fn prepare_transactions(
     mock_bank: &mut MockBankCallback,
 ) -> (Vec, Vec) {
@@ -186,15 +226,7 @@ fn prepare_transactions(
     transaction_checks.push((Ok(()), None, Some(20)));
 
     // Loading the program file
-    let mut dir = env::current_dir().unwrap();
-    dir.push("tests");
-    dir.push("example-programs");
-    dir.push("hello-solana");
-
dir.push("hello_solana_program.so"); - let mut file = File::open(dir.clone()).expect("file not found"); - let metadata = fs::metadata(dir).expect("Unable to read metadata"); - let mut buffer = vec![0; metadata.len() as usize]; - file.read_exact(&mut buffer).expect("Buffer overflow"); + let buffer = load_program("hello-solana".to_string()); // The program account must have funds and hold the executable binary let mut account_data = AccountSharedData::default(); @@ -212,8 +244,83 @@ fn prepare_transactions( .account_shared_data .insert(fee_payer, account_data); - // TODO: Include these examples as well: // A simple funds transfer between accounts + let program_account = Pubkey::new_unique(); + let sender = Pubkey::new_unique(); + let recipient = Pubkey::new_unique(); + let fee_payer = Pubkey::new_unique(); + let system_account = Pubkey::from([0u8; 32]); + let message = Message { + account_keys: vec![ + fee_payer, + sender, + program_account, + recipient, + system_account, + ], + header: MessageHeader { + // The signers must appear in the `account_keys` vector in positions whose index is + // less than `num_required_signatures` + num_required_signatures: 2, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![CompiledInstruction { + program_id_index: 2, + accounts: vec![1, 3, 4], + data: vec![0, 0, 0, 0, 0, 0, 0, 10], + }], + recent_blockhash: Hash::default(), + }; + + let transaction = Transaction { + signatures: vec![Signature::new_unique(), Signature::new_unique()], + message, + }; + + let sanitized_transaction = + SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + all_transactions.push(sanitized_transaction); + transaction_checks.push((Ok(()), None, Some(20))); + + // Setting up the accounts for the transfer + + // fee payer + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(80000); + mock_bank + .account_shared_data + .insert(fee_payer, account_data); + + let buffer = load_program("simple-transfer".to_string()); + + // The program account must have funds and hold the executable binary + let mut account_data = AccountSharedData::default(); + // The executable account owner must be one of the loaders. 
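+    // (Here that is the legacy BPF loader; the runtime only executes accounts that
+    // are flagged executable and owned by one of the recognized loaders.)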
+ account_data.set_owner(bpf_loader::id()); + account_data.set_data(buffer); + account_data.set_executable(true); + account_data.set_lamports(25); + mock_bank + .account_shared_data + .insert(program_account, account_data); + + // sender + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(900000); + mock_bank.account_shared_data.insert(sender, account_data); + + // recipient + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(900000); + mock_bank + .account_shared_data + .insert(recipient, account_data); + + // The program account is set in `create_executable_environment` + + // TODO: Include these examples as well: + // An example with a sysvar // A transaction that fails // A transaction whose verification has already failed @@ -256,7 +363,7 @@ fn svm_integration() { false, ); - assert_eq!(result.execution_results.len(), 1); + assert_eq!(result.execution_results.len(), 2); assert!(result.execution_results[0] .details() .unwrap() @@ -269,4 +376,22 @@ fn svm_integration() { .as_ref() .unwrap(); assert!(logs.contains(&"Program log: Hello, Solana!".to_string())); + + assert!(result.execution_results[1] + .details() + .unwrap() + .status + .is_ok()); + + // The SVM does not commit the account changes in MockBank + let recipient_key = transactions[1].message().account_keys()[3]; + let recipient_data = result.loaded_transactions[1] + .0 + .as_ref() + .unwrap() + .accounts + .iter() + .find(|key| key.0 == recipient_key) + .unwrap(); + assert_eq!(recipient_data.1.lamports(), 900010); } From f1a82cb666340ee606a2d5e384d944585b0d2611 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 22 Mar 2024 11:56:30 -0700 Subject: [PATCH 052/153] [TieredStorage] Use mmap.len() in TieredStorage::file_size() for HotStorage (#381) #### Problem The current implementation of TieredStorage::file_size() requires a sys-call to provide the file size. #### Summary of Changes Add len() API to TieredStorageReader, and have HotStorageReader() implement the API using Mmap::len(). #### Test Plan Update existing unit-test to also verify HotStorageReader::len(). --- accounts-db/src/tiered_storage.rs | 9 ++------- accounts-db/src/tiered_storage/hot.rs | 21 ++++++++++++++++++++- accounts-db/src/tiered_storage/readable.rs | 14 ++++++++++++++ 3 files changed, 36 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 3f655896a28ed6..20626143d7036d 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -27,7 +27,7 @@ use { solana_sdk::account::ReadableAccount, std::{ borrow::Borrow, - fs::{self, OpenOptions}, + fs, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, Ordering}, @@ -156,12 +156,7 @@ impl TieredStorage { /// Returns the size of the underlying accounts file. pub fn file_size(&self) -> TieredStorageResult { - let file = OpenOptions::new().read(true).open(&self.path); - - Ok(file - .and_then(|file| file.metadata()) - .map(|metadata| metadata.len()) - .unwrap_or(0)) + Ok(self.reader().map_or(0, |reader| reader.len())) } } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index c1e92e4469b269..4d3fabfec40a63 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -359,6 +359,16 @@ impl HotStorageReader { Ok(Self { mmap, footer }) } + /// Returns the size of the underlying storage. 
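+    ///
+    /// Reading the cached mmap length avoids the sys-call that the previous
+    /// file-metadata based `file_size()` needed.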
+    pub fn len(&self) -> u64 {
+        self.mmap.len() as u64
+    }
+
+    /// Returns whether the underlying storage is empty.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
     /// Returns the footer of the underlying tiered-storage accounts file.
     pub fn footer(&self) -> &TieredStorageFooter {
         &self.footer
@@ -709,7 +719,7 @@ pub mod tests {
         super::*,
         crate::tiered_storage::{
             byte_block::ByteBlockWriter,
-            file::TieredWritableFile,
+            file::{TieredStorageMagicNumber, TieredWritableFile},
             footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE},
             hot::{HotAccountMeta, HotStorageReader},
             index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset},
@@ -1420,5 +1430,14 @@ pub mod tests {
             let partial_accounts = hot_storage.accounts(IndexOffset(i as u32)).unwrap();
             assert_eq!(&partial_accounts, &accounts[i..]);
         }
+        let footer = hot_storage.footer();
+
+        let expected_size: u64 = footer.owners_block_offset
+            + std::mem::size_of::() as u64 * footer.owner_count as u64
+            + std::mem::size_of::() as u64
+            + std::mem::size_of::() as u64;
+
+        assert!(!hot_storage.is_empty());
+        assert_eq!(expected_size, hot_storage.len());
     }
 }
diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs
index 15d678ffc856fc..cc2b6fdee0356f 100644
--- a/accounts-db/src/tiered_storage/readable.rs
+++ b/accounts-db/src/tiered_storage/readable.rs
@@ -30,6 +30,20 @@ impl TieredStorageReader {
         }
     }
 
+    /// Returns the size of the underlying storage.
+    pub fn len(&self) -> u64 {
+        match self {
+            Self::Hot(hot) => hot.len(),
+        }
+    }
+
+    /// Returns whether the underlying storage is empty.
+    pub fn is_empty(&self) -> bool {
+        match self {
+            Self::Hot(hot) => hot.is_empty(),
+        }
+    }
+
     /// Returns the footer of the associated HotAccountsFile.
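     /// (Currently this dispatches to the only variant, `Hot`.)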
pub fn footer(&self) -> &TieredStorageFooter { match self { From 9a447ab6bdba1166aead7c37c154e6af81240fcb Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 22 Mar 2024 14:05:20 -0700 Subject: [PATCH 053/153] SVM: bank to use program cache from transaction_processor (#397) --- runtime/src/bank.rs | 64 ++++++++++++++++++--------------------- runtime/src/bank/tests.rs | 6 ++-- 2 files changed, 34 insertions(+), 36 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 29218efcdc7c69..bb2c11fa6bd913 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -98,7 +98,6 @@ use { invoke_context::BuiltinFunctionWithContext, loaded_programs::{ LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, ProgramCache, - ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, timings::{ExecuteTimingType, ExecuteTimings}, @@ -570,7 +569,6 @@ impl PartialEq for Bank { accounts_data_size_delta_off_chain: _, fee_structure: _, incremental_snapshot_persistence: _, - program_cache: _, epoch_reward_status: _, transaction_processor: _, check_program_modification_slot: _, @@ -830,8 +828,6 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, - program_cache: Arc>>, - epoch_reward_status: EpochRewardStatus, transaction_processor: TransactionBatchProcessor, @@ -1020,10 +1016,6 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - program_cache: Arc::new(RwLock::new(ProgramCache::new( - Slot::default(), - Epoch::default(), - ))), epoch_reward_status: EpochRewardStatus::default(), transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ -1036,7 +1028,10 @@ impl Bank { bank.epoch_schedule.clone(), bank.fee_structure.clone(), bank.runtime_config.clone(), - bank.program_cache.clone(), + Arc::new(RwLock::new(ProgramCache::new( + Slot::default(), + Epoch::default(), + ))), ); let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; @@ -1343,7 +1338,6 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: parent.fee_structure.clone(), - program_cache: parent.program_cache.clone(), epoch_reward_status: parent.epoch_reward_status.clone(), transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ -1356,7 +1350,7 @@ impl Bank { new.epoch_schedule.clone(), new.fee_structure.clone(), new.runtime_config.clone(), - new.program_cache.clone(), + parent.transaction_processor.program_cache.clone(), ); let (_, ancestors_time_us) = measure_us!({ @@ -1396,7 +1390,7 @@ impl Bank { .min(slots_in_epoch) .checked_div(2) .unwrap(); - let mut program_cache = new.program_cache.write().unwrap(); + let mut program_cache = new.transaction_processor.program_cache.write().unwrap(); if program_cache.upcoming_environments.is_some() { if let Some((key, program_to_recompile)) = program_cache.programs_to_recompile.pop() { @@ -1409,7 +1403,8 @@ impl Bank { recompiled .ix_usage_counter .fetch_add(program_to_recompile.ix_usage_counter.load(Relaxed), Relaxed); - let mut program_cache = new.program_cache.write().unwrap(); + let mut program_cache = + new.transaction_processor.program_cache.write().unwrap(); program_cache.assign_program(key, recompiled); } } else if new.epoch() != program_cache.latest_root_epoch @@ -1419,7 +1414,7 @@ impl Bank { // so we can try to recompile loaded programs 
before the feature transition hits. drop(program_cache); let (feature_set, _new_feature_activations) = new.compute_active_feature_set(true); - let mut program_cache = new.program_cache.write().unwrap(); + let mut program_cache = new.transaction_processor.program_cache.write().unwrap(); let program_runtime_environment_v1 = create_program_runtime_environment_v1( &feature_set, &new.runtime_config.compute_budget.unwrap_or_default(), @@ -1492,46 +1487,46 @@ impl Bank { ); parent + .transaction_processor .program_cache .read() .unwrap() .stats .submit(parent.slot()); - new.program_cache.write().unwrap().stats.reset(); + new.transaction_processor + .program_cache + .write() + .unwrap() + .stats + .reset(); new } pub fn set_fork_graph_in_program_cache(&self, fork_graph: Arc>) { - self.program_cache + self.transaction_processor + .program_cache .write() .unwrap() .set_fork_graph(fork_graph); } pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) { - self.program_cache + self.transaction_processor + .program_cache .write() .unwrap() .prune(new_root_slot, new_root_epoch); } pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) { - self.program_cache + self.transaction_processor + .program_cache .write() .unwrap() .prune_by_deployment_slot(deployment_slot); } - pub fn get_runtime_environments_for_slot(&self, slot: Slot) -> ProgramRuntimeEnvironments { - let epoch = self.epoch_schedule.get_epoch(slot); - self.program_cache - .read() - .unwrap() - .get_environments_for_epoch(epoch) - .clone() - } - /// Epoch in which the new cooldown warmup rate for stake was activated pub fn new_warmup_cooldown_rate_epoch(&self) -> Option { self.feature_set @@ -1891,7 +1886,6 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - program_cache: Arc::new(RwLock::new(ProgramCache::new(fields.slot, fields.epoch))), epoch_reward_status: fields.epoch_reward_status, transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ -1905,7 +1899,7 @@ impl Bank { bank.epoch_schedule.clone(), bank.fee_structure.clone(), bank.runtime_config.clone(), - bank.program_cache.clone(), + Arc::new(RwLock::new(ProgramCache::new(fields.slot, fields.epoch))), ); bank.finish_init( @@ -5084,7 +5078,7 @@ impl Bank { } = execution_result { if details.status.is_ok() { - let mut cache = self.program_cache.write().unwrap(); + let mut cache = self.transaction_processor.program_cache.write().unwrap(); cache.merge(programs_modified_by_tx); } } @@ -6110,7 +6104,7 @@ impl Bank { } } - let mut program_cache = self.program_cache.write().unwrap(); + let mut program_cache = self.transaction_processor.program_cache.write().unwrap(); program_cache.latest_root_slot = self.slot(); program_cache.latest_root_epoch = self.epoch(); program_cache.environments.program_runtime_v1 = Arc::new( @@ -7195,7 +7189,8 @@ impl Bank { debug!("Adding program {} under {:?}", name, program_id); self.add_builtin_account(name.as_str(), &program_id, false); self.builtin_program_ids.insert(program_id); - self.program_cache + self.transaction_processor + .program_cache .write() .unwrap() .assign_program(program_id, Arc::new(builtin)); @@ -7500,7 +7495,8 @@ impl Bank { self.store_account(new_address, &AccountSharedData::default()); // Unload a program from the bank's cache - self.program_cache + self.transaction_processor + .program_cache .write() .unwrap() 
.remove_programs([*old_address].into_iter()); @@ -7893,7 +7889,7 @@ impl Bank { LoadedProgramsForTxBatch::new_from_cache( slot, self.epoch_schedule.get_epoch(slot), - &self.program_cache.read().unwrap(), + &self.transaction_processor.program_cache.read().unwrap(), ) } } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index edeeb621966f12..bdacbb1304a028 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11910,6 +11910,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 16); let current_env = bank + .transaction_processor .program_cache .read() .unwrap() @@ -11917,6 +11918,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { .program_runtime_v1 .clone(); let upcoming_env = bank + .transaction_processor .program_cache .read() .unwrap() @@ -11926,7 +11928,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { // Advance the bank to recompile the program. { - let program_cache = bank.program_cache.read().unwrap(); + let program_cache = bank.transaction_processor.program_cache.read().unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 1); assert!(Arc::ptr_eq( @@ -11937,7 +11939,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); { - let program_cache = bank.program_cache.read().unwrap(); + let program_cache = bank.transaction_processor.program_cache.read().unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 2); assert!(Arc::ptr_eq( From e9cc9f8379b8b116ad007f9e76ec501ff93f0cc6 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:21:24 -0700 Subject: [PATCH 054/153] [TieredStorage] Refactor file_size() code path (#400) #### Problem TieredStorage::file_size() essentially supports AccountsFile::len(), but its API is inconsistent with AccountsFile's. #### Summary of Changes Refactor TieredStorage::file_size() to ::len() and share the same API as AccountsFile's. #### Test Plan Build Existing unit-tests. --- accounts-db/src/tiered_storage.rs | 15 ++++++++++----- accounts-db/src/tiered_storage/hot.rs | 12 ++++++------ accounts-db/src/tiered_storage/readable.rs | 2 +- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 20626143d7036d..a0d8eea4010b94 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -155,8 +155,13 @@ impl TieredStorage { } /// Returns the size of the underlying accounts file. - pub fn file_size(&self) -> TieredStorageResult { - Ok(self.reader().map_or(0, |reader| reader.len())) + pub fn len(&self) -> usize { + self.reader().map_or(0, |reader| reader.len()) + } + + /// Returns whether the underlying storage is empty. 
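+    ///
+    /// If no reader can be obtained, `len()` reports zero, so such a storage
+    /// is considered empty as well.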
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
 }
@@ -220,7 +225,7 @@ mod tests {
         assert!(tiered_storage.is_read_only());
         assert_eq!(
-            tiered_storage.file_size().unwrap() as usize,
+            tiered_storage.len(),
             std::mem::size_of::()
                 + std::mem::size_of::()
         );
@@ -238,7 +243,7 @@ mod tests {
         assert!(!tiered_storage.is_read_only());
         assert_eq!(tiered_storage.path(), tiered_storage_path);
-        assert_eq!(tiered_storage.file_size().unwrap(), 0);
+        assert_eq!(tiered_storage.len(), 0);
         write_zero_accounts(&tiered_storage, Ok(vec![]));
     }
@@ -252,7 +257,7 @@
         assert_eq!(footer.index_block_format, HOT_FORMAT.index_block_format);
         assert_eq!(footer.account_block_format, HOT_FORMAT.account_block_format);
         assert_eq!(
-            tiered_storage_readonly.file_size().unwrap() as usize,
+            tiered_storage_readonly.len(),
             std::mem::size_of::()
                 + std::mem::size_of::()
         );
diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs
index 4d3fabfec40a63..414d74b2eb81b7 100644
--- a/accounts-db/src/tiered_storage/hot.rs
+++ b/accounts-db/src/tiered_storage/hot.rs
@@ -360,8 +360,8 @@ impl HotStorageReader {
     }
 
     /// Returns the size of the underlying storage.
-    pub fn len(&self) -> u64 {
-        self.mmap.len() as u64
+    pub fn len(&self) -> usize {
+        self.mmap.len()
     }
 
     /// Returns whether the underlying storage is empty.
@@ -1432,10 +1432,10 @@ pub mod tests {
         }
         let footer = hot_storage.footer();
 
-        let expected_size: u64 = footer.owners_block_offset
-            + std::mem::size_of::() as u64 * footer.owner_count as u64
-            + std::mem::size_of::() as u64
-            + std::mem::size_of::() as u64;
+        let expected_size = footer.owners_block_offset as usize
+            + std::mem::size_of::() * footer.owner_count as usize
+            + std::mem::size_of::()
+            + std::mem::size_of::();
 
         assert!(!hot_storage.is_empty());
         assert_eq!(expected_size, hot_storage.len());
diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs
index cc2b6fdee0356f..008e805689df57 100644
--- a/accounts-db/src/tiered_storage/readable.rs
+++ b/accounts-db/src/tiered_storage/readable.rs
@@ -31,7 +31,7 @@ impl TieredStorageReader {
     }
 
     /// Returns the size of the underlying storage.
- pub fn len(&self) -> u64 { + pub fn len(&self) -> usize { match self { Self::Hot(hot) => hot.len(), } From bcaf7a8f6c9d93f32736879f68e4bae9eb876e3e Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Fri, 22 Mar 2024 20:26:46 -0300 Subject: [PATCH 055/153] Sysvar example (#399) --- .../example-programs/clock-sysvar/Cargo.toml | 12 ++ .../clock-sysvar/clock_sysvar_program.so | Bin 0 -> 44168 bytes .../example-programs/clock-sysvar/src/lib.rs | 18 +++ svm/tests/integration_test.rs | 111 +++++++++++++++++- 4 files changed, 135 insertions(+), 6 deletions(-) create mode 100644 svm/tests/example-programs/clock-sysvar/Cargo.toml create mode 100755 svm/tests/example-programs/clock-sysvar/clock_sysvar_program.so create mode 100644 svm/tests/example-programs/clock-sysvar/src/lib.rs diff --git a/svm/tests/example-programs/clock-sysvar/Cargo.toml b/svm/tests/example-programs/clock-sysvar/Cargo.toml new file mode 100644 index 00000000000000..082c29bbfe34fd --- /dev/null +++ b/svm/tests/example-programs/clock-sysvar/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "clock-sysvar-program" +version = "2.0.0" +edition = "2021" + +[dependencies] +solana-program = { path = "../../../../sdk/program", version = "=2.0.0" } + +[lib] +crate-type = ["cdylib", "rlib"] + +[workspace] \ No newline at end of file diff --git a/svm/tests/example-programs/clock-sysvar/clock_sysvar_program.so b/svm/tests/example-programs/clock-sysvar/clock_sysvar_program.so new file mode 100755 index 0000000000000000000000000000000000000000..dee43a1e38b28684312760f0b81203605ae79c6c GIT binary patch literal 44168 zcmeHw3wTu5b?!Mcb2K9%81Wbo0-+I4Au}L>UdYA<+t@%HBLib$8;k~NEM(AggvIgP z#3PJt?9_?;i0wE{iZC|BX=9wW!Fe=8+Sn%PO-$}>5|TEJlFw*&l8*pBbl@5}wZZkyR>?Y;KeYp=c5+Rt-z&-xp#^O>e0!EgN90JJv9DQk7chUfJI zMQe=)BS63TMo8gWlor(_ym8OHTux%6h+&}T(Z8WX4141X(>PsXcewy%H9W=n-uTQ^ zPM289`6wCn9e$GYz41v-HY5&d*w*wC3u;tKmLsE?m_SqF0kqyQn*AgLiX(m&ghdiU zxQ~#Ig$=_p2tmV7PXWw$$bXgKkE&e5$S&Xqsn?!{S`0&A;O{6H$9Ja}Q3HjJH%?4v zx=DY&QAm$T|5V}u@p1#QiTJNq2oe7l#aXsofkrW9KCAl(DW|9zGby%=Pb!3@zsNvW zCH>0`Y9`UyY)n@1R)d;lxas`yeUzXB=**yJOLLs`he?}Ie!J?Yh<}Fx6?B)M<0(H| zm4BFiQU4SpLUF`@tAVCQ{I@Im?(*~Q2POy9znC5^gNY{na|{wImD@pjom=k)Prc0d z2Nj{Dzr>iO%H68u;;ygRQy=7H83U?Z#D9y@h=_lu(iC^O5s$o~f|k*v%k5Bl7xA|k zNOza}hKJAjs$7RIx67F1!SD9qLw=Ugs_}0(iaq#!7j4f@g&!n!hQ_B$E{q`|drT!8 zK}x(ZHa6zwQ*0d==!ZOti!H-wdW_597kd6Ob%bSXQHk#IPk7`5y|auPCg2_Nv=4f3 z8La2v7y9o>F~SHG_H^gry(GD;`leet60MQd{DeZzyt^zfUDchbWj+XGsi zi}w`|9``q_ITvq-2aoFwOu*~#v=@}1m;2~J{A(7SNg+GTG4$#r={xX7Dan5BjIsy& z_)BaT7r|>GA8QYvRQVB;+iwq_Rj}x%J;gtFvm$Ti=33ea*7kdH2L24h$rB^Z> z_frA|djXH&U zd4S9pve$548F#PKzEdOy5|0tP2EWv97K?F;K_hPQLzdlu@c@g**j293)%eUD8frX= zO=CUAxGe$vq(x65dJ`i*+dxa=ZbGUVW)rgX_u3Qm`tP_f${PD4LSj;M6dInAu8 ziqj=**KYhCh0pfwPCr2D*`v%a^mmA0wBsduwucWW`u5SoDi(W{b^(5q>J>O!&vddC zI@bLc31JpQ^wzPoBPJ`3eKjji+{gKmAZA}AKW&%Ug9=pk{&C_fqGl(sb0(a=i_1H5 zoyqhqHA_T&Tt0iRj{C>y{i&kI3S+6+E7@02`}K>|{~?9XLblZGg8E0-pT3{#1s}i= zKP38?&2YUAe%?4fa?)B(jvO_N{s29p8}V`d(%xoWU)-!v71#-;o1LXVqh0J{375+* z;q*mh7qDpXr(#_X{LrVLfBt!3QRIuIbbXYMkWF#Cgz&29v7ORkQgIW%%qLAP4CUb(`5 zaf`r>ktr?z9hWn?fuMU0J&K;&M|s=>e3L6ZyHJ&P?9;xE;dma;N2Gt8cHPPCIz?=$ z_CBP?({)@fJjk)EyX>Q*Yijb*KRuoME&UkxSGpCgB7U?TC5C^i@LH#1 z)?^sH!(*E63I&V(rz=Gt&Z&INKcL4!A_t20Ft;yOja2xbzfgGDbqX}3zkBs~n{H$} z5p$IR#Mm?`_k=1S);g3QzP% z;;grxr7So5Q7$L)K)GYyUcD;Xol$X&S{_J`iD84+^(88qrKO-^xi%)!gRIW zTgImqoUUX&%HGfA!`jdECb+>4zuG>9-(Ec*N|!Ldu#YH_aNFIBZ_&KiV{ga@9{Brn zDkJ^k*jEwv2VnZ&NEfjlq&eA{r=gu2kYB)dlrGZ#tD}ih4Wso5Rt(UAq}_adlK9G! 
zKS79Np-%yLs0nz$2VZ|eX~Lh_XI9%U@DPMgnGZ|9KtEKzo)_ECa6hM)==M>0U^sZ1 zUb<52!%;0)t%q^19yW4*cDJg=fZl@dbmd9auUMC$J?UBaMXVwtJIHZ(9mkO4c9pO7 z?a8Rh2tTFssrW7;+)T0B57Fhs-ff;vk$+(Qw2!hr?VQE}c8{|oq>t%aIUjO;gZjaK zmSK^bj7zd_BK|@2*Xu`9Tg88}+{XPU%O|DBqx56kF6l?dZ>G3E#2=@pYk9SD`z_-( zB~Q#-&OOFy_yzxOQkv+8#4^6gdH~~`1Zw`6?32nkFhDe-gdpv0Cw#!0tLcYA($xE& zW)b-sz)N}G)4WpUJATUCr}Qa8>l&z%W$;89`3&z-_(^jL?|YhydEe7);C)YXvzpPO zJnu{K_<-^Zx9IYdm0rQGlZqJ;vqtTAq8ub=DJ>+spmGe~qRSQXKBrm5`M zh$>gE@KBCnc63Q|7VmGGF}1%5`~rnP$H8Zq_tBE(Oy1WdIbn(me5k&qs4`y+v!jcU z9H>gd!`T4iL(Z0JHRCh9mQs;FpZ77T-5BaXe;UP>;*RkdZUj2z&r|!Cz~g-#?Qf`3 zhHEw69JOy5G3OdEsn*5gctEh!?1$+CZmr{d%ZLF~^2YwPrFKUcM*mm_k2{bnEK>H- z0K=78#v=M;K2FxWEtcA0(n|SnUhZ>2(1Rse#(2MA-_}yCVfyeemg)_L z(N3pdh&k{Ff2v|qT6$FWb^c9^BW#zBzomTO!S4Q9$FMK(0m)cOOSm5NljSPk)jiPF zS_JO7zIfMOf)z;XE$s(o-6iWNLdVWstDawY?L2#&%VVGD9Qa^%opq)&UJ#uMLIvM5 zsl#L)D*hv`T1DvzO^<{CQttJ$qVJ;fFQiDa9-L4P#!mtCJ2hXBYx;hb?5wMik9vd; z#E_61hJ2;n-t5Hw0WnB(=VC4C)SQobtUC>ASWZen4%atzUsxwd5^1bxF#XHCy6ATLO zeTzR-xnmsuuaG;Y8W)p0^h?s6eK)QD5g$ey;%6PCg>jDk{`h_yyydtZoT6t)Iue|R zWXJEpA0Qp|Tt;>&{xG6uC9uI2JNx5&xK7$>ov%erlhCw}jKh zF6}<84?doE$oYq?(`EgH^*k``1}-1oz%kMxDcCpsK=u%)$9=z1^050@A0j@jUpx&` z^B-!OI)^}c>5tfbNM#tYJyrBor&E0horf?hq_vK2XS3GJxNn9^Z>M64K0zwx;zlHd z*&fZyLq5eX+NyDkY5MkI`Z}CpV(tNi6=_*26PPi8G>We zSbid()=J)=RWS8&Y#K_9`=^W1K~IhEXD!IVZ>IXLqLk|?&QP2xc}vMd_yRwe_alEq z<#YW5LB3DWOtC6YaR&K!zp2V&9zdd_{;r^3(&OFGa*7kI&pW3fm-tSoNXe&!EUvYQ@tF^IGSF}^kh9D{7!5S^CSFTPkfywQmMPO90`W%S!#7LF2{*s zrRGcTMt&3Njq?SIgdFLA1N@Cr?B15_(?s9k0f+&1|@zeKn{f>R;b%k@T#qP+kZOvyd_jB1JTz*X=cl^md%IVow zj$t3qgHNYCr-yZUr(G`m-Qcf-@zPInz3?jt!YAlHLhbUV%RJj6>vrQyx$I&^#*lL( z&{<9kclb5xdh#z3g@EyE$Yt;8cQM`_cz|i+SH`>aLp*-kyS2R}qR2Mf_X{u%f{#^X zI&soi1gzg6Y0*pBUjtq-@tfstFyy>V+6Vu+<)6R)Wze|L-cgY*XGEm`r0;i7MdR5= zUZ0vS$J4kxW!@>{r_}59J4v%Z`HcweZy;d4h4uOPe#jD)ehR{*I`_`Rt_aB&FopC? zSS7 zz~_0xyJ|;2@E5#$e)`7S6`_4FNDT7aL2)zb09cIZdl=FW==YD2DC5c-kLv}}ebfPQ zRSI-xC_Y;6f_Gv0WGGJQ`LgUM<;asNjF>#0zN_{U!?MnQSNPPhy}Eu!PpMA$CGdzK z=U=;c8e|kIP2JbP{x3t6ujhxmSp3ES$&ZRU`<61krYbL}GJ7k3rDE6v z<{NKE2NIP1PvH-IA-$Pmtb=4;KR}7Fv}TG`dJh2<2IwEg*=G8u(zV|mAY|$~wV$oh z_Y**2fXp8~*i13fi>Yt_^#SB?!2Yir4sX@zgrAdspH8QGa?;rysMSnP`dW=o{O6=E z2DU@mb zl=TSGS5bgoph}bjEb|+ohkO#PA?YX=rm%!!lLG32M??Bm6bC6r{imoL_Ag`8I_Vd7 z*REoWPycN{Brodp|F$1GPycWGc?bI;2T1|;gc%9^==LX7lKR0o`Y53wR({|p=V4v) zD@u_20NBq-Q&wsvWl#XTgM9a1?XNnHvd`Ho_Xdvgc$3!iPN_e&7*>sOnde8AF<1Gj zEH4WUdvR3d+e<1qwmC!ARq$`i534No13oa$JWT&EAIp+|K*0PMk8u+t5Un=`nGX0s zNLLQ2dO`1qd%j!Qqtby#qzUhUg430Una`uDxY{2%Lh?yhW;k8@N5~E1Sh|wWKSBTS z2e}M><-A|cnZ-|{9Q?AJAER989(TDD%!j@Y=$=RS5Fa8R8L!cwXGj1O$!kENQ5ca`reIuAEgEL^{H&dC;AWLMcW%Ms>H#WUwYF%x<|G11>IlB z|4Mq^H-PZRXO4cwex`-<_55n{0#($xcPsZ^WZzQwM}Pf(^1`S{?MUhNXugG>=(Efl zAva>x*xShSIq$v_+Huyc$ND}u(sS2m=eVDTRrPAT#d#Lw{UQylhzT6Sej(~hZ#u*H z$22~rk5Ybm6YIC^E2cM%D7^G0rjLE3oO<;=v9z9#%K3-8|4tKK_de&32o`>_dVQX* z^w@!`9nd$Si*}!M+ns*DME2p_^`21Wn1A;<;z{u1YWE0Wy>Em59(BnZ{R8{JJru;? 
[GIT binary patch data omitted: many lines of base85-encoded bytes for what appears to be the compiled clock-sysvar example program binary added by this patch; the encoding carries no human-readable content]
zzflj|RF<2+#{+LK&&|(x;3M;M^Ur%=V_|OoSv{W>`JAiG&EHd(3$NAlL&0xo$ju*F zmkV##^C!VS?SVr(a`7`ebKxpIKa%pHmfZXi4}9*9-28?+bKxNmoYC_cp`X$78G+Ax z;3_@8ko*=8Jmi62((?hquhQecz^Bu>{8j1kRPtLq@HxFcL_U_*JudwMeBjVHJuHof zT=sya>sgmQV5z$2-f?_o%FFwc2l|cWZ8k1fQLIjnlKYnTbsNk3D5ea~wL4Xu=5eQ9s9wBi9gzXC6@-fN-?^6EolZ-6!NnKXjC*@_&Y7yn5t9=?z_U$B= zeKeE@pHjY9 ProgramResult { + + let time_now = Clock::get().unwrap().unix_timestamp; + let return_data = time_now.to_be_bytes(); + set_return_data(&return_data); + Ok(()) +} diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index b414281267b6e1..e435ce093975c9 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -3,7 +3,8 @@ use { crate::mock_bank::MockBankCallback, solana_bpf_loader_program::syscalls::{ - SyscallAbort, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, SyscallMemset, + SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, + SyscallMemset, SyscallSetReturnData, }, solana_program_runtime::{ compute_budget::ComputeBudget, @@ -21,7 +22,7 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader, - clock::{Epoch, Slot}, + clock::{Clock, Epoch, Slot, UnixTimestamp}, epoch_schedule::EpochSchedule, fee::FeeStructure, hash::Hash, @@ -30,12 +31,15 @@ use { native_loader, pubkey::Pubkey, signature::Signature, + sysvar::SysvarId, transaction::{SanitizedTransaction, Transaction}, }, solana_svm::{ account_loader::TransactionCheckResult, transaction_error_metrics::TransactionErrorMetrics, - transaction_processor::{ExecutionRecordingConfig, TransactionBatchProcessor}, + transaction_processor::{ + ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingCallback, + }, }, std::{ cmp::Ordering, @@ -43,6 +47,7 @@ use { fs::{self, File}, io::Read, sync::{Arc, RwLock}, + time::{SystemTime, UNIX_EPOCH}, }, }; @@ -115,6 +120,14 @@ fn create_custom_environment<'a>() -> BuiltinProgram> { .register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm) .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::vm) + .expect("Registration failed"); + + function_registry + .register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm) + .expect("Registration failed"); + BuiltinProgram::new_loader(vm_config, function_registry) } @@ -172,6 +185,25 @@ fn create_executable_environment( program_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {}))); + // We must fill in the sysvar cache entries + let time_now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64; + let clock = Clock { + slot: DEPLOYMENT_SLOT, + epoch_start_timestamp: time_now.saturating_sub(10) as UnixTimestamp, + epoch: DEPLOYMENT_EPOCH, + leader_schedule_epoch: DEPLOYMENT_EPOCH, + unix_timestamp: time_now as UnixTimestamp, + }; + + let mut account_data = AccountSharedData::default(); + account_data.set_data(bincode::serialize(&clock).unwrap()); + mock_bank + .account_shared_data + .insert(Clock::id(), account_data); + // Inform SVM of the registered builins let registered_built_ins = vec![bpf_loader::id(), solana_system_program::id()]; (program_cache, registered_built_ins) @@ -319,8 +351,53 @@ fn prepare_transactions( // The program account is set in `create_executable_environment` + // A program that utilizes a Sysvar + let program_account = Pubkey::new_unique(); + let fee_payer = Pubkey::new_unique(); + let message = 
Message { + account_keys: vec![fee_payer, program_account], + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let transaction = Transaction { + signatures: vec![Signature::new_unique()], + message, + }; + let sanitized_transaction = + SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + all_transactions.push(sanitized_transaction); + transaction_checks.push((Ok(()), None, Some(20))); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(80000); + mock_bank + .account_shared_data + .insert(fee_payer, account_data); + + let buffer = load_program("clock-sysvar".to_string()); + + // The program account must have funds and hold the executable binary + let mut account_data = AccountSharedData::default(); + // The executable account owner must be one of the loaders. + account_data.set_owner(bpf_loader::id()); + account_data.set_data(buffer); + account_data.set_executable(true); + account_data.set_lamports(25); + mock_bank + .account_shared_data + .insert(program_account, account_data); + // TODO: Include these examples as well: - // An example with a sysvar // A transaction that fails // A transaction whose verification has already failed @@ -342,10 +419,21 @@ fn svm_integration() { program_cache.clone(), ); + // The sysvars must be put in the cache + batch_processor + .sysvar_cache + .write() + .unwrap() + .fill_missing_entries(|pubkey, callback| { + if let Some(account) = mock_bank.get_account_shared_data(pubkey) { + callback(account.data()); + } + }); + let mut error_counter = TransactionErrorMetrics::default(); let recording_config = ExecutionRecordingConfig { enable_log_recording: true, - enable_return_data_recording: false, + enable_return_data_recording: true, enable_cpi_recording: false, }; let mut timings = ExecuteTimings::default(); @@ -363,7 +451,7 @@ fn svm_integration() { false, ); - assert_eq!(result.execution_results.len(), 2); + assert_eq!(result.execution_results.len(), 3); assert!(result.execution_results[0] .details() .unwrap() @@ -394,4 +482,15 @@ fn svm_integration() { .find(|key| key.0 == recipient_key) .unwrap(); assert_eq!(recipient_data.1.lamports(), 900010); + + let return_data = result.execution_results[2] + .details() + .unwrap() + .return_data + .as_ref() + .unwrap(); + let time = i64::from_be_bytes(return_data.data[0..8].try_into().unwrap()); + let clock_data = mock_bank.get_account_shared_data(&Clock::id()).unwrap(); + let clock_info: Clock = bincode::deserialize(clock_data.data()).unwrap(); + assert_eq!(clock_info.unix_timestamp, time); } From 6e6acce7981768ca1841a0aeb122034ebc1a2bb8 Mon Sep 17 00:00:00 2001 From: Tyera Date: Fri, 22 Mar 2024 17:44:38 -0600 Subject: [PATCH 056/153] Clarify TargetProgramBuiltin code docs (#403) * Update comments * Nitty variable name update --- .../core_bpf_migration/target_builtin.rs | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs index 0166e3d9ea0a7e..e370fa825ff5fd 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs @@ -9,7 +9,7 @@ use { }, }; -/// Used to validate a built-in program's 
account before migrating to Core BPF. +/// The account details of a built-in program to be migrated to Core BPF. #[derive(Debug)] pub(crate) struct TargetProgramBuiltin { pub program_address: Pubkey, @@ -19,7 +19,8 @@ pub(crate) struct TargetProgramBuiltin { } impl TargetProgramBuiltin { - /// Create a new migration configuration for a built-in program. + /// Collects the details of a built-in program and verifies it is properly + /// configured pub(crate) fn new_checked( bank: &Bank, program_address: &Pubkey, @@ -143,12 +144,12 @@ mod tests { let program_data_address = get_program_data_address(&program_address); // Success - let builtin_config = + let target_builtin = TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); - assert_eq!(builtin_config.program_address, program_address); - assert_eq!(builtin_config.program_account, program_account); - assert_eq!(builtin_config.program_data_address, program_data_address); - assert_eq!(builtin_config.total_data_size, program_account.data().len()); + assert_eq!(target_builtin.program_address, program_address); + assert_eq!(target_builtin.program_account, program_account); + assert_eq!(target_builtin.program_data_address, program_data_address); + assert_eq!(target_builtin.total_data_size, program_account.data().len()); // Fail if the program account is not owned by the native loader store_account( @@ -210,12 +211,12 @@ mod tests { let program_data_address = get_program_data_address(&program_address); // Success - let builtin_config = + let target_builtin = TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); - assert_eq!(builtin_config.program_address, program_address); - assert_eq!(builtin_config.program_account, program_account); - assert_eq!(builtin_config.program_data_address, program_data_address); - assert_eq!(builtin_config.total_data_size, program_account.data().len()); + assert_eq!(target_builtin.program_address, program_address); + assert_eq!(target_builtin.program_account, program_account); + assert_eq!(target_builtin.program_data_address, program_data_address); + assert_eq!(target_builtin.total_data_size, program_account.data().len()); // Fail if the program data account exists store_account( From 5cfb6e860d39ef5d47903d855e0a90545fcd1a71 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Fri, 22 Mar 2024 19:45:34 -0700 Subject: [PATCH 057/153] SVM: Move sysvar_cache related functions and tests to SVM (#402) --- runtime/src/bank.rs | 26 +++-- runtime/src/bank/sysvar_cache.rs | 36 ++----- svm/src/transaction_processor.rs | 179 ++++++++++++++++++++++++++++++- 3 files changed, 202 insertions(+), 39 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index bb2c11fa6bd913..cfcd32ef7ff456 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -199,7 +199,9 @@ use { solana_accounts_db::accounts_db::{ ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, - solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, + solana_program_runtime::{ + loaded_programs::LoadedProgramsForTxBatch, sysvar_cache::SysvarCache, + }, }; /// params to `verify_accounts_hash` @@ -1098,7 +1100,8 @@ impl Bank { bank.update_epoch_schedule(); bank.update_recent_blockhashes(); bank.update_last_restart_slot(); - bank.fill_missing_sysvar_cache_entries(); + bank.transaction_processor + .fill_missing_sysvar_cache_entries(&bank); bank } @@ -1457,7 +1460,9 @@ impl Bank { new.update_last_restart_slot() }); - let (_, fill_sysvar_cache_time_us) = 
measure_us!(new.fill_missing_sysvar_cache_entries()); + let (_, fill_sysvar_cache_time_us) = measure_us!(new + .transaction_processor + .fill_missing_sysvar_cache_entries(&new)); time.stop(); report_new_bank_metrics( @@ -1793,7 +1798,8 @@ impl Bank { new.inherit_specially_retained_account_fields(account), ) }); - new.fill_missing_sysvar_cache_entries(); + new.transaction_processor + .fill_missing_sysvar_cache_entries(&new); new.freeze(); new } @@ -1907,7 +1913,8 @@ impl Bank { additional_builtins, debug_do_not_add_builtins, ); - bank.fill_missing_sysvar_cache_entries(); + bank.transaction_processor + .fill_missing_sysvar_cache_entries(&bank); bank.rebuild_skipped_rewrites(); // Sanity assertions between bank snapshot and genesis config @@ -2210,8 +2217,9 @@ impl Bank { }); // Simply force fill sysvar cache rather than checking which sysvar was // actually updated since tests don't need to be optimized for performance. - self.reset_sysvar_cache(); - self.fill_missing_sysvar_cache_entries(); + self.transaction_processor.reset_sysvar_cache(); + self.transaction_processor + .fill_missing_sysvar_cache_entries(self); } fn update_slot_history(&self) { @@ -7881,6 +7889,10 @@ impl Bank { .wait_for_complete() } + pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache { + self.transaction_processor.get_sysvar_cache_for_tests() + } + pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false) } diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index 91a22907d6e888..3e0f9a93ddef2a 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -1,31 +1,5 @@ -use { - super::Bank, solana_program_runtime::sysvar_cache::SysvarCache, - solana_sdk::account::ReadableAccount, -}; - -impl Bank { - pub(crate) fn fill_missing_sysvar_cache_entries(&self) { - let mut sysvar_cache = self.transaction_processor.sysvar_cache.write().unwrap(); - sysvar_cache.fill_missing_entries(|pubkey, callback| { - if let Some(account) = self.get_account_with_fixed_root(pubkey) { - callback(account.data()); - } - }); - } - - pub(crate) fn reset_sysvar_cache(&self) { - let mut sysvar_cache = self.transaction_processor.sysvar_cache.write().unwrap(); - sysvar_cache.reset(); - } - - pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache { - self.transaction_processor - .sysvar_cache - .read() - .unwrap() - .clone() - } -} +#[cfg(test)] +use super::Bank; #[cfg(test)] mod tests { @@ -132,7 +106,7 @@ mod tests { assert!(bank1_cached_epoch_rewards.is_err()); drop(bank1_sysvar_cache); - bank1.reset_sysvar_cache(); + bank1.transaction_processor.reset_sysvar_cache(); let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); assert!(bank1_sysvar_cache.get_clock().is_err()); @@ -157,7 +131,9 @@ mod tests { expected_epoch_rewards.distribution_complete_block_height, ); - bank1.fill_missing_sysvar_cache_entries(); + bank1 + .transaction_processor + .fill_missing_sysvar_cache_entries(&bank1); let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); assert_eq!(bank1_sysvar_cache.get_clock(), bank1_cached_clock); diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index b1673cef0b1b11..5426cf0fce9b16 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -927,6 +927,27 @@ impl TransactionBatchProcessor { } outer_instructions } + + pub fn fill_missing_sysvar_cache_entries( + &self, + callbacks: &CB, + 
) { + let mut sysvar_cache = self.sysvar_cache.write().unwrap(); + sysvar_cache.fill_missing_entries(|pubkey, set_sysvar| { + if let Some(account) = callbacks.get_account_shared_data(pubkey) { + set_sysvar(account.data()); + } + }); + } + + pub fn reset_sysvar_cache(&self) { + let mut sysvar_cache = self.sysvar_cache.write().unwrap(); + sysvar_cache.reset(); + } + + pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache { + self.sysvar_cache.read().unwrap().clone() + } } #[cfg(test)] @@ -937,12 +958,13 @@ mod tests { loaded_programs::BlockRelation, solana_rbpf::program::BuiltinProgram, }, solana_sdk::{ - account::WritableAccount, + account::{create_account_shared_data_for_test, WritableAccount}, bpf_loader, + fee_calculator::FeeCalculator, message::{LegacyMessage, Message, MessageHeader}, rent_debits::RentDebits, signature::{Keypair, Signature}, - sysvar::rent::Rent, + sysvar::{self, rent::Rent}, transaction::{SanitizedTransaction, Transaction, TransactionError}, transaction_context::TransactionContext, }, @@ -2102,4 +2124,157 @@ mod tests { ); assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); } + + #[test] + #[allow(deprecated)] + fn test_sysvar_cache_initialization1() { + let mut mock_bank = MockBankCallback::default(); + + let clock = sysvar::clock::Clock { + slot: 1, + epoch_start_timestamp: 2, + epoch: 3, + leader_schedule_epoch: 4, + unix_timestamp: 5, + }; + let clock_account = create_account_shared_data_for_test(&clock); + mock_bank + .account_shared_data + .insert(sysvar::clock::id(), clock_account); + + let epoch_schedule = EpochSchedule::custom(64, 2, true); + let epoch_schedule_account = create_account_shared_data_for_test(&epoch_schedule); + mock_bank + .account_shared_data + .insert(sysvar::epoch_schedule::id(), epoch_schedule_account); + + let fees = sysvar::fees::Fees { + fee_calculator: FeeCalculator { + lamports_per_signature: 123, + }, + }; + let fees_account = create_account_shared_data_for_test(&fees); + mock_bank + .account_shared_data + .insert(sysvar::fees::id(), fees_account); + + let rent = Rent::with_slots_per_epoch(2048); + let rent_account = create_account_shared_data_for_test(&rent); + mock_bank + .account_shared_data + .insert(sysvar::rent::id(), rent_account); + + let transaction_processor = TransactionBatchProcessor::::default(); + transaction_processor.fill_missing_sysvar_cache_entries(&mock_bank); + + let sysvar_cache = transaction_processor.sysvar_cache.read().unwrap(); + let cached_clock = sysvar_cache.get_clock(); + let cached_epoch_schedule = sysvar_cache.get_epoch_schedule(); + let cached_fees = sysvar_cache.get_fees(); + let cached_rent = sysvar_cache.get_rent(); + + assert_eq!( + cached_clock.expect("clock sysvar missing in cache"), + clock.into() + ); + assert_eq!( + cached_epoch_schedule.expect("epoch_schedule sysvar missing in cache"), + epoch_schedule.into() + ); + assert_eq!( + cached_fees.expect("fees sysvar missing in cache"), + fees.into() + ); + assert_eq!( + cached_rent.expect("rent sysvar missing in cache"), + rent.into() + ); + assert!(sysvar_cache.get_slot_hashes().is_err()); + assert!(sysvar_cache.get_epoch_rewards().is_err()); + } + + #[test] + #[allow(deprecated)] + fn test_reset_and_fill_sysvar_cache() { + let mut mock_bank = MockBankCallback::default(); + + let clock = sysvar::clock::Clock { + slot: 1, + epoch_start_timestamp: 2, + epoch: 3, + leader_schedule_epoch: 4, + unix_timestamp: 5, + }; + let clock_account = create_account_shared_data_for_test(&clock); + mock_bank + .account_shared_data + 
.insert(sysvar::clock::id(), clock_account); + + let epoch_schedule = EpochSchedule::custom(64, 2, true); + let epoch_schedule_account = create_account_shared_data_for_test(&epoch_schedule); + mock_bank + .account_shared_data + .insert(sysvar::epoch_schedule::id(), epoch_schedule_account); + + let fees = sysvar::fees::Fees { + fee_calculator: FeeCalculator { + lamports_per_signature: 123, + }, + }; + let fees_account = create_account_shared_data_for_test(&fees); + mock_bank + .account_shared_data + .insert(sysvar::fees::id(), fees_account); + + let rent = Rent::with_slots_per_epoch(2048); + let rent_account = create_account_shared_data_for_test(&rent); + mock_bank + .account_shared_data + .insert(sysvar::rent::id(), rent_account); + + let transaction_processor = TransactionBatchProcessor::::default(); + // Fill the sysvar cache + transaction_processor.fill_missing_sysvar_cache_entries(&mock_bank); + // Reset the sysvar cache + transaction_processor.reset_sysvar_cache(); + + { + let sysvar_cache = transaction_processor.sysvar_cache.read().unwrap(); + // Test that sysvar cache is empty and none of the values are found + assert!(sysvar_cache.get_clock().is_err()); + assert!(sysvar_cache.get_epoch_schedule().is_err()); + assert!(sysvar_cache.get_fees().is_err()); + assert!(sysvar_cache.get_epoch_rewards().is_err()); + assert!(sysvar_cache.get_rent().is_err()); + assert!(sysvar_cache.get_epoch_rewards().is_err()); + } + + // Refill the cache and test the values are available. + transaction_processor.fill_missing_sysvar_cache_entries(&mock_bank); + + let sysvar_cache = transaction_processor.sysvar_cache.read().unwrap(); + let cached_clock = sysvar_cache.get_clock(); + let cached_epoch_schedule = sysvar_cache.get_epoch_schedule(); + let cached_fees = sysvar_cache.get_fees(); + let cached_rent = sysvar_cache.get_rent(); + + assert_eq!( + cached_clock.expect("clock sysvar missing in cache"), + clock.into() + ); + assert_eq!( + cached_epoch_schedule.expect("epoch_schedule sysvar missing in cache"), + epoch_schedule.into() + ); + assert_eq!( + cached_fees.expect("fees sysvar missing in cache"), + fees.into() + ); + assert_eq!( + cached_rent.expect("rent sysvar missing in cache"), + rent.into() + ); + assert!(sysvar_cache.get_slot_hashes().is_err()); + assert!(sysvar_cache.get_epoch_rewards().is_err()); + } } From bfdfc6cef222f1b2b918721f083054662e53cbef Mon Sep 17 00:00:00 2001 From: Joe C Date: Sat, 23 Mar 2024 07:38:57 -0500 Subject: [PATCH 058/153] frozen-abi-macro: use `log` from `solana_frozen_abi` (#153) * frozen-abi-macro: use `log` from `solana_frozen_abi` * use private module approach --- frozen-abi/macro/src/lib.rs | 8 ++++---- frozen-abi/src/lib.rs | 7 +++++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs index 0c37eeb149967f..9a735e2c5f7185 100644 --- a/frozen-abi/macro/src/lib.rs +++ b/frozen-abi/macro/src/lib.rs @@ -146,7 +146,7 @@ fn derive_abi_sample_enum_type(input: ItemEnum) -> TokenStream { #( #attrs )* impl #impl_generics ::solana_frozen_abi::abi_example::AbiExample for #type_name #ty_generics #where_clause { fn example() -> Self { - ::log::info!( + ::solana_frozen_abi::__private::log::info!( "AbiExample for enum: {}", std::any::type_name::<#type_name #ty_generics>() ); @@ -198,7 +198,7 @@ fn derive_abi_sample_struct_type(input: ItemStruct) -> TokenStream { #( #attrs )* impl #impl_generics ::solana_frozen_abi::abi_example::AbiExample for #type_name #ty_generics #where_clause { fn example() -> Self { - 
::log::info!(
+            ::solana_frozen_abi::__private::log::info!(
                 "AbiExample for struct: {}",
                 std::any::type_name::<#type_name #ty_generics>()
             );
@@ -300,7 +300,7 @@ fn quote_for_test(
             let mut hash = digester.finalize();
             // pretty-print error
             if result.is_err() {
-                ::log::error!("digest error: {:#?}", result);
+                ::solana_frozen_abi::__private::log::error!("digest error: {:#?}", result);
             }
             result.unwrap();
             let actual_digest = format!("{}", hash);
@@ -308,7 +308,7 @@ fn quote_for_test(
                 if #expected_digest != actual_digest {
                     #p!("sed -i -e 's/{}/{}/g' $(git grep --files-with-matches frozen_abi)", #expected_digest, hash);
                 }
-                ::log::warn!("Not testing the abi digest under SOLANA_ABI_BULK_UPDATE!");
+                ::solana_frozen_abi::__private::log::warn!("Not testing the abi digest under SOLANA_ABI_BULK_UPDATE!");
             } else {
                 if let Ok(dir) = ::std::env::var("SOLANA_ABI_DUMP_DIR") {
                     assert_eq!(#expected_digest, actual_digest, "Possibly ABI changed? Examine the diff in SOLANA_ABI_DUMP_DIR!: \n$ diff -u {}/*{}* {}/*{}*", dir, #expected_digest, dir, actual_digest);
diff --git a/frozen-abi/src/lib.rs b/frozen-abi/src/lib.rs
index 4747cf64b9e50f..a44ef405d18fd0 100644
--- a/frozen-abi/src/lib.rs
+++ b/frozen-abi/src/lib.rs
@@ -19,3 +19,10 @@ extern crate solana_frozen_abi_macro;
 #[cfg(test)]
 #[macro_use]
 extern crate serde_derive;
+
+// Not public API. Referenced by macro-generated code.
+#[doc(hidden)]
+pub mod __private {
+    #[doc(hidden)]
+    pub use log;
+}

From b6d22374032683257a7d6633896445031ae1f20a Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Sat, 23 Mar 2024 13:53:46 +0000
Subject: [PATCH 059/153] implements weighted shuffle using binary tree (#185)

This is a partial port of firedancer's implementation of weighted shuffle:
https://github.com/firedancer-io/firedancer/blob/3401bfc26/src/ballet/wsample/fd_wsample.c

Though Fenwick trees use less space, inverse queries require an
additional O(log n) factor for binary search, resulting in an overall
O(n log n log n) performance for weighted shuffle.

This commit instead uses a binary tree where each node contains the sum
of all weights in its left sub-tree. The weights themselves are
implicitly stored at the leaves. Inverse queries and updates to the
tree can all be done in O(log n), resulting in an overall O(n log n)
weighted shuffle implementation.

Based on benchmarks, this results in a 24% improvement in
WeightedShuffle::shuffle:

Fenwick tree:
test bench_weighted_shuffle_new     ... bench:      36,686 ns/iter (+/- 191)
test bench_weighted_shuffle_shuffle ... bench:     342,625 ns/iter (+/- 4,067)

Binary tree:
test bench_weighted_shuffle_new     ... bench:      59,131 ns/iter (+/- 362)
test bench_weighted_shuffle_shuffle ... bench:     260,194 ns/iter (+/- 11,195)

Though WeightedShuffle::new is now slower, it can generally be cached
and reused, as in Turbine:
https://github.com/anza-xyz/agave/blob/b3fd87fe8/turbine/src/cluster_nodes.rs#L68

Additionally, the new code has better asymptotic performance. For
example, with 20_000 weights WeightedShuffle::shuffle is 31% faster:

Fenwick tree:
test bench_weighted_shuffle_new     ... bench:     255,071 ns/iter (+/- 9,591)
test bench_weighted_shuffle_shuffle ... bench:   2,466,058 ns/iter (+/- 9,873)

Binary tree:
test bench_weighted_shuffle_new     ... bench:     830,727 ns/iter (+/- 10,210)
test bench_weighted_shuffle_shuffle ... 
bench: 1,696,160 ns/iter (+/- 75,271) --- gossip/src/weighted_shuffle.rs | 186 +++++++++++++++++++++++---------- 1 file changed, 128 insertions(+), 58 deletions(-) diff --git a/gossip/src/weighted_shuffle.rs b/gossip/src/weighted_shuffle.rs index 250d1efb0f6800..7c12debce469e0 100644 --- a/gossip/src/weighted_shuffle.rs +++ b/gossip/src/weighted_shuffle.rs @@ -18,15 +18,14 @@ use { /// non-zero weighted indices. #[derive(Clone)] pub struct WeightedShuffle { - arr: Vec, // Underlying array implementing binary indexed tree. - sum: T, // Current sum of weights, excluding already selected indices. + // Underlying array implementing binary tree. + // tree[i] is the sum of weights in the left sub-tree of node i. + tree: Vec, + // Current sum of all weights, excluding already sampled ones. + weight: T, zeros: Vec, // Indices of zero weighted entries. } -// The implementation uses binary indexed tree: -// https://en.wikipedia.org/wiki/Fenwick_tree -// to maintain cumulative sum of weights excluding already selected indices -// over self.arr. impl WeightedShuffle where T: Copy + Default + PartialOrd + AddAssign + CheckedAdd, @@ -34,36 +33,39 @@ where /// If weights are negative or overflow the total sum /// they are treated as zero. pub fn new(name: &'static str, weights: &[T]) -> Self { - let size = weights.len() + 1; let zero = ::default(); - let mut arr = vec![zero; size]; + let mut tree = vec![zero; get_tree_size(weights.len())]; let mut sum = zero; let mut zeros = Vec::default(); let mut num_negative = 0; let mut num_overflow = 0; - for (mut k, &weight) in (1usize..).zip(weights) { + for (k, &weight) in weights.iter().enumerate() { #[allow(clippy::neg_cmp_op_on_partial_ord)] // weight < zero does not work for NaNs. if !(weight >= zero) { - zeros.push(k - 1); + zeros.push(k); num_negative += 1; continue; } if weight == zero { - zeros.push(k - 1); + zeros.push(k); continue; } sum = match sum.checked_add(&weight) { Some(val) => val, None => { - zeros.push(k - 1); + zeros.push(k); num_overflow += 1; continue; } }; - while k < size { - arr[k] += weight; - k += k & k.wrapping_neg(); + let mut index = tree.len() + k; + while index != 0 { + let offset = index & 1; + index = (index - 1) >> 1; + if offset > 0 { + tree[index] += weight; + } } } if num_negative > 0 { @@ -72,7 +74,11 @@ where if num_overflow > 0 { datapoint_error!("weighted-shuffle-overflow", (name, num_overflow, i64)); } - Self { arr, sum, zeros } + Self { + tree, + weight: sum, + zeros, + } } } @@ -80,54 +86,65 @@ impl WeightedShuffle where T: Copy + Default + PartialOrd + AddAssign + SubAssign + Sub, { - // Returns cumulative sum of current weights upto index k (inclusive). - fn cumsum(&self, mut k: usize) -> T { - let mut out = ::default(); - while k != 0 { - out += self.arr[k]; - k ^= k & k.wrapping_neg(); - } - out - } - // Removes given weight at index k. - fn remove(&mut self, mut k: usize, weight: T) { - self.sum -= weight; - let size = self.arr.len(); - while k < size { - self.arr[k] -= weight; - k += k & k.wrapping_neg(); + fn remove(&mut self, k: usize, weight: T) { + self.weight -= weight; + let mut index = self.tree.len() + k; + while index != 0 { + let offset = index & 1; + index = (index - 1) >> 1; + if offset > 0 { + self.tree[index] -= weight; + } } } - // Returns smallest index such that self.cumsum(k) > val, + // Returns smallest index such that cumsum of weights[..=k] > val, // along with its respective weight. 
- fn search(&self, val: T) -> (/*index:*/ usize, /*weight:*/ T) { + fn search(&self, mut val: T) -> (/*index:*/ usize, /*weight:*/ T) { let zero = ::default(); debug_assert!(val >= zero); - debug_assert!(val < self.sum); - let mut lo = (/*index:*/ 0, /*cumsum:*/ zero); - let mut hi = (self.arr.len() - 1, self.sum); - while lo.0 + 1 < hi.0 { - let k = lo.0 + (hi.0 - lo.0) / 2; - let sum = self.cumsum(k); - if sum <= val { - lo = (k, sum); + debug_assert!(val < self.weight); + let mut index = 0; + let mut weight = self.weight; + while index < self.tree.len() { + if val < self.tree[index] { + weight = self.tree[index]; + index = (index << 1) + 1; } else { - hi = (k, sum); + weight -= self.tree[index]; + val -= self.tree[index]; + index = (index << 1) + 2; } } - debug_assert!(lo.1 <= val); - debug_assert!(hi.1 > val); - (hi.0, hi.1 - lo.1) + (index - self.tree.len(), weight) } - pub fn remove_index(&mut self, index: usize) { - let zero = ::default(); - let weight = self.cumsum(index + 1) - self.cumsum(index); - if weight != zero { - self.remove(index + 1, weight); - } else if let Some(index) = self.zeros.iter().position(|ix| *ix == index) { + pub fn remove_index(&mut self, k: usize) { + let mut index = self.tree.len() + k; + let mut weight = ::default(); // zero + while index != 0 { + let offset = index & 1; + index = (index - 1) >> 1; + if offset > 0 { + if self.tree[index] != weight { + self.remove(k, self.tree[index] - weight); + } else { + self.remove_zero(k); + } + return; + } + weight += self.tree[index]; + } + if self.weight != weight { + self.remove(k, self.weight - weight); + } else { + self.remove_zero(k); + } + } + + fn remove_zero(&mut self, k: usize) { + if let Some(index) = self.zeros.iter().position(|&ix| ix == k) { self.zeros.remove(index); } } @@ -140,10 +157,10 @@ where // Equivalent to weighted_shuffle.shuffle(&mut rng).next() pub fn first(&self, rng: &mut R) -> Option { let zero = ::default(); - if self.sum > zero { - let sample = ::Sampler::sample_single(zero, self.sum, rng); + if self.weight > zero { + let sample = ::Sampler::sample_single(zero, self.weight, rng); let (index, _weight) = WeightedShuffle::search(self, sample); - return Some(index - 1); + return Some(index); } if self.zeros.is_empty() { return None; @@ -160,11 +177,11 @@ where pub fn shuffle(mut self, rng: &'a mut R) -> impl Iterator + 'a { std::iter::from_fn(move || { let zero = ::default(); - if self.sum > zero { - let sample = ::Sampler::sample_single(zero, self.sum, rng); + if self.weight > zero { + let sample = ::Sampler::sample_single(zero, self.weight, rng); let (index, weight) = WeightedShuffle::search(&self, sample); self.remove(index, weight); - return Some(index - 1); + return Some(index); } if self.zeros.is_empty() { return None; @@ -176,6 +193,19 @@ where } } +// Maps number of items to the "internal" size of the binary tree "implicitly" +// holding those items on the leaves. 
+fn get_tree_size(count: usize) -> usize { + let shift = usize::BITS + - count.leading_zeros() + - if count.is_power_of_two() && count != 1 { + 1 + } else { + 0 + }; + (1usize << shift) - 1 +} + #[cfg(test)] mod tests { use { @@ -218,6 +248,30 @@ mod tests { shuffle } + #[test] + fn test_get_tree_size() { + assert_eq!(get_tree_size(0), 0); + assert_eq!(get_tree_size(1), 1); + assert_eq!(get_tree_size(2), 1); + assert_eq!(get_tree_size(3), 3); + assert_eq!(get_tree_size(4), 3); + for count in 5..9 { + assert_eq!(get_tree_size(count), 7); + } + for count in 9..17 { + assert_eq!(get_tree_size(count), 15); + } + for count in 17..33 { + assert_eq!(get_tree_size(count), 31); + } + assert_eq!(get_tree_size((1 << 16) - 1), (1 << 16) - 1); + assert_eq!(get_tree_size(1 << 16), (1 << 16) - 1); + assert_eq!(get_tree_size((1 << 16) + 1), (1 << 17) - 1); + assert_eq!(get_tree_size((1 << 17) - 1), (1 << 17) - 1); + assert_eq!(get_tree_size(1 << 17), (1 << 17) - 1); + assert_eq!(get_tree_size((1 << 17) + 1), (1 << 18) - 1); + } + // Asserts that empty weights will return empty shuffle. #[test] fn test_weighted_shuffle_empty_weights() { @@ -357,4 +411,20 @@ mod tests { assert_eq!(shuffle.first(&mut rng), Some(shuffle_slow[0])); } } + + #[test] + fn test_weighted_shuffle_paranoid() { + let mut rng = rand::thread_rng(); + for size in 0..1351 { + let weights: Vec<_> = repeat_with(|| rng.gen_range(0..1000)).take(size).collect(); + let seed = rng.gen::<[u8; 32]>(); + let mut rng = ChaChaRng::from_seed(seed); + let shuffle_slow = weighted_shuffle_slow(&mut rng.clone(), weights.clone()); + let shuffle = WeightedShuffle::new("", &weights); + if size > 0 { + assert_eq!(shuffle.first(&mut rng.clone()), Some(shuffle_slow[0])); + } + assert_eq!(shuffle.shuffle(&mut rng).collect::>(), shuffle_slow); + } + } } From 602471257e9bbc9ba62f7c701b9ef47a5a30e8d9 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Sat, 23 Mar 2024 22:14:19 -0700 Subject: [PATCH 060/153] [TieredStorage] Add capacity() API and limit file size to 16GB (#401) #### Problem The TieredStorage has not yet implemented the AccountsFile::capacity() API. #### Summary of Changes Implement capacity() API for TieredStorage and limit file size to 16GB, same as the append-vec file. --- accounts-db/src/tiered_storage.rs | 7 +++++++ accounts-db/src/tiered_storage/hot.rs | 4 ++++ accounts-db/src/tiered_storage/readable.rs | 6 ++++++ 3 files changed, 17 insertions(+) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index a0d8eea4010b94..cbca5c93d0041e 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -38,6 +38,8 @@ use { pub type TieredStorageResult = Result; +const MAX_TIERED_STORAGE_FILE_SIZE: u64 = 16 * 1024 * 1024 * 1024; // 16 GiB; + /// The struct that defines the formats of all building blocks of a /// TieredStorage. 
#[derive(Clone, Debug, PartialEq)] @@ -163,6 +165,11 @@ impl TieredStorage { pub fn is_empty(&self) -> bool { self.len() == 0 } + + pub fn capacity(&self) -> u64 { + self.reader() + .map_or(MAX_TIERED_STORAGE_FILE_SIZE, |reader| reader.capacity()) + } } #[cfg(test)] diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 414d74b2eb81b7..260548897f66e2 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -369,6 +369,10 @@ impl HotStorageReader { self.len() == 0 } + pub fn capacity(&self) -> u64 { + self.len() as u64 + } + /// Returns the footer of the underlying tiered-storage accounts file. pub fn footer(&self) -> &TieredStorageFooter { &self.footer diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 008e805689df57..0191dad4903578 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -44,6 +44,12 @@ impl TieredStorageReader { } } + pub fn capacity(&self) -> u64 { + match self { + Self::Hot(hot) => hot.capacity(), + } + } + /// Returns the footer of the associated HotAccountsFile. pub fn footer(&self) -> &TieredStorageFooter { match self { From a916edb7a2caaa01eda171d67a1d8f1dc1f397b1 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Sun, 24 Mar 2024 16:41:36 -0700 Subject: [PATCH 061/153] [TieredStorage] Add AccountsFile::TieredStorage (#72) #### Problem AccountsFile currently doesn't have an implementation for TieredStorage. To enable AccountsDB tests for the TieredStorage, we need AccountsFile to support TieredStorage. #### Summary of Changes This PR implements a AccountsFile::TieredStorage, a thin wrapper between AccountsFile and TieredStorage. --- accounts-db/src/accounts_file.rs | 50 ++++++++++++++++++++++++-- accounts-db/src/ancient_append_vecs.rs | 4 +-- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 6a795f7238fc64..117148ad050227 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -7,7 +7,9 @@ use { accounts_hash::AccountHash, append_vec::{AppendVec, AppendVecError}, storable_accounts::StorableAccounts, - tiered_storage::error::TieredStorageError, + tiered_storage::{ + error::TieredStorageError, hot::HOT_FORMAT, index::IndexOffset, TieredStorage, + }, }, solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey}, std::{ @@ -56,6 +58,7 @@ pub type Result = std::result::Result; /// under different formats. pub enum AccountsFile { AppendVec(AppendVec), + TieredStorage(TieredStorage), } impl AccountsFile { @@ -64,43 +67,62 @@ impl AccountsFile { /// The second element of the returned tuple is the number of accounts in the /// accounts file. pub fn new_from_file(path: impl AsRef, current_len: usize) -> Result<(Self, usize)> { - let (av, num_accounts) = AppendVec::new_from_file(path, current_len)?; - Ok((Self::AppendVec(av), num_accounts)) + match TieredStorage::new_readonly(path.as_ref()) { + Ok(tiered_storage) => { + // unwrap() note: TieredStorage::new_readonly() is guaranteed to have a valid + // reader instance when opening with new_readonly. 
+ let num_accounts = tiered_storage.reader().unwrap().num_accounts(); + Ok((Self::TieredStorage(tiered_storage), num_accounts)) + } + Err(TieredStorageError::MagicNumberMismatch(_, _)) => { + // In case of MagicNumberMismatch, we can assume that this is not + // a tiered-storage file. + let (av, num_accounts) = AppendVec::new_from_file(path, current_len)?; + Ok((Self::AppendVec(av), num_accounts)) + } + Err(e) => Err(AccountsFileError::TieredStorageError(e)), + } } pub fn flush(&self) -> Result<()> { match self { Self::AppendVec(av) => av.flush(), + Self::TieredStorage(_) => Ok(()), } } pub fn reset(&self) { match self { Self::AppendVec(av) => av.reset(), + Self::TieredStorage(_) => {} } } pub fn remaining_bytes(&self) -> u64 { match self { Self::AppendVec(av) => av.remaining_bytes(), + Self::TieredStorage(ts) => ts.capacity().saturating_sub(ts.len() as u64), } } pub fn len(&self) -> usize { match self { Self::AppendVec(av) => av.len(), + Self::TieredStorage(ts) => ts.len(), } } pub fn is_empty(&self) -> bool { match self { Self::AppendVec(av) => av.is_empty(), + Self::TieredStorage(ts) => ts.is_empty(), } } pub fn capacity(&self) -> u64 { match self { Self::AppendVec(av) => av.capacity(), + Self::TieredStorage(ts) => ts.capacity(), } } @@ -114,6 +136,11 @@ impl AccountsFile { pub fn get_account(&self, index: usize) -> Option<(StoredAccountMeta<'_>, usize)> { match self { Self::AppendVec(av) => av.get_account(index), + Self::TieredStorage(ts) => ts + .reader()? + .get_account(IndexOffset(index as u32)) + .ok()? + .map(|(metas, index_offset)| (metas, index_offset.0 as usize)), } } @@ -124,6 +151,12 @@ impl AccountsFile { ) -> std::result::Result { match self { Self::AppendVec(av) => av.account_matches_owners(offset, owners), + Self::TieredStorage(ts) => { + let Some(reader) = ts.reader() else { + return Err(MatchAccountOwnerError::UnableToLoad); + }; + reader.account_matches_owners(IndexOffset(offset as u32), owners) + } } } @@ -131,6 +164,7 @@ impl AccountsFile { pub fn get_path(&self) -> PathBuf { match self { Self::AppendVec(av) => av.get_path(), + Self::TieredStorage(ts) => ts.path().to_path_buf(), } } @@ -143,6 +177,10 @@ impl AccountsFile { pub fn accounts(&self, offset: usize) -> Vec { match self { Self::AppendVec(av) => av.accounts(offset), + Self::TieredStorage(ts) => ts + .reader() + .and_then(|reader| reader.accounts(IndexOffset(offset as u32)).ok()) + .unwrap_or_default(), } } @@ -166,6 +204,11 @@ impl AccountsFile { ) -> Option> { match self { Self::AppendVec(av) => av.append_accounts(accounts, skip), + // Currently we only support HOT_FORMAT. If we later want to use + // a different format, then we will need a way to pass-in it. + // TODO: consider adding function like write_accounts_to_hot_storage() or something + // to hide implementation detail. + Self::TieredStorage(ts) => ts.write_accounts(accounts, skip, &HOT_FORMAT).ok(), } } } @@ -204,6 +247,7 @@ pub mod tests { pub(crate) fn set_current_len_for_tests(&self, len: usize) { match self { Self::AppendVec(av) => av.set_current_len_for_tests(len), + Self::TieredStorage(_) => {} } } } diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 3925b21e69f586..f83f16e121a734 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -966,9 +966,7 @@ pub const fn get_ancient_append_vec_capacity() -> u64 { /// is this a max-size append vec designed to be used as an ancient append vec? 
pub fn is_ancient(storage: &AccountsFile) -> bool { - match storage { - AccountsFile::AppendVec(storage) => storage.capacity() >= get_ancient_append_vec_capacity(), - } + storage.capacity() >= get_ancient_append_vec_capacity() } #[cfg(test)] From b884ea8011520eb0c5c2eb6969c2e43709a1cfc3 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Mon, 25 Mar 2024 16:02:58 -0300 Subject: [PATCH 062/153] Add examples of failing transactions to SVM integration tests (#417) Add examples of failing transactions --- svm/tests/integration_test.rs | 89 ++++++++++++++++++++++++++++++++--- 1 file changed, 83 insertions(+), 6 deletions(-) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index e435ce093975c9..8ecc56bd3703d4 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -32,7 +32,7 @@ use { pubkey::Pubkey, signature::Signature, sysvar::SysvarId, - transaction::{SanitizedTransaction, Transaction}, + transaction::{SanitizedTransaction, Transaction, TransactionError}, }, solana_svm::{ account_loader::TransactionCheckResult, @@ -40,6 +40,7 @@ use { transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingCallback, }, + transaction_results::TransactionExecutionResult, }, std::{ cmp::Ordering, @@ -277,7 +278,7 @@ fn prepare_transactions( .insert(fee_payer, account_data); // A simple funds transfer between accounts - let program_account = Pubkey::new_unique(); + let transfer_program_account = Pubkey::new_unique(); let sender = Pubkey::new_unique(); let recipient = Pubkey::new_unique(); let fee_payer = Pubkey::new_unique(); @@ -286,7 +287,7 @@ fn prepare_transactions( account_keys: vec![ fee_payer, sender, - program_account, + transfer_program_account, recipient, system_account, ], @@ -335,7 +336,7 @@ fn prepare_transactions( account_data.set_lamports(25); mock_bank .account_shared_data - .insert(program_account, account_data); + .insert(transfer_program_account, account_data); // sender let mut account_data = AccountSharedData::default(); @@ -397,9 +398,67 @@ fn prepare_transactions( .account_shared_data .insert(program_account, account_data); - // TODO: Include these examples as well: // A transaction that fails + let sender = Pubkey::new_unique(); + let recipient = Pubkey::new_unique(); + let fee_payer = Pubkey::new_unique(); + let system_account = Pubkey::new_from_array([0; 32]); + let mut data = 900050u64.to_be_bytes().to_vec(); + while data.len() < 8 { + data.insert(0, 0); + } + + let message = Message { + account_keys: vec![ + fee_payer, + sender, + transfer_program_account, + recipient, + system_account, + ], + header: MessageHeader { + num_required_signatures: 2, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![CompiledInstruction { + program_id_index: 2, + accounts: vec![1, 3, 4], + data, + }], + recent_blockhash: Hash::default(), + }; + let transaction = Transaction { + signatures: vec![Signature::new_unique(), Signature::new_unique()], + message, + }; + let sanitized_transaction = + SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + all_transactions.push(sanitized_transaction.clone()); + transaction_checks.push((Ok(()), None, Some(20))); + + // fee payer + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(80000); + mock_bank + .account_shared_data + .insert(fee_payer, account_data); + + // Sender without enough funds + let mut account_data = 
AccountSharedData::default(); + account_data.set_lamports(900000); + mock_bank.account_shared_data.insert(sender, account_data); + + // recipient + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(900000); + mock_bank + .account_shared_data + .insert(recipient, account_data); + // A transaction whose verification has already failed + all_transactions.push(sanitized_transaction); + transaction_checks.push((Err(TransactionError::BlockhashNotFound), None, Some(20))); (all_transactions, transaction_checks) } @@ -451,7 +510,7 @@ fn svm_integration() { false, ); - assert_eq!(result.execution_results.len(), 3); + assert_eq!(result.execution_results.len(), 5); assert!(result.execution_results[0] .details() .unwrap() @@ -493,4 +552,22 @@ fn svm_integration() { let clock_data = mock_bank.get_account_shared_data(&Clock::id()).unwrap(); let clock_info: Clock = bincode::deserialize(clock_data.data()).unwrap(); assert_eq!(clock_info.unix_timestamp, time); + + assert!(result.execution_results[3] + .details() + .unwrap() + .status + .is_err()); + assert!(result.execution_results[3] + .details() + .unwrap() + .log_messages + .as_ref() + .unwrap() + .contains(&"Transfer: insufficient lamports 900000, need 900050".to_string())); + + assert!(matches!( + result.execution_results[4], + TransactionExecutionResult::NotExecuted(TransactionError::BlockhashNotFound) + )); } From c7dba30f4f38cf9dae6ff3ef43dbe1df39976a39 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 25 Mar 2024 12:08:48 -0700 Subject: [PATCH 063/153] [TieredStorage] Disable accounts-file type check before enabling tiered-storage (#418) #### Problem As #72 introduced AccountsFile::TieredStorage, it also performs file-type check when opening an accounts-file to determine whether it is a tiered-storage or an append-vec. But before tiered-storage is enabled, this opening check is unnecessary. #### Summary of Changes Remove the accounts-file type check code and simply assume everything is append-vec on AccountsFile::new_from_file(). --- accounts-db/src/accounts_file.rs | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 117148ad050227..6371be6083cc84 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -67,21 +67,8 @@ impl AccountsFile { /// The second element of the returned tuple is the number of accounts in the /// accounts file. pub fn new_from_file(path: impl AsRef, current_len: usize) -> Result<(Self, usize)> { - match TieredStorage::new_readonly(path.as_ref()) { - Ok(tiered_storage) => { - // unwrap() note: TieredStorage::new_readonly() is guaranteed to have a valid - // reader instance when opening with new_readonly. - let num_accounts = tiered_storage.reader().unwrap().num_accounts(); - Ok((Self::TieredStorage(tiered_storage), num_accounts)) - } - Err(TieredStorageError::MagicNumberMismatch(_, _)) => { - // In case of MagicNumberMismatch, we can assume that this is not - // a tiered-storage file. 
- let (av, num_accounts) = AppendVec::new_from_file(path, current_len)?; - Ok((Self::AppendVec(av), num_accounts)) - } - Err(e) => Err(AccountsFileError::TieredStorageError(e)), - } + let (av, num_accounts) = AppendVec::new_from_file(path, current_len)?; + Ok((Self::AppendVec(av), num_accounts)) } pub fn flush(&self) -> Result<()> { From c867522de82bf0fb54b161a33e44df6e17f7e741 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Tue, 26 Mar 2024 07:42:31 +0900 Subject: [PATCH 064/153] [clap-v3-utils] Make `SignerSource::parse` public (#410) make `SignerSource::parse` public --- clap-v3-utils/src/input_parsers/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clap-v3-utils/src/input_parsers/signer.rs b/clap-v3-utils/src/input_parsers/signer.rs index 0580799a6bf675..1e4e8b96b4ab81 100644 --- a/clap-v3-utils/src/input_parsers/signer.rs +++ b/clap-v3-utils/src/input_parsers/signer.rs @@ -89,7 +89,7 @@ impl SignerSource { } } - pub(crate) fn parse>(source: S) -> Result { + pub fn parse>(source: S) -> Result { let source = source.as_ref(); let source = { #[cfg(target_family = "windows")] From f6a3608981a8a4d60b7ae24905516c9f250a9122 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Tue, 26 Mar 2024 13:25:57 +0900 Subject: [PATCH 065/153] [clap-v3-utils] Add `try_get_word_count` (#411) --- clap-v3-utils/src/keygen/mnemonic.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/clap-v3-utils/src/keygen/mnemonic.rs b/clap-v3-utils/src/keygen/mnemonic.rs index 0bcc20a58bee85..2dc32d6ca514d7 100644 --- a/clap-v3-utils/src/keygen/mnemonic.rs +++ b/clap-v3-utils/src/keygen/mnemonic.rs @@ -25,16 +25,32 @@ pub const NO_PASSPHRASE_ARG: ArgConstant<'static> = ArgConstant { help: "Do not prompt for a BIP39 passphrase", }; +// The constant `POSSIBLE_WORD_COUNTS` and function `try_get_word_count` must always be updated in +// sync +const POSSIBLE_WORD_COUNTS: &[&str] = &["12", "15", "18", "21", "24"]; pub fn word_count_arg<'a>() -> Arg<'a> { Arg::new(WORD_COUNT_ARG.name) .long(WORD_COUNT_ARG.long) - .value_parser(PossibleValuesParser::new(["12", "15", "18", "21", "24"])) + .value_parser(PossibleValuesParser::new(POSSIBLE_WORD_COUNTS)) .default_value("12") .value_name("NUMBER") .takes_value(true) .help(WORD_COUNT_ARG.help) } +pub fn try_get_word_count(matches: &ArgMatches) -> Result, Box> { + Ok(matches + .try_get_one::(WORD_COUNT_ARG.name)? 
.map(|count| match count.as_str() {
+            "12" => 12,
+            "15" => 15,
+            "18" => 18,
+            "21" => 21,
+            "24" => 24,
+            _ => unreachable!(),
+        }))
+}
+
 pub fn language_arg<'a>() -> Arg<'a> {
     Arg::new(LANGUAGE_ARG.name)
         .long(LANGUAGE_ARG.long)

From b01d7923fc46de22b688b27f19122cd62227550e Mon Sep 17 00:00:00 2001
From: carllin
Date: Tue, 26 Mar 2024 00:34:15 -0400
Subject: [PATCH 066/153] Add local cluster utility functions (#355)

---
 core/src/consensus.rs              |  1 -
 local-cluster/src/cluster_tests.rs | 54 ++++++++++++++++++++++++++++--
 local-cluster/src/local_cluster.rs | 39 ++++++++++++++++++---
 3 files changed, 86 insertions(+), 8 deletions(-)

diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index ab316d7c7da612..d4f2345aa14ab8 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -631,7 +631,6 @@ impl Tower {
         }
     }

-    #[cfg(test)]
     pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
         self.record_bank_vote_and_update_lockouts(slot, hash)
     }
diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs
index 90337bb272460f..dffe2a8713ab08 100644
--- a/local-cluster/src/cluster_tests.rs
+++ b/local-cluster/src/cluster_tests.rs
@@ -10,7 +10,10 @@ use {
         connection_cache::{ConnectionCache, Protocol},
         thin_client::ThinClient,
     },
-    solana_core::consensus::VOTE_THRESHOLD_DEPTH,
+    solana_core::consensus::{
+        tower_storage::{FileTowerStorage, SavedTower, SavedTowerVersions, TowerStorage},
+        VOTE_THRESHOLD_DEPTH,
+    },
     solana_entry::entry::{Entry, EntrySlice},
     solana_gossip::{
         cluster_info::{self, ClusterInfo},
@@ -43,7 +46,7 @@ use {
         borrow::Borrow,
         collections::{HashMap, HashSet, VecDeque},
         net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener},
-        path::Path,
+        path::{Path, PathBuf},
         sync::{
             atomic::{AtomicBool, Ordering},
             Arc, RwLock,
@@ -334,6 +337,53 @@ pub fn kill_entry_and_spend_and_verify_rest(
     }
 }

+pub fn apply_votes_to_tower(node_keypair: &Keypair, votes: Vec<(Slot, Hash)>, tower_path: PathBuf) {
+    let tower_storage = FileTowerStorage::new(tower_path);
+    let mut tower = tower_storage.load(&node_keypair.pubkey()).unwrap();
+    for (slot, hash) in votes {
+        tower.record_vote(slot, hash);
+    }
+    let saved_tower = SavedTowerVersions::from(SavedTower::new(&tower, node_keypair).unwrap());
+    tower_storage.store(&saved_tower).unwrap();
+}
+
+pub fn check_min_slot_is_rooted(
+    min_slot: Slot,
+    contact_infos: &[ContactInfo],
+    connection_cache: &Arc<ConnectionCache>,
+    test_name: &str,
+) {
+    let mut last_print = Instant::now();
+    let loop_start = Instant::now();
+    let loop_timeout = Duration::from_secs(180);
+    for ingress_node in contact_infos.iter() {
+        let (rpc, tpu) = LegacyContactInfo::try_from(ingress_node)
+            .map(|node| get_client_facing_addr(connection_cache.protocol(), node))
+            .unwrap();
+        let client = ThinClient::new(rpc, tpu, connection_cache.clone());
+        loop {
+            let root_slot = client
+                .get_slot_with_commitment(CommitmentConfig::finalized())
+                .unwrap_or(0);
+            if root_slot >= min_slot || last_print.elapsed().as_secs() > 3 {
+                info!(
+                    "{} waiting for node {} to see root >= {}.. observed latest root: {}",
+                    test_name,
+                    ingress_node.pubkey(),
+                    min_slot,
+                    root_slot
+                );
+                last_print = Instant::now();
+                if root_slot >= min_slot {
+                    break;
+                }
+            }
+            sleep(Duration::from_millis(clock::DEFAULT_MS_PER_SLOT / 2));
+            assert!(loop_start.elapsed() < loop_timeout);
+        }
+    }
+}
+
 pub fn check_for_new_roots(
     num_new_roots: usize,
     contact_infos: &[ContactInfo],
diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs
index d06c001bcc7ed1..3d8df638fbbb81 100644
--- a/local-cluster/src/local_cluster.rs
+++ b/local-cluster/src/local_cluster.rs
@@ -30,7 +30,7 @@ use {
     solana_sdk::{
         account::{Account, AccountSharedData},
         client::SyncClient,
-        clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
+        clock::{Slot, DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
         commitment_config::CommitmentConfig,
         epoch_schedule::EpochSchedule,
         feature_set,
@@ -555,12 +555,11 @@ impl LocalCluster {
         Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports)
     }

-    pub fn check_for_new_roots(
+    fn discover_nodes(
         &self,
-        num_new_roots: usize,
-        test_name: &str,
         socket_addr_space: SocketAddrSpace,
-    ) {
+        test_name: &str,
+    ) -> Vec<ContactInfo> {
         let alive_node_contact_infos: Vec<_> = self
             .validators
             .values()
@@ -575,6 +574,36 @@ impl LocalCluster {
         )
         .unwrap();
         info!("{} discovered {} nodes", test_name, cluster_nodes.len());
+        alive_node_contact_infos
+    }
+
+    pub fn check_min_slot_is_rooted(
+        &self,
+        min_root: Slot,
+        test_name: &str,
+        socket_addr_space: SocketAddrSpace,
+    ) {
+        let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
+        info!(
+            "{} looking for minimum root {} on all nodes",
+            test_name, min_root
+        );
+        cluster_tests::check_min_slot_is_rooted(
+            min_root,
+            &alive_node_contact_infos,
+            &self.connection_cache,
+            test_name,
+        );
+        info!("{} done waiting for roots", test_name);
+    }
+
+    pub fn check_for_new_roots(
+        &self,
+        num_new_roots: usize,
+        test_name: &str,
+        socket_addr_space: SocketAddrSpace,
+    ) {
+        let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
         info!("{} looking for new roots on all nodes", test_name);
         cluster_tests::check_for_new_roots(
             num_new_roots,

From 30eecd62b12e16ee473af9d0345d9503bb537403 Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Tue, 26 Mar 2024 05:21:54 +0000
Subject: [PATCH 067/153] implements weighted shuffle using N-ary tree (#259)

This is a port of firedancer's implementation of weighted shuffle:
https://github.com/firedancer-io/firedancer/blob/3401bfc26/src/ballet/wsample/fd_wsample.c

https://github.com/anza-xyz/agave/pull/185
implemented weighted shuffle using a binary tree. Though a binary tree
asymptotically has better performance than a Fenwick tree, it has less
cache locality, resulting in smaller improvements and, in particular, a
slower WeightedShuffle::new.

In order to improve cache locality and reduce the overheads of
traversing the tree, this commit instead uses a generalized N-ary tree
with a fanout of 16, showing significant improvements in both
WeightedShuffle::new and WeightedShuffle::shuffle.

With 4000 weights:

N-ary tree (fanout 16):
test bench_weighted_shuffle_new     ... bench:      36,244 ns/iter (+/- 243)
test bench_weighted_shuffle_shuffle ... bench:     149,082 ns/iter (+/- 1,474)

Binary tree:
test bench_weighted_shuffle_new     ... bench:      58,514 ns/iter (+/- 229)
test bench_weighted_shuffle_shuffle ... bench:     269,961 ns/iter (+/- 16,446)

Fenwick tree:
test bench_weighted_shuffle_new     ... 
bench: 39,413 ns/iter (+/- 179) test bench_weighted_shuffle_shuffle ... bench: 364,771 ns/iter (+/- 2,078) The improvements become even more significant as there are more items to shuffle. With 20_000 weights: N-ary tree (fanout 16): test bench_weighted_shuffle_new ... bench: 200,659 ns/iter (+/- 4,395) test bench_weighted_shuffle_shuffle ... bench: 941,928 ns/iter (+/- 26,492) Binary tree: test bench_weighted_shuffle_new ... bench: 881,114 ns/iter (+/- 12,343) test bench_weighted_shuffle_shuffle ... bench: 1,822,257 ns/iter (+/- 12,772) Fenwick tree: test bench_weighted_shuffle_new ... bench: 276,936 ns/iter (+/- 14,692) test bench_weighted_shuffle_shuffle ... bench: 2,644,713 ns/iter (+/- 49,252) --- gossip/src/weighted_shuffle.rs | 132 +++++++++++++++++++-------------- 1 file changed, 77 insertions(+), 55 deletions(-) diff --git a/gossip/src/weighted_shuffle.rs b/gossip/src/weighted_shuffle.rs index 7c12debce469e0..656615449b2a79 100644 --- a/gossip/src/weighted_shuffle.rs +++ b/gossip/src/weighted_shuffle.rs @@ -9,6 +9,14 @@ use { std::ops::{AddAssign, Sub, SubAssign}, }; +// Each internal tree node has FANOUT many child nodes with indices: +// (index << BIT_SHIFT) + 1 ..= (index << BIT_SHIFT) + FANOUT +// Conversely, for each node, the parent node is obtained by: +// (index - 1) >> BIT_SHIFT +const BIT_SHIFT: usize = 4; +const FANOUT: usize = 1 << BIT_SHIFT; +const BIT_MASK: usize = FANOUT - 1; + /// Implements an iterator where indices are shuffled according to their /// weights: /// - Returned indices are unique in the range [0, weights.len()). @@ -18,12 +26,13 @@ use { /// non-zero weighted indices. #[derive(Clone)] pub struct WeightedShuffle { - // Underlying array implementing binary tree. - // tree[i] is the sum of weights in the left sub-tree of node i. - tree: Vec, + // Underlying array implementing the tree. + // tree[i][j] is the sum of all weights in the j'th sub-tree of node i. + tree: Vec<[T; FANOUT - 1]>, // Current sum of all weights, excluding already sampled ones. weight: T, - zeros: Vec, // Indices of zero weighted entries. + // Indices of zero weighted entries. + zeros: Vec, } impl WeightedShuffle @@ -34,7 +43,7 @@ where /// they are treated as zero. pub fn new(name: &'static str, weights: &[T]) -> Self { let zero = ::default(); - let mut tree = vec![zero; get_tree_size(weights.len())]; + let mut tree = vec![[zero; FANOUT - 1]; get_tree_size(weights.len())]; let mut sum = zero; let mut zeros = Vec::default(); let mut num_negative = 0; @@ -59,12 +68,14 @@ where continue; } }; - let mut index = tree.len() + k; + // Traverse the tree from the leaf node upwards to the root, + // updating the sub-tree sums along the way. + let mut index = tree.len() + k; // leaf node while index != 0 { - let offset = index & 1; - index = (index - 1) >> 1; + let offset = index & BIT_MASK; + index = (index - 1) >> BIT_SHIFT; // parent node if offset > 0 { - tree[index] += weight; + tree[index][offset - 1] += weight; } } } @@ -88,54 +99,73 @@ where { // Removes given weight at index k. fn remove(&mut self, k: usize, weight: T) { + debug_assert!(self.weight >= weight); self.weight -= weight; - let mut index = self.tree.len() + k; + // Traverse the tree from the leaf node upwards to the root, + // updating the sub-tree sums along the way. 
+ let mut index = self.tree.len() + k; // leaf node while index != 0 { - let offset = index & 1; - index = (index - 1) >> 1; + let offset = index & BIT_MASK; + index = (index - 1) >> BIT_SHIFT; // parent node if offset > 0 { - self.tree[index] -= weight; + debug_assert!(self.tree[index][offset - 1] >= weight); + self.tree[index][offset - 1] -= weight; } } } - // Returns smallest index such that cumsum of weights[..=k] > val, + // Returns smallest index such that sum of weights[..=k] > val, // along with its respective weight. fn search(&self, mut val: T) -> (/*index:*/ usize, /*weight:*/ T) { let zero = ::default(); debug_assert!(val >= zero); debug_assert!(val < self.weight); - let mut index = 0; + // Traverse the tree downwards from the root while maintaining the + // weight of the subtree which contains the target leaf node. + let mut index = 0; // root let mut weight = self.weight; - while index < self.tree.len() { - if val < self.tree[index] { - weight = self.tree[index]; - index = (index << 1) + 1; - } else { - weight -= self.tree[index]; - val -= self.tree[index]; - index = (index << 1) + 2; + 'outer: while index < self.tree.len() { + for (j, &node) in self.tree[index].iter().enumerate() { + if val < node { + // Traverse to the j+1 subtree of self.tree[index]. + weight = node; + index = (index << BIT_SHIFT) + j + 1; + continue 'outer; + } else { + debug_assert!(weight >= node); + weight -= node; + val -= node; + } } + // Traverse to the right-most subtree of self.tree[index]. + index = (index << BIT_SHIFT) + FANOUT; } (index - self.tree.len(), weight) } pub fn remove_index(&mut self, k: usize) { - let mut index = self.tree.len() + k; + // Traverse the tree from the leaf node upwards to the root, while + // maintaining the sum of weights of subtrees *not* containing the leaf + // node. + let mut index = self.tree.len() + k; // leaf node let mut weight = ::default(); // zero while index != 0 { - let offset = index & 1; - index = (index - 1) >> 1; + let offset = index & BIT_MASK; + index = (index - 1) >> BIT_SHIFT; // parent node if offset > 0 { - if self.tree[index] != weight { - self.remove(k, self.tree[index] - weight); + if self.tree[index][offset - 1] != weight { + self.remove(k, self.tree[index][offset - 1] - weight); } else { self.remove_zero(k); } return; } - weight += self.tree[index]; + // The leaf node is in the right-most subtree of self.tree[index]. + for &node in &self.tree[index] { + weight += node; + } } + // The leaf node is the right-most node of the whole tree. if self.weight != weight { self.remove(k, self.weight - weight); } else { @@ -193,17 +223,16 @@ where } } -// Maps number of items to the "internal" size of the binary tree "implicitly" -// holding those items on the leaves. +// Maps number of items to the "internal" size of the tree +// which "implicitly" holds those items on the leaves. 
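+// For example, with FANOUT = 16: count = 0 needs 0 internal nodes, counts
+// 1..=16 need 1, counts 17..=256 need 1 + 16, and 20_000 items need
+// 1 + 16 + 256 + 4096 nodes (cf. the test_get_tree_size cases below).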
fn get_tree_size(count: usize) -> usize { - let shift = usize::BITS - - count.leading_zeros() - - if count.is_power_of_two() && count != 1 { - 1 - } else { - 0 - }; - (1usize << shift) - 1 + let mut size = if count == 1 { 1 } else { 0 }; + let mut nodes = 1; + while nodes < count { + size += nodes; + nodes *= FANOUT; + } + size } #[cfg(test)] @@ -251,25 +280,18 @@ mod tests { #[test] fn test_get_tree_size() { assert_eq!(get_tree_size(0), 0); - assert_eq!(get_tree_size(1), 1); - assert_eq!(get_tree_size(2), 1); - assert_eq!(get_tree_size(3), 3); - assert_eq!(get_tree_size(4), 3); - for count in 5..9 { - assert_eq!(get_tree_size(count), 7); + for count in 1..=16 { + assert_eq!(get_tree_size(count), 1); + } + for count in 17..=256 { + assert_eq!(get_tree_size(count), 1 + 16); } - for count in 9..17 { - assert_eq!(get_tree_size(count), 15); + for count in 257..=4096 { + assert_eq!(get_tree_size(count), 1 + 16 + 16 * 16); } - for count in 17..33 { - assert_eq!(get_tree_size(count), 31); + for count in 4097..=65536 { + assert_eq!(get_tree_size(count), 1 + 16 + 16 * 16 + 16 * 16 * 16); } - assert_eq!(get_tree_size((1 << 16) - 1), (1 << 16) - 1); - assert_eq!(get_tree_size(1 << 16), (1 << 16) - 1); - assert_eq!(get_tree_size((1 << 16) + 1), (1 << 17) - 1); - assert_eq!(get_tree_size((1 << 17) - 1), (1 << 17) - 1); - assert_eq!(get_tree_size(1 << 17), (1 << 17) - 1); - assert_eq!(get_tree_size((1 << 17) + 1), (1 << 18) - 1); } // Asserts that empty weights will return empty shuffle. From 1261f1f90075cfaedd5bd26e09c845c6efad56f1 Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Tue, 26 Mar 2024 03:47:24 -0700 Subject: [PATCH 068/153] Add analysis for bench-tps transactions (#92) * save progress * rename threads handler * added writer for txs * after extracting structure to handle tx confirmations * extract LogWriter * Replace pair TimestampedTransaction with struct * add compute_unit_price to TimestampedTransaction * add cu_price to LogWriter * add block time to the logs * Fix warnings * add comments and restructure code * some small improvements * Renamed conformation_processing.rs to log_transaction_service.rs * address numerous PR comments * split LogWriter into two structs * simplify code of LogWriters * extract process_blocks * specify commitment in LogTransactionService * break thread loop if receiver happens to be dropped * update start_slot when processing blocks * address pr comments * fix clippy error * minor changes * fix ms problem * fix bug with time in clear transaction map --- Cargo.lock | 3 + bench-tps/Cargo.toml | 3 + bench-tps/src/bench.rs | 130 ++++-- bench-tps/src/cli.rs | 25 ++ bench-tps/src/lib.rs | 2 + bench-tps/src/log_transaction_service.rs | 496 +++++++++++++++++++++++ bench-tps/src/rpc_with_retry_utils.rs | 61 +++ 7 files changed, 679 insertions(+), 41 deletions(-) create mode 100644 bench-tps/src/log_transaction_service.rs create mode 100644 bench-tps/src/rpc_with_retry_utils.rs diff --git a/Cargo.lock b/Cargo.lock index 6a2abab7f46afc..b0390f9a2d926c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5560,11 +5560,14 @@ dependencies = [ name = "solana-bench-tps" version = "2.0.0" dependencies = [ + "chrono", "clap 2.33.3", "crossbeam-channel", + "csv", "log", "rand 0.8.5", "rayon", + "serde", "serde_json", "serde_yaml 0.9.32", "serial_test", diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 3693f70e4ed9b8..80a09fc8048ccd 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -9,11 +9,14 @@ license = { workspace = true } edition = { workspace = true } 
[dependencies] +chrono = { workspace = true } clap = { workspace = true } crossbeam-channel = { workspace = true } +csv = { workspace = true } log = { workspace = true } rand = { workspace = true } rayon = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } solana-clap-utils = { workspace = true } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 8b370786861cea..f01745e6ce8c9e 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -2,9 +2,13 @@ use { crate::{ bench_tps_client::*, cli::{ComputeUnitPrice, Config, InstructionPaddingConfig}, + log_transaction_service::{ + create_log_transactions_service_and_sender, SignatureBatchSender, TransactionInfoBatch, + }, perf_utils::{sample_txs, SampleStats}, send_batch::*, }, + chrono::Utc, log::*, rand::distributions::{Distribution, Uniform}, rayon::prelude::*, @@ -87,8 +91,14 @@ fn get_transaction_loaded_accounts_data_size(enable_padding: bool) -> u32 { } } -pub type TimestampedTransaction = (Transaction, Option); -pub type SharedTransactions = Arc>>>; +#[derive(Debug, PartialEq, Default, Eq, Clone)] +pub(crate) struct TimestampedTransaction { + transaction: Transaction, + timestamp: Option, + compute_unit_price: Option, +} + +pub(crate) type SharedTransactions = Arc>>>; /// Keypairs split into source and destination /// used for transfer transactions @@ -356,6 +366,7 @@ fn create_sender_threads( threads: usize, exit_signal: Arc, shared_tx_active_thread_count: &Arc, + signatures_sender: Option, ) -> Vec> where T: 'static + BenchTpsClient + Send + Sync + ?Sized, @@ -367,6 +378,7 @@ where let shared_tx_active_thread_count = shared_tx_active_thread_count.clone(); let total_tx_sent_count = total_tx_sent_count.clone(); let client = client.clone(); + let signatures_sender = signatures_sender.clone(); Builder::new() .name("solana-client-sender".to_string()) .spawn(move || { @@ -377,6 +389,7 @@ where &total_tx_sent_count, thread_batch_sleep_ms, &client, + signatures_sender, ); }) .unwrap() @@ -406,6 +419,8 @@ where use_durable_nonce, instruction_padding_config, num_conflict_groups, + block_data_file, + transaction_data_file, .. 
} = config; @@ -464,7 +479,13 @@ where None }; - let s_threads = create_sender_threads( + let (log_transaction_service, signatures_sender) = create_log_transactions_service_and_sender( + &client, + block_data_file.as_deref(), + transaction_data_file.as_deref(), + ); + + let sender_threads = create_sender_threads( &client, &shared_txs, thread_batch_sleep_ms, @@ -472,6 +493,7 @@ where threads, exit_signal.clone(), &shared_tx_active_thread_count, + signatures_sender, ); wait_for_target_slots_per_epoch(target_slots_per_epoch, &client); @@ -499,7 +521,7 @@ where // join the tx send threads info!("Waiting for transmit threads..."); - for t in s_threads { + for t in sender_threads { if let Err(err) = t.join() { info!(" join() failed with: {:?}", err); } @@ -512,6 +534,13 @@ where } } + if let Some(log_transaction_service) = log_transaction_service { + info!("Waiting for log_transaction_service thread..."); + if let Err(err) = log_transaction_service.join() { + info!(" join() failed with: {:?}", err); + } + } + if let Some(nonce_keypairs) = nonce_keypairs { withdraw_durable_nonce_accounts(client.clone(), &gen_keypairs, &nonce_keypairs); } @@ -575,36 +604,37 @@ fn generate_system_txs( pairs_with_compute_unit_prices .par_iter() .map(|((from, to), compute_unit_price)| { - ( - transfer_with_compute_unit_price_and_padding( + let compute_unit_price = Some(**compute_unit_price); + TimestampedTransaction { + transaction: transfer_with_compute_unit_price_and_padding( from, &to.pubkey(), 1, *blockhash, instruction_padding_config, - Some(**compute_unit_price), + compute_unit_price, skip_tx_account_data_size, ), - Some(timestamp()), - ) + timestamp: Some(timestamp()), + compute_unit_price, + } }) .collect() } else { pairs .par_iter() - .map(|(from, to)| { - ( - transfer_with_compute_unit_price_and_padding( - from, - &to.pubkey(), - 1, - *blockhash, - instruction_padding_config, - None, - skip_tx_account_data_size, - ), - Some(timestamp()), - ) + .map(|(from, to)| TimestampedTransaction { + transaction: transfer_with_compute_unit_price_and_padding( + from, + &to.pubkey(), + 1, + *blockhash, + instruction_padding_config, + None, + skip_tx_account_data_size, + ), + timestamp: Some(timestamp()), + compute_unit_price: None, }) .collect() } @@ -779,8 +809,8 @@ fn generate_nonced_system_txs = get_nonce_blockhashes(&client, &pubkeys); for i in 0..length { - transactions.push(( - nonced_transfer_with_padding( + transactions.push(TimestampedTransaction { + transaction: nonced_transfer_with_padding( source[i], &dest[i].pubkey(), 1, @@ -790,16 +820,17 @@ fn generate_nonced_system_txs = dest_nonce.iter().map(|keypair| keypair.pubkey()).collect(); let blockhashes: Vec = get_nonce_blockhashes(&client, &pubkeys); for i in 0..length { - transactions.push(( - nonced_transfer_with_padding( + transactions.push(TimestampedTransaction { + transaction: nonced_transfer_with_padding( dest[i], &source[i].pubkey(), 1, @@ -809,8 +840,9 @@ fn generate_nonced_system_txs( total_tx_sent_count: &Arc, thread_batch_sleep_ms: usize, client: &Arc, + signatures_sender: Option, ) { let mut last_sent_time = timestamp(); - loop { + 'thread_loop: loop { if thread_batch_sleep_ms > 0 { sleep(Duration::from_millis(thread_batch_sleep_ms as u64)); } @@ -926,19 +959,21 @@ fn do_tx_transfers( let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers"); shared_txs_wl.pop_front() }; - if let Some(txs0) = txs { + if let Some(txs) = txs { shared_tx_thread_count.fetch_add(1, Ordering::Relaxed); - info!("Transferring 1 unit {} times...", 
txs0.len()); - let tx_len = txs0.len(); + let num_txs = txs.len(); + info!("Transferring 1 unit {} times...", num_txs); let transfer_start = Instant::now(); let mut old_transactions = false; - let mut transactions = Vec::<_>::new(); let mut min_timestamp = u64::MAX; - for tx in txs0 { + let mut transactions = Vec::<_>::with_capacity(num_txs); + let mut signatures = Vec::<_>::with_capacity(num_txs); + let mut compute_unit_prices = Vec::<_>::with_capacity(num_txs); + for tx in txs { let now = timestamp(); // Transactions without durable nonce that are too old will be rejected by the cluster Don't bother // sending them. - if let Some(tx_timestamp) = tx.1 { + if let Some(tx_timestamp) = tx.timestamp { if tx_timestamp < min_timestamp { min_timestamp = tx_timestamp; } @@ -947,7 +982,9 @@ fn do_tx_transfers( continue; } } - transactions.push(tx.0); + signatures.push(tx.transaction.signatures[0]); + transactions.push(tx.transaction); + compute_unit_prices.push(tx.compute_unit_price); } if min_timestamp != u64::MAX { @@ -957,6 +994,17 @@ fn do_tx_transfers( ); } + if let Some(signatures_sender) = &signatures_sender { + if let Err(error) = signatures_sender.send(TransactionInfoBatch { + signatures, + sent_at: Utc::now(), + compute_unit_prices, + }) { + error!("Receiver has been dropped with error `{error}`, stop sending transactions."); + break 'thread_loop; + } + } + if let Err(error) = client.send_batch(transactions) { warn!("send_batch_sync in do_tx_transfers failed: {}", error); } @@ -977,16 +1025,16 @@ fn do_tx_transfers( shared_txs_wl.clear(); } shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed); - total_tx_sent_count.fetch_add(tx_len, Ordering::Relaxed); + total_tx_sent_count.fetch_add(num_txs, Ordering::Relaxed); info!( "Tx send done. {} ms {} tps", duration_as_ms(&transfer_start.elapsed()), - tx_len as f32 / duration_as_s(&transfer_start.elapsed()), + num_txs as f32 / duration_as_s(&transfer_start.elapsed()), ); datapoint_info!( "bench-tps-do_tx_transfers", ("duration", duration_as_us(&transfer_start.elapsed()), i64), - ("count", tx_len, i64) + ("count", num_txs, i64) ); } if exit_signal.load(Ordering::Relaxed) { diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index 1804dbbc454e02..04bb869c2626bb 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -76,6 +76,8 @@ pub struct Config { pub bind_address: IpAddr, pub client_node_id: Option, pub commitment_config: CommitmentConfig, + pub block_data_file: Option, + pub transaction_data_file: Option, } impl Eq for Config {} @@ -109,6 +111,8 @@ impl Default for Config { bind_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client_node_id: None, commitment_config: CommitmentConfig::confirmed(), + block_data_file: None, + transaction_data_file: None, } } } @@ -419,6 +423,23 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .default_value("confirmed") .help("Block commitment config for getting latest blockhash"), ) + .arg( + Arg::with_name("block_data_file") + .long("block-data-file") + .value_name("FILENAME") + .takes_value(true) + .help("File to save block statistics relevant to the submitted transactions."), + ) + .arg( + Arg::with_name("transaction_data_file") + .long("transaction-data-file") + .value_name("FILENAME") + .takes_value(true) + .help( + "File to save details about all the submitted transactions.\ + This option is useful for debug purposes." 
+            ),
+        )
 }

 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -587,6 +608,10 @@ pub fn parse_args(matches: &ArgMatches) -> Result<Config, &'static str> {
     }
     args.commitment_config = value_t_or_exit!(matches, "commitment_config", CommitmentConfig);

+    args.block_data_file = matches.value_of("block_data_file").map(|s| s.to_string());
+    args.transaction_data_file = matches
+        .value_of("transaction_data_file")
+        .map(|s| s.to_string());

     Ok(args)
 }
diff --git a/bench-tps/src/lib.rs b/bench-tps/src/lib.rs
index 7da3979a30d72c..6f55a4122e4c0b 100644
--- a/bench-tps/src/lib.rs
+++ b/bench-tps/src/lib.rs
@@ -3,5 +3,7 @@ pub mod bench;
 pub mod bench_tps_client;
 pub mod cli;
 pub mod keypairs;
+mod log_transaction_service;
 mod perf_utils;
+mod rpc_with_retry_utils;
 pub mod send_batch;
diff --git a/bench-tps/src/log_transaction_service.rs b/bench-tps/src/log_transaction_service.rs
new file mode 100644
index 00000000000000..6363ff59914c83
--- /dev/null
+++ b/bench-tps/src/log_transaction_service.rs
@@ -0,0 +1,496 @@
+//! `LogTransactionService` requests confirmed blocks, analyses transactions submitted by bench-tps,
+//! and saves log files in csv format.
+
+use {
+    crate::{
+        bench_tps_client::BenchTpsClient,
+        rpc_with_retry_utils::{get_blocks_with_retry, get_slot_with_retry},
+    },
+    chrono::{DateTime, TimeZone, Utc},
+    crossbeam_channel::{select, tick, unbounded, Receiver, Sender},
+    log::*,
+    serde::Serialize,
+    solana_client::rpc_config::RpcBlockConfig,
+    solana_measure::measure::Measure,
+    solana_sdk::{
+        clock::{DEFAULT_MS_PER_SLOT, MAX_PROCESSING_AGE},
+        commitment_config::{CommitmentConfig, CommitmentLevel},
+        signature::Signature,
+        slot_history::Slot,
+    },
+    solana_transaction_status::{
+        option_serializer::OptionSerializer, EncodedTransactionWithStatusMeta, RewardType,
+        TransactionDetails, UiConfirmedBlock, UiTransactionEncoding, UiTransactionStatusMeta,
+    },
+    std::{
+        collections::HashMap,
+        fs::File,
+        sync::Arc,
+        thread::{self, Builder, JoinHandle},
+        time::Duration,
+    },
+};
+
+// Data used to establish communication between the sender thread and
+// `LogTransactionService`.
+#[derive(Clone)]
+pub(crate) struct TransactionInfoBatch {
+    pub signatures: Vec<Signature>,
+    pub sent_at: DateTime<Utc>,
+    pub compute_unit_prices: Vec<Option<u64>>,
+}
+
+pub(crate) type SignatureBatchSender = Sender<TransactionInfoBatch>;
+
+pub(crate) struct LogTransactionService {
+    thread_handler: JoinHandle<()>,
+}
+
+pub(crate) fn create_log_transactions_service_and_sender<Client>(
+    client: &Arc<Client>,
+    block_data_file: Option<&str>,
+    transaction_data_file: Option<&str>,
+) -> (Option<LogTransactionService>, Option<SignatureBatchSender>)
+where
+    Client: 'static + BenchTpsClient + Send + Sync + ?Sized,
+{
+    if data_file_provided(block_data_file, transaction_data_file) {
+        let (sender, receiver) = unbounded();
+        let log_tx_service =
+            LogTransactionService::new(client, receiver, block_data_file, transaction_data_file);
+        (Some(log_tx_service), Some(sender))
+    } else {
+        (None, None)
+    }
+}
+
+// How many blocks to process during one iteration.
+// The time to process blocks is dominated by get_block calls.
+// Each call takes slightly less time than a slot.
+const NUM_SLOTS_PER_ITERATION: u64 = 16;
+// How often to process blocks.
+const PROCESS_BLOCKS_EVERY_MS: u64 = NUM_SLOTS_PER_ITERATION * DEFAULT_MS_PER_SLOT;
+// Max age for a transaction in the transaction map; older transactions are cleaned up and marked as timed out.
+const REMOVE_TIMEOUT_TX_EVERY_MS: i64 = MAX_PROCESSING_AGE as i64 * DEFAULT_MS_PER_SLOT as i64;
+
+// Map used to filter submitted transactions.
+#[derive(Clone)]
+struct TransactionSendInfo {
+    pub sent_at: DateTime<Utc>,
+    pub compute_unit_price: Option<u64>,
+}
+type MapSignatureToTxInfo = HashMap<Signature, TransactionSendInfo>;
+
+type SignatureBatchReceiver = Receiver<TransactionInfoBatch>;
+
+impl LogTransactionService {
+    fn new<Client>(
+        client: &Arc<Client>,
+        signature_receiver: SignatureBatchReceiver,
+        block_data_file: Option<&str>,
+        transaction_data_file: Option<&str>,
+    ) -> Self
+    where
+        Client: 'static + BenchTpsClient + Send + Sync + ?Sized,
+    {
+        if !data_file_provided(block_data_file, transaction_data_file) {
+            panic!("Expected block-data-file or transaction-data-file to be specified; this must have been verified by the caller.");
+        }
+
+        let client = client.clone();
+        let tx_log_writer = TransactionLogWriter::new(transaction_data_file);
+        let block_log_writer = BlockLogWriter::new(block_data_file);
+
+        let thread_handler = Builder::new()
+            .name("LogTransactionService".to_string())
+            .spawn(move || {
+                Self::run(client, signature_receiver, tx_log_writer, block_log_writer);
+            })
+            .expect("LogTransactionService should have started successfully.");
+        Self { thread_handler }
+    }
+
+    pub fn join(self) -> thread::Result<()> {
+        self.thread_handler.join()
+    }
+
+    fn run<Client>(
+        client: Arc<Client>,
+        signature_receiver: SignatureBatchReceiver,
+        mut tx_log_writer: TransactionLogWriter,
+        mut block_log_writer: BlockLogWriter,
+    ) where
+        Client: 'static + BenchTpsClient + Send + Sync + ?Sized,
+    {
+        // Used to request block data; only `confirmed` commitment makes sense in this context.
+        let commitment: CommitmentConfig = CommitmentConfig {
+            commitment: CommitmentLevel::Confirmed,
+        };
+        let block_processing_timer_receiver = tick(Duration::from_millis(PROCESS_BLOCKS_EVERY_MS));
+
+        let mut start_slot = get_slot_with_retry(&client, commitment)
+            .expect("get_slot_with_retry should have succeeded; cannot proceed without a slot. Must be a problem with RPC.");
+
+        let mut sender_stopped = false;
+        let mut signature_to_tx_info = MapSignatureToTxInfo::new();
+        loop {
+            select! {
+                recv(signature_receiver) -> msg => {
+                    match msg {
+                        Ok(TransactionInfoBatch {
+                            signatures,
+                            sent_at,
+                            compute_unit_prices
+                        }) => {
+                            signatures.iter().zip(compute_unit_prices).for_each(
+                                |(sign, compute_unit_price)| {
+                                    signature_to_tx_info.insert(*sign, TransactionSendInfo {
+                                        sent_at,
+                                        compute_unit_price
+                                    });
+                                });
+                        }
+                        Err(_) => {
+                            sender_stopped = true;
+                        }
+                    }
+                },
+                recv(block_processing_timer_receiver) -> _ => {
+                    info!("sign_receiver queue len: {}", signature_receiver.len());
+                    if !signature_receiver.is_empty() {
+                        continue;
+                    }
+                    let mut measure_get_blocks = Measure::start("measure_get_blocks");
+                    let block_slots = get_blocks_with_retry(&client, start_slot, Some(start_slot + NUM_SLOTS_PER_ITERATION - 1), commitment);
+                    measure_get_blocks.stop();
+                    let time_get_blocks_us = measure_get_blocks.as_us();
+                    info!("Time to get_blocks : {time_get_blocks_us}us.");
+                    let Ok(block_slots) = block_slots else {
+                        error!("Failed to get blocks, stop LogTransactionService.");
+                        break;
+                    };
+                    if block_slots.is_empty() {
+                        continue;
+                    }
+                    let last_block_time = Self::process_blocks(
+                        &client,
+                        block_slots,
+                        &mut signature_to_tx_info,
+                        &mut tx_log_writer,
+                        &mut block_log_writer,
+                        commitment,
+                    );
+                    Self::clean_transaction_map(&mut tx_log_writer, &mut signature_to_tx_info, last_block_time);
+
+                    start_slot = start_slot.saturating_add(NUM_SLOTS_PER_ITERATION);
+                    tx_log_writer.flush();
+                    block_log_writer.flush();
+                    if sender_stopped && signature_to_tx_info.is_empty() {
+                        info!("Stop LogTransactionService");
+                        break;
+                    }
+                },
+            }
+        }
+    }
+
+    /// Downloads and processes the blocks.
+    /// Returns the time when the last processed block was confirmed, or now().
+    fn process_blocks<Client>(
+        client: &Arc<Client>,
+        block_slots: Vec<Slot>,
+        signature_to_tx_info: &mut MapSignatureToTxInfo,
+        tx_log_writer: &mut TransactionLogWriter,
+        block_log_writer: &mut BlockLogWriter,
+        commitment: CommitmentConfig,
+    ) -> DateTime<Utc>
+    where
+        Client: 'static + BenchTpsClient + Send + Sync + ?Sized,
+    {
+        let rpc_block_config = RpcBlockConfig {
+            encoding: Some(UiTransactionEncoding::Base64),
+            transaction_details: Some(TransactionDetails::Full),
+            rewards: Some(true),
+            commitment: Some(commitment),
+            max_supported_transaction_version: Some(0),
+        };
+        let mut measure_process_blocks = Measure::start("measure_process_blocks");
+        let blocks = block_slots
+            .iter()
+            .map(|slot| client.get_block_with_config(*slot, rpc_block_config));
+        let num_blocks = blocks.len();
+        let mut last_block_time = None;
+        for (block, slot) in blocks.zip(&block_slots) {
+            let Ok(block) = block else {
+                continue;
+            };
+            let block_time = Self::process_block(
+                block,
+                signature_to_tx_info,
+                *slot,
+                tx_log_writer,
+                block_log_writer,
+            );
+            // If `block_time` is `Some`, there is at least one valid block.
+            if block_time.is_some() {
+                last_block_time = block_time;
+            }
+        }
+        measure_process_blocks.stop();
+        let time_process_blocks_us = measure_process_blocks.as_us();
+        info!("Time to process {num_blocks} blocks: {time_process_blocks_us}us.");
+        last_block_time.unwrap_or_else(Utc::now)
+    }
+
+    fn process_block(
+        block: UiConfirmedBlock,
+        signature_to_tx_info: &mut MapSignatureToTxInfo,
+        slot: u64,
+        tx_log_writer: &mut TransactionLogWriter,
+        block_log_writer: &mut BlockLogWriter,
+    ) -> Option<DateTime<Utc>> {
+        let rewards = block
+            .rewards
+            .as_ref()
+            .expect("Rewards should be part of the block information.");
+        let slot_leader = rewards
+            .iter()
+            .find(|r| r.reward_type == Some(RewardType::Fee))
+            .map_or("".to_string(), |x| x.pubkey.clone());
+
+        let Some(transactions) = &block.transactions else {
+            warn!("Empty block: {slot}");
+            return None;
+        };
+
+        let mut num_bench_tps_transactions: usize = 0;
+        let mut total_cu_consumed: u64 = 0;
+        let mut bench_tps_cu_consumed: u64 = 0;
+        for EncodedTransactionWithStatusMeta {
+            transaction, meta, ..
+        } in transactions
+        {
+            let Some(transaction) = transaction.decode() else {
+                continue;
+            };
+            let cu_consumed = meta
+                .as_ref()
+                .map_or(0, |meta| match meta.compute_units_consumed {
+                    OptionSerializer::Some(cu_consumed) => cu_consumed,
+                    _ => 0,
+                });
+            let signature = &transaction.signatures[0];
+
+            total_cu_consumed = total_cu_consumed.saturating_add(cu_consumed);
+            if let Some(TransactionSendInfo {
+                sent_at,
+                compute_unit_price,
+            }) = signature_to_tx_info.remove(signature)
+            {
+                num_bench_tps_transactions = num_bench_tps_transactions.saturating_add(1);
+                bench_tps_cu_consumed = bench_tps_cu_consumed.saturating_add(cu_consumed);
+
+                tx_log_writer.write(
+                    Some(block.blockhash.clone()),
+                    Some(slot_leader.clone()),
+                    signature,
+                    sent_at,
+                    Some(slot),
+                    block.block_time,
+                    meta.as_ref(),
+                    false,
+                    compute_unit_price,
+                );
+            }
+        }
+        block_log_writer.write(
+            block.blockhash.clone(),
+            slot_leader,
+            slot,
+            block.block_time,
+            num_bench_tps_transactions,
+            transactions.len(),
+            bench_tps_cu_consumed,
+            total_cu_consumed,
+        );
+
+        block.block_time.map(|time| {
+            Utc.timestamp_opt(time, 0)
+                .latest()
+                .expect("valid timestamp")
+        })
+    }
+
+    /// Removes from the map all signatures that have not yet been seen in a
+    /// processed block and that are older than the timestamp of the last
+    /// processed block plus the max blockhash age.
+    fn clean_transaction_map(
+        tx_log_writer: &mut TransactionLogWriter,
+        signature_to_tx_info: &mut MapSignatureToTxInfo,
+        last_block_time: DateTime<Utc>,
+    ) {
+        signature_to_tx_info.retain(|signature, tx_info| {
+            let duration_since_sent = last_block_time.signed_duration_since(tx_info.sent_at);
+            let is_timeout_tx = duration_since_sent.num_milliseconds() > REMOVE_TIMEOUT_TX_EVERY_MS;
+            if is_timeout_tx {
+                tx_log_writer.write(
+                    None,
+                    None,
+                    signature,
+                    tx_info.sent_at,
+                    None,
+                    None,
+                    None,
+                    true,
+                    tx_info.compute_unit_price,
+                );
+            }
+            !is_timeout_tx
+        });
+    }
+}
+
+fn data_file_provided(block_data_file: Option<&str>, transaction_data_file: Option<&str>) -> bool {
+    block_data_file.is_some() || transaction_data_file.is_some()
+}
+
+type CsvFileWriter = csv::Writer<File>;
+
+#[derive(Clone, Serialize)]
+struct BlockData {
+    pub blockhash: String,
+    pub block_slot: Slot,
+    pub slot_leader: String,
+    pub block_time: Option<DateTime<Utc>>,
+    pub total_num_transactions: usize,
+    pub num_bench_tps_transactions: usize,
+    pub total_cu_consumed: u64,
+    pub bench_tps_cu_consumed: u64,
+}
+
+struct BlockLogWriter {
+    log_writer: Option<CsvFileWriter>,
+}
+
+impl BlockLogWriter {
+    fn new(block_data_file: Option<&str>) -> Self {
+        let block_log_writer = block_data_file.map(|block_data_file| {
+            CsvFileWriter::from_writer(
+                File::create(block_data_file)
+                    .expect("Application should be able to create a file."),
+            )
+        });
+        Self {
+            log_writer: block_log_writer,
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn write(
+        &mut self,
+        blockhash: String,
+        slot_leader: String,
+        slot: Slot,
+        block_time: Option<i64>,
+        num_bench_tps_transactions: usize,
+        total_num_transactions: usize,
+        bench_tps_cu_consumed: u64,
+        total_cu_consumed: u64,
+    ) {
+        let Some(block_log_writer) = &mut self.log_writer else {
+            return;
+        };
+        let block_data = BlockData {
+            blockhash,
+            slot_leader,
+            block_slot: slot,
+            block_time: block_time.map(|time| {
+                Utc.timestamp_opt(time, 0)
+                    .latest()
+                    .expect("timestamp should be valid")
+            }),
+            num_bench_tps_transactions,
+            total_num_transactions,
+            bench_tps_cu_consumed,
+            total_cu_consumed,
+        };
+        let _ = block_log_writer.serialize(block_data);
+    }
+
+    fn flush(&mut self) {
+        if let Some(block_log_writer) = &mut self.log_writer {
+            let _ = block_log_writer.flush();
+        }
+    }
+}
+
+#[derive(Clone, Serialize)]
+struct TransactionData {
+    pub blockhash: Option<String>,
+    pub slot_leader: Option<String>,
+    pub signature: String,
+    pub sent_at: Option<DateTime<Utc>>,
+    pub confirmed_slot: Option<Slot>,
+    pub block_time: Option<DateTime<Utc>>,
+    pub successful: bool,
+    pub error: Option<String>,
+    pub timed_out: bool,
+    pub compute_unit_price: u64,
+}
+
+struct TransactionLogWriter {
+    log_writer: Option<CsvFileWriter>,
+}
+
+impl TransactionLogWriter {
+    fn new(transaction_data_file: Option<&str>) -> Self {
+        let transaction_log_writer = transaction_data_file.map(|transaction_data_file| {
+            CsvFileWriter::from_writer(
+                File::create(transaction_data_file)
+                    .expect("Application should be able to create a file."),
+            )
+        });
+        Self {
+            log_writer: transaction_log_writer,
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn write(
+        &mut self,
+        blockhash: Option<String>,
+        slot_leader: Option<String>,
+        signature: &Signature,
+        sent_at: DateTime<Utc>,
+        confirmed_slot: Option<Slot>,
+        block_time: Option<i64>,
+        meta: Option<&UiTransactionStatusMeta>,
+        timed_out: bool,
+        compute_unit_price: Option<u64>,
+    ) {
+        let Some(transaction_log_writer) = &mut self.log_writer else {
+            return;
+        };
+        let tx_data = TransactionData {
+            blockhash,
+            slot_leader,
+            signature: signature.to_string(),
+            sent_at: Some(sent_at),
+            confirmed_slot,
+            block_time: block_time.map(|time| {
+                Utc.timestamp_opt(time, 0)
+                    .latest()
+                    .expect("valid timestamp")
+            }),
+            successful: meta.as_ref().map_or(false, |m| m.status.is_ok()),
+            error: meta
+                .as_ref()
+                .and_then(|m| m.err.as_ref().map(|x| x.to_string())),
+            timed_out,
+            compute_unit_price: compute_unit_price.unwrap_or(0),
+        };
+        let _ = transaction_log_writer.serialize(tx_data);
+    }
+
+    fn flush(&mut self) {
+        if let Some(transaction_log_writer) = &mut self.log_writer {
+            let _ = transaction_log_writer.flush();
+        }
+    }
+}
diff --git a/bench-tps/src/rpc_with_retry_utils.rs b/bench-tps/src/rpc_with_retry_utils.rs
new file mode 100644
index 00000000000000..57af3923f0aeda
--- /dev/null
+++ b/bench-tps/src/rpc_with_retry_utils.rs
@@ -0,0 +1,61 @@
+use {
+    crate::bench_tps_client::{BenchTpsClient, Result},
+    log::*,
+    solana_sdk::{
+        clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, slot_history::Slot,
+    },
+    std::{sync::Arc, thread::sleep, time::Duration},
+};
+
+const NUM_RETRY: u64 = 5;
+const RETRY_EVERY_MS: u64 = 4 * DEFAULT_MS_PER_SLOT;
+
+fn call_rpc_with_retry<Func, Data>(f: Func, retry_warning: &str) -> Result<Data>
+where
+    Func: Fn() -> Result<Data>,
+{
+    let mut iretry = 0;
+    loop {
+        match f() {
+            Ok(slot) => {
+                return Ok(slot);
+            }
+            Err(error) => {
+                if iretry == NUM_RETRY {
+                    return Err(error);
+                }
+                warn!("{retry_warning}: {error}, retry.");
+                sleep(Duration::from_millis(RETRY_EVERY_MS));
+            }
+        }
+        iretry += 1;
+    }
+}
+
+pub(crate) fn get_slot_with_retry<Client>(
+    client: &Arc<Client>,
+    commitment: CommitmentConfig,
+) -> Result<Slot>
+where
+    Client: 'static + BenchTpsClient + Send + Sync + ?Sized,
+{
+    call_rpc_with_retry(
+        || client.get_slot_with_commitment(commitment),
+        "Failed to get slot",
+    )
+}
+
+pub(crate) fn get_blocks_with_retry<Client>(
+    client: &Arc<Client>,
+    start_slot: Slot,
+    end_slot: Option<Slot>,
+    commitment: CommitmentConfig,
+) -> Result<Vec<Slot>>
+where
+    Client: 'static + BenchTpsClient + Send + Sync + ?Sized,
+{
+    call_rpc_with_retry(
+        || client.get_blocks_with_commitment(start_slot, end_slot, commitment),
+        "Failed to download blocks",
+    )
+}

From a3bc406b55f238f5e284cb06baa84b60ae81e641 Mon Sep 17 00:00:00 2001
From: samkim-crypto
Date: Tue, 26 Mar 2024 19:54:06 +0900
Subject: [PATCH 069/153] [zk-token-sdk] Remove `std::thread` from wasm target
 (#379)

---
 zk-token-sdk/src/encryption/discrete_log.rs | 76 +++++++++++++--------
 zk-token-sdk/src/encryption/elgamal.rs      |  1 +
 2 files changed, 48 insertions(+), 29 deletions(-)

diff --git a/zk-token-sdk/src/encryption/discrete_log.rs b/zk-token-sdk/src/encryption/discrete_log.rs
index b3e02a74625b61..5ffc1c206a6f68 100644
--- a/zk-token-sdk/src/encryption/discrete_log.rs
+++ b/zk-token-sdk/src/encryption/discrete_log.rs
@@ -16,6 +16,8 @@

 #![cfg(not(target_os = "solana"))]

+#[cfg(not(target_arch = "wasm32"))]
+use std::thread;
 use {
     crate::RISTRETTO_POINT_LEN,
     curve25519_dalek::{
@@ -26,7 +28,7 @@ use {
     },
     itertools::Itertools,
     serde::{Deserialize, Serialize},
-    std::{collections::HashMap, thread},
+    std::collections::HashMap,
     thiserror::Error,
 };

@@ -34,6 +36,7 @@
 const TWO16: u64 = 65536; // 2^16
 const TWO17: u64 = 131072; // 2^17

 /// Maximum number of threads permitted for discrete log computation
+#[cfg(not(target_arch = "wasm32"))]
 const MAX_THREAD: usize = 65536;

 #[derive(Error, Clone, Debug, Eq, PartialEq)]
@@ -112,6 +115,7 @@ impl DiscreteLog {
     }

     /// Adjusts number of threads in a discrete log instance.
+    #[cfg(not(target_arch = "wasm32"))]
     pub fn num_threads(&mut self, num_threads: usize) -> Result<(), DiscreteLogError> {
         // number of threads must be a positive power-of-two integer
         if num_threads == 0 || (num_threads & (num_threads - 1)) != 0 || num_threads > MAX_THREAD {
@@ -141,35 +145,48 @@ impl DiscreteLog {

     /// Solves the discrete log problem under the assumption that the solution
     /// is a positive 32-bit number.
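+    /// On non-wasm targets the search is split across `num_threads` worker
+    /// threads; on wasm32 the same scan runs on a single thread.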
     pub fn decode_u32(self) -> Option<u64> {
-        let mut starting_point = self.target;
-        let handles = (0..self.num_threads)
-            .map(|i| {
-                let ristretto_iterator = RistrettoIterator::new(
-                    (starting_point, i as u64),
-                    (-(&self.step_point), self.num_threads as u64),
-                );
-
-                let handle = thread::spawn(move || {
-                    Self::decode_range(
-                        ristretto_iterator,
-                        self.range_bound,
-                        self.compression_batch_size,
-                    )
-                });
-
-                starting_point -= G;
-                handle
-            })
-            .collect::<Vec<_>>();
-
-        let mut solution = None;
-        for handle in handles {
-            let discrete_log = handle.join().unwrap();
-            if discrete_log.is_some() {
-                solution = discrete_log;
-            }
+        #[cfg(not(target_arch = "wasm32"))]
+        {
+            let mut starting_point = self.target;
+            let handles = (0..self.num_threads)
+                .map(|i| {
+                    let ristretto_iterator = RistrettoIterator::new(
+                        (starting_point, i as u64),
+                        (-(&self.step_point), self.num_threads as u64),
+                    );
+
+                    let handle = thread::spawn(move || {
+                        Self::decode_range(
+                            ristretto_iterator,
+                            self.range_bound,
+                            self.compression_batch_size,
+                        )
+                    });
+
+                    starting_point -= G;
+                    handle
+                })
+                .collect::<Vec<_>>();
+
+            handles
+                .into_iter()
+                .map_while(|h| h.join().ok())
+                .find(|x| x.is_some())
+                .flatten()
+        }
+        #[cfg(target_arch = "wasm32")]
+        {
+            let ristretto_iterator = RistrettoIterator::new(
+                (self.target, 0_u64),
+                (-(&self.step_point), self.num_threads as u64),
+            );
+
+            Self::decode_range(
+                ristretto_iterator,
+                self.range_bound,
+                self.compression_batch_size,
+            )
         }
-        solution
     }

     fn decode_range(
@@ -274,6 +291,7 @@ mod tests {
         println!("single thread discrete log computation secs: {computation_secs:?} sec");
     }

+    #[cfg(not(target_arch = "wasm32"))]
     #[test]
     fn test_decode_correctness_threaded() {
         // general case
diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs
index 5b4e2dba872530..e499106e1e58b2 100644
--- a/zk-token-sdk/src/encryption/elgamal.rs
+++ b/zk-token-sdk/src/encryption/elgamal.rs
@@ -791,6 +791,7 @@ mod tests {
         assert_eq!(57_u64, secret.decrypt_u32(&ciphertext).unwrap());
     }

+    #[cfg(not(target_arch = "wasm32"))]
     #[test]
     fn test_encrypt_decrypt_correctness_multithreaded() {
         let ElGamalKeypair { public, secret } = ElGamalKeypair::new_rand();

From 36270385417d3373385dbae68a75c9d3beb3ad3b Mon Sep 17 00:00:00 2001
From: Joe C
Date: Tue, 26 Mar 2024 07:05:10 -0500
Subject: [PATCH 070/153] Runtime: Core BPF Migration: Struct for loading and
 checking source BPF program accounts (#332)

* runtime: core_bpf_migration: add source_bpf_upgradeable config

* runtime: core_bpf_migration: add source_bpf_upgradeable config tests

* renamings

* bincode error

* drop `total_size`
---
 .../bank/builtins/core_bpf_migration/error.rs |  17 +-
 .../bank/builtins/core_bpf_migration/mod.rs   |   1 +
 .../source_upgradeable_bpf.rs                 | 348 ++++++++++++++++++
 .../core_bpf_migration/target_builtin.rs      |  51 +--
 4 files changed, 385 insertions(+), 32 deletions(-)
 create mode 100644 runtime/src/bank/builtins/core_bpf_migration/source_upgradeable_bpf.rs

diff --git a/runtime/src/bank/builtins/core_bpf_migration/error.rs b/runtime/src/bank/builtins/core_bpf_migration/error.rs
index e55469e0211207..2bc54a46dabb74 100644
--- a/runtime/src/bank/builtins/core_bpf_migration/error.rs
+++ b/runtime/src/bank/builtins/core_bpf_migration/error.rs
@@ -1,8 +1,11 @@
 use {solana_sdk::pubkey::Pubkey, thiserror::Error};

 /// Errors returned by a Core BPF migration.
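+// Note: `PartialEq` is dropped from the derive below because the new
+// `BincodeError` variant wraps `bincode::Error`, which is not comparable.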
-#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error)] pub enum CoreBpfMigrationError { + /// Bincode serialization error + #[error("Bincode serialization error: {0:?}")] + BincodeError(#[from] bincode::Error), /// Account not found #[error("Account not found: {0:?}")] AccountNotFound(Pubkey), @@ -15,4 +18,16 @@ pub enum CoreBpfMigrationError { /// Program has a data account #[error("Data account exists for program {0:?}")] ProgramHasDataAccount(Pubkey), + /// Program has no data account + #[error("Data account does not exist for program {0:?}")] + ProgramHasNoDataAccount(Pubkey), + /// Invalid program account + #[error("Invalid program account: {0:?}")] + InvalidProgramAccount(Pubkey), + /// Invalid program data account + #[error("Invalid program data account: {0:?}")] + InvalidProgramDataAccount(Pubkey), + /// Arithmetic overflow + #[error("Arithmetic overflow")] + ArithmeticOverflow, } diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 6a09df8dd13136..6e1af6b6c17184 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -1,5 +1,6 @@ #![allow(dead_code)] // Removed in later commit pub(crate) mod error; +mod source_upgradeable_bpf; mod target_builtin; pub(crate) enum CoreBpfMigrationTargetType { diff --git a/runtime/src/bank/builtins/core_bpf_migration/source_upgradeable_bpf.rs b/runtime/src/bank/builtins/core_bpf_migration/source_upgradeable_bpf.rs new file mode 100644 index 00000000000000..67642a69604fb6 --- /dev/null +++ b/runtime/src/bank/builtins/core_bpf_migration/source_upgradeable_bpf.rs @@ -0,0 +1,348 @@ +use { + super::error::CoreBpfMigrationError, + crate::bank::Bank, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + bpf_loader_upgradeable::{ + get_program_data_address, UpgradeableLoaderState, ID as BPF_LOADER_UPGRADEABLE_ID, + }, + pubkey::Pubkey, + }, +}; + +/// The account details of an Upgradeable BPF program slated to replace a +/// built-in program. +#[derive(Debug)] +pub(crate) struct SourceUpgradeableBpf { + pub program_address: Pubkey, + pub program_account: AccountSharedData, + pub program_data_address: Pubkey, + pub program_data_account: AccountSharedData, +} + +impl SourceUpgradeableBpf { + fn check_program_account(&self) -> Result<(), CoreBpfMigrationError> { + // The program account should be owned by the upgradeable loader. + if self.program_account.owner() != &BPF_LOADER_UPGRADEABLE_ID { + return Err(CoreBpfMigrationError::IncorrectOwner(self.program_address)); + } + + // The program account should have a pointer to its data account. + if let UpgradeableLoaderState::Program { + programdata_address, + } = &self.program_account.deserialize_data()? + { + if programdata_address != &self.program_data_address { + return Err(CoreBpfMigrationError::InvalidProgramAccount( + self.program_address, + )); + } + } + + Ok(()) + } + + fn check_program_data_account(&self) -> Result<(), CoreBpfMigrationError> { + // The program data account should be owned by the upgradeable loader. + if self.program_data_account.owner() != &BPF_LOADER_UPGRADEABLE_ID { + return Err(CoreBpfMigrationError::IncorrectOwner( + self.program_data_address, + )); + } + + // The program data account should have the correct state. 
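+        // (a `ProgramData` header followed by the program's ELF bytes).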
+ let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); + if self.program_data_account.data().len() < programdata_data_offset { + return Err(CoreBpfMigrationError::InvalidProgramDataAccount( + self.program_data_address, + )); + } + // Length checked in previous block. + match bincode::deserialize::( + &self.program_data_account.data()[..programdata_data_offset], + )? { + UpgradeableLoaderState::ProgramData { .. } => Ok(()), + _ => Err(CoreBpfMigrationError::InvalidProgramDataAccount( + self.program_data_address, + )), + } + } + + /// Collects the details of an upgradeable BPF program and verifies it is + /// properly configured. + /// The program account should exist with a pointer to its data account. + /// The program data account should exist with the correct state + /// (a ProgramData header and the program ELF). + pub(crate) fn new_checked( + bank: &Bank, + program_address: &Pubkey, + ) -> Result { + // The program account should exist. + let program_account = bank + .get_account_with_fixed_root(program_address) + .ok_or(CoreBpfMigrationError::AccountNotFound(*program_address))?; + + // The program data account should exist. + let program_data_address = get_program_data_address(program_address); + let program_data_account = bank + .get_account_with_fixed_root(&program_data_address) + .ok_or(CoreBpfMigrationError::ProgramHasNoDataAccount( + *program_address, + ))?; + + let source_upgradeable_bpf = Self { + program_address: *program_address, + program_account, + program_data_address, + program_data_account, + }; + + source_upgradeable_bpf.check_program_account()?; + source_upgradeable_bpf.check_program_data_account()?; + + Ok(source_upgradeable_bpf) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::bank::tests::create_simple_test_bank, + assert_matches::assert_matches, + solana_sdk::{account::Account, bpf_loader_upgradeable::ID as BPF_LOADER_UPGRADEABLE_ID}, + }; + + fn store_account( + bank: &Bank, + address: &Pubkey, + data: &T, + additional_data: Option<&[u8]>, + executable: bool, + owner: &Pubkey, + ) { + let mut data = bincode::serialize(data).unwrap(); + if let Some(additional_data) = additional_data { + data.extend_from_slice(additional_data); + } + let data_len = data.len(); + let lamports = bank.get_minimum_balance_for_rent_exemption(data_len); + let account = AccountSharedData::from(Account { + data, + executable, + lamports, + owner: *owner, + ..Account::default() + }); + bank.store_account_and_update_capitalization(address, &account); + } + + #[test] + fn test_source_upgradeable_bpf() { + let bank = create_simple_test_bank(0); + + let program_id = Pubkey::new_unique(); + let program_data_address = get_program_data_address(&program_id); + + // Fail if the program account does not exist + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::AccountNotFound(..) + ); + + // Store the proper program account + let proper_program_account_state = UpgradeableLoaderState::Program { + programdata_address: program_data_address, + }; + store_account( + &bank, + &program_id, + &proper_program_account_state, + None, + true, + &BPF_LOADER_UPGRADEABLE_ID, + ); + + // Fail if the program data account does not exist + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::ProgramHasNoDataAccount(..) 
+ ); + + // Store the proper program data account + let proper_program_data_account_state = UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address: Some(Pubkey::new_unique()), + }; + store_account( + &bank, + &program_data_address, + &proper_program_data_account_state, + Some(&[4u8; 200]), + false, + &BPF_LOADER_UPGRADEABLE_ID, + ); + + // Success + let source_upgradeable_bpf = SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap(); + + let check_program_account_data = bincode::serialize(&proper_program_account_state).unwrap(); + let check_program_account_data_len = check_program_account_data.len(); + let check_program_lamports = + bank.get_minimum_balance_for_rent_exemption(check_program_account_data_len); + let check_program_account = AccountSharedData::from(Account { + data: check_program_account_data, + executable: true, + lamports: check_program_lamports, + owner: BPF_LOADER_UPGRADEABLE_ID, + ..Account::default() + }); + + let mut check_program_data_account_data = + bincode::serialize(&proper_program_data_account_state).unwrap(); + check_program_data_account_data.extend_from_slice(&[4u8; 200]); + let check_program_data_account_data_len = check_program_data_account_data.len(); + let check_program_data_lamports = + bank.get_minimum_balance_for_rent_exemption(check_program_data_account_data_len); + let check_program_data_account = AccountSharedData::from(Account { + data: check_program_data_account_data, + executable: false, + lamports: check_program_data_lamports, + owner: BPF_LOADER_UPGRADEABLE_ID, + ..Account::default() + }); + + assert_eq!(source_upgradeable_bpf.program_address, program_id); + assert_eq!( + source_upgradeable_bpf.program_account, + check_program_account + ); + assert_eq!( + source_upgradeable_bpf.program_data_address, + program_data_address + ); + assert_eq!( + source_upgradeable_bpf.program_data_account, + check_program_data_account + ); + } + + #[test] + fn test_source_upgradeable_bpf_bad_program_account() { + let bank = create_simple_test_bank(0); + + let program_id = Pubkey::new_unique(); + let program_data_address = get_program_data_address(&program_id); + + // Store the proper program data account + store_account( + &bank, + &program_data_address, + &UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address: Some(Pubkey::new_unique()), + }, + Some(&[4u8; 200]), + false, + &BPF_LOADER_UPGRADEABLE_ID, + ); + + // Fail if the program account is not owned by the upgradeable loader + store_account( + &bank, + &program_id, + &UpgradeableLoaderState::Program { + programdata_address: program_data_address, + }, + None, + true, + &Pubkey::new_unique(), // Not the upgradeable loader + ); + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::IncorrectOwner(..) + ); + + // Fail if the program account's state is not `UpgradeableLoaderState::Program` + store_account( + &bank, + &program_id, + &vec![0u8; 200], + None, + true, + &BPF_LOADER_UPGRADEABLE_ID, + ); + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::BincodeError(..) 
+ ); + + // Fail if the program account's state is `UpgradeableLoaderState::Program`, + // but it points to the wrong data account + store_account( + &bank, + &program_id, + &UpgradeableLoaderState::Program { + programdata_address: Pubkey::new_unique(), // Not the correct data account + }, + None, + true, + &BPF_LOADER_UPGRADEABLE_ID, + ); + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::InvalidProgramAccount(..) + ); + } + + #[test] + fn test_source_upgradeable_bpf_bad_program_data_account() { + let bank = create_simple_test_bank(0); + + let program_id = Pubkey::new_unique(); + let program_data_address = get_program_data_address(&program_id); + + // Store the proper program account + store_account( + &bank, + &program_id, + &UpgradeableLoaderState::Program { + programdata_address: program_data_address, + }, + None, + true, + &BPF_LOADER_UPGRADEABLE_ID, + ); + + // Fail if the program data account is not owned by the upgradeable loader + store_account( + &bank, + &program_data_address, + &UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address: Some(Pubkey::new_unique()), + }, + Some(&[4u8; 200]), + false, + &Pubkey::new_unique(), // Not the upgradeable loader + ); + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::IncorrectOwner(..) + ); + + // Fail if the program data account does not have the correct state + store_account( + &bank, + &program_data_address, + &vec![4u8; 200], // Not the correct state + None, + false, + &BPF_LOADER_UPGRADEABLE_ID, + ); + assert_matches!( + SourceUpgradeableBpf::new_checked(&bank, &program_id).unwrap_err(), + CoreBpfMigrationError::BincodeError(..) + ); + } +} diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs index e370fa825ff5fd..fdd8c3279fd54f 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs @@ -11,14 +11,13 @@ use { /// The account details of a built-in program to be migrated to Core BPF. #[derive(Debug)] -pub(crate) struct TargetProgramBuiltin { +pub(crate) struct TargetBuiltin { pub program_address: Pubkey, pub program_account: AccountSharedData, pub program_data_address: Pubkey, - pub total_data_size: usize, } -impl TargetProgramBuiltin { +impl TargetBuiltin { /// Collects the details of a built-in program and verifies it is properly /// configured pub(crate) fn new_checked( @@ -62,14 +61,10 @@ impl TargetProgramBuiltin { )); } - // The total data size is the size of the program account's data. 
- let total_data_size = program_account.data().len(); - Ok(Self { program_address: *program_address, program_account, program_data_address, - total_data_size, }) } } @@ -79,6 +74,7 @@ mod tests { use { super::*, crate::bank::{tests::create_simple_test_bank, ApplyFeatureActivationsCaller}, + assert_matches::assert_matches, solana_sdk::{ account::Account, bpf_loader_upgradeable::{UpgradeableLoaderState, ID as BPF_LOADER_UPGRADEABLE_ID}, @@ -145,11 +141,10 @@ mod tests { // Success let target_builtin = - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); assert_eq!(target_builtin.program_address, program_address); assert_eq!(target_builtin.program_account, program_account); assert_eq!(target_builtin.program_data_address, program_data_address); - assert_eq!(target_builtin.total_data_size, program_account.data().len()); // Fail if the program account is not owned by the native loader store_account( @@ -159,10 +154,9 @@ mod tests { true, &Pubkey::new_unique(), // Not the native loader ); - assert_eq!( - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) - .unwrap_err(), - CoreBpfMigrationError::IncorrectOwner(program_address) + assert_matches!( + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap_err(), + CoreBpfMigrationError::IncorrectOwner(..) ); // Fail if the program data account exists @@ -183,10 +177,9 @@ mod tests { false, &BPF_LOADER_UPGRADEABLE_ID, ); - assert_eq!( - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) - .unwrap_err(), - CoreBpfMigrationError::ProgramHasDataAccount(program_address) + assert_matches!( + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap_err(), + CoreBpfMigrationError::ProgramHasDataAccount(..) ); // Fail if the program account does not exist @@ -194,10 +187,9 @@ mod tests { &program_address, &AccountSharedData::default(), ); - assert_eq!( - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) - .unwrap_err(), - CoreBpfMigrationError::AccountNotFound(program_address) + assert_matches!( + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap_err(), + CoreBpfMigrationError::AccountNotFound(..) ); } @@ -212,11 +204,10 @@ mod tests { // Success let target_builtin = - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap(); assert_eq!(target_builtin.program_address, program_address); assert_eq!(target_builtin.program_account, program_account); assert_eq!(target_builtin.program_data_address, program_data_address); - assert_eq!(target_builtin.total_data_size, program_account.data().len()); // Fail if the program data account exists store_account( @@ -229,10 +220,9 @@ mod tests { false, &BPF_LOADER_UPGRADEABLE_ID, ); - assert_eq!( - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) - .unwrap_err(), - CoreBpfMigrationError::ProgramHasDataAccount(program_address) + assert_matches!( + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap_err(), + CoreBpfMigrationError::ProgramHasDataAccount(..) 
); // Fail if the program account exists @@ -243,10 +233,9 @@ mod tests { true, &NATIVE_LOADER_ID, ); - assert_eq!( - TargetProgramBuiltin::new_checked(&bank, &program_address, &migration_target) - .unwrap_err(), - CoreBpfMigrationError::AccountExists(program_address) + assert_matches!( + TargetBuiltin::new_checked(&bank, &program_address, &migration_target).unwrap_err(), + CoreBpfMigrationError::AccountExists(..) ); } } From 8e37ad7665359b7b61b5e339c04ed8420af4ab01 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 26 Mar 2024 09:30:06 -0400 Subject: [PATCH 071/153] Removes unused dependencies from accounts-db crate (#416) --- Cargo.lock | 11 ----------- accounts-db/Cargo.toml | 23 ++++++----------------- programs/sbf/Cargo.lock | 16 ---------------- 3 files changed, 6 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0390f9a2d926c..a8836adf757915 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5382,21 +5382,16 @@ dependencies = [ name = "solana-accounts-db" version = "2.0.0" dependencies = [ - "arrayref", "assert_matches", "bincode", "blake3", "bv", "bytemuck", - "byteorder", "bzip2", "criterion", "crossbeam-channel", "dashmap", "ed25519-dalek", - "flate2", - "fnv", - "im", "index_list", "indexmap 2.2.5", "itertools", @@ -5407,16 +5402,12 @@ dependencies = [ "memmap2", "memoffset 0.9.0", "modular-bitfield", - "num-derive", - "num-traits", "num_cpus", "num_enum", - "percentage", "qualifier_attr", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "regex", "rustc_version 0.4.0", "seqlock", "serde", @@ -5424,7 +5415,6 @@ dependencies = [ "smallvec", "solana-accounts-db", "solana-bucket-map", - "solana-config-program", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -5436,7 +5426,6 @@ dependencies = [ "solana-sdk", "solana-stake-program", "solana-svm", - "solana-system-program", "solana-vote-program", "static_assertions", "strum", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 0fc5a381fbda5e..d43acdf9e819f3 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -10,18 +10,13 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -arrayref = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } -byteorder = { workspace = true } bzip2 = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } -flate2 = { workspace = true } -fnv = { workspace = true } -im = { workspace = true, features = ["rayon", "serde"] } index_list = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } @@ -30,21 +25,16 @@ log = { workspace = true } lz4 = { workspace = true } memmap2 = { workspace = true } modular-bitfield = { workspace = true } -num-derive = { workspace = true } -num-traits = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } -percentage = { workspace = true } -qualifier_attr = { workspace = true } +qualifier_attr = { workspace = true, optional = true } rand = { workspace = true } rayon = { workspace = true } -regex = { workspace = true } seqlock = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } smallvec = { workspace = true, features = ["const_generics"] } solana-bucket-map = { workspace = true } -solana-config-program = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace 
= true } solana-measure = { workspace = true } @@ -53,13 +43,10 @@ solana-nohash-hasher = { workspace = true } solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } -solana-stake-program = { workspace = true } +solana-stake-program = { workspace = true, optional = true } solana-svm = { workspace = true } -solana-system-program = { workspace = true } -solana-vote-program = { workspace = true } +solana-vote-program = { workspace = true, optional = true } static_assertions = { workspace = true } -strum = { workspace = true, features = ["derive"] } -strum_macros = { workspace = true } tar = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } @@ -80,6 +67,8 @@ solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } +strum = { workspace = true, features = ["derive"] } +strum_macros = { workspace = true } test-case = { workspace = true } [package.metadata.docs.rs] @@ -89,7 +78,7 @@ targets = ["x86_64-unknown-linux-gnu"] rustc_version = { workspace = true } [features] -dev-context-only-utils = [] +dev-context-only-utils = ["dep:qualifier_attr", "dep:solana-stake-program", "dep:solana-vote-program"] [[bench]] name = "bench_hashing" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c211c81696541a..66f0c5ee36a431 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4630,18 +4630,13 @@ dependencies = [ name = "solana-accounts-db" version = "2.0.0" dependencies = [ - "arrayref", "bincode", "blake3", "bv", "bytemuck", - "byteorder 1.5.0", "bzip2", "crossbeam-channel", "dashmap", - "flate2", - "fnv", - "im", "index_list", "indexmap 2.2.5", "itertools", @@ -4650,22 +4645,16 @@ dependencies = [ "lz4", "memmap2", "modular-bitfield", - "num-derive 0.4.2", - "num-traits", "num_cpus", "num_enum", - "percentage", - "qualifier_attr", "rand 0.8.5", "rayon", - "regex", "rustc_version", "seqlock", "serde", "serde_derive", "smallvec", "solana-bucket-map", - "solana-config-program", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-measure", @@ -4674,13 +4663,8 @@ dependencies = [ "solana-program-runtime", "solana-rayon-threadlimit", "solana-sdk", - "solana-stake-program", "solana-svm", - "solana-system-program", - "solana-vote-program", "static_assertions", - "strum", - "strum_macros", "tar", "tempfile", "thiserror", From b2b159ad9204bf7744b78b8aa062636d1646f4af Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 26 Mar 2024 10:44:23 -0400 Subject: [PATCH 072/153] Adds bench for writing accounts to append vecs and hot storage (#421) --- accounts-db/Cargo.toml | 4 + accounts-db/benches/bench_accounts_file.rs | 93 ++++++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 accounts-db/benches/bench_accounts_file.rs diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index d43acdf9e819f3..ff38118806c3c9 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -80,6 +80,10 @@ rustc_version = { workspace = true } [features] dev-context-only-utils = ["dep:qualifier_attr", "dep:solana-stake-program", "dep:solana-vote-program"] +[[bench]] +name = "bench_accounts_file" +harness = false + [[bench]] name = "bench_hashing" harness = false diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs new file mode 100644 index 
00000000000000..808f1a630ee7df
--- /dev/null
+++ b/accounts-db/benches/bench_accounts_file.rs
@@ -0,0 +1,93 @@
+#![allow(clippy::arithmetic_side_effects)]
+use {
+    criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput},
+    solana_accounts_db::{
+        account_storage::meta::StorableAccountsWithHashesAndWriteVersions,
+        accounts_hash::AccountHash,
+        append_vec::{self, AppendVec},
+        tiered_storage::hot::HotStorageWriter,
+    },
+    solana_sdk::{
+        account::Account, clock::Slot, hash::Hash, pubkey::Pubkey,
+        rent_collector::RENT_EXEMPT_RENT_EPOCH,
+    },
+};
+
+const ACCOUNTS_COUNTS: [usize; 4] = [
+    1,      // the smallest count; will bench overhead
+    100,    // number of accounts written per slot on mnb (with *no* rent rewrites)
+    1_000,  // number of accounts written per slot on mnb (with rent rewrites)
+    10_000, // reasonable largest number of accounts written per slot
+];
+
+fn bench_write_accounts_file(c: &mut Criterion) {
+    let mut group = c.benchmark_group("write_accounts_file");
+
+    // most accounts on mnb are 165-200 bytes, so use that here too
+    let space = 200;
+    let lamports = 2_282_880; // the rent-exempt amount for 200 bytes of data
+    let temp_dir = tempfile::tempdir().unwrap();
+
+    for accounts_count in ACCOUNTS_COUNTS {
+        group.throughput(Throughput::Elements(accounts_count as u64));
+
+        let accounts: Vec<_> = std::iter::repeat_with(|| {
+            (
+                Pubkey::new_unique(),
+                Account::new_rent_epoch(
+                    lamports,
+                    space,
+                    &Pubkey::new_unique(),
+                    RENT_EXEMPT_RENT_EPOCH,
+                ),
+            )
+        })
+        .take(accounts_count)
+        .collect();
+        let accounts_refs: Vec<_> = accounts.iter().collect();
+        let accounts_data = (Slot::MAX, accounts_refs.as_slice());
+        let storable_accounts =
+            StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions(
+                &accounts_data,
+                vec![AccountHash(Hash::default()); accounts_count],
+                vec![0; accounts_count],
+            );
+
+        group.bench_function(BenchmarkId::new("append_vec", accounts_count), |b| {
+            b.iter_batched_ref(
+                || {
+                    let path = temp_dir.path().join(format!("append_vec_{accounts_count}"));
+                    let file_size = accounts.len() * (space + append_vec::STORE_META_OVERHEAD);
+                    AppendVec::new(&path, true, file_size)
+                },
+                |append_vec| {
+                    let res = append_vec.append_accounts(&storable_accounts, 0).unwrap();
+                    let accounts_written_count = res.len();
+                    assert_eq!(accounts_written_count, accounts_count);
+                },
+                BatchSize::SmallInput,
+            );
+        });
+
+        group.bench_function(BenchmarkId::new("hot_storage", accounts_count), |b| {
+            b.iter_batched_ref(
+                || {
+                    let path = temp_dir
+                        .path()
+                        .join(format!("hot_storage_{accounts_count}"));
+                    _ = std::fs::remove_file(&path);
+                    HotStorageWriter::new(path).unwrap()
+                },
+                |hot_storage| {
+                    let res = hot_storage.write_accounts(&storable_accounts, 0).unwrap();
+                    let accounts_written_count = res.len();
+                    assert_eq!(accounts_written_count, accounts_count);
+                },
+                BatchSize::SmallInput,
+            );
+        });
+    }
+}
+
+criterion_group!(benches, bench_write_accounts_file);
+criterion_main!(benches);

From bcf3d809c9d9f00e0d42507fa61cecc144d03aad Mon Sep 17 00:00:00 2001
From: Tyera
Date: Tue, 26 Mar 2024 10:13:25 -0600
Subject: [PATCH 073/153] Simd 118: rekey partitioned epoch rewards feature
 (#427)

Rekey partitioned epoch rewards feature

---
 sdk/src/feature_set.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index bbd68729fad10e..cd60ee536ea552 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -370,7 +370,7 @@ pub mod
update_rewards_from_cached_accounts { solana_sdk::declare_id!("28s7i3htzhahXQKqmS2ExzbEoUypg9krwvtK2M9UWXh9"); } pub mod enable_partitioned_epoch_reward { - solana_sdk::declare_id!("41tVp5qR1XwWRt5WifvtSQyuxtqQWJgEK8w91AtBqSwP"); + solana_sdk::declare_id!("9bn2vTJUsUcnpiZWbu2woSKtTGW3ErZC9ERv88SDqQjK"); } pub mod spl_token_v3_4_0 { From 01314a4fc339e94f97468c0682c53ccc613255a6 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Wed, 27 Mar 2024 02:51:54 +0900 Subject: [PATCH 074/153] [clap-v3-utils] Add `try_get_language` and deprecate `acquire_language` (#430) --- clap-v3-utils/src/keygen/mnemonic.rs | 39 +++++++++++++++++++++------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/clap-v3-utils/src/keygen/mnemonic.rs b/clap-v3-utils/src/keygen/mnemonic.rs index 2dc32d6ca514d7..f5eb032dd1cd64 100644 --- a/clap-v3-utils/src/keygen/mnemonic.rs +++ b/clap-v3-utils/src/keygen/mnemonic.rs @@ -51,19 +51,21 @@ pub fn try_get_word_count(matches: &ArgMatches) -> Result, Box() -> Arg<'a> { Arg::new(LANGUAGE_ARG.name) .long(LANGUAGE_ARG.long) - .value_parser(PossibleValuesParser::new([ - "english", - "chinese-simplified", - "chinese-traditional", - "japanese", - "spanish", - "korean", - "french", - "italian", - ])) + .value_parser(PossibleValuesParser::new(POSSIBLE_LANGUAGES)) .default_value("english") .value_name("LANGUAGE") .takes_value(true) @@ -77,6 +79,7 @@ pub fn no_passphrase_arg<'a>() -> Arg<'a> { .help(NO_PASSPHRASE_ARG.help) } +#[deprecated(since = "2.0.0", note = "Please use `try_get_language` instead")] pub fn acquire_language(matches: &ArgMatches) -> Language { match matches .get_one::(LANGUAGE_ARG.name) @@ -95,6 +98,22 @@ pub fn acquire_language(matches: &ArgMatches) -> Language { } } +pub fn try_get_language(matches: &ArgMatches) -> Result, Box> { + Ok(matches + .try_get_one::(LANGUAGE_ARG.name)? 
+ .map(|language| match language.as_str() { + "english" => Language::English, + "chinese-simplified" => Language::ChineseSimplified, + "chinese-traditional" => Language::ChineseTraditional, + "japanese" => Language::Japanese, + "spanish" => Language::Spanish, + "korean" => Language::Korean, + "french" => Language::French, + "italian" => Language::Italian, + _ => unreachable!(), + })) +} + pub fn no_passphrase_and_message() -> (String, String) { (NO_PASSPHRASE.to_string(), "".to_string()) } From 21b682188511b208cae16155acc258fd1da9db0f Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 26 Mar 2024 14:44:17 -0400 Subject: [PATCH 075/153] Uses Into for path in AppendVec::new() (#433) --- accounts-db/benches/bench_accounts_file.rs | 2 +- accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/accounts_file.rs | 2 +- accounts-db/src/append_vec.rs | 23 ++++++++++++---------- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 808f1a630ee7df..3a05b0139f473e 100644 --- a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -58,7 +58,7 @@ fn bench_write_accounts_file(c: &mut Criterion) { || { let path = temp_dir.path().join(format!("append_vec_{accounts_count}")); let file_size = accounts.len() * (space + append_vec::STORE_META_OVERHEAD); - AppendVec::new(&path, true, file_size) + AppendVec::new(path, true, file_size) }, |append_vec| { let res = append_vec.append_accounts(&storable_accounts, 0).unwrap(); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index d172fd4c48cb40..7f29edec19c949 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1034,7 +1034,7 @@ impl AccountStorageEntry { pub fn new(path: &Path, slot: Slot, id: AccountsFileId, file_size: u64) -> Self { let tail = AccountsFile::file_name(slot, id); let path = Path::new(path).join(tail); - let accounts = AccountsFile::AppendVec(AppendVec::new(&path, true, file_size as usize)); + let accounts = AccountsFile::AppendVec(AppendVec::new(path, true, file_size as usize)); Self { id, diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 6371be6083cc84..54bf8b76cafc55 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -67,7 +67,7 @@ impl AccountsFile { /// The second element of the returned tuple is the number of accounts in the /// accounts file. 
pub fn new_from_file(path: impl AsRef<Path>, current_len: usize) -> Result<(Self, usize)> {
-        let (av, num_accounts) = AppendVec::new_from_file(path, current_len)?;
+        let (av, num_accounts) = AppendVec::new_from_file(path.as_ref(), current_len)?;
         Ok((Self::AppendVec(av), num_accounts))
     }

diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs
index bf91ca0d111523..b2499cb2cb0352 100644
--- a/accounts-db/src/append_vec.rs
+++ b/accounts-db/src/append_vec.rs
@@ -29,7 +29,7 @@ use {
     fs::{remove_file, OpenOptions},
     io::{Seek, SeekFrom, Write},
     mem,
-    path::{Path, PathBuf},
+    path::PathBuf,
     sync::{
         atomic::{AtomicU64, AtomicUsize, Ordering},
         Mutex,
@@ -237,19 +237,20 @@ impl Drop for AppendVec {
 }

 impl AppendVec {
-    pub fn new(file: &Path, create: bool, size: usize) -> Self {
+    pub fn new(file: impl Into<PathBuf>, create: bool, size: usize) -> Self {
+        let file = file.into();
         let initial_len = 0;
         AppendVec::sanitize_len_and_size(initial_len, size).unwrap();

         if create {
-            let _ignored = remove_file(file);
+            let _ignored = remove_file(&file);
         }

         let mut data = OpenOptions::new()
             .read(true)
             .write(true)
             .create(create)
-            .open(file)
+            .open(&file)
             .map_err(|e| {
                 panic!(
                     "Unable to {} data file {} in current dir({:?}): {:?}",
@@ -282,7 +283,7 @@ impl AppendVec {
         APPEND_VEC_MMAPPED_FILES_OPEN.fetch_add(1, Ordering::Relaxed);

         AppendVec {
-            path: file.to_path_buf(),
+            path: file,
             map,
             // This mutex forces append to be single threaded, but concurrent with reads
             // See UNSAFE usage in `append_ptr`
@@ -347,15 +348,16 @@ impl AppendVec {
         format!("{slot}.{id}")
     }

-    pub fn new_from_file<P: AsRef<Path>>(path: P, current_len: usize) -> Result<(Self, usize)> {
-        let new = Self::new_from_file_unchecked(&path, current_len)?;
+    pub fn new_from_file(path: impl Into<PathBuf>, current_len: usize) -> Result<(Self, usize)> {
+        let path = path.into();
+        let new = Self::new_from_file_unchecked(path, current_len)?;
         let (sanitized, num_accounts) = new.sanitize_layout_and_length();
         if !sanitized {
             // This info shows the failing appendvec file path. It helps with debugging
             // the appendvec data corruption issues related to recycling.
return Err(AccountsFileError::AppendVecError( - AppendVecError::IncorrectLayout(path.as_ref().to_path_buf()), + AppendVecError::IncorrectLayout(new.path.clone()), )); } @@ -363,7 +365,8 @@ impl AppendVec { } /// Creates an appendvec from file without performing sanitize checks or counting the number of accounts - pub fn new_from_file_unchecked>(path: P, current_len: usize) -> Result { + pub fn new_from_file_unchecked(path: impl Into, current_len: usize) -> Result { + let path = path.into(); let file_size = std::fs::metadata(&path)?.len(); Self::sanitize_len_and_size(current_len, file_size as usize)?; @@ -384,7 +387,7 @@ impl AppendVec { APPEND_VEC_MMAPPED_FILES_OPEN.fetch_add(1, Ordering::Relaxed); Ok(AppendVec { - path: path.as_ref().to_path_buf(), + path, map, append_lock: Mutex::new(()), current_len: AtomicUsize::new(current_len), From 24c55f39ee75c4a64568a31088ad33129b15a70f Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 27 Mar 2024 00:08:56 +0400 Subject: [PATCH 076/153] Replace Vec::new() with Vec::with_capacity(2) (#413) --- svm/src/account_loader.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 374fc756de31da..87c18b9717c9bc 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -317,7 +317,7 @@ fn load_transaction_accounts( .instructions() .iter() .map(|instruction| { - let mut account_indices = Vec::new(); + let mut account_indices = Vec::with_capacity(2); let mut program_index = instruction.program_id_index as usize; // This command may never return error, because the transaction is sanitized let (program_id, program_account) = accounts From 21fbde741ad9698f22970f963e362414c262d163 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 26 Mar 2024 16:19:06 -0400 Subject: [PATCH 077/153] Uses Into for path in AccountsFile::new_from_file() (#434) --- accounts-db/src/accounts_file.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 54bf8b76cafc55..e962c87331738b 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -12,11 +12,7 @@ use { }, }, solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey}, - std::{ - borrow::Borrow, - mem, - path::{Path, PathBuf}, - }, + std::{borrow::Borrow, mem, path::PathBuf}, thiserror::Error, }; @@ -66,8 +62,8 @@ impl AccountsFile { /// /// The second element of the returned tuple is the number of accounts in the /// accounts file. 
- pub fn new_from_file(path: impl AsRef, current_len: usize) -> Result<(Self, usize)> { - let (av, num_accounts) = AppendVec::new_from_file(path.as_ref(), current_len)?; + pub fn new_from_file(path: impl Into, current_len: usize) -> Result<(Self, usize)> { + let (av, num_accounts) = AppendVec::new_from_file(path, current_len)?; Ok((Self::AppendVec(av), num_accounts)) } From 7c8a287ff29e81f17eca0ef34d7347bd6a5adb54 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 26 Mar 2024 13:52:45 -0700 Subject: [PATCH 078/153] Bugfix: Report scheduler slot timing metrics under correct name (#376) --- .../banking_stage/transaction_scheduler/scheduler_metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index 33999f0ef20a18..9ad7195c3d3b52 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -290,7 +290,7 @@ impl SlotSchedulerTimingMetrics { // Only report if there was an assigned slot. if self.slot.is_some() { self.metrics - .report("banking_stage_scheduler_slot_counts", self.slot); + .report("banking_stage_scheduler_slot_timing", self.slot); } self.metrics.reset(); self.slot = slot; From 80d3200f4ae54ba7c5b1fbf68608aaa6f0796d6d Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Tue, 26 Mar 2024 19:19:26 -0400 Subject: [PATCH 079/153] prioritization fee cache: remove not required locks (#272) * prioritization fee cache: remove not required locks * update HashMap to BTreeMap * fix clippy * add type alias * apply name change * check that cache is empty * remove sleep * remove outdated commented tests * fmt * extra warn --- runtime/src/prioritization_fee.rs | 12 +- runtime/src/prioritization_fee_cache.rs | 388 +++++++++++------------- 2 files changed, 185 insertions(+), 215 deletions(-) diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index 90cc66b981ce3a..2f7618390b94ff 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -168,7 +168,7 @@ impl PrioritizationFee { pub fn update( &mut self, transaction_fee: u64, - writable_accounts: &[Pubkey], + writable_accounts: Vec, ) -> Result<(), PrioritizationFeeError> { let (_, update_time) = measure!( { @@ -177,9 +177,9 @@ impl PrioritizationFee { self.min_transaction_fee = transaction_fee; } - for write_account in writable_accounts.iter() { + for write_account in writable_accounts { self.min_writable_account_fees - .entry(*write_account) + .entry(write_account) .and_modify(|write_lock_fee| { *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) }) @@ -284,7 +284,7 @@ mod tests { // [5, a, b ] --> [5, 5, 5, nil ] { assert!(prioritization_fee - .update(5, &[write_account_a, write_account_b]) + .update(5, vec![write_account_a, write_account_b]) .is_ok()); assert_eq!(5, prioritization_fee.get_min_transaction_fee().unwrap()); assert_eq!( @@ -310,7 +310,7 @@ mod tests { // [9, b, c ] --> [5, 5, 5, 9 ] { assert!(prioritization_fee - .update(9, &[write_account_b, write_account_c]) + .update(9, vec![write_account_b, write_account_c]) .is_ok()); assert_eq!(5, prioritization_fee.get_min_transaction_fee().unwrap()); assert_eq!( @@ -339,7 +339,7 @@ mod tests { // [2, a, c ] --> [2, 2, 5, 2 ] { assert!(prioritization_fee - .update(2, &[write_account_a, write_account_c]) + .update(2, vec![write_account_a, write_account_c]) 
.is_ok());
             assert_eq!(2, prioritization_fee.get_min_transaction_fee().unwrap());
             assert_eq!(
diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs
index 0490f594451b9c..ff911eb9efa842 100644
--- a/runtime/src/prioritization_fee_cache.rs
+++ b/runtime/src/prioritization_fee_cache.rs
@@ -1,7 +1,6 @@
 use {
     crate::{bank::Bank, compute_budget_details::GetComputeBudgetDetails, prioritization_fee::*},
     crossbeam_channel::{unbounded, Receiver, Sender},
-    dashmap::DashMap,
     log::*,
     lru::LruCache,
     solana_measure::measure,
@@ -11,7 +10,7 @@ use {
         transaction::SanitizedTransaction,
     },
     std::{
-        collections::HashMap,
+        collections::{BTreeMap, HashMap},
         sync::{
             atomic::{AtomicU64, Ordering},
             Arc, RwLock,
@@ -25,6 +24,11 @@ use {
 /// land a transaction in the current block.
 const MAX_NUM_RECENT_BLOCKS: u64 = 150;

+/// There is no guarantee that slots come in order, so we keep extra slots in the buffer.
+const MAX_UNFINALIZED_SLOTS: u64 = 128;
+
+type UnfinalizedPrioritizationFees = BTreeMap<Slot, HashMap<BankId, PrioritizationFee>>;
+
 #[derive(Debug, Default)]
 struct PrioritizationFeeCacheMetrics {
     // Count of transactions that successfully updated each slot's prioritization fee cache.
@@ -123,7 +127,7 @@ enum CacheServiceUpdate {
         slot: Slot,
         bank_id: BankId,
         transaction_fee: u64,
-        writable_accounts: Arc<Vec<Pubkey>>,
+        writable_accounts: Vec<Pubkey>,
     },
     BankFinalized {
         slot: Slot,
@@ -132,16 +136,12 @@ enum CacheServiceUpdate {
     Exit,
 }

-/// Potentially there are more than one bank that updates Prioritization Fee
-/// for a slot. The updates are tracked and finalized by bank_id.
-type SlotPrioritizationFee = DashMap<BankId, PrioritizationFee>;
-
 /// Stores up to MAX_NUM_RECENT_BLOCKS recent blocks' prioritization fee,
 /// A separate internal thread `service_thread` handles additional tasks when a bank is frozen,
 /// and collecting stats and reporting metrics.
 #[derive(Debug)]
 pub struct PrioritizationFeeCache {
-    cache: Arc<RwLock<LruCache<Slot, Arc<SlotPrioritizationFee>>>>,
+    cache: Arc<RwLock<LruCache<Slot, PrioritizationFee>>>,
     service_thread: Option<JoinHandle<()>>,
     sender: Sender<CacheServiceUpdate>,
     metrics: Arc<PrioritizationFeeCacheMetrics>,
@@ -189,22 +189,6 @@ impl PrioritizationFeeCache {
         }
     }

-    /// Get prioritization fee entry, create new entry if necessary
-    fn get_prioritization_fee(
-        cache: Arc<RwLock<LruCache<Slot, Arc<SlotPrioritizationFee>>>>,
-        slot: &Slot,
-    ) -> Arc<SlotPrioritizationFee> {
-        let mut cache = cache.write().unwrap();
-        match cache.get(slot) {
-            Some(entry) => Arc::clone(entry),
-            None => {
-                let entry = Arc::new(SlotPrioritizationFee::default());
-                cache.put(*slot, Arc::clone(&entry));
-                entry
-            }
-        }
-    }
-
     /// Update with a list of non-vote transactions' compute_budget_details and account_locks; Only
     /// transactions that have both valid compute_budget_details and account_locks will be used to update
     /// fee_cache asynchronously.
@@ -235,14 +219,12 @@ impl PrioritizationFeeCache {
                 continue;
             }

-            let writable_accounts = Arc::new(
-                account_locks
-                    .unwrap()
-                    .writable
-                    .iter()
-                    .map(|key| **key)
-                    .collect::<Vec<_>>(),
-            );
+            let writable_accounts = account_locks
+                .unwrap()
+                .writable
+                .iter()
+                .map(|key| **key)
+                .collect::<Vec<_>>();

             self.sender
                 .send(CacheServiceUpdate::TransactionUpdate {
@@ -282,48 +264,53 @@ impl PrioritizationFeeCache {

     /// Internal function is invoked by worker thread to update slot's minimum prioritization fee,
-    /// Cache lock contends here.
fn update_cache(
-        cache: Arc<RwLock<LruCache<Slot, Arc<SlotPrioritizationFee>>>>,
-        slot: &Slot,
-        bank_id: &BankId,
+        unfinalized: &mut UnfinalizedPrioritizationFees,
+        slot: Slot,
+        bank_id: BankId,
         transaction_fee: u64,
-        writable_accounts: Arc<Vec<Pubkey>>,
-        metrics: Arc<PrioritizationFeeCacheMetrics>,
+        writable_accounts: Vec<Pubkey>,
+        metrics: &PrioritizationFeeCacheMetrics,
     ) {
-        let (slot_prioritization_fee, cache_lock_time) =
-            measure!(Self::get_prioritization_fee(cache, slot), "cache_lock_time");
-
         let (_, entry_update_time) = measure!(
             {
-                let mut block_prioritization_fee = slot_prioritization_fee
-                    .entry(*bank_id)
-                    .or_insert(PrioritizationFee::default());
-                block_prioritization_fee.update(transaction_fee, &writable_accounts)
+                let _ = unfinalized
+                    .entry(slot)
+                    .or_default()
+                    .entry(bank_id)
+                    .or_default()
+                    .update(transaction_fee, writable_accounts);
             },
             "entry_update_time"
         );
-        metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us());
         metrics.accumulate_total_entry_update_elapsed_us(entry_update_time.as_us());
         metrics.accumulate_successful_transaction_update_count(1);
     }

     fn finalize_slot(
-        cache: Arc<RwLock<LruCache<Slot, Arc<SlotPrioritizationFee>>>>,
-        slot: &Slot,
-        bank_id: &BankId,
-        metrics: Arc<PrioritizationFeeCacheMetrics>,
+        unfinalized: &mut UnfinalizedPrioritizationFees,
+        cache: &RwLock<LruCache<Slot, PrioritizationFee>>,
+        slot: Slot,
+        bank_id: BankId,
+        metrics: &PrioritizationFeeCacheMetrics,
     ) {
-        let (slot_prioritization_fee, cache_lock_time) =
-            measure!(Self::get_prioritization_fee(cache, slot), "cache_lock_time");
-
         // prune cache by evicting write account entry from prioritization fee if its fee is less
         // or equal to block's minimum transaction fee, because they are irrelevant in calculating
         // block minimum fee.
-        let (result, slot_finalize_time) = measure!(
+        let (slot_prioritization_fee, slot_finalize_time) = measure!(
             {
+                // remove unfinalized slots
+                *unfinalized = unfinalized
+                    .split_off(&slot.checked_sub(MAX_UNFINALIZED_SLOTS).unwrap_or_default());
+
+                let Some(mut slot_prioritization_fee) = unfinalized.remove(&slot) else {
+                    warn!("Finalized slot {slot} not found");
+                    return;
+                };
+
                 // Only retain priority fee reported from optimistically confirmed bank
                 let pre_purge_bank_count = slot_prioritization_fee.len() as u64;
-                slot_prioritization_fee.retain(|id, _| id == bank_id);
-                let post_purge_bank_count = slot_prioritization_fee.len() as u64;
+                let mut prioritization_fee = slot_prioritization_fee.remove(&bank_id);
+                let post_purge_bank_count = prioritization_fee.as_ref().map(|_| 1).unwrap_or(0);
                 metrics.accumulate_total_purged_duplicated_bank_count(
                     pre_purge_bank_count.saturating_sub(post_purge_bank_count),
                 );
@@ -333,31 +320,43 @@ impl PrioritizationFeeCache {
                     warn!("Finalized bank has empty prioritization fee cache.
slot {slot} bank id {bank_id}"); } - let mut block_prioritization_fee = slot_prioritization_fee - .entry(*bank_id) - .or_insert(PrioritizationFee::default()); - let result = block_prioritization_fee.mark_block_completed(); - block_prioritization_fee.report_metrics(*slot); - result + if let Some(prioritization_fee) = &mut prioritization_fee { + if let Err(err) = prioritization_fee.mark_block_completed() { + error!( + "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", + err + ); + } + prioritization_fee.report_metrics(slot); + } + prioritization_fee }, "slot_finalize_time" ); - metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us()); metrics.accumulate_total_block_finalize_elapsed_us(slot_finalize_time.as_us()); - if let Err(err) = result { - error!( - "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", - err + // Create new cache entry + if let Some(slot_prioritization_fee) = slot_prioritization_fee { + let (_, cache_lock_time) = measure!( + { + let mut cache = cache.write().unwrap(); + cache.put(slot, slot_prioritization_fee); + }, + "cache_lock_time" ); + metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us()); } } fn service_loop( - cache: Arc>>>, + cache: Arc>>, receiver: Receiver, metrics: Arc, ) { + // Potentially there are more than one bank that updates Prioritization Fee + // for a slot. The updates are tracked and finalized by bank_id. + let mut unfinalized = UnfinalizedPrioritizationFees::new(); + for update in receiver.iter() { match update { CacheServiceUpdate::TransactionUpdate { @@ -366,16 +365,15 @@ impl PrioritizationFeeCache { transaction_fee, writable_accounts, } => Self::update_cache( - cache.clone(), - &slot, - &bank_id, + &mut unfinalized, + slot, + bank_id, transaction_fee, writable_accounts, - metrics.clone(), + &metrics, ), CacheServiceUpdate::BankFinalized { slot, bank_id } => { - Self::finalize_slot(cache.clone(), &slot, &bank_id, metrics.clone()); - + Self::finalize_slot(&mut unfinalized, &cache, slot, bank_id, &metrics); metrics.report(slot); } CacheServiceUpdate::Exit => { @@ -391,39 +389,29 @@ impl PrioritizationFeeCache { .read() .unwrap() .iter() - .filter(|(_slot, slot_prioritization_fee)| { - slot_prioritization_fee - .iter() - .any(|prioritization_fee| prioritization_fee.is_finalized()) - }) + .filter(|(_slot, slot_prioritization_fee)| slot_prioritization_fee.is_finalized()) .count() } - pub fn get_prioritization_fees(&self, account_keys: &[Pubkey]) -> HashMap { + pub fn get_prioritization_fees(&self, account_keys: &[Pubkey]) -> Vec<(Slot, u64)> { self.cache .read() .unwrap() .iter() - .filter_map(|(slot, slot_prioritization_fee)| { - slot_prioritization_fee - .iter() - .find_map(|prioritization_fee| { - prioritization_fee.is_finalized().then(|| { - let mut fee = prioritization_fee - .get_min_transaction_fee() - .unwrap_or_default(); - for account_key in account_keys { - if let Some(account_fee) = - prioritization_fee.get_writable_account_fee(account_key) - { - fee = std::cmp::max(fee, account_fee); - } - } - Some((*slot, fee)) - }) - }) + .filter(|(_slot, slot_prioritization_fee)| slot_prioritization_fee.is_finalized()) + .map(|(slot, slot_prioritization_fee)| { + let mut fee = slot_prioritization_fee + .get_min_transaction_fee() + .unwrap_or_default(); + for account_key in account_keys { + if let Some(account_fee) = + slot_prioritization_fee.get_writable_account_fee(account_key) + { + fee = std::cmp::max(fee, account_fee); + } + } + (*slot, fee) }) - .flatten() .collect() } } @@ -493,18 +481,20 @@ 
mod tests { slot: Slot, bank_id: BankId, ) { + // mark as finalized prioritization_fee_cache.finalize_priority_fee(slot, bank_id); - let fee = PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &slot, - ); // wait till finalization is done - while !fee - .get(&bank_id) - .map_or(false, |block_fee| block_fee.is_finalized()) - { - std::thread::sleep(std::time::Duration::from_millis(100)); + loop { + let mut cache = prioritization_fee_cache.cache.write().unwrap(); + if let Some(slot_cache) = cache.get(&slot) { + if slot_cache.is_finalized() { + return; + } + } + drop(cache); + + std::thread::sleep(std::time::Duration::from_millis(10)); } } @@ -536,31 +526,17 @@ mod tests { let prioritization_fee_cache = PrioritizationFeeCache::default(); sync_update(&prioritization_fee_cache, bank.clone(), txs.iter()); - // assert block minimum fee and account a, b, c fee accordingly + // assert empty cache { - let fee = PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &slot, - ); - let fee = fee.get(&bank.bank_id()).unwrap(); - assert_eq!(2, fee.get_min_transaction_fee().unwrap()); - assert_eq!(2, fee.get_writable_account_fee(&write_account_a).unwrap()); - assert_eq!(5, fee.get_writable_account_fee(&write_account_b).unwrap()); - assert_eq!(2, fee.get_writable_account_fee(&write_account_c).unwrap()); - // assert unknown account d fee - assert!(fee - .get_writable_account_fee(&Pubkey::new_unique()) - .is_none()); + let mut lock = prioritization_fee_cache.cache.write().unwrap(); + assert!(lock.get(&slot).is_none()); } // assert after prune, account a and c should be removed from cache to save space { sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot, bank.bank_id()); - let fee = PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &slot, - ); - let fee = fee.get(&bank.bank_id()).unwrap(); + let mut lock = prioritization_fee_cache.cache.write().unwrap(); + let fee = lock.get(&slot).unwrap(); assert_eq!(2, fee.get_min_transaction_fee().unwrap()); assert!(fee.get_writable_account_fee(&write_account_a).is_none()); assert_eq!(5, fee.get_writable_account_fee(&write_account_b).unwrap()); @@ -572,35 +548,51 @@ mod tests { fn test_available_block_count() { let prioritization_fee_cache = PrioritizationFeeCache::default(); - assert!(PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &1 - ) - .entry(1) - .or_default() - .mark_block_completed() - .is_ok()); - assert!(PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &2 - ) - .entry(2) - .or_default() - .mark_block_completed() - .is_ok()); - // add slot 3 entry to cache, but not finalize it - PrioritizationFeeCache::get_prioritization_fee(prioritization_fee_cache.cache.clone(), &3) - .entry(3) - .or_default(); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank0 = Bank::new_for_benches(&genesis_config); + let bank_forks = BankForks::new_rw_arc(bank0); + let bank = bank_forks.read().unwrap().working_bank(); + let collector = solana_sdk::pubkey::new_rand(); + + let bank1 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 1)); + sync_update( + &prioritization_fee_cache, + bank1.clone(), + vec![build_sanitized_transaction_for_test( + 1, + &Pubkey::new_unique(), + &Pubkey::new_unique(), + )] + .iter(), + ); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 1, bank1.bank_id()); + + // add slot 2 entry to cache, but not finalize it + let bank2 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 3)); + let txs = vec![build_sanitized_transaction_for_test( + 1, + &Pubkey::new_unique(), + &Pubkey::new_unique(), + )]; + sync_update(&prioritization_fee_cache, bank2.clone(), txs.iter()); + + let bank3 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 2)); + sync_update( + &prioritization_fee_cache, + bank3.clone(), + vec![build_sanitized_transaction_for_test( + 1, + &Pubkey::new_unique(), + &Pubkey::new_unique(), + )] + .iter(), + ); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2, bank3.bank_id()); // assert available block count should be 2 finalized blocks assert_eq!(2, prioritization_fee_cache.available_block_count()); } - fn hashmap_of(vec: Vec<(Slot, u64)>) -> HashMap { - vec.into_iter().collect() - } - #[test] fn test_get_prioritization_fees() { solana_logger::setup(); @@ -672,28 +664,28 @@ mod tests { // after block is completed sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 1, bank1.bank_id()); assert_eq!( - hashmap_of(vec![(1, 1)]), + vec![(1, 1)], prioritization_fee_cache.get_prioritization_fees(&[]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]) ); assert_eq!( - hashmap_of(vec![(1, 1)]), + vec![(1, 1)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -715,28 +707,28 @@ mod tests { sync_update(&prioritization_fee_cache, bank2.clone(), txs.iter()); // before block is marked as completed assert_eq!( - hashmap_of(vec![(1, 1)]), + vec![(1, 1)], prioritization_fee_cache.get_prioritization_fees(&[]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]) ); assert_eq!( - hashmap_of(vec![(1, 1)]), + vec![(1, 1)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]) ); assert_eq!( - hashmap_of(vec![(1, 2)]), + vec![(1, 2)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -746,28 +738,28 @@ mod tests { // after block is completed sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2, bank2.bank_id()); assert_eq!( - 
hashmap_of(vec![(2, 3), (1, 1)]), + vec![(2, 3), (1, 1)], prioritization_fee_cache.get_prioritization_fees(&[]), ); assert_eq!( - hashmap_of(vec![(2, 3), (1, 2)]), + vec![(2, 3), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 2)]), + vec![(2, 4), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 1)]), + vec![(2, 4), (1, 1)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 2)]), + vec![(2, 4), (1, 2)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 2)]), + vec![(2, 4), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -789,28 +781,28 @@ mod tests { sync_update(&prioritization_fee_cache, bank3.clone(), txs.iter()); // before block is marked as completed assert_eq!( - hashmap_of(vec![(2, 3), (1, 1)]), + vec![(2, 3), (1, 1)], prioritization_fee_cache.get_prioritization_fees(&[]), ); assert_eq!( - hashmap_of(vec![(2, 3), (1, 2)]), + vec![(2, 3), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 2)]), + vec![(2, 4), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 1)]), + vec![(2, 4), (1, 1)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 2)]), + vec![(2, 4), (1, 2)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]), ); assert_eq!( - hashmap_of(vec![(2, 4), (1, 2)]), + vec![(2, 4), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -820,28 +812,28 @@ mod tests { // after block is completed sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 3, bank3.bank_id()); assert_eq!( - hashmap_of(vec![(3, 5), (2, 3), (1, 1)]), + vec![(3, 5), (2, 3), (1, 1)], prioritization_fee_cache.get_prioritization_fees(&[]), ); assert_eq!( - hashmap_of(vec![(3, 6), (2, 3), (1, 2)]), + vec![(3, 6), (2, 3), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]), ); assert_eq!( - hashmap_of(vec![(3, 5), (2, 4), (1, 2)]), + vec![(3, 5), (2, 4), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]), ); assert_eq!( - hashmap_of(vec![(3, 6), (2, 4), (1, 1)]), + vec![(3, 6), (2, 4), (1, 1)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]), ); assert_eq!( - hashmap_of(vec![(3, 6), (2, 4), (1, 2)]), + vec![(3, 6), (2, 4), (1, 2)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]), ); assert_eq!( - hashmap_of(vec![(3, 6), (2, 4), (1, 2)]), + vec![(3, 6), (2, 4), (1, 2)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -867,7 +859,7 @@ mod tests { let collector = solana_sdk::pubkey::new_rand(); let slot: Slot = 999; let bank1 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, slot)); - let bank2 = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank2 = Arc::new(Bank::new_from_parent(bank, &collector, slot + 1)); let prioritization_fee_cache = PrioritizationFeeCache::default(); @@ -882,13 +874,6 @@ mod tests { ), ]; sync_update(&prioritization_fee_cache, bank1.clone(), txs.iter()); - - let slot_prioritization_fee 
= PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &slot, - ); - assert_eq!(1, slot_prioritization_fee.len()); - assert!(slot_prioritization_fee.contains_key(&bank1.bank_id())); } // Assert after add transactions for bank2 of slot 1 @@ -902,51 +887,36 @@ mod tests { ), ]; sync_update(&prioritization_fee_cache, bank2.clone(), txs.iter()); - - let slot_prioritization_fee = PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &slot, - ); - assert_eq!(2, slot_prioritization_fee.len()); - assert!(slot_prioritization_fee.contains_key(&bank1.bank_id())); - assert!(slot_prioritization_fee.contains_key(&bank2.bank_id())); } // Assert after finalize with bank1 of slot 1, { sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot, bank1.bank_id()); - let slot_prioritization_fee = PrioritizationFeeCache::get_prioritization_fee( - prioritization_fee_cache.cache.clone(), - &slot, - ); - assert_eq!(1, slot_prioritization_fee.len()); - assert!(slot_prioritization_fee.contains_key(&bank1.bank_id())); - // and data available for query are from bank1 assert_eq!( - hashmap_of(vec![(slot, 1)]), + vec![(slot, 1)], prioritization_fee_cache.get_prioritization_fees(&[]) ); assert_eq!( - hashmap_of(vec![(slot, 2)]), + vec![(slot, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]) ); assert_eq!( - hashmap_of(vec![(slot, 2)]), + vec![(slot, 2)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]) ); assert_eq!( - hashmap_of(vec![(slot, 1)]), + vec![(slot, 1)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]) ); assert_eq!( - hashmap_of(vec![(slot, 2)]), + vec![(slot, 2)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]) ); assert_eq!( - hashmap_of(vec![(slot, 2)]), + vec![(slot, 2)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, From 9cd90751f0105665328691b2cf72d2a8310b09ed Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Wed, 27 Mar 2024 10:00:04 -0300 Subject: [PATCH 080/153] Add `SanitizedTransaction` builder to SVM (#442) Add SanitizedTransaction builder --- svm/tests/integration_test.rs | 154 ++++++++++++------------------- svm/tests/transaction_builder.rs | 136 +++++++++++++++++++++++++++ 2 files changed, 195 insertions(+), 95 deletions(-) create mode 100644 svm/tests/transaction_builder.rs diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 8ecc56bd3703d4..4104ed4012b1e5 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -1,7 +1,7 @@ #![cfg(test)] use { - crate::mock_bank::MockBankCallback, + crate::{mock_bank::MockBankCallback, transaction_builder::SanitizedTransactionBuilder}, solana_bpf_loader_program::syscalls::{ SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, SyscallMemset, SyscallSetReturnData, @@ -26,13 +26,12 @@ use { epoch_schedule::EpochSchedule, fee::FeeStructure, hash::Hash, - instruction::CompiledInstruction, - message::{Message, MessageHeader}, + instruction::AccountMeta, native_loader, pubkey::Pubkey, signature::Signature, sysvar::SysvarId, - transaction::{SanitizedTransaction, Transaction, TransactionError}, + transaction::{SanitizedTransaction, TransactionError}, }, solana_svm::{ account_loader::TransactionCheckResult, @@ -44,6 +43,7 @@ use { }, std::{ cmp::Ordering, + collections::HashMap, env, fs::{self, File}, 
io::Read, @@ -54,6 +54,7 @@ use { // This module contains the implementation of TransactionProcessingCallback mod mock_bank; +mod transaction_builder; const BPF_LOADER_NAME: &str = "solana_bpf_loader_program"; const SYSTEM_PROGRAM_NAME: &str = "system_program"; @@ -228,33 +229,18 @@ fn load_program(name: String) -> Vec { fn prepare_transactions( mock_bank: &mut MockBankCallback, ) -> (Vec, Vec) { + let mut transaction_builder = SanitizedTransactionBuilder::default(); let mut all_transactions = Vec::new(); let mut transaction_checks = Vec::new(); // A transaction that works without any account let key1 = Pubkey::new_unique(); let fee_payer = Pubkey::new_unique(); - let message = Message { - account_keys: vec![fee_payer, key1], - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 0, - }, - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![], - data: vec![], - }], - recent_blockhash: Hash::default(), - }; + transaction_builder.create_instruction(key1, Vec::new(), HashMap::new(), Vec::new()); - let transaction = Transaction { - signatures: vec![Signature::new_unique()], - message, - }; let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique())); + all_transactions.push(sanitized_transaction); transaction_checks.push((Ok(()), None, Some(20))); @@ -283,36 +269,32 @@ fn prepare_transactions( let recipient = Pubkey::new_unique(); let fee_payer = Pubkey::new_unique(); let system_account = Pubkey::from([0u8; 32]); - let message = Message { - account_keys: vec![ - fee_payer, - sender, - transfer_program_account, - recipient, - system_account, - ], - header: MessageHeader { - // The signers must appear in the `account_keys` vector in positions whose index is - // less than `num_required_signatures` - num_required_signatures: 2, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 0, - }, - instructions: vec![CompiledInstruction { - program_id_index: 2, - accounts: vec![1, 3, 4], - data: vec![0, 0, 0, 0, 0, 0, 0, 10], - }], - recent_blockhash: Hash::default(), - }; - let transaction = Transaction { - signatures: vec![Signature::new_unique(), Signature::new_unique()], - message, - }; + transaction_builder.create_instruction( + transfer_program_account, + vec![ + AccountMeta { + pubkey: sender, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey: recipient, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: system_account, + is_signer: false, + is_writable: false, + }, + ], + HashMap::from([(sender, Signature::new_unique())]), + vec![0, 0, 0, 0, 0, 0, 0, 10], + ); let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique())); all_transactions.push(sanitized_transaction); transaction_checks.push((Ok(()), None, Some(20))); @@ -355,27 +337,11 @@ fn prepare_transactions( // A program that utilizes a Sysvar let program_account = Pubkey::new_unique(); let fee_payer = Pubkey::new_unique(); - let message = Message { - account_keys: vec![fee_payer, program_account], - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 0, - }, - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![], - data: vec![], - }], - 
recent_blockhash: Hash::default(), - }; + transaction_builder.create_instruction(program_account, Vec::new(), HashMap::new(), Vec::new()); - let transaction = Transaction { - signatures: vec![Signature::new_unique()], - message, - }; let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique())); + all_transactions.push(sanitized_transaction); transaction_checks.push((Ok(()), None, Some(20))); @@ -407,33 +373,31 @@ fn prepare_transactions( while data.len() < 8 { data.insert(0, 0); } - - let message = Message { - account_keys: vec![ - fee_payer, - sender, - transfer_program_account, - recipient, - system_account, + transaction_builder.create_instruction( + transfer_program_account, + vec![ + AccountMeta { + pubkey: sender, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey: recipient, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: system_account, + is_signer: false, + is_writable: false, + }, ], - header: MessageHeader { - num_required_signatures: 2, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 0, - }, - instructions: vec![CompiledInstruction { - program_id_index: 2, - accounts: vec![1, 3, 4], - data, - }], - recent_blockhash: Hash::default(), - }; - let transaction = Transaction { - signatures: vec![Signature::new_unique(), Signature::new_unique()], - message, - }; + HashMap::from([(sender, Signature::new_unique())]), + data, + ); + let sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique())); all_transactions.push(sanitized_transaction.clone()); transaction_checks.push((Ok(()), None, Some(20))); @@ -531,7 +495,7 @@ fn svm_integration() { .is_ok()); // The SVM does not commit the account changes in MockBank - let recipient_key = transactions[1].message().account_keys()[3]; + let recipient_key = transactions[1].message().account_keys()[2]; let recipient_data = result.loaded_transactions[1] .0 .as_ref() diff --git a/svm/tests/transaction_builder.rs b/svm/tests/transaction_builder.rs new file mode 100644 index 00000000000000..ca8ef79a9b73c9 --- /dev/null +++ b/svm/tests/transaction_builder.rs @@ -0,0 +1,136 @@ +use { + solana_sdk::{ + hash::Hash, + instruction::{AccountMeta, CompiledInstruction}, + message::{Message, MessageHeader}, + pubkey::Pubkey, + signature::Signature, + transaction::{SanitizedTransaction, Transaction}, + }, + std::collections::HashMap, +}; + +#[derive(Default)] +pub struct SanitizedTransactionBuilder { + instructions: Vec, + num_required_signatures: u8, + num_readonly_signed_accounts: u8, + num_readonly_unsigned_accounts: u8, +} + +struct InnerInstruction { + program_id: Pubkey, + accounts: Vec, + signatures: HashMap, + data: Vec, +} + +impl SanitizedTransactionBuilder { + pub fn create_instruction( + &mut self, + program_id: Pubkey, + // The fee payer and the program id shall not appear in the accounts vector + accounts: Vec, + signatures: HashMap, + data: Vec, + ) { + self.num_required_signatures = self + .num_required_signatures + .saturating_add(signatures.len() as u8); + + let instruction = InnerInstruction { + program_id, + accounts: accounts + .iter() + .map(|meta| { + if !meta.is_writable { + if meta.is_signer { + self.num_readonly_signed_accounts = + self.num_readonly_signed_accounts.saturating_add(1); + } else { + self.num_readonly_unsigned_accounts 
=
+                            self.num_readonly_unsigned_accounts.saturating_add(1);
+                    }
+                }
+                meta.pubkey
+            })
+            .collect(),
+            signatures,
+            data,
+        };
+        self.instructions.push(instruction);
+    }
+
+    pub fn build(
+        &mut self,
+        block_hash: Hash,
+        fee_payer: (Pubkey, Signature),
+    ) -> SanitizedTransaction {
+        let mut message = Message {
+            account_keys: vec![],
+            header: MessageHeader {
+                // The fee payer always requires a signature so +1
+                num_required_signatures: self.num_required_signatures.saturating_add(1),
+                num_readonly_signed_accounts: self.num_readonly_signed_accounts,
+                num_readonly_unsigned_accounts: self.num_readonly_unsigned_accounts,
+            },
+            instructions: vec![],
+            recent_blockhash: block_hash,
+        };
+
+        let mut signatures = Vec::new();
+        let mut positions: HashMap<Pubkey, usize> = HashMap::new();
+
+        message.account_keys.push(fee_payer.0);
+        signatures.push(fee_payer.1);
+
+        for item in &self.instructions {
+            for (key, value) in &item.signatures {
+                signatures.push(*value);
+                positions.insert(*key, message.account_keys.len());
+                message.account_keys.push(*key);
+            }
+        }
+
+        let mut instructions: Vec<InnerInstruction> = Vec::new();
+
+        // Clean up
+        std::mem::swap(&mut instructions, &mut self.instructions);
+        self.num_required_signatures = 0;
+        self.num_readonly_signed_accounts = 0;
+        self.num_readonly_unsigned_accounts = 0;
+
+        for item in instructions {
+            let accounts = item
+                .accounts
+                .iter()
+                .map(|key| {
+                    if let Some(idx) = positions.get(key) {
+                        *idx as u8
+                    } else {
+                        push_and_return_index(*key, &mut message.account_keys)
+                    }
+                })
+                .collect::<Vec<u8>>();
+            let instruction = CompiledInstruction {
+                program_id_index: push_and_return_index(item.program_id, &mut message.account_keys),
+                accounts,
+                data: item.data,
+            };
+
+            message.instructions.push(instruction);
+        }
+
+        let transaction = Transaction {
+            signatures,
+            message,
+        };
+
+        SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap()
+    }
+}
+
+fn push_and_return_index(value: Pubkey, vector: &mut Vec<Pubkey>) -> u8 {
+    vector.push(value);
+    vector.len().saturating_sub(1) as u8
+}

From 4b9e1e0ab3a2c6904729fc287fbce446d2a4e72c Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Wed, 27 Mar 2024 09:43:38 -0700
Subject: [PATCH 081/153] [TieredStorage] Exclude NotFound in reporting storage
 leakage on drop() (#446)

#### Problem
TieredStorage::drop() currently panics when it fails to delete the underlying
file, in order to raise awareness of possible storage resource leakage; this
includes io::ErrorKind::NotFound. But sometimes a TieredStorage (or
AccountsFile in general) instance is created and then dropped without any file
ever being created. This causes false alarms, including in unit tests.

#### Summary of Changes
This PR excludes NotFound when reporting storage leakage on
TieredStorage::drop().
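A minimal standalone sketch of the drop-time cleanup pattern this patch adopts;
the BackingFile type and the path below are hypothetical stand-ins for
TieredStorage and its backing file, and only the NotFound handling mirrors the
change:

    use std::{fs, io, path::PathBuf};

    struct BackingFile {
        path: PathBuf,
    }

    impl Drop for BackingFile {
        fn drop(&mut self) {
            if let Err(err) = fs::remove_file(&self.path) {
                // NotFound means no backing file was ever created, so there is
                // no storage resource to leak; any other removal error still
                // panics to surface a real leak.
                if err.kind() != io::ErrorKind::NotFound {
                    panic!("failed to remove '{}': {err}", self.path.display());
                }
            }
        }
    }

    fn main() {
        // Dropping without ever creating the backing file no longer panics.
        let _file = BackingFile {
            path: PathBuf::from("does-not-exist.storage"),
        };
    }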
--- accounts-db/src/tiered_storage.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index cbca5c93d0041e..7b8b26fce64fa9 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -27,7 +27,7 @@ use { solana_sdk::account::ReadableAccount, std::{ borrow::Borrow, - fs, + fs, io, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, Ordering}, @@ -65,10 +65,14 @@ pub struct TieredStorage { impl Drop for TieredStorage { fn drop(&mut self) { if let Err(err) = fs::remove_file(&self.path) { - panic!( - "TieredStorage failed to remove backing storage file '{}': {err}", - self.path.display(), - ); + // Here we bypass NotFound error as the focus of the panic is to + // detect any leakage of storage resource. + if err.kind() != io::ErrorKind::NotFound { + panic!( + "TieredStorage failed to remove backing storage file '{}': {err}", + self.path.display(), + ); + } } } } From 02918a5af1f90a05f2c72b338dd3c0dd967c0f6f Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Wed, 27 Mar 2024 13:49:43 -0300 Subject: [PATCH 082/153] Update sysvar API in SVM integration test (#451) --- svm/tests/integration_test.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 4104ed4012b1e5..5095fc16488960 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -443,15 +443,7 @@ fn svm_integration() { ); // The sysvars must be put in the cache - batch_processor - .sysvar_cache - .write() - .unwrap() - .fill_missing_entries(|pubkey, callback| { - if let Some(account) = mock_bank.get_account_shared_data(pubkey) { - callback(account.data()); - } - }); + batch_processor.fill_missing_sysvar_cache_entries(&mock_bank); let mut error_counter = TransactionErrorMetrics::default(); let recording_config = ExecutionRecordingConfig { From cfd5b71b2896b625dae75189b7f43e60fcd53f07 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 27 Mar 2024 10:06:43 -0700 Subject: [PATCH 083/153] shred: expose chained merkle root (#435) * shred: expose chained merkle root * pr feedback: macro, pub(super), _=> none --- ledger/src/shred.rs | 32 ++++++++++++++++++++++++++++++++ ledger/src/shred/merkle.rs | 24 +++++++++++++++++++++++- ledger/src/shred/shred_code.rs | 7 +++++++ ledger/src/shred/shred_data.rs | 7 +++++++ 4 files changed, 69 insertions(+), 1 deletion(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 24d5000b65311b..2b6f6f136784c2 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -310,6 +310,7 @@ impl ErasureSetId { macro_rules! dispatch { ($vis:vis fn $name:ident(&self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => { #[inline] + #[allow(dead_code)] $vis fn $name(&self $(, $arg:$ty)?) $(-> $out)? { match self { Self::ShredCode(shred) => shred.$name($($arg, )?), @@ -344,6 +345,7 @@ impl Shred { dispatch!(fn set_signature(&mut self, signature: Signature)); dispatch!(fn signed_data(&self) -> Result); + dispatch!(pub(crate) fn chained_merkle_root(&self) -> Result); // Returns the portion of the shred's payload which is erasure coded. dispatch!(pub(crate) fn erasure_shard(self) -> Result, Error>); // Like Shred::erasure_shard but returning a slice. @@ -726,6 +728,36 @@ pub mod layout { } } + #[allow(dead_code)] + pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option { + let offset = match get_shred_variant(shred).ok()? 
{ + ShredVariant::LegacyCode | ShredVariant::LegacyData => None, + ShredVariant::MerkleCode { + proof_size, + chained: true, + resigned, + } => merkle::ShredCode::get_chained_merkle_root_offset(proof_size, resigned).ok(), + ShredVariant::MerkleData { + proof_size, + chained: true, + resigned, + } => merkle::ShredData::get_chained_merkle_root_offset(proof_size, resigned).ok(), + ShredVariant::MerkleCode { + proof_size: _, + chained: false, + resigned: _, + } => None, + ShredVariant::MerkleData { + proof_size: _, + chained: false, + resigned: _, + } => None, + }?; + shred + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .map(Hash::new) + } + // Minimally corrupts the packet so that the signature no longer verifies. #[cfg(test)] pub(crate) fn corrupt_packet( diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index b785eeb6dc32cc..a7cc134824cf31 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -190,9 +190,24 @@ impl ShredData { else { return Err(Error::InvalidShredVariant); }; + Self::get_chained_merkle_root_offset(proof_size, resigned) + } + + pub(super) fn get_chained_merkle_root_offset( + proof_size: u8, + resigned: bool, + ) -> Result { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true, resigned)?) } + pub(super) fn chained_merkle_root(&self) -> Result { + let offset = self.chained_merkle_root_offset()?; + self.payload + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .map(Hash::new) + .ok_or(Error::InvalidPayloadSize(self.payload.len())) + } + fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { let offset = self.chained_merkle_root_offset()?; let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { @@ -361,10 +376,17 @@ impl ShredCode { else { return Err(Error::InvalidShredVariant); }; + Self::get_chained_merkle_root_offset(proof_size, resigned) + } + + pub(super) fn get_chained_merkle_root_offset( + proof_size: u8, + resigned: bool, + ) -> Result { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true, resigned)?) 
} - fn chained_merkle_root(&self) -> Result { + pub(super) fn chained_merkle_root(&self) -> Result { let offset = self.chained_merkle_root_offset()?; self.payload .get(offset..offset + SIZE_OF_MERKLE_ROOT) diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 0ad97a0f729a77..067d7edaf437eb 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -47,6 +47,13 @@ impl ShredCode { } } + pub(super) fn chained_merkle_root(&self) -> Result { + match self { + Self::Legacy(_) => Err(Error::InvalidShredType), + Self::Merkle(shred) => shred.chained_merkle_root(), + } + } + pub(super) fn merkle_root(&self) -> Result { match self { Self::Legacy(_) => Err(Error::InvalidShredType), diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index 15f407172cfc4b..ac409376370420 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -41,6 +41,13 @@ impl ShredData { } } + pub(super) fn chained_merkle_root(&self) -> Result { + match self { + Self::Legacy(_) => Err(Error::InvalidShredType), + Self::Merkle(shred) => shred.chained_merkle_root(), + } + } + pub(super) fn merkle_root(&self) -> Result { match self { Self::Legacy(_) => Err(Error::InvalidShredType), From ba9c25c41e9807b3ff523c302fd2f602e052692d Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Wed, 27 Mar 2024 13:09:17 -0400 Subject: [PATCH 084/153] prioritization fee cache: remove lru crate (#30) --- Cargo.lock | 1 - programs/sbf/Cargo.lock | 1 - runtime/Cargo.toml | 1 - runtime/src/prioritization_fee.rs | 19 +---- runtime/src/prioritization_fee_cache.rs | 99 +++++++++++++------------ 5 files changed, 56 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8836adf757915..7b94b288e600e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6994,7 +6994,6 @@ dependencies = [ "libc", "libsecp256k1", "log", - "lru", "lz4", "memmap2", "memoffset 0.9.0", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 66f0c5ee36a431..654430127fa7f4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5681,7 +5681,6 @@ dependencies = [ "lazy_static", "libc", "log", - "lru", "lz4", "memmap2", "mockall", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 49451aa02eed26..d4e554a5a8fbe6 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -30,7 +30,6 @@ itertools = { workspace = true } lazy_static = { workspace = true } libc = { workspace = true } log = { workspace = true } -lru = { workspace = true } lz4 = { workspace = true } memmap2 = { workspace = true } mockall = { workspace = true } diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index 2f7618390b94ff..45425059f98c15 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -165,11 +165,7 @@ impl Default for PrioritizationFee { impl PrioritizationFee { /// Update self for minimum transaction fee in the block and minimum fee for each writable account. 
- pub fn update( - &mut self, - transaction_fee: u64, - writable_accounts: Vec, - ) -> Result<(), PrioritizationFeeError> { + pub fn update(&mut self, transaction_fee: u64, writable_accounts: Vec) { let (_, update_time) = measure!( { if !self.is_finalized { @@ -199,7 +195,6 @@ impl PrioritizationFee { self.metrics .accumulate_total_update_elapsed_us(update_time.as_us()); - Ok(()) } /// Accounts that have minimum fees lesser or equal to the minimum fee in the block are redundant, they are @@ -283,9 +278,7 @@ mod tests { // ----------------------------------------------------------------------- // [5, a, b ] --> [5, 5, 5, nil ] { - assert!(prioritization_fee - .update(5, vec![write_account_a, write_account_b]) - .is_ok()); + prioritization_fee.update(5, vec![write_account_a, write_account_b]); assert_eq!(5, prioritization_fee.get_min_transaction_fee().unwrap()); assert_eq!( 5, @@ -309,9 +302,7 @@ mod tests { // ----------------------------------------------------------------------- // [9, b, c ] --> [5, 5, 5, 9 ] { - assert!(prioritization_fee - .update(9, vec![write_account_b, write_account_c]) - .is_ok()); + prioritization_fee.update(9, vec![write_account_b, write_account_c]); assert_eq!(5, prioritization_fee.get_min_transaction_fee().unwrap()); assert_eq!( 5, @@ -338,9 +329,7 @@ mod tests { // ----------------------------------------------------------------------- // [2, a, c ] --> [2, 2, 5, 2 ] { - assert!(prioritization_fee - .update(2, vec![write_account_a, write_account_c]) - .is_ok()); + prioritization_fee.update(2, vec![write_account_a, write_account_c]); assert_eq!(2, prioritization_fee.get_min_transaction_fee().unwrap()); assert_eq!( 2, diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index ff911eb9efa842..7414ae46ce3308 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -2,7 +2,6 @@ use { crate::{bank::Bank, compute_budget_details::GetComputeBudgetDetails, prioritization_fee::*}, crossbeam_channel::{unbounded, Receiver, Sender}, log::*, - lru::LruCache, solana_measure::measure, solana_sdk::{ clock::{BankId, Slot}, @@ -122,6 +121,7 @@ impl PrioritizationFeeCacheMetrics { } } +#[derive(Debug)] enum CacheServiceUpdate { TransactionUpdate { slot: Slot, @@ -141,7 +141,7 @@ enum CacheServiceUpdate { /// and collecting stats and reporting metrics. #[derive(Debug)] pub struct PrioritizationFeeCache { - cache: Arc>>, + cache: Arc>>, service_thread: Option>, sender: Sender, metrics: Arc, @@ -166,17 +166,17 @@ impl Drop for PrioritizationFeeCache { impl PrioritizationFeeCache { pub fn new(capacity: u64) -> Self { - let metrics = Arc::new(PrioritizationFeeCacheMetrics::default()); + let cache = Arc::new(RwLock::new(BTreeMap::new())); let (sender, receiver) = unbounded(); - let cache = Arc::new(RwLock::new(LruCache::new(capacity as usize))); + let metrics = Arc::new(PrioritizationFeeCacheMetrics::default()); - let cache_clone = cache.clone(); - let metrics_clone = metrics.clone(); let service_thread = Some( Builder::new() .name("solPrFeeCachSvc".to_string()) - .spawn(move || { - Self::service_loop(cache_clone, receiver, metrics_clone); + .spawn({ + let cache = cache.clone(); + let metrics = metrics.clone(); + move || Self::service_loop(cache, capacity as usize, receiver, metrics) }) .unwrap(), ); @@ -261,8 +261,7 @@ impl PrioritizationFeeCache { }); } - /// Internal function is invoked by worker thread to update slot's minimum prioritization fee, - /// Cache lock contends here. 
+ /// Internal function is invoked by worker thread to update slot's minimum prioritization fee. fn update_cache( unfinalized: &mut UnfinalizedPrioritizationFees, slot: Slot, @@ -273,7 +272,7 @@ impl PrioritizationFeeCache { ) { let (_, entry_update_time) = measure!( { - let _ = unfinalized + unfinalized .entry(slot) .or_default() .entry(bank_id) @@ -288,7 +287,8 @@ impl PrioritizationFeeCache { fn finalize_slot( unfinalized: &mut UnfinalizedPrioritizationFees, - cache: &RwLock>, + cache: &RwLock>, + cache_max_size: usize, slot: Slot, bank_id: BankId, metrics: &PrioritizationFeeCacheMetrics, @@ -340,7 +340,10 @@ impl PrioritizationFeeCache { let (_, cache_lock_time) = measure!( { let mut cache = cache.write().unwrap(); - cache.put(slot, slot_prioritization_fee); + while cache.len() >= cache_max_size { + cache.pop_first(); + } + cache.insert(slot, slot_prioritization_fee); }, "cache_lock_time" ); @@ -349,7 +352,8 @@ impl PrioritizationFeeCache { } fn service_loop( - cache: Arc>>, + cache: Arc>>, + cache_max_size: usize, receiver: Receiver, metrics: Arc, ) { @@ -373,7 +377,14 @@ impl PrioritizationFeeCache { &metrics, ), CacheServiceUpdate::BankFinalized { slot, bank_id } => { - Self::finalize_slot(&mut unfinalized, &cache, slot, bank_id, &metrics); + Self::finalize_slot( + &mut unfinalized, + &cache, + cache_max_size, + slot, + bank_id, + &metrics, + ); metrics.report(slot); } CacheServiceUpdate::Exit => { @@ -385,12 +396,7 @@ impl PrioritizationFeeCache { /// Returns number of blocks that have finalized minimum fees collection pub fn available_block_count(&self) -> usize { - self.cache - .read() - .unwrap() - .iter() - .filter(|(_slot, slot_prioritization_fee)| slot_prioritization_fee.is_finalized()) - .count() + self.cache.read().unwrap().len() } pub fn get_prioritization_fees(&self, account_keys: &[Pubkey]) -> Vec<(Slot, u64)> { @@ -398,7 +404,6 @@ impl PrioritizationFeeCache { .read() .unwrap() .iter() - .filter(|(_slot, slot_prioritization_fee)| slot_prioritization_fee.is_finalized()) .map(|(slot, slot_prioritization_fee)| { let mut fee = slot_prioritization_fee .get_min_transaction_fee() @@ -471,7 +476,7 @@ mod tests { .load(Ordering::Relaxed) != expected_update_count { - std::thread::sleep(std::time::Duration::from_millis(100)); + std::thread::sleep(std::time::Duration::from_millis(10)); } } @@ -486,7 +491,7 @@ mod tests { // wait till finalization is done loop { - let mut cache = prioritization_fee_cache.cache.write().unwrap(); + let cache = prioritization_fee_cache.cache.read().unwrap(); if let Some(slot_cache) = cache.get(&slot) { if slot_cache.is_finalized() { return; @@ -528,14 +533,14 @@ mod tests { // assert empty cache { - let mut lock = prioritization_fee_cache.cache.write().unwrap(); + let lock = prioritization_fee_cache.cache.read().unwrap(); assert!(lock.get(&slot).is_none()); } // assert after prune, account a and c should be removed from cache to save space { sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot, bank.bank_id()); - let mut lock = prioritization_fee_cache.cache.write().unwrap(); + let lock = prioritization_fee_cache.cache.read().unwrap(); let fee = lock.get(&slot).unwrap(); assert_eq!(2, fee.get_min_transaction_fee().unwrap()); assert!(fee.get_writable_account_fee(&write_account_a).is_none()); @@ -568,7 +573,7 @@ mod tests { sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 1, bank1.bank_id()); // add slot 2 entry to cache, but not finalize it - let bank2 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 3)); + let 
bank2 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 2)); let txs = vec![build_sanitized_transaction_for_test( 1, &Pubkey::new_unique(), @@ -576,7 +581,7 @@ mod tests { )]; sync_update(&prioritization_fee_cache, bank2.clone(), txs.iter()); - let bank3 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 2)); + let bank3 = Arc::new(Bank::new_from_parent(bank.clone(), &collector, 3)); sync_update( &prioritization_fee_cache, bank3.clone(), @@ -587,7 +592,7 @@ mod tests { )] .iter(), ); - sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2, bank3.bank_id()); + sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 3, bank3.bank_id()); // assert available block count should be 2 finalized blocks assert_eq!(2, prioritization_fee_cache.available_block_count()); @@ -738,28 +743,28 @@ mod tests { // after block is completed sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 2, bank2.bank_id()); assert_eq!( - vec![(2, 3), (1, 1)], + vec![(1, 1), (2, 3)], prioritization_fee_cache.get_prioritization_fees(&[]), ); assert_eq!( - vec![(2, 3), (1, 2)], + vec![(1, 2), (2, 3)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]), ); assert_eq!( - vec![(2, 4), (1, 2)], + vec![(1, 2), (2, 4)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]), ); assert_eq!( - vec![(2, 4), (1, 1)], + vec![(1, 1), (2, 4)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]), ); assert_eq!( - vec![(2, 4), (1, 2)], + vec![(1, 2), (2, 4)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]), ); assert_eq!( - vec![(2, 4), (1, 2)], + vec![(1, 2), (2, 4)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -781,28 +786,28 @@ mod tests { sync_update(&prioritization_fee_cache, bank3.clone(), txs.iter()); // before block is marked as completed assert_eq!( - vec![(2, 3), (1, 1)], + vec![(1, 1), (2, 3)], prioritization_fee_cache.get_prioritization_fees(&[]), ); assert_eq!( - vec![(2, 3), (1, 2)], + vec![(1, 2), (2, 3)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]), ); assert_eq!( - vec![(2, 4), (1, 2)], + vec![(1, 2), (2, 4)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]), ); assert_eq!( - vec![(2, 4), (1, 1)], + vec![(1, 1), (2, 4)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]), ); assert_eq!( - vec![(2, 4), (1, 2)], + vec![(1, 2), (2, 4)], prioritization_fee_cache .get_prioritization_fees(&[write_account_a, write_account_b]), ); assert_eq!( - vec![(2, 4), (1, 2)], + vec![(1, 2), (2, 4)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, @@ -812,28 +817,28 @@ mod tests { // after block is completed sync_finalize_priority_fee_for_test(&prioritization_fee_cache, 3, bank3.bank_id()); assert_eq!( - vec![(3, 5), (2, 3), (1, 1)], + vec![(1, 1), (2, 3), (3, 5)], prioritization_fee_cache.get_prioritization_fees(&[]), ); assert_eq!( - vec![(3, 6), (2, 3), (1, 2)], + vec![(1, 2), (2, 3), (3, 6)], prioritization_fee_cache.get_prioritization_fees(&[write_account_a]), ); assert_eq!( - vec![(3, 5), (2, 4), (1, 2)], + vec![(1, 2), (2, 4), (3, 5)], prioritization_fee_cache.get_prioritization_fees(&[write_account_b]), ); assert_eq!( - vec![(3, 6), (2, 4), (1, 1)], + vec![(1, 1), (2, 4), (3, 6)], prioritization_fee_cache.get_prioritization_fees(&[write_account_c]), ); assert_eq!( - vec![(3, 6), (2, 4), (1, 2)], + vec![(1, 2), (2, 4), (3, 6)], prioritization_fee_cache 
.get_prioritization_fees(&[write_account_a, write_account_b]), ); assert_eq!( - vec![(3, 6), (2, 4), (1, 2)], + vec![(1, 2), (2, 4), (3, 6)], prioritization_fee_cache.get_prioritization_fees(&[ write_account_a, write_account_b, From e70ff38f350a70528f9fefa3721427589844d3ae Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:47:34 -0300 Subject: [PATCH 085/153] Use loader v3 instead of v2 in the SVM (#454) --- svm/tests/integration_test.rs | 103 +++++++++++++++++----------------- 1 file changed, 51 insertions(+), 52 deletions(-) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 5095fc16488960..d99e4d07b442a9 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -21,7 +21,7 @@ use { }, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, - bpf_loader, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{Clock, Epoch, Slot, UnixTimestamp}, epoch_schedule::EpochSchedule, fee::FeeStructure, @@ -56,7 +56,7 @@ use { mod mock_bank; mod transaction_builder; -const BPF_LOADER_NAME: &str = "solana_bpf_loader_program"; +const BPF_LOADER_NAME: &str = "solana_bpf_loader_upgradeable_program"; const SYSTEM_PROGRAM_NAME: &str = "system_program"; const DEPLOYMENT_SLOT: u64 = 0; const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot @@ -146,11 +146,11 @@ fn create_executable_environment( ); mock_bank .account_shared_data - .insert(bpf_loader::id(), account_data); + .insert(bpf_loader_upgradeable::id(), account_data); // The bpf loader needs an executable as well program_cache.assign_program( - bpf_loader::id(), + bpf_loader_upgradeable::id(), Arc::new(LoadedProgram::new_builtin( DEPLOYMENT_SLOT, BPF_LOADER_NAME.len(), @@ -207,7 +207,7 @@ fn create_executable_environment( .insert(Clock::id(), account_data); // Inform SVM of the registered builins - let registered_built_ins = vec![bpf_loader::id(), solana_system_program::id()]; + let registered_built_ins = vec![bpf_loader_upgradeable::id(), solana_system_program::id()]; (program_cache, registered_built_ins) } @@ -226,6 +226,46 @@ fn load_program(name: String) -> Vec { buffer } +fn deploy_program(name: String, mock_bank: &mut MockBankCallback) -> Pubkey { + let program_account = Pubkey::new_unique(); + let program_data_account = Pubkey::new_unique(); + let state = UpgradeableLoaderState::Program { + programdata_address: program_data_account, + }; + + // The program account must have funds and hold the executable binary + let mut account_data = AccountSharedData::default(); + account_data.set_data(bincode::serialize(&state).unwrap()); + account_data.set_lamports(25); + account_data.set_owner(bpf_loader_upgradeable::id()); + mock_bank + .account_shared_data + .insert(program_account, account_data); + + let mut account_data = AccountSharedData::default(); + let state = UpgradeableLoaderState::ProgramData { + slot: DEPLOYMENT_SLOT, + upgrade_authority_address: None, + }; + let mut header = bincode::serialize(&state).unwrap(); + let mut complement = vec![ + 0; + std::cmp::max( + 0, + UpgradeableLoaderState::size_of_programdata_metadata().saturating_sub(header.len()) + ) + ]; + let mut buffer = load_program(name); + header.append(&mut complement); + header.append(&mut buffer); + account_data.set_data(header); + mock_bank + .account_shared_data + .insert(program_data_account, account_data); + + program_account +} + fn prepare_transactions( mock_bank: &mut 
MockBankCallback, ) -> (Vec, Vec) { @@ -234,9 +274,9 @@ fn prepare_transactions( let mut transaction_checks = Vec::new(); // A transaction that works without any account - let key1 = Pubkey::new_unique(); + let hello_program = deploy_program("hello-solana".to_string(), mock_bank); let fee_payer = Pubkey::new_unique(); - transaction_builder.create_instruction(key1, Vec::new(), HashMap::new(), Vec::new()); + transaction_builder.create_instruction(hello_program, Vec::new(), HashMap::new(), Vec::new()); let sanitized_transaction = transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique())); @@ -244,18 +284,6 @@ fn prepare_transactions( all_transactions.push(sanitized_transaction); transaction_checks.push((Ok(()), None, Some(20))); - // Loading the program file - let buffer = load_program("hello-solana".to_string()); - - // The program account must have funds and hold the executable binary - let mut account_data = AccountSharedData::default(); - // The executable account owner must be one of the loaders. - account_data.set_owner(bpf_loader::id()); - account_data.set_data(buffer); - account_data.set_executable(true); - account_data.set_lamports(25); - mock_bank.account_shared_data.insert(key1, account_data); - // The transaction fee payer must have enough funds let mut account_data = AccountSharedData::default(); account_data.set_lamports(80000); @@ -264,7 +292,7 @@ fn prepare_transactions( .insert(fee_payer, account_data); // A simple funds transfer between accounts - let transfer_program_account = Pubkey::new_unique(); + let transfer_program_account = deploy_program("simple-transfer".to_string(), mock_bank); let sender = Pubkey::new_unique(); let recipient = Pubkey::new_unique(); let fee_payer = Pubkey::new_unique(); @@ -307,19 +335,6 @@ fn prepare_transactions( .account_shared_data .insert(fee_payer, account_data); - let buffer = load_program("simple-transfer".to_string()); - - // The program account must have funds and hold the executable binary - let mut account_data = AccountSharedData::default(); - // The executable account owner must be one of the loaders. - account_data.set_owner(bpf_loader::id()); - account_data.set_data(buffer); - account_data.set_executable(true); - account_data.set_lamports(25); - mock_bank - .account_shared_data - .insert(transfer_program_account, account_data); - // sender let mut account_data = AccountSharedData::default(); account_data.set_lamports(900000); @@ -332,10 +347,10 @@ fn prepare_transactions( .account_shared_data .insert(recipient, account_data); - // The program account is set in `create_executable_environment` + // The system account is set in `create_executable_environment` // A program that utilizes a Sysvar - let program_account = Pubkey::new_unique(); + let program_account = deploy_program("clock-sysvar".to_string(), mock_bank); let fee_payer = Pubkey::new_unique(); transaction_builder.create_instruction(program_account, Vec::new(), HashMap::new(), Vec::new()); @@ -351,28 +366,12 @@ fn prepare_transactions( .account_shared_data .insert(fee_payer, account_data); - let buffer = load_program("clock-sysvar".to_string()); - - // The program account must have funds and hold the executable binary - let mut account_data = AccountSharedData::default(); - // The executable account owner must be one of the loaders. 
-    account_data.set_owner(bpf_loader::id());
-    account_data.set_data(buffer);
-    account_data.set_executable(true);
-    account_data.set_lamports(25);
-    mock_bank
-        .account_shared_data
-        .insert(program_account, account_data);
-
     // A transaction that fails
     let sender = Pubkey::new_unique();
     let recipient = Pubkey::new_unique();
     let fee_payer = Pubkey::new_unique();
     let system_account = Pubkey::new_from_array([0; 32]);
-    let mut data = 900050u64.to_be_bytes().to_vec();
-    while data.len() < 8 {
-        data.insert(0, 0);
-    }
+    let data = 900050u64.to_be_bytes().to_vec();
     transaction_builder.create_instruction(
         transfer_program_account,
         vec![

From 10d06773cd1daf251aeb7a6283da6bd8d75f571a Mon Sep 17 00:00:00 2001
From: steviez
Date: Wed, 27 Mar 2024 16:33:21 -0500
Subject: [PATCH 086/153] Share the threadpool for tx execution and entry
 verification (#216)

Previously, entry verification had a dedicated threadpool used to
verify PoH hashes as well as some basic transaction verification via
Bank::verify_transaction(). It should also be noted that the entry
verification code provides logic to offload to a GPU if one is present.

Regardless of whether a GPU is present, some of the verification must
be done on a CPU. Moreover, the CPU verification of entries and
transaction execution are serial operations; entry verification
finishes first before moving on to transaction execution.

So, tx execution and entry verification are not competing for CPU
cycles at the same time and can use the same pool.

One exception to the above statement is that if someone is using the
feature to replay forks in parallel, then hypothetically, different
forks may end up competing for the same resources at the same time.
However, that is already true given that we had pools that were shared
between replay of multiple forks. So, this change doesn't really change
much for that case, but will reduce overhead in the single fork case,
which is the vast majority of the time.
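To make the new calling convention concrete, here is a minimal sketch
(not part of the patch itself) of a caller building one rayon pool up
front and passing it through entry verification. The pool settings
mirror the thread_pool_for_tests() helper added below; the wrapper name
verify_entries and its inputs are placeholders for illustration:

    // Sketch only: one explicitly-built pool replaces the old
    // lazy_static PAR_THREAD_POOL hidden inside solana-entry.
    use {
        rayon::ThreadPoolBuilder,
        solana_entry::entry::{Entry, EntrySlice},
        solana_sdk::hash::Hash,
    };

    fn verify_entries(entries: &[Entry], start_hash: &Hash) -> bool {
        let thread_pool = ThreadPoolBuilder::new()
            .num_threads(4)
            .thread_name(|i| format!("solEntryTest{i:02}"))
            .build()
            .expect("new rayon threadpool");
        // EntrySlice::verify() now takes the pool as an argument, so the
        // same pool can also be handed to the replay path in
        // blockstore_processor instead of each component owning threads.
        entries.verify(start_hash, &thread_pool)
    }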
--- Cargo.lock | 1 + core/src/banking_stage.rs | 6 +- entry/benches/entry_sigverify.rs | 6 +- entry/src/entry.rs | 195 ++++++++++++++++++++--------- ledger/src/blockstore_processor.rs | 40 +++--- local-cluster/src/cluster_tests.rs | 14 ++- poh-bench/Cargo.toml | 1 + poh-bench/src/main.rs | 25 ++-- poh/benches/poh_verify.rs | 10 +- 9 files changed, 200 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b94b288e600e6..a22e448815ce9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6613,6 +6613,7 @@ dependencies = [ "solana-logger", "solana-measure", "solana-perf", + "solana-rayon-threadlimit", "solana-sdk", "solana-version", ] diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 603ff55f0003b4..7be8af1373ccbe 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -786,7 +786,7 @@ mod tests { crate::banking_trace::{BankingPacketBatch, BankingTracer}, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, - solana_entry::entry::{Entry, EntrySlice}, + solana_entry::entry::{self, Entry, EntrySlice}, solana_gossip::cluster_info::Node, solana_ledger::{ blockstore::Blockstore, @@ -941,7 +941,7 @@ mod tests { .collect(); trace!("done"); assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize); - assert!(entries.verify(&start_hash)); + assert!(entries.verify(&start_hash, &entry::thread_pool_for_tests())); assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash()); banking_stage.join().unwrap(); } @@ -1060,7 +1060,7 @@ mod tests { .map(|(_bank, (entry, _tick_height))| entry) .collect(); - assert!(entries.verify(&blockhash)); + assert!(entries.verify(&blockhash, &entry::thread_pool_for_tests())); if !entries.is_empty() { blockhash = entries.last().unwrap().hash; for entry in entries { diff --git a/entry/benches/entry_sigverify.rs b/entry/benches/entry_sigverify.rs index b3a1b7b5cdb3e6..09adeb6cfd831a 100644 --- a/entry/benches/entry_sigverify.rs +++ b/entry/benches/entry_sigverify.rs @@ -16,6 +16,7 @@ use { #[bench] fn bench_gpusigverify(bencher: &mut Bencher) { + let thread_pool = entry::thread_pool_for_benches(); let entries = (0..131072) .map(|_| { let transaction = test_tx(); @@ -53,6 +54,7 @@ fn bench_gpusigverify(bencher: &mut Bencher) { let res = entry::start_verify_transactions( entries.clone(), false, + &thread_pool, recycler.clone(), Arc::new(verify_transaction), ); @@ -65,6 +67,7 @@ fn bench_gpusigverify(bencher: &mut Bencher) { #[bench] fn bench_cpusigverify(bencher: &mut Bencher) { + let thread_pool = entry::thread_pool_for_benches(); let entries = (0..131072) .map(|_| { let transaction = test_tx(); @@ -89,6 +92,7 @@ fn bench_cpusigverify(bencher: &mut Bencher) { }; bencher.iter(|| { - let _ans = entry::verify_transactions(entries.clone(), Arc::new(verify_transaction)); + let _ans = + entry::verify_transactions(entries.clone(), &thread_pool, Arc::new(verify_transaction)); }) } diff --git a/entry/src/entry.rs b/entry/src/entry.rs index 46aad401dec9b0..7497f96d65980f 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -6,7 +6,6 @@ use { crate::poh::Poh, crossbeam_channel::{Receiver, Sender}, dlopen2::symbor::{Container, SymBorApi, Symbol}, - lazy_static::lazy_static, log::*, rand::{thread_rng, Rng}, rayon::{prelude::*, ThreadPool}, @@ -41,16 +40,6 @@ use { }, }; -// get_max_thread_count to match number of threads in the old code. -// see: https://github.com/solana-labs/solana/pull/24853 -lazy_static! 
{ - static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() - .num_threads(get_max_thread_count()) - .thread_name(|i| format!("solEntry{i:02}")) - .build() - .unwrap(); -} - pub type EntrySender = Sender>; pub type EntryReceiver = Receiver>; @@ -359,7 +348,7 @@ impl EntryVerificationState { self.poh_duration_us } - pub fn finish_verify(&mut self) -> bool { + pub fn finish_verify(&mut self, thread_pool: &ThreadPool) -> bool { match &mut self.device_verification_data { DeviceVerificationData::Gpu(verification_state) => { let gpu_time_us = verification_state.thread_h.take().unwrap().join().unwrap(); @@ -370,7 +359,7 @@ impl EntryVerificationState { .expect("unwrap Arc") .into_inner() .expect("into_inner"); - let res = PAR_THREAD_POOL.install(|| { + let res = thread_pool.install(|| { hashes .into_par_iter() .cloned() @@ -405,9 +394,10 @@ impl EntryVerificationState { pub fn verify_transactions( entries: Vec, + thread_pool: &ThreadPool, verify: Arc Result + Send + Sync>, ) -> Result> { - PAR_THREAD_POOL.install(|| { + thread_pool.install(|| { entries .into_par_iter() .map(|entry| { @@ -430,6 +420,7 @@ pub fn verify_transactions( pub fn start_verify_transactions( entries: Vec, skip_verification: bool, + thread_pool: &ThreadPool, verify_recyclers: VerifyRecyclers, verify: Arc< dyn Fn(VersionedTransaction, TransactionVerificationMode) -> Result @@ -459,15 +450,16 @@ pub fn start_verify_transactions( .is_some(); if use_cpu { - start_verify_transactions_cpu(entries, skip_verification, verify) + start_verify_transactions_cpu(entries, skip_verification, thread_pool, verify) } else { - start_verify_transactions_gpu(entries, verify_recyclers, verify) + start_verify_transactions_gpu(entries, verify_recyclers, thread_pool, verify) } } fn start_verify_transactions_cpu( entries: Vec, skip_verification: bool, + thread_pool: &ThreadPool, verify: Arc< dyn Fn(VersionedTransaction, TransactionVerificationMode) -> Result + Send @@ -484,7 +476,7 @@ fn start_verify_transactions_cpu( move |versioned_tx| verify(versioned_tx, mode) }; - let entries = verify_transactions(entries, Arc::new(verify_func))?; + let entries = verify_transactions(entries, thread_pool, Arc::new(verify_func))?; Ok(EntrySigVerificationState { verification_status: EntryVerificationStatus::Success, @@ -497,6 +489,7 @@ fn start_verify_transactions_cpu( fn start_verify_transactions_gpu( entries: Vec, verify_recyclers: VerifyRecyclers, + thread_pool: &ThreadPool, verify: Arc< dyn Fn(VersionedTransaction, TransactionVerificationMode) -> Result + Send @@ -512,7 +505,7 @@ fn start_verify_transactions_gpu( } }; - let entries = verify_transactions(entries, Arc::new(verify_func))?; + let entries = verify_transactions(entries, thread_pool, Arc::new(verify_func))?; let entry_txs: Vec<&SanitizedTransaction> = entries .iter() @@ -618,12 +611,25 @@ fn compare_hashes(computed_hash: Hash, ref_entry: &Entry) -> bool { // an EntrySlice is a slice of Entries pub trait EntrySlice { /// Verifies the hashes and counts of a slice of transactions are all consistent. 
- fn verify_cpu(&self, start_hash: &Hash) -> EntryVerificationState; - fn verify_cpu_generic(&self, start_hash: &Hash) -> EntryVerificationState; - fn verify_cpu_x86_simd(&self, start_hash: &Hash, simd_len: usize) -> EntryVerificationState; - fn start_verify(&self, start_hash: &Hash, recyclers: VerifyRecyclers) - -> EntryVerificationState; - fn verify(&self, start_hash: &Hash) -> bool; + fn verify_cpu(&self, start_hash: &Hash, thread_pool: &ThreadPool) -> EntryVerificationState; + fn verify_cpu_generic( + &self, + start_hash: &Hash, + thread_pool: &ThreadPool, + ) -> EntryVerificationState; + fn verify_cpu_x86_simd( + &self, + start_hash: &Hash, + simd_len: usize, + thread_pool: &ThreadPool, + ) -> EntryVerificationState; + fn start_verify( + &self, + start_hash: &Hash, + thread_pool: &ThreadPool, + recyclers: VerifyRecyclers, + ) -> EntryVerificationState; + fn verify(&self, start_hash: &Hash, thread_pool: &ThreadPool) -> bool; /// Checks that each entry tick has the correct number of hashes. Entry slices do not /// necessarily end in a tick, so `tick_hash_count` is used to carry over the hash count /// for the next entry slice. @@ -633,12 +639,16 @@ pub trait EntrySlice { } impl EntrySlice for [Entry] { - fn verify(&self, start_hash: &Hash) -> bool { - self.start_verify(start_hash, VerifyRecyclers::default()) - .finish_verify() + fn verify(&self, start_hash: &Hash, thread_pool: &ThreadPool) -> bool { + self.start_verify(start_hash, thread_pool, VerifyRecyclers::default()) + .finish_verify(thread_pool) } - fn verify_cpu_generic(&self, start_hash: &Hash) -> EntryVerificationState { + fn verify_cpu_generic( + &self, + start_hash: &Hash, + thread_pool: &ThreadPool, + ) -> EntryVerificationState { let now = Instant::now(); let genesis = [Entry { num_hashes: 0, @@ -646,7 +656,7 @@ impl EntrySlice for [Entry] { transactions: vec![], }]; let entry_pairs = genesis.par_iter().chain(self).zip(self); - let res = PAR_THREAD_POOL.install(|| { + let res = thread_pool.install(|| { entry_pairs.all(|(x0, x1)| { let r = x1.verify(&x0.hash); if !r { @@ -672,7 +682,12 @@ impl EntrySlice for [Entry] { } } - fn verify_cpu_x86_simd(&self, start_hash: &Hash, simd_len: usize) -> EntryVerificationState { + fn verify_cpu_x86_simd( + &self, + start_hash: &Hash, + simd_len: usize, + thread_pool: &ThreadPool, + ) -> EntryVerificationState { use solana_sdk::hash::HASH_BYTES; let now = Instant::now(); let genesis = [Entry { @@ -703,7 +718,7 @@ impl EntrySlice for [Entry] { num_hashes.resize(aligned_len, 0); let num_hashes: Vec<_> = num_hashes.chunks(simd_len).collect(); - let res = PAR_THREAD_POOL.install(|| { + let res = thread_pool.install(|| { hashes_chunked .par_iter_mut() .zip(num_hashes) @@ -753,7 +768,7 @@ impl EntrySlice for [Entry] { } } - fn verify_cpu(&self, start_hash: &Hash) -> EntryVerificationState { + fn verify_cpu(&self, start_hash: &Hash, thread_pool: &ThreadPool) -> EntryVerificationState { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] let (has_avx2, has_avx512) = ( is_x86_feature_detected!("avx2"), @@ -764,25 +779,26 @@ impl EntrySlice for [Entry] { if api().is_some() { if has_avx512 && self.len() >= 128 { - self.verify_cpu_x86_simd(start_hash, 16) + self.verify_cpu_x86_simd(start_hash, 16, thread_pool) } else if has_avx2 && self.len() >= 48 { - self.verify_cpu_x86_simd(start_hash, 8) + self.verify_cpu_x86_simd(start_hash, 8, thread_pool) } else { - self.verify_cpu_generic(start_hash) + self.verify_cpu_generic(start_hash, thread_pool) } } else { - self.verify_cpu_generic(start_hash) + 
self.verify_cpu_generic(start_hash, thread_pool) } } fn start_verify( &self, start_hash: &Hash, + thread_pool: &ThreadPool, recyclers: VerifyRecyclers, ) -> EntryVerificationState { let start = Instant::now(); let Some(api) = perf_libs::api() else { - return self.verify_cpu(start_hash); + return self.verify_cpu(start_hash, thread_pool); }; inc_new_counter_info!("entry_verify-num_entries", self.len()); @@ -839,7 +855,7 @@ impl EntrySlice for [Entry] { }) .unwrap(); - let verifications = PAR_THREAD_POOL.install(|| { + let verifications = thread_pool.install(|| { self.into_par_iter() .map(|entry| { let answer = entry.hash; @@ -938,6 +954,26 @@ pub fn next_versioned_entry( } } +pub fn thread_pool_for_tests() -> ThreadPool { + // Allocate fewer threads for unit tests + // Unit tests typically aren't creating massive blocks to verify, and + // multiple tests could be running in parallel so any further parallelism + // will do more harm than good + rayon::ThreadPoolBuilder::new() + .num_threads(4) + .thread_name(|i| format!("solEntryTest{i:02}")) + .build() + .expect("new rayon threadpool") +} + +pub fn thread_pool_for_benches() -> ThreadPool { + rayon::ThreadPoolBuilder::new() + .num_threads(get_max_thread_count()) + .thread_name(|i| format!("solEntryBnch{i:02}")) + .build() + .expect("new rayon threadpool") +} + #[cfg(test)] mod tests { use { @@ -968,6 +1004,7 @@ mod tests { entries: Vec, skip_verification: bool, verify_recyclers: VerifyRecyclers, + thread_pool: &ThreadPool, verify: Arc< dyn Fn( VersionedTransaction, @@ -989,10 +1026,16 @@ mod tests { } }; - let cpu_verify_result = verify_transactions(entries.clone(), Arc::new(verify_func)); + let cpu_verify_result = + verify_transactions(entries.clone(), thread_pool, Arc::new(verify_func)); let mut gpu_verify_result: EntrySigVerificationState = { - let verify_result = - start_verify_transactions(entries, skip_verification, verify_recyclers, verify); + let verify_result = start_verify_transactions( + entries, + skip_verification, + thread_pool, + verify_recyclers, + verify, + ); match verify_result { Ok(res) => res, _ => EntrySigVerificationState { @@ -1022,6 +1065,8 @@ mod tests { #[test] fn test_entry_gpu_verify() { + let thread_pool = thread_pool_for_tests(); + let verify_transaction = { move |versioned_tx: VersionedTransaction, verification_mode: TransactionVerificationMode| @@ -1067,12 +1112,14 @@ mod tests { entries_invalid, false, recycler.clone(), + &thread_pool, Arc::new(verify_transaction) )); assert!(test_verify_transactions( entries_valid, false, recycler, + &thread_pool, Arc::new(verify_transaction) )); } @@ -1096,6 +1143,8 @@ mod tests { #[test] fn test_transaction_signing() { + let thread_pool = thread_pool_for_tests(); + use solana_sdk::signature::Signature; let zero = Hash::default(); @@ -1105,27 +1154,27 @@ mod tests { // Verify entry with 2 transactions let mut e0 = [Entry::new(&zero, 0, vec![tx0, tx1])]; - assert!(e0.verify(&zero)); + assert!(e0.verify(&zero, &thread_pool)); // Clear signature of the first transaction, see that it does not verify let orig_sig = e0[0].transactions[0].signatures[0]; e0[0].transactions[0].signatures[0] = Signature::default(); - assert!(!e0.verify(&zero)); + assert!(!e0.verify(&zero, &thread_pool)); // restore original signature e0[0].transactions[0].signatures[0] = orig_sig; - assert!(e0.verify(&zero)); + assert!(e0.verify(&zero, &thread_pool)); // Resize signatures and see verification fails. 
let len = e0[0].transactions[0].signatures.len(); e0[0].transactions[0] .signatures .resize(len - 1, Signature::default()); - assert!(!e0.verify(&zero)); + assert!(!e0.verify(&zero, &thread_pool)); // Pass an entry with no transactions let e0 = [Entry::new(&zero, 0, vec![])]; - assert!(e0.verify(&zero)); + assert!(e0.verify(&zero, &thread_pool)); } #[test] @@ -1158,41 +1207,57 @@ mod tests { #[test] fn test_verify_slice1() { solana_logger::setup(); + let thread_pool = thread_pool_for_tests(); + let zero = Hash::default(); let one = hash(zero.as_ref()); - assert!(vec![][..].verify(&zero)); // base case - assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1 - assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad - assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step + // base case + assert!(vec![][..].verify(&zero, &thread_pool)); + // singleton case 1 + assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero, &thread_pool)); + // singleton case 2, bad + assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one, &thread_pool)); + // inductive step + assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero, &thread_pool)); let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2]; bad_ticks[1].hash = one; - assert!(!bad_ticks.verify(&zero)); // inductive step, bad + // inductive step, bad + assert!(!bad_ticks.verify(&zero, &thread_pool)); } #[test] fn test_verify_slice_with_hashes1() { solana_logger::setup(); + let thread_pool = thread_pool_for_tests(); + let zero = Hash::default(); let one = hash(zero.as_ref()); let two = hash(one.as_ref()); - assert!(vec![][..].verify(&one)); // base case - assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1 - assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad + // base case + assert!(vec![][..].verify(&one, &thread_pool)); + // singleton case 1 + assert!(vec![Entry::new_tick(1, &two)][..].verify(&one, &thread_pool)); + // singleton case 2, bad + assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two, &thread_pool)); let mut ticks = vec![next_entry(&one, 1, vec![])]; ticks.push(next_entry(&ticks.last().unwrap().hash, 1, vec![])); - assert!(ticks.verify(&one)); // inductive step + // inductive step + assert!(ticks.verify(&one, &thread_pool)); let mut bad_ticks = vec![next_entry(&one, 1, vec![])]; bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![])); bad_ticks[1].hash = one; - assert!(!bad_ticks.verify(&one)); // inductive step, bad + // inductive step, bad + assert!(!bad_ticks.verify(&one, &thread_pool)); } #[test] fn test_verify_slice_with_hashes_and_transactions() { solana_logger::setup(); + let thread_pool = thread_pool_for_tests(); + let zero = Hash::default(); let one = hash(zero.as_ref()); let two = hash(one.as_ref()); @@ -1200,9 +1265,12 @@ mod tests { let bob_keypair = Keypair::new(); let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one); let tx1 = system_transaction::transfer(&bob_keypair, &alice_keypair.pubkey(), 1, one); - assert!(vec![][..].verify(&one)); // base case - assert!(vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&one)); // singleton case 1 - assert!(!vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&two)); // singleton case 2, bad + // base case + assert!(vec![][..].verify(&one, &thread_pool)); + // singleton case 1 + assert!(vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&one, &thread_pool)); + // singleton 
case 2, bad + assert!(!vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&two, &thread_pool)); let mut ticks = vec![next_entry(&one, 1, vec![tx0.clone()])]; ticks.push(next_entry( @@ -1210,12 +1278,15 @@ mod tests { 1, vec![tx1.clone()], )); - assert!(ticks.verify(&one)); // inductive step + + // inductive step + assert!(ticks.verify(&one, &thread_pool)); let mut bad_ticks = vec![next_entry(&one, 1, vec![tx0])]; bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![tx1])); bad_ticks[1].hash = one; - assert!(!bad_ticks.verify(&one)); // inductive step, bad + // inductive step, bad + assert!(!bad_ticks.verify(&one, &thread_pool)); } #[test] @@ -1354,7 +1425,7 @@ mod tests { info!("done.. {}", time); let mut time = Measure::start("poh"); - let res = entries.verify(&Hash::default()); + let res = entries.verify(&Hash::default(), &thread_pool_for_tests()); assert_eq!(res, !modified); time.stop(); info!("{} {}", time, res); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 9eace1e7c9cd34..7f419c46493251 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -519,20 +519,23 @@ pub fn process_entries_for_tests( let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap(); let mut batch_timing = BatchExecutionTiming::default(); - let mut replay_entries: Vec<_> = - entry::verify_transactions(entries, Arc::new(verify_transaction))? - .into_iter() - .map(|entry| { - let starting_index = entry_starting_index; - if let EntryType::Transactions(ref transactions) = entry { - entry_starting_index = entry_starting_index.saturating_add(transactions.len()); - } - ReplayEntry { - entry, - starting_index, - } - }) - .collect(); + let mut replay_entries: Vec<_> = entry::verify_transactions( + entries, + &replay_tx_thread_pool, + Arc::new(verify_transaction), + )? 
+ .into_iter() + .map(|entry| { + let starting_index = entry_starting_index; + if let EntryType::Transactions(ref transactions) = entry { + entry_starting_index = entry_starting_index.saturating_add(transactions.len()); + } + ReplayEntry { + entry, + starting_index, + } + }) + .collect(); let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); let result = process_entries( @@ -1292,7 +1295,11 @@ fn confirm_slot_entries( let last_entry_hash = entries.last().map(|e| e.hash); let verifier = if !skip_verification { datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64)); - let entry_state = entries.start_verify(&progress.last_entry, recyclers.clone()); + let entry_state = entries.start_verify( + &progress.last_entry, + replay_tx_thread_pool, + recyclers.clone(), + ); if entry_state.status() == EntryVerificationStatus::Failure { warn!("Ledger proof of history failed at slot: {}", slot); return Err(BlockError::InvalidEntryHash.into()); @@ -1315,6 +1322,7 @@ fn confirm_slot_entries( let transaction_verification_result = entry::start_verify_transactions( entries, skip_verification, + replay_tx_thread_pool, recyclers.clone(), Arc::new(verify_transaction), ); @@ -1381,7 +1389,7 @@ fn confirm_slot_entries( } if let Some(mut verifier) = verifier { - let verified = verifier.finish_verify(); + let verified = verifier.finish_verify(replay_tx_thread_pool); *poh_verify_elapsed += verifier.poh_duration_us(); if !verified { warn!("Ledger proof of history failed at slot: {}", bank.slot()); diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index dffe2a8713ab08..aa318f9df16f34 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -5,7 +5,7 @@ use log::*; use { rand::{thread_rng, Rng}, - rayon::prelude::*, + rayon::{prelude::*, ThreadPool}, solana_client::{ connection_cache::{ConnectionCache, Protocol}, thin_client::ThinClient, @@ -14,7 +14,7 @@ use { tower_storage::{FileTowerStorage, SavedTower, SavedTowerVersions, TowerStorage}, VOTE_THRESHOLD_DEPTH, }, - solana_entry::entry::{Entry, EntrySlice}, + solana_entry::entry::{self, Entry, EntrySlice}, solana_gossip::{ cluster_info::{self, ClusterInfo}, contact_info::{ContactInfo, LegacyContactInfo}, @@ -180,6 +180,8 @@ pub fn send_many_transactions( pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) { let ledger = Blockstore::open(ledger_path).unwrap(); + let thread_pool = entry::thread_pool_for_tests(); + let zeroth_slot = ledger.get_slot_entries(0, 0).unwrap(); let last_id = zeroth_slot.last().unwrap().hash; let next_slots = ledger.get_slots_since(&[0]).unwrap().remove(&0).unwrap(); @@ -201,7 +203,7 @@ pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) { None }; - let last_id = verify_slot_ticks(&ledger, slot, &last_id, should_verify_ticks); + let last_id = verify_slot_ticks(&ledger, &thread_pool, slot, &last_id, should_verify_ticks); pending_slots.extend( next_slots .into_iter() @@ -630,21 +632,23 @@ pub fn start_gossip_voter( fn get_and_verify_slot_entries( blockstore: &Blockstore, + thread_pool: &ThreadPool, slot: Slot, last_entry: &Hash, ) -> Vec { let entries = blockstore.get_slot_entries(slot, 0).unwrap(); - assert!(entries.verify(last_entry)); + assert!(entries.verify(last_entry, thread_pool)); entries } fn verify_slot_ticks( blockstore: &Blockstore, + thread_pool: &ThreadPool, slot: Slot, last_entry: &Hash, expected_num_ticks: Option, ) -> Hash { - let entries = get_and_verify_slot_entries(blockstore, slot, 
last_entry); + let entries = get_and_verify_slot_entries(blockstore, thread_pool, slot, last_entry); let num_ticks: usize = entries.iter().map(|entry| entry.is_tick() as usize).sum(); if let Some(expected_num_ticks) = expected_num_ticks { assert_eq!(num_ticks, expected_num_ticks); diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml index fb44c0cb81d966..8cd3979b17c79b 100644 --- a/poh-bench/Cargo.toml +++ b/poh-bench/Cargo.toml @@ -17,6 +17,7 @@ solana-entry = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-perf = { workspace = true } +solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } solana-version = { workspace = true } diff --git a/poh-bench/src/main.rs b/poh-bench/src/main.rs index d835bac05a3ff9..941d581a825b73 100644 --- a/poh-bench/src/main.rs +++ b/poh-bench/src/main.rs @@ -7,6 +7,7 @@ use { clap::{crate_description, crate_name, Arg, Command}, solana_measure::measure::Measure, solana_perf::perf_libs, + solana_rayon_threadlimit::get_max_thread_count, solana_sdk::hash::hash, }; @@ -73,6 +74,14 @@ fn main() { let start_hash = hash(&[1, 2, 3, 4]); let ticks = create_ticks(max_num_entries, hashes_per_tick, start_hash); let mut num_entries = start_num_entries as usize; + let num_threads = matches + .value_of_t("num_threads") + .unwrap_or(get_max_thread_count()); + let thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .thread_name(|i| format!("solPohBench{i:02}")) + .build() + .expect("new rayon threadpool"); if matches.is_present("cuda") { perf_libs::init_cuda(); } @@ -81,8 +90,8 @@ fn main() { let mut time = Measure::start("time"); for _ in 0..iterations { assert!(ticks[..num_entries] - .verify_cpu_generic(&start_hash) - .finish_verify()); + .verify_cpu_generic(&start_hash, &thread_pool) + .finish_verify(&thread_pool)); } time.stop(); println!( @@ -100,8 +109,8 @@ fn main() { let mut time = Measure::start("time"); for _ in 0..iterations { assert!(ticks[..num_entries] - .verify_cpu_x86_simd(&start_hash, 8) - .finish_verify()); + .verify_cpu_x86_simd(&start_hash, 8, &thread_pool) + .finish_verify(&thread_pool)); } time.stop(); println!( @@ -115,8 +124,8 @@ fn main() { let mut time = Measure::start("time"); for _ in 0..iterations { assert!(ticks[..num_entries] - .verify_cpu_x86_simd(&start_hash, 16) - .finish_verify()); + .verify_cpu_x86_simd(&start_hash, 16, &thread_pool) + .finish_verify(&thread_pool)); } time.stop(); println!( @@ -132,8 +141,8 @@ fn main() { let recyclers = VerifyRecyclers::default(); for _ in 0..iterations { assert!(ticks[..num_entries] - .start_verify(&start_hash, recyclers.clone()) - .finish_verify()); + .start_verify(&start_hash, &thread_pool, recyclers.clone()) + .finish_verify(&thread_pool)); } time.stop(); println!( diff --git a/poh/benches/poh_verify.rs b/poh/benches/poh_verify.rs index 47f31860c38d9c..cd33cdae43ef8d 100644 --- a/poh/benches/poh_verify.rs +++ b/poh/benches/poh_verify.rs @@ -2,7 +2,7 @@ extern crate test; use { - solana_entry::entry::{next_entry_mut, Entry, EntrySlice}, + solana_entry::entry::{self, next_entry_mut, Entry, EntrySlice}, solana_sdk::{ hash::{hash, Hash}, signature::{Keypair, Signer}, @@ -17,6 +17,8 @@ const NUM_ENTRIES: usize = 800; #[bench] fn bench_poh_verify_ticks(bencher: &mut Bencher) { solana_logger::setup(); + let thread_pool = entry::thread_pool_for_benches(); + let zero = Hash::default(); let start_hash = hash(zero.as_ref()); let mut cur_hash = start_hash; @@ -27,12 +29,14 @@ fn 
bench_poh_verify_ticks(bencher: &mut Bencher) { } bencher.iter(|| { - assert!(ticks.verify(&start_hash)); + assert!(ticks.verify(&start_hash, &thread_pool)); }) } #[bench] fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) { + let thread_pool = entry::thread_pool_for_benches(); + let zero = Hash::default(); let start_hash = hash(zero.as_ref()); let mut cur_hash = start_hash; @@ -47,6 +51,6 @@ fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) { } bencher.iter(|| { - assert!(ticks.verify(&start_hash)); + assert!(ticks.verify(&start_hash, &thread_pool)); }) } From 8246590f4759bcecdede20fbaa6857908e46a389 Mon Sep 17 00:00:00 2001 From: Jon C Date: Thu, 28 Mar 2024 01:58:20 +0100 Subject: [PATCH 087/153] clap-utils: Add more compute unit helpers (#440) * clap-utils: Refactor compute_unit_price into compute_budget * clap-utils: Validate compute unit price as a u64 * clap-utils: Add compute unit limit arg * clap-v3-utils: Add compute unit price and limit helpers * Add deprecation on `pub use` even though it isn't triggered --- clap-utils/src/compute_budget.rs | 34 ++++++++++++++++++++++++++++ clap-utils/src/compute_unit_price.rs | 17 ++------------ clap-utils/src/lib.rs | 1 + clap-v3-utils/src/compute_budget.rs | 34 ++++++++++++++++++++++++++++ clap-v3-utils/src/lib.rs | 1 + 5 files changed, 72 insertions(+), 15 deletions(-) create mode 100644 clap-utils/src/compute_budget.rs create mode 100644 clap-v3-utils/src/compute_budget.rs diff --git a/clap-utils/src/compute_budget.rs b/clap-utils/src/compute_budget.rs new file mode 100644 index 00000000000000..af9edba1d3d174 --- /dev/null +++ b/clap-utils/src/compute_budget.rs @@ -0,0 +1,34 @@ +use { + crate::{input_validators::is_parsable, ArgConstant}, + clap::Arg, +}; + +pub const COMPUTE_UNIT_PRICE_ARG: ArgConstant<'static> = ArgConstant { + name: "compute_unit_price", + long: "--with-compute-unit-price", + help: "Set compute unit price for transaction, in increments of 0.000001 lamports per compute unit.", +}; + +pub const COMPUTE_UNIT_LIMIT_ARG: ArgConstant<'static> = ArgConstant { + name: "compute_unit_limit", + long: "--with-compute-unit-limit", + help: "Set compute unit limit for transaction.", +}; + +pub fn compute_unit_price_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(COMPUTE_UNIT_PRICE_ARG.name) + .long(COMPUTE_UNIT_PRICE_ARG.long) + .takes_value(true) + .value_name("COMPUTE-UNIT-PRICE") + .validator(is_parsable::) + .help(COMPUTE_UNIT_PRICE_ARG.help) +} + +pub fn compute_unit_limit_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(COMPUTE_UNIT_LIMIT_ARG.name) + .long(COMPUTE_UNIT_LIMIT_ARG.long) + .takes_value(true) + .value_name("COMPUTE-UNIT-LIMIT") + .validator(is_parsable::) + .help(COMPUTE_UNIT_LIMIT_ARG.help) +} diff --git a/clap-utils/src/compute_unit_price.rs b/clap-utils/src/compute_unit_price.rs index f8fd279d98811f..18f060bc1e8b3a 100644 --- a/clap-utils/src/compute_unit_price.rs +++ b/clap-utils/src/compute_unit_price.rs @@ -1,15 +1,2 @@ -use {crate::ArgConstant, clap::Arg}; - -pub const COMPUTE_UNIT_PRICE_ARG: ArgConstant<'static> = ArgConstant { - name: "compute_unit_price", - long: "--with-compute-unit-price", - help: "Set compute unit price for transaction, in increments of 0.000001 lamports per compute unit.", -}; - -pub fn compute_unit_price_arg<'a, 'b>() -> Arg<'a, 'b> { - Arg::with_name(COMPUTE_UNIT_PRICE_ARG.name) - .long(COMPUTE_UNIT_PRICE_ARG.long) - .takes_value(true) - .value_name("COMPUTE-UNIT-PRICE") - .help(COMPUTE_UNIT_PRICE_ARG.help) -} +#[deprecated(since = "2.0.0", note = "Please use 
`compute_budget` instead")] +pub use crate::compute_budget::{compute_unit_price_arg, COMPUTE_UNIT_PRICE_ARG}; diff --git a/clap-utils/src/lib.rs b/clap-utils/src/lib.rs index 43d4fa6f865890..58a27b2424401a 100644 --- a/clap-utils/src/lib.rs +++ b/clap-utils/src/lib.rs @@ -27,6 +27,7 @@ pub fn hidden_unless_forced() -> bool { std::env::var("SOLANA_NO_HIDDEN_CLI_ARGS").is_err() } +pub mod compute_budget; pub mod compute_unit_price; pub mod fee_payer; pub mod input_parsers; diff --git a/clap-v3-utils/src/compute_budget.rs b/clap-v3-utils/src/compute_budget.rs new file mode 100644 index 00000000000000..7b34e114b42883 --- /dev/null +++ b/clap-v3-utils/src/compute_budget.rs @@ -0,0 +1,34 @@ +use { + crate::ArgConstant, + clap::{value_parser, Arg}, +}; + +pub const COMPUTE_UNIT_PRICE_ARG: ArgConstant<'static> = ArgConstant { + name: "compute_unit_price", + long: "--with-compute-unit-price", + help: "Set compute unit price for transaction, in increments of 0.000001 lamports per compute unit.", +}; + +pub const COMPUTE_UNIT_LIMIT_ARG: ArgConstant<'static> = ArgConstant { + name: "compute_unit_limit", + long: "--with-compute-unit-limit", + help: "Set compute unit limit for transaction.", +}; + +pub fn compute_unit_price_arg<'a>() -> Arg<'a> { + Arg::with_name(COMPUTE_UNIT_PRICE_ARG.name) + .long(COMPUTE_UNIT_PRICE_ARG.long) + .takes_value(true) + .value_name("COMPUTE-UNIT-PRICE") + .value_parser(value_parser!(u64)) + .help(COMPUTE_UNIT_PRICE_ARG.help) +} + +pub fn compute_unit_limit_arg<'a>() -> Arg<'a> { + Arg::with_name(COMPUTE_UNIT_LIMIT_ARG.name) + .long(COMPUTE_UNIT_LIMIT_ARG.long) + .takes_value(true) + .value_name("COMPUTE-UNIT-LIMIT") + .value_parser(value_parser!(u32)) + .help(COMPUTE_UNIT_LIMIT_ARG.help) +} diff --git a/clap-v3-utils/src/lib.rs b/clap-v3-utils/src/lib.rs index 5000f7e1d0cfb6..c3edf98dc57d20 100644 --- a/clap-v3-utils/src/lib.rs +++ b/clap-v3-utils/src/lib.rs @@ -23,6 +23,7 @@ impl std::fmt::Debug for DisplayError { } } +pub mod compute_budget; pub mod fee_payer; pub mod input_parsers; pub mod input_validators; From ecb4f6288733cb04a7fbc214e7057de1ee856c15 Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 27 Mar 2024 19:24:06 -0600 Subject: [PATCH 088/153] Simd 118: extend EpochRewards sysvar (#428) * Update EpochRewards sysvar * Update Clone trait * Update doctests * Update bank to new sysvar fields * Update runtime tests * Update syscall test * Update tests * Clean up doctest EpochRewards construction --- account-decoder/src/parse_sysvar.rs | 4 +- programs/bpf_loader/src/syscalls/mod.rs | 16 ++++++-- programs/sbf/rust/sysvar/tests/lib.rs | 4 +- runtime/src/bank.rs | 11 ++--- runtime/src/bank/sysvar_cache.rs | 10 +++-- runtime/src/bank/tests.rs | 12 +++++- sdk/program/src/epoch_rewards.rs | 40 +++++++++++++----- sdk/program/src/sysvar/epoch_rewards.rs | 54 +++++++++++++++++++------ 8 files changed, 112 insertions(+), 39 deletions(-) diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs index 35746949c7f9ef..e308dfe07f9c36 100644 --- a/account-decoder/src/parse_sysvar.rs +++ b/account-decoder/src/parse_sysvar.rs @@ -372,9 +372,11 @@ mod test { ); let epoch_rewards = EpochRewards { + distribution_starting_block_height: 42, total_rewards: 100, distributed_rewards: 20, - distribution_complete_block_height: 42, + active: true, + ..EpochRewards::default() }; let epoch_rewards_sysvar = create_account_for_test(&epoch_rewards); assert_eq!( diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 
4a166fa1cf9996..0d51e599d9366c 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -3317,16 +3317,20 @@ mod tests { src_rent.burn_percent = 3; let mut src_rewards = create_filled_type::(false); + src_rewards.distribution_starting_block_height = 42; + src_rewards.num_partitions = 2; + src_rewards.parent_blockhash = Hash::new(&[3; 32]); + src_rewards.total_points = 4; src_rewards.total_rewards = 100; src_rewards.distributed_rewards = 10; - src_rewards.distribution_complete_block_height = 42; + src_rewards.active = true; let mut sysvar_cache = SysvarCache::default(); sysvar_cache.set_clock(src_clock.clone()); sysvar_cache.set_epoch_schedule(src_epochschedule.clone()); sysvar_cache.set_fees(src_fees.clone()); sysvar_cache.set_rent(src_rent.clone()); - sysvar_cache.set_epoch_rewards(src_rewards); + sysvar_cache.set_epoch_rewards(src_rewards.clone()); let transaction_accounts = vec![ ( @@ -3519,10 +3523,14 @@ mod tests { assert_eq!(got_rewards, src_rewards); let mut clean_rewards = create_filled_type::(true); + clean_rewards.distribution_starting_block_height = + src_rewards.distribution_starting_block_height; + clean_rewards.num_partitions = src_rewards.num_partitions; + clean_rewards.parent_blockhash = src_rewards.parent_blockhash; + clean_rewards.total_points = src_rewards.total_points; clean_rewards.total_rewards = src_rewards.total_rewards; clean_rewards.distributed_rewards = src_rewards.distributed_rewards; - clean_rewards.distribution_complete_block_height = - src_rewards.distribution_complete_block_height; + clean_rewards.active = src_rewards.active; assert!(are_bytes_equal(&got_rewards, &clean_rewards)); } } diff --git a/programs/sbf/rust/sysvar/tests/lib.rs b/programs/sbf/rust/sysvar/tests/lib.rs index b99b89e75c9648..ffa2f625b6d03d 100644 --- a/programs/sbf/rust/sysvar/tests/lib.rs +++ b/programs/sbf/rust/sysvar/tests/lib.rs @@ -27,9 +27,11 @@ async fn test_sysvars() { ); let epoch_rewards = epoch_rewards::EpochRewards { + distribution_starting_block_height: 42, total_rewards: 100, distributed_rewards: 50, - distribution_complete_block_height: 42, + active: true, + ..epoch_rewards::EpochRewards::default() }; program_test.add_sysvar_account(epoch_rewards::id(), &epoch_rewards); let (mut banks_client, payer, recent_blockhash) = program_test.start().await; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index cfcd32ef7ff456..c8afb7406164ae 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1673,13 +1673,12 @@ impl Bank { let slot = self.slot(); let credit_start = self.block_height() + self.get_reward_calculation_num_blocks(); - let credit_end_exclusive = credit_start + stake_rewards_by_partition.len() as u64; self.set_epoch_reward_status_active(stake_rewards_by_partition); // create EpochRewards sysvar that holds the balance of undistributed rewards with - // (total_rewards, distributed_rewards, credit_end_exclusive), total capital will increase by (total_rewards - distributed_rewards) - self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_end_exclusive); + // (total_rewards, distributed_rewards, credit_start), total capital will increase by (total_rewards - distributed_rewards) + self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_start); datapoint_info!( "epoch-rewards-status-update", @@ -3615,14 +3614,16 @@ impl Bank { &self, total_rewards: u64, distributed_rewards: u64, - distribution_complete_block_height: u64, + distribution_starting_block_height: u64, ) { 
assert!(self.is_partitioned_rewards_code_enabled()); let epoch_rewards = sysvar::epoch_rewards::EpochRewards { total_rewards, distributed_rewards, - distribution_complete_block_height, + distribution_starting_block_height, + active: true, + ..sysvar::epoch_rewards::EpochRewards::default() }; self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| { diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index 3e0f9a93ddef2a..e45f64e96aaf3f 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -6,7 +6,7 @@ mod tests { use { super::*, solana_sdk::{ - feature_set, genesis_config::create_genesis_config, pubkey::Pubkey, + feature_set, genesis_config::create_genesis_config, hash::Hash, pubkey::Pubkey, sysvar::epoch_rewards::EpochRewards, }, std::sync::Arc, @@ -121,14 +121,18 @@ mod tests { // inject a reward sysvar for test bank1.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); let expected_epoch_rewards = EpochRewards { + distribution_starting_block_height: 42, + num_partitions: 0, + parent_blockhash: Hash::default(), + total_points: 0, total_rewards: 100, distributed_rewards: 10, - distribution_complete_block_height: 42, + active: true, }; bank1.create_epoch_rewards_sysvar( expected_epoch_rewards.total_rewards, expected_epoch_rewards.distributed_rewards, - expected_epoch_rewards.distribution_complete_block_height, + expected_epoch_rewards.distribution_starting_block_height, ); bank1 diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index bdacbb1304a028..0fdfb968149bbd 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12715,9 +12715,13 @@ fn test_epoch_rewards_sysvar() { // create epoch rewards sysvar let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { + distribution_starting_block_height: 42, + num_partitions: 0, + parent_blockhash: Hash::default(), + total_points: 0, total_rewards, distributed_rewards: 10, - distribution_complete_block_height: 42, + active: true, }; bank.create_epoch_rewards_sysvar(total_rewards, 10, 42); @@ -12732,9 +12736,13 @@ fn test_epoch_rewards_sysvar() { assert_eq!(account.lamports(), total_rewards - 20); let epoch_rewards: sysvar::epoch_rewards::EpochRewards = from_account(&account).unwrap(); let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { + distribution_starting_block_height: 42, + num_partitions: 0, + parent_blockhash: Hash::default(), + total_points: 0, total_rewards, distributed_rewards: 20, - distribution_complete_block_height: 42, + active: true, }; assert_eq!(epoch_rewards, expected_epoch_rewards); diff --git a/sdk/program/src/epoch_rewards.rs b/sdk/program/src/epoch_rewards.rs index e3229486e0e0dc..b0628ecd2eff2a 100644 --- a/sdk/program/src/epoch_rewards.rs +++ b/sdk/program/src/epoch_rewards.rs @@ -6,20 +6,37 @@ //! //! 
[`sysvar::epoch_rewards`]: crate::sysvar::epoch_rewards -use std::ops::AddAssign; +use {crate::hash::Hash, solana_sdk_macro::CloneZeroed, std::ops::AddAssign}; -#[repr(C)] -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default, Clone, Copy, AbiExample)] +#[repr(C, align(16))] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default, AbiExample, CloneZeroed)] pub struct EpochRewards { - /// total rewards for the current epoch, in lamports + /// The starting block height of the rewards distribution in the current + /// epoch + pub distribution_starting_block_height: u64, + + /// Number of partitions in the rewards distribution in the current epoch, + /// used to generate an EpochRewardsHasher + pub num_partitions: u64, + + /// The blockhash of the parent block of the first block in the epoch, used + /// to seed an EpochRewardsHasher + pub parent_blockhash: Hash, + + /// The total rewards points calculated for the current epoch, where points + /// equals the sum of (delegated stake * credits observed) for all + /// delegations + pub total_points: u128, + + /// The total rewards for the current epoch, in lamports pub total_rewards: u64, - /// distributed rewards for the current epoch, in lamports + /// The rewards currently distributed for the current epoch, in lamports pub distributed_rewards: u64, - /// distribution of all staking rewards for the current - /// epoch will be completed at this block height - pub distribution_complete_block_height: u64, + /// Whether the rewards period (including calculation and distribution) is + /// active + pub active: bool, } impl EpochRewards { @@ -38,12 +55,13 @@ mod tests { pub fn new( total_rewards: u64, distributed_rewards: u64, - distribution_complete_block_height: u64, + distribution_starting_block_height: u64, ) -> Self { Self { total_rewards, distributed_rewards, - distribution_complete_block_height, + distribution_starting_block_height, + ..Self::default() } } } @@ -54,7 +72,7 @@ mod tests { assert_eq!(epoch_rewards.total_rewards, 100); assert_eq!(epoch_rewards.distributed_rewards, 0); - assert_eq!(epoch_rewards.distribution_complete_block_height, 64); + assert_eq!(epoch_rewards.distribution_starting_block_height, 64); } #[test] diff --git a/sdk/program/src/sysvar/epoch_rewards.rs b/sdk/program/src/sysvar/epoch_rewards.rs index 1d43fbad8a9b08..c8aa7bfbbc88e3 100755 --- a/sdk/program/src/sysvar/epoch_rewards.rs +++ b/sdk/program/src/sysvar/epoch_rewards.rs @@ -1,11 +1,19 @@ //! Epoch rewards for current epoch //! //! The _epoch rewards_ sysvar provides access to the [`EpochRewards`] type, -//! which tracks the progress of epoch rewards distribution. It includes the -//! - total rewards for the current epoch, in lamports -//! - rewards for the current epoch distributed so far, in lamports -//! - distribution completed block height, i.e. distribution of all staking rewards for the current -//! epoch will be completed at this block height +//! which tracks whether the rewards period (including calculation and +//! distribution) is in progress, as well as the details needed to resume +//! distribution when starting from a snapshot during the rewards period. The +//! sysvar is repopulated at the start of the first block of each epoch. +//! Therefore, the sysvar contains data about the current epoch until a new +//! epoch begins. Fields in the sysvar include: +//! - distribution starting block height +//! - the number of partitions in the distribution +//! - the parent-blockhash seed used to generate the partition hasher +//! 
- the total rewards points calculated for the epoch +//! - total rewards for epoch, in lamports +//! - rewards for the epoch distributed so far, in lamports +//! - whether the rewards period is active //! //! [`EpochRewards`] implements [`Sysvar::get`] and can be loaded efficiently without //! passing the sysvar account ID to the program. //! @@ -43,9 +51,16 @@ //! # //! # use solana_program::sysvar::SysvarId; //! # let p = EpochRewards::id(); -//! # let l = &mut 1120560; -//! # let d = &mut vec![0, 202, 154, 59, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0]; -//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let l = &mut 1559040; +//! # let epoch_rewards = EpochRewards { +//! # distribution_starting_block_height: 42, +//! # total_rewards: 100, +//! # distributed_rewards: 10, +//! # active: true, +//! # ..EpochRewards::default() +//! # }; +//! # let mut d: Vec<u8> = bincode::serialize(&epoch_rewards).unwrap(); +//! # let a = AccountInfo::new(&p, false, false, l, &mut d, &p, false, 0); //! # let accounts = &[a.clone(), a]; //! # process_instruction( //! # &Pubkey::new_unique(), @@ -86,9 +101,16 @@ //! # //! # use solana_program::sysvar::SysvarId; //! # let p = EpochRewards::id(); -//! # let l = &mut 1120560; -//! # let d = &mut vec![0, 202, 154, 59, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0]; -//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let l = &mut 1559040; +//! # let epoch_rewards = EpochRewards { +//! # distribution_starting_block_height: 42, +//! # total_rewards: 100, +//! # distributed_rewards: 10, +//! # active: true, +//! # ..EpochRewards::default() +//! # }; +//! # let mut d: Vec<u8> = bincode::serialize(&epoch_rewards).unwrap(); +//! # let a = AccountInfo::new(&p, false, false, l, &mut d, &p, false, 0); //! # let accounts = &[a.clone(), a]; //! # process_instruction( //! # &Pubkey::new_unique(), @@ -109,9 +131,17 @@ //! # use anyhow::Result; //! # //! fn print_sysvar_epoch_rewards(client: &RpcClient) -> Result<()> { +//! # let epoch_rewards = EpochRewards { +//! # distribution_starting_block_height: 42, +//! # total_rewards: 100, +//! # distributed_rewards: 10, +//! # active: true, +//! # ..EpochRewards::default() +//! # }; +//! # let data: Vec<u8> = bincode::serialize(&epoch_rewards)?; //! # client.set_get_account_response(epoch_rewards::ID, Account { //! # lamports: 1120560, -//! # data: vec![0, 202, 154, 59, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0], +//! # data, //! # owner: solana_sdk::system_program::ID, //! # executable: false, //!
# rent_epoch: 307, From b1919bd9e47f76dd11f1c0dd9145275313fc23d9 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Thu, 28 Mar 2024 13:31:43 +0900 Subject: [PATCH 089/153] [keygen] Remove deprecated functions from `keygen` cli (minus `grind` command) (#438) --- keygen/src/keygen.rs | 71 ++++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 32d57a4c2f1333..4d85941a0578c5 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -4,19 +4,21 @@ use { bip39::{Mnemonic, MnemonicType, Seed}, clap::{crate_description, crate_name, value_parser, Arg, ArgMatches, Command}, solana_clap_v3_utils::{ - input_parsers::STDOUT_OUTFILE_TOKEN, - input_validators::is_prompt_signer_source, + input_parsers::{ + signer::{SignerSource, SignerSourceParserBuilder}, + STDOUT_OUTFILE_TOKEN, + }, keygen::{ check_for_overwrite, derivation_path::{acquire_derivation_path, derivation_path_arg}, mnemonic::{ acquire_language, acquire_passphrase_and_message, no_passphrase_and_message, - WORD_COUNT_ARG, + try_get_language, try_get_word_count, WORD_COUNT_ARG, }, no_outfile_arg, KeyGenerationCommonArgs, NO_OUTFILE_ARG, }, keypair::{ - keypair_from_path, keypair_from_seed_phrase, signer_from_path, + keypair_from_seed_phrase, keypair_from_source, signer_from_source, SKIP_SEED_PHRASE_VALIDATION_ARG, }, DisplayError, @@ -68,16 +70,19 @@ fn get_keypair_from_matches( config: Config, wallet_manager: &mut Option<Rc<RemoteWalletManager>>, ) -> Result<Box<dyn Signer>, Box<dyn error::Error>> { - let mut path = dirs_next::home_dir().expect("home directory"); - let path = if matches.is_present("keypair") { - matches.value_of("keypair").unwrap() + let config_source; + let keypair_source = if matches.try_contains_id("keypair")? { + matches.get_one::<SignerSource>("keypair").unwrap() } else if !config.keypair_path.is_empty() { - &config.keypair_path + config_source = SignerSource::parse(&config.keypair_path)?; + &config_source } else { + let mut path = dirs_next::home_dir().expect("home directory"); path.extend([".config", "solana", "id.json"]); - path.to_str().unwrap() + config_source = SignerSource::parse(path.to_str().unwrap())?; + &config_source }; - signer_from_path(matches, path, "pubkey recovery", wallet_manager) + signer_from_source(matches, keypair_source, "pubkey recovery", wallet_manager) } fn output_keypair( @@ -258,6 +263,9 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .index(2) .value_name("KEYPAIR") .takes_value(true) + .value_parser( + SignerSourceParserBuilder::default().allow_all().build() + ) .help("Filepath or URL to a keypair"), ) ) @@ -370,6 +378,9 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .index(1) .value_name("KEYPAIR") .takes_value(true) + .value_parser( + SignerSourceParserBuilder::default().allow_all().build() + ) .help("Filepath or URL to a keypair"), ) .arg( @@ -401,7 +412,7 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .index(1) .value_name("KEYPAIR") .takes_value(true) - .validator(is_prompt_signer_source) + .value_parser(SignerSourceParserBuilder::default().allow_prompt().allow_legacy().build()) .help("`prompt:` URI scheme or `ASK` keyword"), ) .arg( @@ -436,7 +447,7 @@ fn main() -> Result<(), Box<dyn error::Error>> { } fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> { - let config = if let Some(config_file) = matches.value_of("config_file") { + let config = if let Some(config_file) = matches.try_get_one::<String>("config_file")?
{ Config::load(config_file).unwrap_or_default() } else { Config::default() }; @@ -451,8 +462,8 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> { let pubkey = get_keypair_from_matches(matches, config, &mut wallet_manager)?.try_pubkey()?; - if matches.is_present("outfile") { - let outfile = matches.value_of("outfile").unwrap(); + if matches.try_contains_id("outfile")? { + let outfile = matches.get_one::<String>("outfile").unwrap(); check_for_overwrite(outfile, matches)?; write_pubkey_file(outfile, pubkey)?; } else { @@ -461,9 +472,9 @@ } ("new", matches) => { let mut path = dirs_next::home_dir().expect("home directory"); - let outfile = if matches.is_present("outfile") { - matches.value_of("outfile") - } else if matches.is_present(NO_OUTFILE_ARG.name) { + let outfile = if matches.try_contains_id("outfile")? { + matches.get_one::<String>("outfile").map(|s| s.as_str()) + } else if matches.try_contains_id(NO_OUTFILE_ARG.name)? { None } else { path.extend([".config", "solana", "id.json"]); @@ -476,11 +487,11 @@ None => (), } - let word_count: usize = matches.value_of_t(WORD_COUNT_ARG.name).unwrap(); + let word_count = try_get_word_count(matches)?.unwrap(); let mnemonic_type = MnemonicType::for_word_count(word_count)?; - let language = acquire_language(matches); + let language = try_get_language(matches)?.unwrap(); - let silent = matches.is_present("silent"); + let silent = matches.try_contains_id("silent")?; if !silent { println!("Generating a new keypair"); } @@ -513,8 +524,8 @@ } ("recover", matches) => { let mut path = dirs_next::home_dir().expect("home directory"); - let outfile = if matches.is_present("outfile") { - matches.value_of("outfile").unwrap() + let outfile = if matches.try_contains_id("outfile")? { + matches.get_one::<String>("outfile").unwrap() } else { path.extend([".config", "solana", "id.json"]); path.to_str().unwrap() @@ -525,12 +536,14 @@ } let keypair_name = "recover"; - let keypair = if let Some(path) = matches.value_of("prompt_signer") { - keypair_from_path(matches, path, keypair_name, true)? - } else { - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); - keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)? - }; + let keypair = + if let Some(source) = matches.try_get_one::<SignerSource>("prompt_signer")? { + keypair_from_source(matches, source, keypair_name, true)? + } else { + let skip_validation = + matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; + keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)?
+ }; output_keypair(&keypair, outfile, "recovered")?; } ("grind", matches) => { @@ -729,7 +742,7 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> { ) .serialize(); let signature = keypair.try_sign_message(&simple_message)?; - let pubkey_bs58 = matches.value_of("pubkey").unwrap(); + let pubkey_bs58 = matches.try_get_one::<String>("pubkey")?.unwrap(); let pubkey = bs58::decode(pubkey_bs58).into_vec().unwrap(); if signature.verify(&pubkey, &simple_message) { println!("Verification for public key: {pubkey_bs58}: Success"); From 182d27f718e1d6df347bdbe60edaba6026f1c6b5 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 28 Mar 2024 11:14:23 -0400 Subject: [PATCH 090/153] Checks if bank snapshot is loadable before fastbooting (#343) --- core/src/accounts_hash_verifier.rs | 35 ++++++- ledger/src/bank_forks_utils.rs | 126 ++++++++++++------------ local-cluster/tests/local_cluster.rs | 99 +++++++++++++++++++ runtime/src/snapshot_bank_utils.rs | 139 ++++++++++++++++++++++++++- runtime/src/snapshot_utils.rs | 103 ++++++++++++++++++++ 5 files changed, 430 insertions(+), 72 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 20adba99835eeb..29e592ff979355 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -24,6 +24,7 @@ use { hash::Hash, }, std::{ + io::{Error as IoError, Result as IoResult}, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -71,12 +72,17 @@ impl AccountsHashVerifier { info!("handling accounts package: {accounts_package:?}"); let enqueued_time = accounts_package.enqueued.elapsed(); - let (_, handling_time_us) = measure_us!(Self::process_accounts_package( + let (result, handling_time_us) = measure_us!(Self::process_accounts_package( accounts_package, snapshot_package_sender.as_ref(), &snapshot_config, &exit, )); + if let Err(err) = result { + error!("Stopping AccountsHashVerifier!
Fatal error while processing accounts package: {err}"); + exit.store(true, Ordering::Relaxed); + break; + } datapoint_info!( "accounts_hash_verifier", @@ -208,9 +214,9 @@ impl AccountsHashVerifier { snapshot_package_sender: Option<&Sender<SnapshotPackage>>, snapshot_config: &SnapshotConfig, exit: &AtomicBool, - ) { + ) -> IoResult<()> { let accounts_hash = - Self::calculate_and_verify_accounts_hash(&accounts_package, snapshot_config); + Self::calculate_and_verify_accounts_hash(&accounts_package, snapshot_config)?; Self::save_epoch_accounts_hash(&accounts_package, accounts_hash); @@ -221,13 +227,15 @@ accounts_hash, exit, ); + + Ok(()) } /// returns calculated accounts hash fn calculate_and_verify_accounts_hash( accounts_package: &AccountsPackage, snapshot_config: &SnapshotConfig, - ) -> AccountsHashKind { + ) -> IoResult<AccountsHashKind> { let accounts_hash_calculation_kind = match accounts_package.package_kind { AccountsPackageKind::AccountsHashVerifier => CalcAccountsHashKind::Full, AccountsPackageKind::EpochAccountsHash => CalcAccountsHashKind::Full, @@ -303,6 +311,23 @@ &accounts_hash_for_reserialize, bank_incremental_snapshot_persistence.as_ref(), ); + + // now write the full snapshot slot file after reserializing so this bank snapshot is loadable + let full_snapshot_archive_slot = match accounts_package.package_kind { + AccountsPackageKind::Snapshot(SnapshotKind::IncrementalSnapshot(base_slot)) => { + base_slot + } + _ => accounts_package.slot, + }; + snapshot_utils::write_full_snapshot_slot_file( + &snapshot_info.bank_snapshot_dir, + full_snapshot_archive_slot, + ) + .map_err(|err| { + IoError::other(format!( + "failed to calculate accounts hash for {accounts_package:?}: {err}" )) + })?; + } if accounts_package.package_kind @@ -340,7 +365,7 @@ ); } - accounts_hash_kind + Ok(accounts_hash_kind) } fn _calculate_full_accounts_hash( diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 17412c1801ac68..a64b29bdcf8670 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -244,20 +244,70 @@ fn bank_forks_from_snapshot( .map(SnapshotArchiveInfoGetter::slot) .unwrap_or(0), ); - let latest_bank_snapshot = - snapshot_utils::get_highest_bank_snapshot_post(&snapshot_config.bank_snapshots_dir); - let will_startup_from_snapshot_archives = match process_options.use_snapshot_archives_at_startup - { - UseSnapshotArchivesAtStartup::Always => true, - UseSnapshotArchivesAtStartup::Never => false, - UseSnapshotArchivesAtStartup::WhenNewest => latest_bank_snapshot - .as_ref() - .map(|bank_snapshot| latest_snapshot_archive_slot > bank_snapshot.slot) - .unwrap_or(true), + let fastboot_snapshot = match process_options.use_snapshot_archives_at_startup { + UseSnapshotArchivesAtStartup::Always => None, + UseSnapshotArchivesAtStartup::Never => { + let Some(bank_snapshot) = + snapshot_utils::get_highest_loadable_bank_snapshot(snapshot_config) + else { + return Err(BankForksUtilsError::NoBankSnapshotDirectory { + flag: use_snapshot_archives_at_startup::cli::LONG_ARG.to_string(), + value: UseSnapshotArchivesAtStartup::Never.to_string(), + }); + }; + // If a newer snapshot archive was downloaded, it is possible that its slot is + // higher than the local state we will load. Did the user intend for this? + if bank_snapshot.slot < latest_snapshot_archive_slot { + warn!( + "Starting up from local state at slot {}, which is *older* than \
If this is not desired, \ + change the --{} CLI option to *not* \"{}\" and restart.", + bank_snapshot.slot, + latest_snapshot_archive_slot, + use_snapshot_archives_at_startup::cli::LONG_ARG, + UseSnapshotArchivesAtStartup::Never.to_string(), + ); + } + Some(bank_snapshot) + } + UseSnapshotArchivesAtStartup::WhenNewest => { + snapshot_utils::get_highest_loadable_bank_snapshot(snapshot_config) + .filter(|bank_snapshot| bank_snapshot.slot >= latest_snapshot_archive_slot) + } }; - let bank = if will_startup_from_snapshot_archives { + let bank = if let Some(fastboot_snapshot) = fastboot_snapshot { + let (bank, _) = snapshot_bank_utils::bank_from_snapshot_dir( + &account_paths, + &fastboot_snapshot, + genesis_config, + &process_options.runtime_config, + process_options.debug_keys.clone(), + None, + process_options.account_indexes.clone(), + process_options.limit_load_slot_count_from_snapshot, + process_options.shrink_ratio, + process_options.verify_index, + process_options.accounts_db_config.clone(), + accounts_update_notifier, + exit, + ) + .map_err(|err| BankForksUtilsError::BankFromSnapshotsDirectory { + source: err, + path: fastboot_snapshot.snapshot_path(), + })?; + + // If the node crashes before taking the next bank snapshot, the next startup will attempt + // to load from the same bank snapshot again. And if `shrink` has run, the account storage + // files that are hard linked in bank snapshot will be *different* than what the bank + // snapshot expects. This would cause the node to crash again. To prevent that, purge all + // the bank snapshots here. In the above scenario, this will cause the node to load from a + // snapshot archive next time, which is safe. + snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); + + bank + } else { // Given that we are going to boot from an archive, the append vecs held in the snapshot dirs for fast-boot should // be released. They will be released by the account_background_service anyway. But in the case of the account_paths // using memory-mounted file system, they are not released early enough to give space for the new append-vecs from @@ -292,60 +342,6 @@ fn bank_forks_from_snapshot( .map(|archive| archive.path().display().to_string()) .unwrap_or("none".to_string()), })?; - bank - } else { - let bank_snapshot = - latest_bank_snapshot.ok_or_else(|| BankForksUtilsError::NoBankSnapshotDirectory { - flag: use_snapshot_archives_at_startup::cli::LONG_ARG.to_string(), - value: UseSnapshotArchivesAtStartup::Never.to_string(), - })?; - - // If a newer snapshot archive was downloaded, it is possible that its slot is - // higher than the local bank we will load. Did the user intend for this? - if bank_snapshot.slot < latest_snapshot_archive_slot { - assert_eq!( - process_options.use_snapshot_archives_at_startup, - UseSnapshotArchivesAtStartup::Never, - ); - warn!( - "Starting up from local state at slot {}, which is *older* than \ - the latest snapshot archive at slot {}. 
If this is not desired, \ - change the --{} CLI option to *not* \"{}\" and restart.", - bank_snapshot.slot, - latest_snapshot_archive_slot, - use_snapshot_archives_at_startup::cli::LONG_ARG, - UseSnapshotArchivesAtStartup::Never.to_string(), - ); - } - - let (bank, _) = snapshot_bank_utils::bank_from_snapshot_dir( - &account_paths, - &bank_snapshot, - genesis_config, - &process_options.runtime_config, - process_options.debug_keys.clone(), - None, - process_options.account_indexes.clone(), - process_options.limit_load_slot_count_from_snapshot, - process_options.shrink_ratio, - process_options.verify_index, - process_options.accounts_db_config.clone(), - accounts_update_notifier, - exit, - ) - .map_err(|err| BankForksUtilsError::BankFromSnapshotsDirectory { - source: err, - path: bank_snapshot.snapshot_path(), - })?; - - // If the node crashes before taking the next bank snapshot, the next startup will attempt - // to load from the same bank snapshot again. And if `shrink` has run, the account storage - // files that are hard linked in bank snapshot will be *different* than what the bank - // snapshot expects. This would cause the node to crash again. To prevent that, purge all - // the bank snapshots here. In the above scenario, this will cause the node to load from a - // snapshot archive next time, which is safe. - snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); - bank }; diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index f36e94df8a661d..44032aeeb4d38b 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -5067,6 +5067,105 @@ fn test_boot_from_local_state() { } } +/// Test fastboot to ensure a node can boot in case it crashed while archiving a full snapshot +/// +/// 1. Start a node and wait for it to take at least two full snapshots and one more +/// bank snapshot POST afterwards (for simplicity, wait for 2 full and 1 incremental). +/// 2. To simulate a node crashing while archiving a full snapshot, stop the node and +/// then delete the latest full snapshot archive. +/// 3. Restart the node. This should succeed, and boot from the older full snapshot archive, +/// *not* the latest bank snapshot POST. +/// 4. Take another incremental snapshot. This ensures the correct snapshot was loaded, +/// AND ensures the correct accounts hashes are present (which are needed when making +/// the bank snapshot POST for the new incremental snapshot). 
+#[test] +#[serial] +fn test_boot_from_local_state_missing_archive() { + solana_logger::setup_with_default(RUST_LOG_FILTER); + const FULL_SNAPSHOT_INTERVAL: Slot = 20; + const INCREMENTAL_SNAPSHOT_INTERVAL: Slot = 10; + + let validator_config = SnapshotValidatorConfig::new( + FULL_SNAPSHOT_INTERVAL, + INCREMENTAL_SNAPSHOT_INTERVAL, + INCREMENTAL_SNAPSHOT_INTERVAL, + 7, + ); + + let mut cluster_config = ClusterConfig { + node_stakes: vec![100 * DEFAULT_NODE_STAKE], + cluster_lamports: DEFAULT_CLUSTER_LAMPORTS, + validator_configs: make_identical_validator_configs(&validator_config.validator_config, 1), + ..ClusterConfig::default() + }; + let mut cluster = LocalCluster::new(&mut cluster_config, SocketAddrSpace::Unspecified); + + // we need two full snapshots and an incremental snapshot for this test + info!("Waiting for validator to create snapshots..."); + LocalCluster::wait_for_next_full_snapshot( + &cluster, + &validator_config.full_snapshot_archives_dir, + Some(Duration::from_secs(5 * 60)), + ); + LocalCluster::wait_for_next_full_snapshot( + &cluster, + &validator_config.full_snapshot_archives_dir, + Some(Duration::from_secs(5 * 60)), + ); + LocalCluster::wait_for_next_incremental_snapshot( + &cluster, + &validator_config.full_snapshot_archives_dir, + &validator_config.incremental_snapshot_archives_dir, + Some(Duration::from_secs(5 * 60)), + ); + debug!( + "snapshot archives:\n\tfull: {:?}\n\tincr: {:?}", + snapshot_utils::get_full_snapshot_archives( + validator_config.full_snapshot_archives_dir.path() + ), + snapshot_utils::get_incremental_snapshot_archives( + validator_config.incremental_snapshot_archives_dir.path() + ), + ); + info!("Waiting for validator to create snapshots... DONE"); + + // now delete the latest full snapshot archive and restart, to simulate a crash while archiving + // a full snapshot package + info!("Stopping validator..."); + let validator_pubkey = cluster.get_node_pubkeys()[0]; + let mut validator_info = cluster.exit_node(&validator_pubkey); + info!("Stopping validator... DONE"); + + info!("Deleting latest full snapshot archive..."); + let highest_full_snapshot = snapshot_utils::get_highest_full_snapshot_archive_info( + validator_config.full_snapshot_archives_dir.path(), + ) + .unwrap(); + fs::remove_file(highest_full_snapshot.path()).unwrap(); + info!("Deleting latest full snapshot archive... DONE"); + + info!("Restarting validator..."); + // if we set this to `Never`, the validator should not boot + validator_info.config.use_snapshot_archives_at_startup = + UseSnapshotArchivesAtStartup::WhenNewest; + cluster.restart_node( + &validator_pubkey, + validator_info, + SocketAddrSpace::Unspecified, + ); + info!("Restarting validator... DONE"); + + // ensure we can create new incremental snapshots, since that is what used to fail + info!("Waiting for validator to create snapshots..."); + LocalCluster::wait_for_next_incremental_snapshot( + &cluster, + &validator_config.full_snapshot_archives_dir, + &validator_config.incremental_snapshot_archives_dir, + Some(Duration::from_secs(5 * 60)), + ); + info!("Waiting for validator to create snapshots... 
DONE"); +} + // We want to simulate the following: // /--- 1 --- 3 (duplicate block) // 0 diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 03a26d46986ddf..6db2747089d30c 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1257,13 +1257,16 @@ mod tests { crate::{ bank_forks::BankForks, genesis_utils, + snapshot_config::SnapshotConfig, snapshot_utils::{ clean_orphaned_account_snapshot_dirs, create_tmp_accounts_dir_for_tests, get_bank_snapshots, get_bank_snapshots_post, get_bank_snapshots_pre, - get_highest_bank_snapshot, purge_all_bank_snapshots, purge_bank_snapshot, + get_highest_bank_snapshot, get_highest_bank_snapshot_pre, + get_highest_loadable_bank_snapshot, purge_all_bank_snapshots, purge_bank_snapshot, purge_bank_snapshots_older_than_slot, purge_incomplete_bank_snapshots, purge_old_bank_snapshots, purge_old_bank_snapshots_at_startup, - snapshot_storage_rebuilder::get_slot_and_append_vec_id, ArchiveFormat, + snapshot_storage_rebuilder::get_slot_and_append_vec_id, + write_full_snapshot_slot_file, ArchiveFormat, SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME, }, status_cache::Status, }, @@ -2638,4 +2641,136 @@ mod tests { Err(VerifySlotDeltasError::SlotNotFoundInDeltas(333)), ); } + + #[test] + fn test_get_highest_loadable_bank_snapshot() { + let bank_snapshots_dir = TempDir::new().unwrap(); + let full_snapshot_archives_dir = TempDir::new().unwrap(); + let incremental_snapshot_archives_dir = TempDir::new().unwrap(); + + let snapshot_config = SnapshotConfig { + bank_snapshots_dir: bank_snapshots_dir.as_ref().to_path_buf(), + full_snapshot_archives_dir: full_snapshot_archives_dir.as_ref().to_path_buf(), + incremental_snapshot_archives_dir: incremental_snapshot_archives_dir + .as_ref() + .to_path_buf(), + ..Default::default() + }; + let load_only_snapshot_config = SnapshotConfig { + bank_snapshots_dir: snapshot_config.bank_snapshots_dir.clone(), + full_snapshot_archives_dir: snapshot_config.full_snapshot_archives_dir.clone(), + incremental_snapshot_archives_dir: snapshot_config + .incremental_snapshot_archives_dir + .clone(), + ..SnapshotConfig::new_load_only() + }; + + let genesis_config = GenesisConfig::default(); + let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + // take some snapshots, and archive them + for _ in 0..snapshot_config + .maximum_full_snapshot_archives_to_retain + .get() + { + let slot = bank.slot() + 1; + bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank.fill_bank_with_ticks_for_tests(); + bank.squash(); + bank.force_flush_accounts_cache(); + bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false, false); + let snapshot_storages = bank.get_snapshot_storages(None); + let slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + let bank_snapshot_info = add_bank_snapshot( + &bank_snapshots_dir, + &bank, + &snapshot_storages, + snapshot_config.snapshot_version, + slot_deltas, + ) + .unwrap(); + assert!( + crate::serde_snapshot::reserialize_bank_with_new_accounts_hash( + &bank_snapshot_info.snapshot_dir, + bank.slot(), + &bank.get_accounts_hash().unwrap(), + None, + ) + ); + write_full_snapshot_slot_file(&bank_snapshot_info.snapshot_dir, slot).unwrap(); + package_and_archive_full_snapshot( + &bank, + &bank_snapshot_info, + &full_snapshot_archives_dir, + &incremental_snapshot_archives_dir, + snapshot_storages, + snapshot_config.archive_format, + snapshot_config.snapshot_version, + 
snapshot_config.maximum_full_snapshot_archives_to_retain, + snapshot_config.maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap(); + } + + // take another snapshot, but leave it as PRE + let slot = bank.slot() + 1; + bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank.fill_bank_with_ticks_for_tests(); + bank.squash(); + bank.force_flush_accounts_cache(); + let snapshot_storages = bank.get_snapshot_storages(None); + let slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + add_bank_snapshot( + &bank_snapshots_dir, + &bank, + &snapshot_storages, + SnapshotVersion::default(), + slot_deltas, + ) + .unwrap(); + + let highest_full_snapshot_archive = + get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir).unwrap(); + let highest_bank_snapshot_post = + get_highest_bank_snapshot_post(&bank_snapshots_dir).unwrap(); + let highest_bank_snapshot_pre = get_highest_bank_snapshot_pre(&bank_snapshots_dir).unwrap(); + + // we want a bank snapshot PRE with the highest slot to ensure get_highest_loadable() + // correctly skips bank snapshots PRE + assert!(highest_bank_snapshot_pre.slot > highest_bank_snapshot_post.slot); + + // 1. call get_highest_loadable() but bad snapshot dir, so returns None + assert!(get_highest_loadable_bank_snapshot(&SnapshotConfig::default()).is_none()); + + // 2. get_highest_loadable(), should return highest_bank_snapshot_post_slot + let bank_snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); + assert_eq!(bank_snapshot, highest_bank_snapshot_post); + + // 3. delete highest full snapshot archive, get_highest_loadable() should return NONE + fs::remove_file(highest_full_snapshot_archive.path()).unwrap(); + assert!(get_highest_loadable_bank_snapshot(&snapshot_config).is_none()); + + // 4. get_highest_loadable(), but with a load-only snapshot config, should return Some() + let bank_snapshot = get_highest_loadable_bank_snapshot(&load_only_snapshot_config).unwrap(); + assert_eq!(bank_snapshot, highest_bank_snapshot_post); + + // 5. delete highest bank snapshot, get_highest_loadable() should return Some() again, with slot-1 + fs::remove_dir_all(&highest_bank_snapshot_post.snapshot_dir).unwrap(); + let bank_snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); + assert_eq!(bank_snapshot.slot, highest_bank_snapshot_post.slot - 1); + + // 6. delete the full snapshot slot file, get_highest_loadable() should return NONE + fs::remove_file( + bank_snapshot + .snapshot_dir + .join(SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME), + ) + .unwrap(); + assert!(get_highest_loadable_bank_snapshot(&snapshot_config).is_none()); + + // 7. 
however, a load-only snapshot config should return Some() again + let bank_snapshot2 = + get_highest_loadable_bank_snapshot(&load_only_snapshot_config).unwrap(); + assert_eq!(bank_snapshot2, bank_snapshot); + } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 10f715c2597b56..9da2205d0c616f 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -4,6 +4,7 @@ use { snapshot_archive_info::{ FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfoGetter, }, + snapshot_config::SnapshotConfig, snapshot_hash::SnapshotHash, snapshot_package::SnapshotPackage, snapshot_utils::snapshot_storage_rebuilder::{ @@ -58,6 +59,7 @@ pub const SNAPSHOT_VERSION_FILENAME: &str = "version"; pub const SNAPSHOT_STATE_COMPLETE_FILENAME: &str = "state_complete"; pub const SNAPSHOT_ACCOUNTS_HARDLINKS: &str = "accounts_hardlinks"; pub const SNAPSHOT_ARCHIVE_DOWNLOAD_DIR: &str = "remote"; +pub const SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME: &str = "full_snapshot_slot"; pub const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB const MAX_SNAPSHOT_VERSION_FILE_SIZE: u64 = 8; // byte const VERSION_STRING_V1_2_0: &str = "1.2.0"; @@ -625,6 +627,76 @@ fn is_bank_snapshot_complete(bank_snapshot_dir: impl AsRef<Path>) -> bool { state_complete_path.is_file() } +/// Writes the full snapshot slot file into the bank snapshot dir +pub fn write_full_snapshot_slot_file( + bank_snapshot_dir: impl AsRef<Path>, + full_snapshot_slot: Slot, +) -> IoResult<()> { + let full_snapshot_slot_path = bank_snapshot_dir + .as_ref() + .join(SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME); + fs::write( + &full_snapshot_slot_path, + Slot::to_le_bytes(full_snapshot_slot), + ) + .map_err(|err| { + IoError::other(format!( + "failed to write full snapshot slot file '{}': {err}", + full_snapshot_slot_path.display(), + )) + }) +} + +// Reads the full snapshot slot file from the bank snapshot dir +pub fn read_full_snapshot_slot_file(bank_snapshot_dir: impl AsRef<Path>) -> IoResult<Slot> { + const SLOT_SIZE: usize = std::mem::size_of::<Slot>(); + let full_snapshot_slot_path = bank_snapshot_dir + .as_ref() + .join(SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME); + let full_snapshot_slot_file_metadata = fs::metadata(&full_snapshot_slot_path)?; + if full_snapshot_slot_file_metadata.len() != SLOT_SIZE as u64 { + let error_message = format!( + "invalid full snapshot slot file size: '{}' has {} bytes (should be {} bytes)", + full_snapshot_slot_path.display(), + full_snapshot_slot_file_metadata.len(), + SLOT_SIZE, + ); + return Err(IoError::other(error_message)); + } + let mut full_snapshot_slot_file = fs::File::open(&full_snapshot_slot_path)?; + let mut buffer = [0; SLOT_SIZE]; + full_snapshot_slot_file.read_exact(&mut buffer)?; + let slot = Slot::from_le_bytes(buffer); + Ok(slot) +} + +/// Gets the highest, loadable, bank snapshot +/// +/// The highest bank snapshot is the one with the highest slot. +/// To be loadable, the bank snapshot must be a BankSnapshotKind::Post. +/// And if we're generating snapshots (e.g. running a normal validator), then +/// the full snapshot file's slot must match the highest full snapshot archive's. +pub fn get_highest_loadable_bank_snapshot( + snapshot_config: &SnapshotConfig, +) -> Option<BankSnapshotInfo> { + let highest_bank_snapshot = + get_highest_bank_snapshot_post(&snapshot_config.bank_snapshots_dir)?; + + // If we're *not* generating snapshots, e.g. running ledger-tool, then we *can* load + // this bank snapshot, and we do not need to check for anything else.
+ if !snapshot_config.should_generate_snapshots() { + return Some(highest_bank_snapshot); + } + + // Otherwise, the bank snapshot's full snapshot slot *must* be the same as + // the highest full snapshot archive's slot. + let highest_full_snapshot_archive_slot = + get_highest_full_snapshot_archive_slot(&snapshot_config.full_snapshot_archives_dir)?; + let full_snapshot_file_slot = + read_full_snapshot_slot_file(&highest_bank_snapshot.snapshot_dir).ok()?; + (full_snapshot_file_slot == highest_full_snapshot_archive_slot).then_some(highest_bank_snapshot) +} + /// If the validator halts in the middle of `archive_snapshot_package()`, the temporary staging /// directory won't be cleaned up. Call this function to clean them up. pub fn remove_tmp_snapshot_archives(snapshot_archives_dir: impl AsRef<Path>) { @@ -2269,6 +2341,7 @@ mod tests { std::{convert::TryFrom, mem::size_of}, tempfile::NamedTempFile, }; + #[test] fn test_serialize_snapshot_data_file_under_limit() { let temp_dir = tempfile::TempDir::new().unwrap(); @@ -3211,4 +3284,34 @@ mod tests { Err(GetSnapshotAccountsHardLinkDirError::GetAccountPath(_)) ); } + + #[test] + fn test_full_snapshot_slot_file_good() { + let slot_written = 123_456_789; + let bank_snapshot_dir = TempDir::new().unwrap(); + write_full_snapshot_slot_file(&bank_snapshot_dir, slot_written).unwrap(); + + let slot_read = read_full_snapshot_slot_file(&bank_snapshot_dir).unwrap(); + assert_eq!(slot_read, slot_written); + } + + #[test] + fn test_full_snapshot_slot_file_bad() { + const SLOT_SIZE: usize = std::mem::size_of::<Slot>(); + let too_small = [1u8; SLOT_SIZE - 1]; + let too_large = [1u8; SLOT_SIZE + 1]; + + for contents in [too_small.as_slice(), too_large.as_slice()] { + let bank_snapshot_dir = TempDir::new().unwrap(); + let full_snapshot_slot_path = bank_snapshot_dir + .as_ref() + .join(SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME); + fs::write(full_snapshot_slot_path, contents).unwrap(); + + let err = read_full_snapshot_slot_file(&bank_snapshot_dir).unwrap_err(); + assert!(err + .to_string() + .starts_with("invalid full snapshot slot file size")); + } + } } From 1ded5a875ae6d11d5fad35a4530ddb4c1d22d52f Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Thu, 28 Mar 2024 13:13:27 -0500 Subject: [PATCH 091/153] Update RELEASE.md to reflect current release process (#444) * Update RELEASE.md to reflect current release process * Changed a few Solana -> agave --- RELEASE.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index c5aa5d540b1191..14cc160ed8bef4 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,4 +1,4 @@ -# Solana Release process +# Agave Release process ## Branches and Tags @@ -38,8 +38,8 @@ Incrementing the major version of the `master` branch is outside the scope of this document. ### v*X.Y* stabilization branches -These are stabilization branches for a given milestone. They are created off -the `master` branch as late as possible prior to the milestone release. +These are stabilization branches. They are created from the `master` branch approximately +every 13 weeks. ### v*X.Y.Z* release tag The release tags are created as desired by the owner of the given stabilization @@ -70,7 +70,7 @@ There are three release channels that map to branches as follows: 1. Determine the new branch name. The name should be "v" + the first 2 version fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9". -1.
Create the new branch and push this branch to the `solana` repository: +1. Create the new branch and push this branch to the `agave` repository: ``` git checkout -b <branchname> git push -u origin <branchname> ``` @@ -99,15 +99,15 @@ Alternatively use the Github UI. ### Miscellaneous Clean up 1. Pin the spl-token-cli version in the newly promoted stable branch by setting `splTokenCliVersion` in scripts/spl-token-cli-version.sh to the latest release that depends on the stable branch (usually this will be the latest spl-token-cli release). -1. Update [mergify.yml](https://github.com/solana-labs/solana/blob/master/.mergify.yml) to add backport actions for the new branch and remove actions for the obsolete branch. -1. Adjust the [Github backport labels](https://github.com/solana-labs/solana/labels) to add the new branch label and remove the label for the obsolete branch. +1. Update [mergify.yml](https://github.com/anza-xyz/agave/blob/master/.mergify.yml) to add backport actions for the new branch and remove actions for the obsolete branch. +1. Adjust the [Github backport labels](https://github.com/anza-xyz/agave/labels) to add the new branch label and remove the label for the obsolete branch. 1. Announce on Discord #development that the release branch exists so people know to use the new backport labels. ## Steps to Create a Release ### Create the Release Tag on GitHub -1. Go to [GitHub Releases](https://github.com/solana-labs/solana/releases) for tagging a release. +1. Go to [GitHub Releases](https://github.com/anza-xyz/agave/releases) for tagging a release. 1. Click "Draft new release". The release tag must exactly match the `version` field in `/Cargo.toml` prefixed by `v`. 1. If the Cargo.toml version field is **0.12.3**, then the release tag must be **v0.12.3** 1. Make sure the Target Branch field matches the branch you want to make a release on. 1. If you want to release v0.12.0, the target branch must be v0.12 1. Fill the release notes. 1. If this is the first release on the branch (e.g. v0.13.**0**), paste in [this - template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md). Engineering Lead can provide summary contents for release notes if needed. + template](https://raw.githubusercontent.com/anza-xyz/agave/master/.github/RELEASE_TEMPLATE.md). Engineering Lead can provide summary contents for release notes if needed. 1. If this is a patch release, review all the commits since the previous release on this branch and add details as needed. 1. Click "Save Draft", then confirm the release notes look good and the tag name and branch are correct. 1. Ensure all desired commits (usually backports) are landed on the branch by now. 1. Ensure the release is marked **"This is a pre-release"**. This flag will need to be removed manually after confirming the Linux binary artifacts appear at a later step. ### Update release branch with the next patch version -[This action](https://github.com/solana-labs/solana/blob/master/.github/workflows/increment-cargo-version-on-release.yml) ensures that publishing a release will trigger the creation of a PR to update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1). Ensure that the created PR makes it through CI and gets submitted. +[This action](https://github.com/anza-xyz/agave/blob/master/.github/workflows/increment-cargo-version-on-release.yml) ensures that publishing a release will trigger the creation of a PR to update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1). Ensure that the created PR makes it through CI and gets submitted.
+ +Note: As of 2024-03-26 the above action is failing so version bumps are done manually. The version bump script is incorrectly updating hashbrown and proc-macro2 versions which should be reverted. ### Prepare for the next release -1. Go to [GitHub Releases](https://github.com/solana-labs/solana/releases) and create a new draft release for `X.Y.Z+1` with empty release notes. This allows people to incrementally add new release notes until it's time for the next release +1. Go to [GitHub Releases](https://github.com/anza-xyz/agave/releases) and create a new draft release for `X.Y.Z+1` with empty release notes. This allows people to incrementally add new release notes until it's time for the next release 1. Also, point the branch field to the same branch and mark the release as **"This is a pre-release"**. -1. Go to the [Github Milestones](https://github.com/solana-labs/solana/milestones). Create a new milestone for the `X.Y.Z+1`, move over -unresolved issues still in the `X.Y.Z` milestone, then close the `X.Y.Z` milestone. ### Verify release automation success -Go to [Solana Releases](https://github.com/solana-labs/solana/releases) and click on the latest release that you just published. -Verify that all of the build artifacts are present, then uncheck **"This is a pre-release"** for the release. +Go to [Agave Releases](https://github.com/anza-xyz/agave/releases) and click on the latest release that you just published. +Verify that all of the build artifacts are present (15 assets), then uncheck **"This is a pre-release"** for the release. Build artifacts can take up to 60 minutes after creating the tag before appearing. To check for progress: -* The `solana-secondary` Buildkite pipeline handles creating the Linux and macOS release artifacts and updated crates. Look for a job under the tag name of the release: https://buildkite.com/solana-labs/solana-secondary. -* The Windows release artifacts are produced by GitHub Actions. Look for a job under the tag name of the release: https://github.com/solana-labs/solana/actions. +* The `agave-secondary` Buildkite pipeline handles creating the Linux and macOS release artifacts and updated crates. Look for a job under the tag name of the release: https://buildkite.com/anza-xyz/agave-secondary. +* The Windows release artifacts are produced by GitHub Actions. Look for a job under the tag name of the release: https://github.com/anza-xyz/agave/actions. -[Crates.io](https://crates.io/crates/solana) should have an updated Solana version. This can take 2-3 hours, and sometimes fails in the `solana-secondary` job. +[Crates.io agave-validator](https://crates.io/crates/agave-validator) should have an updated agave-validator version. This can take 2-3 hours, and sometimes fails in the `agave-secondary` job. 
If this happens and the error is non-fatal, click "Retry" on the "publish crate" job ### Update software on testnet.solana.com From b9a998a69b69d2314b010f59404cfe9c6928e10e Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 28 Mar 2024 13:41:09 -0500 Subject: [PATCH 092/153] config: deprecate date instructions (#465) --- programs/config/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/programs/config/src/lib.rs b/programs/config/src/lib.rs index c165b14477b00c..731682b9e8d198 100644 --- a/programs/config/src/lib.rs +++ b/programs/config/src/lib.rs @@ -1,6 +1,10 @@ #![allow(clippy::arithmetic_side_effects)] pub mod config_instruction; pub mod config_processor; +#[deprecated( + since = "2.0.0", + note = "The config program API no longer supports date instructions." +)] pub mod date_instruction; pub use solana_sdk::config::program::id; From 8822aaa67e9dfd6daf92f3cbfdec30de981b27e3 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 28 Mar 2024 15:10:50 -0400 Subject: [PATCH 093/153] Do not purge all bank snapshots after fastboot (#345) --- ledger/src/bank_forks_utils.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index a64b29bdcf8670..50f8be8561cce9 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -297,15 +297,6 @@ fn bank_forks_from_snapshot( source: err, path: fastboot_snapshot.snapshot_path(), })?; - - // If the node crashes before taking the next bank snapshot, the next startup will attempt - // to load from the same bank snapshot again. And if `shrink` has run, the account storage - // files that are hard linked in bank snapshot will be *different* than what the bank - // snapshot expects. This would cause the node to crash again. To prevent that, purge all - // the bank snapshots here. In the above scenario, this will cause the node to load from a - // snapshot archive next time, which is safe. 
- snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); - bank } else { // Given that we are going to boot from an archive, the append vecs held in the snapshot dirs for fast-boot should From f36a45a971dd0a0d23adaf712daf640fe6c583b7 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Fri, 29 Mar 2024 06:13:01 +1100 Subject: [PATCH 094/153] ledger-tool: name rayon threads (#464) ledger-tool: name and shame rayon threads --- Cargo.lock | 1 + ledger-tool/Cargo.toml | 1 + ledger-tool/src/main.rs | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index a22e448815ce9b..f2c106a1ef4b20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -153,6 +153,7 @@ dependencies = [ "itertools", "log", "num_cpus", + "rayon", "regex", "serde", "serde_json", diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 88bb3d3ff83b72..7d527c91842479 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -21,6 +21,7 @@ histogram = { workspace = true } itertools = { workspace = true } log = { workspace = true } num_cpus = { workspace = true } +rayon = { workspace = true } regex = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 4509e975cf10a1..ab859ff0a7e689 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1501,6 +1501,12 @@ fn main() { let verbose_level = matches.occurrences_of("verbose"); + // Name the rayon global thread pool + rayon::ThreadPoolBuilder::new() + .thread_name(|i| format!("solRayonGlob{i:02}")) + .build_global() + .unwrap(); + match matches.subcommand() { ("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches), ("blockstore", Some(arg_matches)) => blockstore_process_command(&ledger_path, arg_matches), From d5c0c0b1c2b2dc77a6b4946d54bc9332ac3b7062 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 28 Mar 2024 16:32:17 -0400 Subject: [PATCH 095/153] Removes support for loading snapshots with > 1 append vec per slot (#474) --- accounts-db/src/accounts_db.rs | 70 ---------------- runtime/src/snapshot_utils.rs | 3 + .../snapshot_storage_rebuilder.rs | 83 ++++--------------- 3 files changed, 17 insertions(+), 139 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7f29edec19c949..348ae90100fd1c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6318,49 +6318,6 @@ impl AccountsDb { self.flush_slot_cache_with_clean(slot, None::<&mut fn(&_, &_) -> bool>, None) } - /// 1.13 and some 1.14 could produce legal snapshots with more than 1 append vec per slot. - /// This is now illegal at runtime in the validator. - /// However, there is a clear path to be able to support this. - /// So, combine all accounts from 'slot_stores' into a new storage and return it. 
- /// This runs prior to the storages being put in AccountsDb.storage - pub fn combine_multiple_slots_into_one_at_startup( - path: &Path, - id: AccountsFileId, - slot: Slot, - slot_stores: &HashMap<AccountsFileId, Arc<AccountStorageEntry>>, - ) -> Arc<AccountStorageEntry> { - let size = slot_stores.values().map(|storage| storage.capacity()).sum(); - let storage = AccountStorageEntry::new(path, slot, id, size); - - // get unique accounts, most recent version by write_version - let mut accum = HashMap::<Pubkey, LoadedAccount>::default(); - slot_stores.iter().for_each(|(_id, store)| { - store.accounts.account_iter().for_each(|loaded_account| { - match accum.entry(*loaded_account.pubkey()) { - hash_map::Entry::Occupied(mut occupied_entry) => { - if loaded_account.write_version() > occupied_entry.get().write_version() { - occupied_entry.insert(loaded_account); - } - } - hash_map::Entry::Vacant(vacant_entry) => { - vacant_entry.insert(loaded_account); - } - } - }); - }); - - // store all unique accounts into new storage - let accounts = accum.values().collect::<Vec<_>>(); - let to_store = (slot, &accounts[..]); - let storable = - StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new( - &to_store, - ); - storage.accounts.append_accounts(&storable, 0); - - Arc::new(storage) - } - /// `should_flush_f` is an optional closure that determines whether a given /// account should be flushed. Passing `None` will by default flush all /// accounts @@ -10107,33 +10064,6 @@ pub mod tests { } } - #[test] - fn test_combine_multiple_slots_into_one_at_startup() { - solana_logger::setup(); - let (db, slot1) = create_db_with_storages_and_index(false, 2, None); - let slot2 = slot1 + 1; - - let initial_accounts = get_all_accounts(&db, slot1..(slot2 + 1)); - - let tf = TempDir::new().unwrap(); - let stores = db - .storage - .all_slots() - .into_iter() - .map(|slot| { - let storage = db.storage.get_slot_storage_entry(slot).unwrap(); - (storage.append_vec_id(), storage) - }) - .collect::<HashMap<_, _>>(); - let new_storage = - AccountsDb::combine_multiple_slots_into_one_at_startup(tf.path(), 1000, slot1, &stores); - - compare_all_accounts( - &initial_accounts, - &get_all_accounts_from_storages(std::iter::once(&new_storage)), - ); - } - #[test] fn test_accountsdb_scan_snapshot_stores_hash_not_stored() { let accounts_db = AccountsDb::new_single_for_tests(); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 9da2205d0c616f..89c489945f0cb0 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -356,6 +356,9 @@ pub enum SnapshotError { #[error("failed to archive snapshot package: {0}")] ArchiveSnapshotPackage(#[from] ArchiveSnapshotPackageError), + + #[error("failed to rebuild snapshot storages: {0}")] + RebuildStorages(String), } #[derive(Error, Debug)] diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index a1ef80ee92325e..5c1c8237b044f4 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -16,15 +16,14 @@ use { regex::Regex, solana_accounts_db::{ account_storage::{AccountStorageMap, AccountStorageReference}, - accounts_db::{AccountStorageEntry, AccountsDb, AccountsFileId, AtomicAccountsFileId}, - append_vec::AppendVec, + accounts_db::{AccountStorageEntry, AccountsFileId, AtomicAccountsFileId}, }, solana_sdk::clock::Slot, std::{ collections::HashMap, fs::File, io::{BufReader, Error as IoError}, - path::{Path, PathBuf}, + path::PathBuf, str::FromStr as _, sync::{ atomic::{AtomicUsize,
Ordering}, @@ -333,52 +332,19 @@ impl SnapshotStorageRebuilder { .collect::>, SnapshotError>>( )?; - let storage = if slot_stores.len() > 1 { - let remapped_append_vec_folder = lock.first().unwrap().parent().unwrap(); - let remapped_append_vec_id = Self::get_unique_append_vec_id( - &self.next_append_vec_id, - remapped_append_vec_folder, - slot, - ); - AccountsDb::combine_multiple_slots_into_one_at_startup( - remapped_append_vec_folder, - remapped_append_vec_id, - slot, - &slot_stores, - ) - } else { - slot_stores - .into_values() - .next() - .expect("at least 1 storage per slot required") - }; - - self.storage.insert( - slot, - AccountStorageReference { - id: storage.append_vec_id(), - storage, - }, - ); - Ok(()) - } - - /// increment `next_append_vec_id` until there is no file in `parent_folder` with this id and slot - /// return the id - fn get_unique_append_vec_id( - next_append_vec_id: &Arc, - parent_folder: &Path, - slot: Slot, - ) -> AccountsFileId { - loop { - let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel); - let remapped_file_name = AppendVec::file_name(slot, remapped_append_vec_id); - let remapped_append_vec_path = parent_folder.join(remapped_file_name); - if std::fs::metadata(&remapped_append_vec_path).is_err() { - // getting an err here means that there is no existing file here - return remapped_append_vec_id; - } + if slot_stores.len() != 1 { + return Err(SnapshotError::RebuildStorages(format!( + "there must be exactly one storage per slot, but slot {slot} has {} storages", + slot_stores.len() + ))); } + // SAFETY: The check above guarantees there is one item in slot_stores, + // so `.next()` will always return `Some` + let (id, storage) = slot_stores.into_iter().next().unwrap(); + + self.storage + .insert(slot, AccountStorageReference { id, storage }); + Ok(()) } /// Wait for the completion of the rebuilding threads @@ -462,27 +428,6 @@ mod tests { solana_accounts_db::append_vec::AppendVec, }; - #[test] - fn test_get_unique_append_vec_id() { - let folder = tempfile::TempDir::new().unwrap(); - let folder = folder.path(); - let next_id = Arc::default(); - let slot = 1; - let append_vec_id = - SnapshotStorageRebuilder::get_unique_append_vec_id(&next_id, folder, slot); - assert_eq!(append_vec_id, 0); - let file_name = AppendVec::file_name(slot, append_vec_id); - let append_vec_path = folder.join(file_name); - - // create a file at this path - _ = File::create(append_vec_path).unwrap(); - next_id.store(0, Ordering::Release); - let append_vec_id = - SnapshotStorageRebuilder::get_unique_append_vec_id(&next_id, folder, slot); - // should have found a conflict with 0 - assert_eq!(append_vec_id, 1); - } - #[test] fn test_get_snapshot_file_kind() { assert_eq!(None, get_snapshot_file_kind("file.txt")); From e261e2704a1a874b4df5802cbeb6d119d3dc4240 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 28 Mar 2024 17:00:48 -0500 Subject: [PATCH 096/153] store only removes from read cache if slot is possibly present (#452) * store only removes from read cache if slot is possibly present * remove_assume_not_present --- accounts-db/src/accounts_db.rs | 16 ++++++++++++---- accounts-db/src/read_only_accounts_cache.rs | 20 ++++++++++++++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 348ae90100fd1c..0aabae8e93ea1b 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6427,10 +6427,18 @@ impl AccountsDb { ) -> Vec { let mut 
calc_stored_meta_time = Measure::start("calc_stored_meta"); let slot = accounts.target_slot(); - (0..accounts.len()).for_each(|index| { - let pubkey = accounts.pubkey(index); - self.read_only_accounts_cache.remove(*pubkey, slot); - }); + if self + .read_only_accounts_cache + .can_slot_be_in_cache(accounts.target_slot()) + { + (0..accounts.len()).for_each(|index| { + let pubkey = accounts.pubkey(index); + // based on the patterns of how a validator writes accounts, it is almost always the case that there is no read only cache entry + // for this pubkey and slot. So, we can give that hint to the `remove` for performance. + self.read_only_accounts_cache + .remove_assume_not_present(*pubkey, slot); + }); + } calc_stored_meta_time.stop(); self.stats .calc_stored_meta diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs index 27e0d848543c82..89cd5928f16786 100644 --- a/accounts-db/src/read_only_accounts_cache.rs +++ b/accounts-db/src/read_only_accounts_cache.rs @@ -72,11 +72,13 @@ pub(crate) struct ReadOnlyAccountsCache { // Performance statistics stats: ReadOnlyCacheStats, + highest_slot_stored: AtomicU64, } impl ReadOnlyAccountsCache { pub(crate) fn new(max_data_size: usize, ms_to_skip_lru_update: u32) -> Self { Self { + highest_slot_stored: AtomicU64::default(), max_data_size, cache: DashMap::default(), queue: Mutex::>::default(), @@ -134,6 +136,7 @@ impl ReadOnlyAccountsCache { } pub(crate) fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) { + self.highest_slot_stored.fetch_max(slot, Ordering::Release); let key = (pubkey, slot); let account_size = self.account_size(&account); self.data_size.fetch_add(account_size, Ordering::Relaxed); @@ -169,6 +172,23 @@ impl ReadOnlyAccountsCache { self.stats.evicts.fetch_add(num_evicts, Ordering::Relaxed); } + /// true if any pubkeys could have ever been stored into the cache at `slot` + pub(crate) fn can_slot_be_in_cache(&self, slot: Slot) -> bool { + self.highest_slot_stored.load(Ordering::Acquire) >= slot + } + + /// remove entry if it exists. + /// Assume the entry does not exist for performance. 
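
In isolation, the check-then-remove idea behind the method added below is a read-lock probe ahead of the write-locking removal; a minimal sketch, assuming a dashmap-style concurrent map with simplified key and value types rather than the real cache internals:

use dashmap::DashMap;

// Sketch only: probe under the cheap read lock first, so the common
// "not present" path never contends on the write lock `remove` needs.
fn remove_if_present(cache: &DashMap<(u64, u64), Vec<u8>>, key: (u64, u64)) -> Option<Vec<u8>> {
    cache.get(&key)?; // read lock; on a miss we are done
    cache.remove(&key).map(|(_key, value)| value)
}
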
+ pub(crate) fn remove_assume_not_present( + &self, + pubkey: Pubkey, + slot: Slot, + ) -> Option { + // get read lock first to see if the entry exists + _ = self.cache.get(&(pubkey, slot))?; + self.remove(pubkey, slot) + } + pub(crate) fn remove(&self, pubkey: Pubkey, slot: Slot) -> Option { let (_, entry) = self.cache.remove(&(pubkey, slot))?; // self.queue should be modified only after removing the entry from the From 212cbdbc4e276c5136ae5cd51507f86a50283e44 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 29 Mar 2024 11:41:53 +0800 Subject: [PATCH 097/153] ci: add release pipeline (#466) --- .github/workflows/release.yml | 88 +++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000000..11ac0daca37078 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,88 @@ +name: Release + +on: + push: + tags: + - "*" + +jobs: + trigger-buildkite-pipeline: + runs-on: ubuntu-latest + steps: + - name: Trigger a Buildkite Build + uses: "buildkite/trigger-pipeline-action@v2.0.0" + with: + buildkite_api_access_token: ${{ secrets.TRIGGER_BK_BUILD_TOKEN }} + pipeline: "anza/agave-secondary" + branch: "${{ github.ref_name }}" + commit: "HEAD" + message: ":github: Triggered from a GitHub Action" + + draft-release: + runs-on: ubuntu-latest + steps: + - name: Create Release + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + github.rest.repos.createRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + tag_name: '${{ github.ref_name }}', + name: 'Release ${{ github.ref_name }}', + body: '🚧', + draft: true, + prerelease: false + }) + + version-bump: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Parse Info + id: parse_info + run: | + # get the next version + version=${{ github.ref_name }} + major=$(echo $version | cut -d'.' -f1) + minor=$(echo $version | cut -d'.' -f2) + patch=$(echo $version | cut -d'.' 
-f3)
+          next_version=$major.$minor.$((patch+1))
+          : "${next_version:?}"
+
+          # get the target branch
+          target_branch=$major.$minor
+          : "${target_branch:?}"
+
+          echo "next_version=$next_version" | tee -a $GITHUB_OUTPUT
+          echo "target_branch=$target_branch" | tee -a $GITHUB_OUTPUT
+
+      - name: Create branch and make changes
+        run: |
+          next_version=${{ steps.parse_info.outputs.next_version }}
+
+          git checkout -b version-bump-$next_version
+          ./scripts/increment-cargo-version.sh patch
+
+          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+          git config user.name "github-actions[bot]"
+          git commit -am "Bump version to $next_version"
+          git push origin version-bump-$next_version
+
+      - name: Create PR
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            github.rest.pulls.create({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              title: 'Bump version to ${{ steps.parse_info.outputs.next_version }}',
+              head: 'version-bump-${{ steps.parse_info.outputs.next_version }}',
+              base: '${{ steps.parse_info.outputs.target_branch }}'
+            })

From ae327c01565256ff5859aae73ea5f715bbd0af8f Mon Sep 17 00:00:00 2001
From: Ryo Onodera
Date: Fri, 29 Mar 2024 15:30:42 +0900
Subject: [PATCH 098/153] Bump blake3 from v1.5.0 to v1.5.1 (#488)

---
 Cargo.lock              | 4 ++--
 Cargo.toml              | 2 +-
 programs/sbf/Cargo.lock | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f2c106a1ef4b20..9c4224650f1847 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -863,9 +863,9 @@ dependencies = [

 [[package]]
 name = "blake3"
-version = "1.5.0"
+version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87"
+checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52"
 dependencies = [
  "arrayref",
  "arrayvec",

diff --git a/Cargo.toml b/Cargo.toml
index 07881e624c83f2..6275414e6e33e0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -157,7 +157,7 @@ backoff = "0.4.0"
 base64 = "0.22.0"
 bincode = "1.3.3"
 bitflags = { version = "2.4.2", features = ["serde"] }
-blake3 = "1.5.0"
+blake3 = "1.5.1"
 block-buffer = "0.10.4"
 borsh = { version = "1.2.1", features = ["derive", "unstable__schema"] }
 bs58 = "0.4.0"

diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 654430127fa7f4..9a69073b2931ce 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -677,9 +677,9 @@ dependencies = [

 [[package]]
 name = "blake3"
-version = "1.5.0"
+version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87"
+checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52"
 dependencies = [
  "arrayref",
  "arrayvec",

From d140cdbeab8075a22fbbec037429970ceb6b4cce Mon Sep 17 00:00:00 2001
From: Tyera
Date: Fri, 29 Mar 2024 00:42:09 -0600
Subject: [PATCH 099/153] Update crate check to new crates.io error string (#484)

---
 ci/check-crates.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/check-crates.sh b/ci/check-crates.sh
index 655504ea11d8e3..07a8ea04760872 100755
--- a/ci/check-crates.sh
+++ b/ci/check-crates.sh
@@ -54,7 +54,7 @@ for file in "${files[@]}"; do
     errors=$(echo "$response" | jq .errors)
     if [[ $errors != "null" ]]; then
       details=$(echo "$response" | jq .errors | jq -r ".[0].detail")
-      if [[ $details = *"Not Found"* ]]; then
+      if [[ $details = *"does not exist"* ]]; then
        ((error_count++))
echo "❌ new crate $crate_name not found on crates.io. you can either From 9076348ef40e28975796c2ff25460bf3b25d41e4 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 29 Mar 2024 07:34:12 -0500 Subject: [PATCH 100/153] Make CostTracker aware of inflight transactions (#437) When a leader is packing a Bank, transactions costs are added to the CostTracker and then later updated or removed, depending on if the tx is committed. However, it is possible for a Bank to be frozen while there are several tx's in flight. CostUpdateService submits a metric with cost information almost immediately after a Bank has been frozen. The result is that we have observed cost details being submitted before some cost removals take place, which causes a massive over-reporting of the block cost compared to actual. This PR adds a field to track the number of transactions that are inflight, and adds a simple mechanism to try to allow that value to settle to zero before submitting the datapoint. The number of inflight tx's is submitted with the datapoint, so even if the value does not settle to zero, we can still detect this case and know the metric is tainted. Co-authored-by: Andrew Fitzgerald --- core/src/banking_stage/qos_service.rs | 7 +++++++ core/src/cost_update_service.rs | 30 ++++++++++++++++++++++++++- cost-model/src/cost_tracker.rs | 27 ++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 8c1507ae3fb91c..c9e1d98d64822e 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -120,6 +120,7 @@ impl QosService { } }) .collect(); + cost_tracker.add_transactions_in_flight(num_included); cost_tracking_time.stop(); self.metrics @@ -167,17 +168,20 @@ impl QosService { bank: &Bank, ) { let mut cost_tracker = bank.write_cost_tracker().unwrap(); + let mut num_included = 0; transaction_cost_results .zip(transaction_committed_status) .for_each(|(tx_cost, transaction_committed_details)| { // Only transactions that the qos service included have to be // checked for update if let Ok(tx_cost) = tx_cost { + num_included += 1; if *transaction_committed_details == CommitTransactionDetails::NotCommitted { cost_tracker.remove(tx_cost) } } }); + cost_tracker.sub_transactions_in_flight(num_included); } fn update_committed_transaction_costs<'a>( @@ -206,13 +210,16 @@ impl QosService { bank: &Bank, ) { let mut cost_tracker = bank.write_cost_tracker().unwrap(); + let mut num_included = 0; transaction_cost_results.for_each(|tx_cost| { // Only transactions that the qos service included have to be // removed if let Ok(tx_cost) = tx_cost { + num_included += 1; cost_tracker.remove(tx_cost); } }); + cost_tracker.sub_transactions_in_flight(num_included); } // metrics are reported by bank slot diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index 6b49c8fdf1db46..58ef6c48ed7721 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -7,6 +7,7 @@ use { std::{ sync::Arc, thread::{self, Builder, JoinHandle}, + time::Duration, }, }; pub enum CostUpdate { @@ -19,6 +20,12 @@ pub struct CostUpdateService { thread_hdl: JoinHandle<()>, } +// The maximum number of retries to check if CostTracker::in_flight_transaction_count() has settled +// to zero. 
Bail out after this many retries; the in-flight count is reported so this is ok +const MAX_LOOP_COUNT: usize = 25; +// Throttle checking the count to avoid excessive polling +const LOOP_LIMITER: Duration = Duration::from_millis(10); + impl CostUpdateService { pub fn new(blockstore: Arc, cost_update_receiver: CostUpdateReceiver) -> Self { let thread_hdl = Builder::new() @@ -39,7 +46,28 @@ impl CostUpdateService { for cost_update in cost_update_receiver.iter() { match cost_update { CostUpdate::FrozenBank { bank } => { - bank.read_cost_tracker().unwrap().report_stats(bank.slot()); + for loop_count in 1..=MAX_LOOP_COUNT { + { + // Release the lock so that the thread that will + // update the count is able to obtain a write lock + // + // Use inner scope to avoid sleeping with the lock + let cost_tracker = bank.read_cost_tracker().unwrap(); + let in_flight_transaction_count = + cost_tracker.in_flight_transaction_count(); + + if in_flight_transaction_count == 0 || loop_count == MAX_LOOP_COUNT { + let slot = bank.slot(); + trace!( + "inflight transaction count is {in_flight_transaction_count} \ + for slot {slot} after {loop_count} iteration(s)" + ); + cost_tracker.report_stats(slot); + break; + } + } + std::thread::sleep(LOOP_LIMITER); + } } } } diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index b5e3f9f4932a59..64185edb6c77ca 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -61,6 +61,10 @@ pub struct CostTracker { transaction_signature_count: u64, secp256k1_instruction_signature_count: u64, ed25519_instruction_signature_count: u64, + /// The number of transactions that have had their estimated cost added to + /// the tracker, but are still waiting for an update with actual usage or + /// removal if the transaction does not end up getting committed. 
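
Before the new field itself, the settle-and-report flow described in the commit message can be sketched as a bounded poll; the constants match this patch, while the closures stand in for the real CostTracker read lock and datapoint submission:

use std::{thread, time::Duration};

// Poll the in-flight count up to MAX_LOOP_COUNT times, then report
// regardless, so a count that never settles only taints the metric
// instead of blocking the service.
fn settle_then_report(in_flight_count: impl Fn() -> usize, report: impl Fn(usize)) {
    const MAX_LOOP_COUNT: usize = 25;
    const LOOP_LIMITER: Duration = Duration::from_millis(10);
    for loop_count in 1..=MAX_LOOP_COUNT {
        let in_flight = in_flight_count();
        if in_flight == 0 || loop_count == MAX_LOOP_COUNT {
            report(in_flight); // reported even when tainted
            return;
        }
        thread::sleep(LOOP_LIMITER);
    }
}
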
+ in_flight_transaction_count: usize, } impl Default for CostTracker { @@ -83,6 +87,7 @@ impl Default for CostTracker { transaction_signature_count: 0, secp256k1_instruction_signature_count: 0, ed25519_instruction_signature_count: 0, + in_flight_transaction_count: 0, } } } @@ -100,6 +105,23 @@ impl CostTracker { self.vote_cost_limit = vote_cost_limit; } + pub fn in_flight_transaction_count(&self) -> usize { + self.in_flight_transaction_count + } + + pub fn add_transactions_in_flight(&mut self, in_flight_transaction_count: usize) { + saturating_add_assign!( + self.in_flight_transaction_count, + in_flight_transaction_count + ); + } + + pub fn sub_transactions_in_flight(&mut self, in_flight_transaction_count: usize) { + self.in_flight_transaction_count = self + .in_flight_transaction_count + .saturating_sub(in_flight_transaction_count); + } + pub fn try_add(&mut self, tx_cost: &TransactionCost) -> Result { self.would_fit(tx_cost)?; self.add_transaction_cost(tx_cost); @@ -174,6 +196,11 @@ impl CostTracker { self.ed25519_instruction_signature_count, i64 ), + ( + "inflight_transaction_count", + self.in_flight_transaction_count, + i64 + ), ); } From b1e17998b64203fe62f52efe3fe5ba87706ed446 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 29 Mar 2024 20:56:54 +0800 Subject: [PATCH 101/153] ci: fix some issues for the release pipeline (#493) * add TRIGGERED_BUILDKITE_TAG to the pipeline * publish a pre-release for the release id --- .github/workflows/release.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 11ac0daca37078..78a6d029d4c063 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,6 +15,7 @@ jobs: buildkite_api_access_token: ${{ secrets.TRIGGER_BK_BUILD_TOKEN }} pipeline: "anza/agave-secondary" branch: "${{ github.ref_name }}" + build_env_vars: '{"TRIGGERED_BUILDKITE_TAG": "${{ github.ref_name }}"}' commit: "HEAD" message: ":github: Triggered from a GitHub Action" @@ -32,8 +33,8 @@ jobs: tag_name: '${{ github.ref_name }}', name: 'Release ${{ github.ref_name }}', body: '🚧', - draft: true, - prerelease: false + draft: false, + prerelease: true }) version-bump: From a41210f898a21df5e8402a67e1e568e4561cfd8f Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 29 Mar 2024 10:27:41 -0700 Subject: [PATCH 102/153] Scheduler: remove allow(dead_code) markers (#481) remove allow(dead_code) markers --- core/src/banking_stage/consume_worker.rs | 1 - core/src/banking_stage/read_write_account_set.rs | 1 - .../transaction_scheduler/in_flight_tracker.rs | 1 + core/src/banking_stage/transaction_scheduler/mod.rs | 3 --- .../transaction_state_container.rs | 10 ---------- 5 files changed, 1 insertion(+), 15 deletions(-) diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 92fb07ddfab18c..12bb4fc1e88ee1 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -36,7 +36,6 @@ pub(crate) struct ConsumeWorker { metrics: Arc, } -#[allow(dead_code)] impl ConsumeWorker { pub fn new( id: u32, diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs index b9d65ff4756857..dad1256f6e6f22 100644 --- a/core/src/banking_stage/read_write_account_set.rs +++ b/core/src/banking_stage/read_write_account_set.rs @@ -14,7 +14,6 @@ pub struct ReadWriteAccountSet { impl ReadWriteAccountSet { /// Returns true if all account locks were available and 
false otherwise. - #[allow(dead_code)] pub fn check_locks(&self, message: &SanitizedMessage) -> bool { message .account_keys() diff --git a/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs b/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs index 243f14c66920a0..74f142b53b4156 100644 --- a/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs +++ b/core/src/banking_stage/transaction_scheduler/in_flight_tracker.rs @@ -34,6 +34,7 @@ impl InFlightTracker { } /// Returns the number of cus that are in flight for each thread. + #[allow(dead_code)] pub fn cus_in_flight_per_thread(&self) -> &[u64] { &self.cus_in_flight_per_thread } diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index 5a3ab0c06ded5d..17991b762eb104 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -1,5 +1,4 @@ mod batch_id_generator; -#[allow(dead_code)] mod in_flight_tracker; pub(crate) mod prio_graph_scheduler; pub(crate) mod scheduler_controller; @@ -8,7 +7,5 @@ mod scheduler_metrics; mod thread_aware_account_locks; mod transaction_id_generator; mod transaction_priority_id; -#[allow(dead_code)] mod transaction_state; -#[allow(dead_code)] mod transaction_state_container; diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index a627375a03e6ba..798f469f701fc8 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -81,16 +81,6 @@ impl TransactionStateContainer { .map(|state| state.transaction_ttl()) } - /// Take `SanitizedTransactionTTL` by id. - /// This transitions the transaction to `Pending` state. - /// Panics if the transaction does not exist. - pub(crate) fn take_transaction(&mut self, id: &TransactionId) -> SanitizedTransactionTTL { - self.id_to_transaction_state - .get_mut(id) - .expect("transaction must exist") - .transition_to_pending() - } - /// Insert a new transaction into the container's queues and maps. /// Returns `true` if a packet was dropped due to capacity limits. pub(crate) fn insert_new_transaction( From 04feed2cf52359c6f8be9156327e7bb02cc866b6 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Fri, 29 Mar 2024 12:12:12 -0700 Subject: [PATCH 103/153] add metric for duplicate push messages (#321) * add metric for duplicate push messages * add in num_total_push * address comments. don't lock stats each time * address comments. 
remove num_total_push

* change dup push message name in code to reflect metric name
---
 gossip/src/cluster_info_metrics.rs |  5 +++++
 gossip/src/crds.rs                 | 12 ++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs
index 74dc0c43e9606e..eed11f8313c195 100644
--- a/gossip/src/cluster_info_metrics.rs
+++ b/gossip/src/cluster_info_metrics.rs
@@ -434,6 +434,11 @@ pub(crate) fn submit_gossip_stats(
             i64
         ),
         ("push_message_count", stats.push_message_count.clear(), i64),
+        (
+            "num_duplicate_push_messages",
+            crds_stats.num_duplicate_push_messages,
+            i64
+        ),
         (
             "push_fanout_num_entries",
             stats.push_fanout_num_entries.clear(),

diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs
index dbb6c43c0356c0..210c7a05aaf5f0 100644
--- a/gossip/src/crds.rs
+++ b/gossip/src/crds.rs
@@ -118,6 +118,7 @@ pub(crate) struct CrdsStats {
     /// number of times a message was first received via a PullResponse
     /// and that message was later received via a PushMessage
     pub(crate) num_redundant_pull_responses: u64,
+    pub(crate) num_duplicate_push_messages: u64,
 }

 /// This structure stores some local metadata associated with the CrdsValue
@@ -235,9 +236,10 @@ impl Crds {
         let label = value.label();
         let pubkey = value.pubkey();
         let value = VersionedCrdsValue::new(value, self.cursor, now, route);
+        let mut stats = self.stats.lock().unwrap();
         match self.table.entry(label) {
             Entry::Vacant(entry) => {
-                self.stats.lock().unwrap().record_insert(&value, route);
+                stats.record_insert(&value, route);
                 let entry_index = entry.index();
                 self.shards.insert(entry_index, &value);
                 match &value.value.data {
@@ -263,7 +265,7 @@ impl Crds {
                 Ok(())
             }
             Entry::Occupied(mut entry) if overrides(&value.value, entry.get()) => {
-                self.stats.lock().unwrap().record_insert(&value, route);
+                stats.record_insert(&value, route);
                 let entry_index = entry.index();
                 self.shards.remove(entry_index, entry.get());
                 self.shards.insert(entry_index, &value);
@@ -302,7 +304,7 @@ impl Crds {
                 Ok(())
             }
             Entry::Occupied(mut entry) => {
-                self.stats.lock().unwrap().record_fail(&value, route);
+                stats.record_fail(&value, route);
                 trace!(
                     "INSERT FAILED data: {} new.wallclock: {}",
                     value.value.label(),
@@ -316,7 +318,9 @@ impl Crds {
             } else if matches!(route, GossipRoute::PushMessage(_)) {
                 let entry = entry.get_mut();
                 if entry.num_push_recv == Some(0) {
-                    self.stats.lock().unwrap().num_redundant_pull_responses += 1;
+                    stats.num_redundant_pull_responses += 1;
+                } else {
+                    stats.num_duplicate_push_messages += 1;
                 }
                 let num_push_dups = entry.num_push_recv.unwrap_or_default();
                 entry.num_push_recv = Some(num_push_dups.saturating_add(1));

From 18c32aba3593a76418d96f09e8c1ea9bd70e6800 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Fri, 29 Mar 2024 15:20:38 -0500
Subject: [PATCH 104/153] disable read cache while populating stakes cache on load (#482)

* disable read cache while populating stakes cache on load
* use struct with drop as api
* use LoadHint
* remove disable_read_cache_updates_count
* add comment
* fmt
---
 accounts-db/src/accounts.rs    | 14 ++++++++++++++
 accounts-db/src/accounts_db.rs |  8 +++++---
 runtime/src/bank.rs            |  7 ++++++-
 3 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index 33a57d56461c78..bfacc83b69a9e1 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -174,6 +174,20 @@ impl Accounts {
         self.load_slow(ancestors, pubkey, LoadHint::FixedMaxRoot)
     }

+    /// same as
`load_with_fixed_root` except: + /// if the account is not already in the read cache, it is NOT put in the read cache on successful load + pub fn load_with_fixed_root_do_not_populate_read_cache( + &self, + ancestors: &Ancestors, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { + self.load_slow( + ancestors, + pubkey, + LoadHint::FixedMaxRootDoNotPopulateReadCache, + ) + } + pub fn load_without_fixed_root( &self, ancestors: &Ancestors, diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 0aabae8e93ea1b..f9d40da83e9f7c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -787,6 +787,8 @@ pub enum LoadHint { // account loading, while maintaining the determinism of account loading and resultant // transaction execution thereof. FixedMaxRoot, + /// same as `FixedMaxRoot`, except do not populate the read cache on load + FixedMaxRootDoNotPopulateReadCache, // Caller can't hint the above safety assumption. Generally RPC and miscellaneous // other call-site falls into this category. The likelihood of slower path is slightly // increased as well. @@ -5119,7 +5121,7 @@ impl AccountsDb { // so retry. This works because in accounts cache flush, an account is written to // storage *before* it is removed from the cache match load_hint { - LoadHint::FixedMaxRoot => { + LoadHint::FixedMaxRootDoNotPopulateReadCache | LoadHint::FixedMaxRoot => { // it's impossible for this to fail for transaction loads from // replaying/banking more than once. // This is because: @@ -5139,7 +5141,7 @@ impl AccountsDb { } LoadedAccountAccessor::Stored(None) => { match load_hint { - LoadHint::FixedMaxRoot => { + LoadHint::FixedMaxRootDoNotPopulateReadCache | LoadHint::FixedMaxRoot => { // When running replay on the validator, or banking stage on the leader, // it should be very rare that the storage entry doesn't exist if the // entry in the accounts index is the latest version of this account. @@ -5348,7 +5350,7 @@ impl AccountsDb { return None; } - if !is_cached { + if !is_cached && load_hint != LoadHint::FixedMaxRootDoNotPopulateReadCache { /* We show this store into the read-only cache for account 'A' and future loads of 'A' from the read-only cache are safe/reflect 'A''s latest state on this fork. diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c8afb7406164ae..f9a785eb9b5a57 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1823,8 +1823,13 @@ impl Bank { // from Stakes by reading the full account state from // accounts-db. Note that it is crucial that these accounts are loaded // at the right slot and match precisely with serialized Delegations. + // Note that we are disabling the read cache while we populate the stakes cache. + // The stakes accounts will not be expected to be loaded again. + // If we populate the read cache with these loads, then we'll just soon have to evict these. 
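
As a sketch of what the new hint changes in the load path (illustrative types and a trimmed-down enum, not the actual AccountsDb signatures), the decision boils down to a small predicate:

// Populate the read cache only for loads that may repeat; one-shot
// startup loads opt out so they cannot evict entries that ongoing
// transaction processing still wants.
#[derive(PartialEq)]
enum LoadHint {
    FixedMaxRoot,
    FixedMaxRootDoNotPopulateReadCache,
}

fn should_populate_read_cache(already_cached: bool, hint: LoadHint) -> bool {
    !already_cached && hint != LoadHint::FixedMaxRootDoNotPopulateReadCache
}
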
let stakes = Stakes::new(&fields.stakes, |pubkey| { - let (account, _slot) = bank_rc.accounts.load_with_fixed_root(&ancestors, pubkey)?; + let (account, _slot) = bank_rc + .accounts + .load_with_fixed_root_do_not_populate_read_cache(&ancestors, pubkey)?; Some(account) }) .expect( From c5b9196df7d95f5444e675119462958edd04c130 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Sat, 30 Mar 2024 06:37:19 +0900 Subject: [PATCH 105/153] [clap-v3-utils] Use `Arg::new` in place of `Arg::with_name` (#491) --- clap-v3-utils/src/compute_budget.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clap-v3-utils/src/compute_budget.rs b/clap-v3-utils/src/compute_budget.rs index 7b34e114b42883..b2f43f6851a8ea 100644 --- a/clap-v3-utils/src/compute_budget.rs +++ b/clap-v3-utils/src/compute_budget.rs @@ -16,7 +16,7 @@ pub const COMPUTE_UNIT_LIMIT_ARG: ArgConstant<'static> = ArgConstant { }; pub fn compute_unit_price_arg<'a>() -> Arg<'a> { - Arg::with_name(COMPUTE_UNIT_PRICE_ARG.name) + Arg::new(COMPUTE_UNIT_PRICE_ARG.name) .long(COMPUTE_UNIT_PRICE_ARG.long) .takes_value(true) .value_name("COMPUTE-UNIT-PRICE") @@ -25,7 +25,7 @@ pub fn compute_unit_price_arg<'a>() -> Arg<'a> { } pub fn compute_unit_limit_arg<'a>() -> Arg<'a> { - Arg::with_name(COMPUTE_UNIT_LIMIT_ARG.name) + Arg::new(COMPUTE_UNIT_LIMIT_ARG.name) .long(COMPUTE_UNIT_LIMIT_ARG.long) .takes_value(true) .value_name("COMPUTE-UNIT-LIMIT") From fb1ee7842f537cbd80b8705fef059fcce8bf9151 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Sat, 30 Mar 2024 06:37:43 +0900 Subject: [PATCH 106/153] [zk-token-sdk] Allow discrete log to be executed in the current thread (#443) --- zk-token-sdk/src/encryption/discrete_log.rs | 111 ++++++++++---------- zk-token-sdk/src/encryption/elgamal.rs | 2 +- 2 files changed, 58 insertions(+), 55 deletions(-) diff --git a/zk-token-sdk/src/encryption/discrete_log.rs b/zk-token-sdk/src/encryption/discrete_log.rs index 5ffc1c206a6f68..55b9c82adcd978 100644 --- a/zk-token-sdk/src/encryption/discrete_log.rs +++ b/zk-token-sdk/src/encryption/discrete_log.rs @@ -28,7 +28,7 @@ use { }, itertools::Itertools, serde::{Deserialize, Serialize}, - std::collections::HashMap, + std::{collections::HashMap, num::NonZeroUsize}, thiserror::Error, }; @@ -57,14 +57,14 @@ pub struct DiscreteLog { /// Target point for discrete log pub target: RistrettoPoint, /// Number of threads used for discrete log computation - num_threads: usize, + num_threads: Option, /// Range bound for discrete log search derived from the max value to search for and /// `num_threads` - range_bound: usize, + range_bound: NonZeroUsize, /// Ristretto point representing each step of the discrete log search step_point: RistrettoPoint, /// Ristretto point compression batch size - compression_batch_size: usize, + compression_batch_size: NonZeroUsize, } #[derive(Serialize, Deserialize, Default)] @@ -107,24 +107,27 @@ impl DiscreteLog { Self { generator, target, - num_threads: 1, - range_bound: TWO16 as usize, + num_threads: None, + range_bound: (TWO16 as usize).try_into().unwrap(), step_point: G, - compression_batch_size: 32, + compression_batch_size: 32.try_into().unwrap(), } } /// Adjusts number of threads in a discrete log instance. 
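
As an aside, the power-of-two validation that the reworked setter below performs can be sketched in isolation; the MAX_THREAD value here is an assumption for illustration, not the crate's literal:

use std::num::NonZeroUsize;

// A power-of-two thread count divides the 2^16 search range evenly,
// so every worker gets the same nonzero slice of the range.
fn thread_range_bound(num_threads: NonZeroUsize) -> Result<NonZeroUsize, &'static str> {
    const MAX_THREAD: usize = 65536; // assumed bound for this sketch
    if !num_threads.is_power_of_two() || num_threads.get() > MAX_THREAD {
        return Err("thread count must be a power of two within bounds");
    }
    NonZeroUsize::new((1usize << 16) / num_threads.get()).ok_or("empty range")
}
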
#[cfg(not(target_arch = "wasm32"))] - pub fn num_threads(&mut self, num_threads: usize) -> Result<(), DiscreteLogError> { + pub fn num_threads(&mut self, num_threads: NonZeroUsize) -> Result<(), DiscreteLogError> { // number of threads must be a positive power-of-two integer - if num_threads == 0 || (num_threads & (num_threads - 1)) != 0 || num_threads > MAX_THREAD { + if !num_threads.is_power_of_two() || num_threads.get() > MAX_THREAD { return Err(DiscreteLogError::DiscreteLogThreads); } - self.num_threads = num_threads; - self.range_bound = (TWO16 as usize).checked_div(num_threads).unwrap(); - self.step_point = Scalar::from(num_threads as u64) * G; + self.num_threads = Some(num_threads); + self.range_bound = (TWO16 as usize) + .checked_div(num_threads.get()) + .and_then(|range_bound| range_bound.try_into().ok()) + .unwrap(); // `num_threads` cannot exceed `TWO16`, so `range_bound` always non-zero + self.step_point = Scalar::from(num_threads.get() as u64) * G; Ok(()) } @@ -132,9 +135,9 @@ impl DiscreteLog { /// Adjusts inversion batch size in a discrete log instance. pub fn set_compression_batch_size( &mut self, - compression_batch_size: usize, + compression_batch_size: NonZeroUsize, ) -> Result<(), DiscreteLogError> { - if compression_batch_size >= TWO16 as usize || compression_batch_size == 0 { + if compression_batch_size.get() >= TWO16 as usize { return Err(DiscreteLogError::DiscreteLogBatchSize); } self.compression_batch_size = compression_batch_size; @@ -145,41 +148,41 @@ impl DiscreteLog { /// Solves the discrete log problem under the assumption that the solution /// is a positive 32-bit number. pub fn decode_u32(self) -> Option { - #[cfg(not(target_arch = "wasm32"))] - { - let mut starting_point = self.target; - let handles = (0..self.num_threads) - .map(|i| { - let ristretto_iterator = RistrettoIterator::new( - (starting_point, i as u64), - (-(&self.step_point), self.num_threads as u64), - ); - - let handle = thread::spawn(move || { - Self::decode_range( - ristretto_iterator, - self.range_bound, - self.compression_batch_size, - ) - }); - - starting_point -= G; - handle - }) - .collect::>(); - - handles - .into_iter() - .map_while(|h| h.join().ok()) - .find(|x| x.is_some()) - .flatten() - } - #[cfg(target_arch = "wasm32")] - { - let ristretto_iterator = RistrettoIterator::new( - (self.target, 0_u64), - (-(&self.step_point), self.num_threads as u64), - ); + if let Some(num_threads) = self.num_threads { + #[cfg(not(target_arch = "wasm32"))] + { + let mut starting_point = self.target; + let handles = (0..num_threads.get()) + .map(|i| { + let ristretto_iterator = RistrettoIterator::new( + (starting_point, i as u64), + (-(&self.step_point), num_threads.get() as u64), + ); + + let handle = thread::spawn(move || { + Self::decode_range( + ristretto_iterator, + self.range_bound, + self.compression_batch_size, + ) + }); + + starting_point -= G; + handle + }) + .collect::>(); + + handles + .into_iter() + .map_while(|h| h.join().ok()) + .find(|x| x.is_some()) + .flatten() + } + #[cfg(target_arch = "wasm32")] + unreachable!() // `self.num_threads` always `None` on wasm target + } else { + let ristretto_iterator = + RistrettoIterator::new((self.target, 0_u64), (-(&self.step_point), 1u64)); Self::decode_range( ristretto_iterator, @@ -191,15 +194,15 @@ impl DiscreteLog { fn decode_range( ristretto_iterator: RistrettoIterator, - range_bound: usize, - compression_batch_size: usize, + range_bound: NonZeroUsize, + compression_batch_size: NonZeroUsize, ) -> Option { let hashmap = 
&DECODE_PRECOMPUTATION_FOR_G; let mut decoded = None; for batch in &ristretto_iterator - .take(range_bound) - .chunks(compression_batch_size) + .take(range_bound.get()) + .chunks(compression_batch_size.get()) { // batch compression currently errors if any point in the batch is the identity point let (batch_points, batch_indices): (Vec<_>, Vec<_>) = batch @@ -298,7 +301,7 @@ mod tests { let amount: u64 = 55; let mut instance = DiscreteLog::new(G, Scalar::from(amount) * G); - instance.num_threads(4).unwrap(); + instance.num_threads(4.try_into().unwrap()).unwrap(); // Very informal measurements for now let start_computation = Instant::now(); diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index e499106e1e58b2..0bc9eb0511b9ea 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -799,7 +799,7 @@ mod tests { let ciphertext = ElGamal::encrypt(&public, amount); let mut instance = ElGamal::decrypt(&secret, &ciphertext); - instance.num_threads(4).unwrap(); + instance.num_threads(4.try_into().unwrap()).unwrap(); assert_eq!(57_u64, instance.decode_u32().unwrap()); } From 4b0e7d6ba35dd6778e14e020984a71da0ec565ee Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 29 Mar 2024 17:05:53 -0500 Subject: [PATCH 107/153] Test Validator: Set deployment slot to `0` for cloned upgradeable programs (#501) test-validator: clone upgradeable programs with slot 0 --- test-validator/src/lib.rs | 74 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 4 deletions(-) diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index f551cb97820d06..8b32762a203d02 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -37,7 +37,7 @@ use { snapshot_config::SnapshotConfig, }, solana_sdk::{ - account::{Account, AccountSharedData}, + account::{Account, AccountSharedData, WritableAccount}, bpf_loader_upgradeable::UpgradeableLoaderState, clock::{Slot, DEFAULT_MS_PER_SLOT}, commitment_config::CommitmentConfig, @@ -305,14 +305,16 @@ impl TestValidatorGenesis { self } - pub fn clone_accounts( + fn clone_accounts_and_transform( &mut self, addresses: T, rpc_client: &RpcClient, skip_missing: bool, + transform: F, ) -> Result<&mut Self, String> where T: IntoIterator, + F: Fn(&Pubkey, Account) -> Result, { let addresses: Vec = addresses.into_iter().collect(); for chunk in addresses.chunks(MAX_MULTIPLE_ACCOUNTS) { @@ -322,7 +324,7 @@ impl TestValidatorGenesis { .map_err(|err| format!("Failed to fetch: {err}"))?; for (address, res) in chunk.iter().zip(responses) { if let Some(account) = res { - self.add_account(*address, AccountSharedData::from(account)); + self.add_account(*address, transform(address, account)?); } else if skip_missing { warn!("Could not find {}, skipping.", address); } else { @@ -333,6 +335,70 @@ impl TestValidatorGenesis { Ok(self) } + pub fn clone_accounts( + &mut self, + addresses: T, + rpc_client: &RpcClient, + skip_missing: bool, + ) -> Result<&mut Self, String> + where + T: IntoIterator, + { + self.clone_accounts_and_transform( + addresses, + rpc_client, + skip_missing, + |_address, account| Ok(AccountSharedData::from(account)), + ) + } + + pub fn clone_programdata_accounts( + &mut self, + addresses: T, + rpc_client: &RpcClient, + skip_missing: bool, + ) -> Result<&mut Self, String> + where + T: IntoIterator, + { + self.clone_accounts_and_transform( + addresses, + rpc_client, + skip_missing, + |address, account| { + let programdata_offset = 
UpgradeableLoaderState::size_of_programdata_metadata(); + // Ensure the account is a proper programdata account before + // attempting to serialize into it. + if let Ok(UpgradeableLoaderState::ProgramData { + upgrade_authority_address, + .. + }) = bincode::deserialize(&account.data[..programdata_offset]) + { + // Serialize new programdata metadata into the resulting account, + // to overwrite the deployment slot to `0`. + let mut programdata_account = AccountSharedData::from(account); + bincode::serialize_into( + programdata_account.data_as_mut_slice(), + &UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address, + }, + ) + .map(|()| Ok(programdata_account)) + .unwrap_or_else(|_| { + Err(format!( + "Failed to write to upgradeable programdata account {address}", + )) + }) + } else { + Err(format!( + "Failed to read upgradeable programdata account {address}", + )) + } + }, + ) + } + pub fn clone_upgradeable_programs( &mut self, addresses: T, @@ -360,7 +426,7 @@ impl TestValidatorGenesis { } } - self.clone_accounts(programdata_addresses, rpc_client, false)?; + self.clone_programdata_accounts(programdata_addresses, rpc_client, false)?; Ok(self) } From e56d314df52cecc00bd8d30b56ca75adef159985 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 29 Mar 2024 17:10:02 -0500 Subject: [PATCH 108/153] determine if account is in write cache earlier (#504) --- accounts-db/src/accounts_db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f9d40da83e9f7c..54a8082af3dccc 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5343,14 +5343,14 @@ impl AccountsDb { max_root, load_hint, )?; + let in_write_cache = matches!(account_accessor, LoadedAccountAccessor::Cached(_)); let loaded_account = account_accessor.check_and_get_loaded_account(); - let is_cached = loaded_account.is_cached(); let account = loaded_account.take_account(); if matches!(load_zero_lamports, LoadZeroLamports::None) && account.is_zero_lamport() { return None; } - if !is_cached && load_hint != LoadHint::FixedMaxRootDoNotPopulateReadCache { + if !in_write_cache && load_hint != LoadHint::FixedMaxRootDoNotPopulateReadCache { /* We show this store into the read-only cache for account 'A' and future loads of 'A' from the read-only cache are safe/reflect 'A''s latest state on this fork. From 725518ebad53a1c748003084e74fdd239fb14df2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 01:49:40 +0800 Subject: [PATCH 109/153] build(deps): bump strum from 0.24.0 to 0.24.1 (#38) * build(deps): bump strum from 0.24.0 to 0.24.1 Bumps [strum](https://github.com/Peternator7/strum) from 0.24.0 to 0.24.1. - [Release notes](https://github.com/Peternator7/strum/releases) - [Changelog](https://github.com/Peternator7/strum/blob/master/CHANGELOG.md) - [Commits](https://github.com/Peternator7/strum/commits) --- updated-dependencies: - dependency-name: strum dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c4224650f1847..19f162869055a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7982,9 +7982,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9a69073b2931ce..7de331f0ca8435 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6895,9 +6895,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] From 43e70dbe7942be0387fa29995d9beb2abcccde16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 01:54:49 +0800 Subject: [PATCH 110/153] build(deps): bump js-sys from 0.3.68 to 0.3.69 (#263) * build(deps): bump js-sys from 0.3.68 to 0.3.69 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.68 to 0.3.69. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 24 ++++++++++++------------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19f162869055a2..2dbdd7b21b72c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2960,9 +2960,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -8931,9 +8931,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -8941,9 +8941,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", @@ -8968,9 +8968,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8978,9 +8978,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", @@ -8991,9 +8991,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" diff --git a/Cargo.toml b/Cargo.toml index 6275414e6e33e0..0f743e41825c64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -228,7 +228,7 @@ itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.68" +js-sys = "0.3.69" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7de331f0ca8435..8e7315d17631d5 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2425,9 +2425,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -7767,9 +7767,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7777,9 +7777,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", @@ -7804,9 +7804,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7814,9 +7814,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", @@ -7827,9 +7827,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" From bfd69b069f8ba67ca2bbafdeec9a7d8a5173d07b Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 1 Apr 2024 10:41:00 +0800 Subject: [PATCH 111/153] chore: bump to actions/checkout@v4 (#495) --- .github/workflows/changelog-label.yml | 2 +- .github/workflows/client-targets.yml | 4 ++-- .github/workflows/crate-check.yml | 2 +- .github/workflows/docs.yml | 4 ++-- .github/workflows/downstream-project-anchor.yml | 2 +- .github/workflows/downstream-project-spl.yml | 6 +++--- .github/workflows/release-artifacts.yml | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/changelog-label.yml b/.github/workflows/changelog-label.yml index c63f7821c260dd..ffd8ec21033ef8 100644 --- a/.github/workflows/changelog-label.yml +++ b/.github/workflows/changelog-label.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Check if changes to CHANGELOG.md diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml index 97118918ef8442..848d10f85089e2 100644 --- a/.github/workflows/client-targets.yml +++ b/.github/workflows/client-targets.yml @@ -31,7 +31,7 @@ jobs: - armv7-linux-androideabi runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: 
actions/checkout@v4 - run: cargo install cargo-ndk@2.12.2 @@ -55,7 +55,7 @@ jobs: - x86_64-apple-darwin runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: | diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml index a47e7cde5fb217..1eba1d48994d2c 100644 --- a/.github/workflows/crate-check.yml +++ b/.github/workflows/crate-check.yml @@ -15,7 +15,7 @@ jobs: check: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index fb2096bd33b185..17cc728e68bd17 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -76,7 +76,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Node uses: actions/setup-node@v3 diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index 487d8361ea38d5..2b171293068887 100644 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -44,7 +44,7 @@ jobs: matrix: version: ["v0.29.0"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - shell: bash run: | diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index 690a312b7994e3..45e16cbfcf0d07 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -40,7 +40,7 @@ jobs: if: false runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - shell: bash run: | @@ -89,7 +89,7 @@ jobs: }, ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - shell: bash run: | @@ -140,7 +140,7 @@ jobs: - [single-pool/program] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - shell: bash run: | diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index c840862a5e28f3..de32cee71dfc97 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -18,7 +18,7 @@ jobs: channel: ${{ steps.build.outputs.channel }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: master fetch-depth: 0 From 1cee392637cbf838a493e925675cc8cc2e7cb672 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 1 Apr 2024 10:41:21 +0800 Subject: [PATCH 112/153] ci: remove increment-cargo-version-on-release.yml (#494) --- .../increment-cargo-version-on-release.yml | 39 ------------------- 1 file changed, 39 deletions(-) delete mode 100644 .github/workflows/increment-cargo-version-on-release.yml diff --git a/.github/workflows/increment-cargo-version-on-release.yml b/.github/workflows/increment-cargo-version-on-release.yml deleted file mode 100644 index 5592d76ca52dd1..00000000000000 --- a/.github/workflows/increment-cargo-version-on-release.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: increment-cargo-version - -on: - release: - types: [published] - -jobs: - check_compilation: - name: Increment cargo version - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v3 - - # This script confirms two assumptions: - # 1) Tag should be branch. 
- # 2) Tag should match the crate version numbers in the manifest files (which get incremented by the next step) - - name: Confirm tag, branch, and cargo version numbers - run: scripts/confirm-cargo-version-numbers-before-bump.sh ${{ github.event.release.target_commitish }} ${{ github.event.release.tag_name }} - - - name: Update Patch Version Numbers - run: | - OUTPUT=$(scripts/increment-cargo-version.sh patch) - SOLANA_NEW_VERSION=$(sed -E 's/.* -> //' <<< $OUTPUT) - echo "SOLANA_NEW_VERSION=$SOLANA_NEW_VERSION" - echo "SOLANA_NEW_VERSION=$SOLANA_NEW_VERSION" >> $GITHUB_ENV - - - name: Cargo Tree - run: ./scripts/cargo-for-all-lock-files.sh tree - - - name: Create Pull Request - uses: peter-evans/create-pull-request@v4 - with: - commit-message: Bump Version to ${{ env.SOLANA_NEW_VERSION }} - title: Bump Version to ${{ env.SOLANA_NEW_VERSION }} - body: PR opened by Github Action - branch: update-version-${{ env.SOLANA_NEW_VERSION }} - base: ${{ github.event.release.target_commitish }} - labels: automerge From 3c8da0dbea1bc79272c4156bc210d2ccc0b46b11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 18:53:10 +0800 Subject: [PATCH 113/153] build(deps): bump serde_json from 1.0.114 to 1.0.115 (#517) * build(deps): bump serde_json from 1.0.114 to 1.0.115 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.114 to 1.0.115. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.114...v1.0.115) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2dbdd7b21b72c6..afb8ac3b16d29c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4984,9 +4984,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", diff --git a/Cargo.toml b/Cargo.toml index 0f743e41825c64..348dfc2847f591 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -294,7 +294,7 @@ seqlock = "0.2.0" serde = "1.0.197" serde_bytes = "0.11.14" serde_derive = "1.0.103" -serde_json = "1.0.114" +serde_json = "1.0.115" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.32" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8e7315d17631d5..414dd6b0adcae4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4323,9 +4323,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", From e0e659680a9ebcbb97bbb57859ce6590a292bf5d Mon Sep 17 00:00:00 2001 From: sakridge Date: Mon, 1 Apr 2024 
14:31:04 +0200 Subject: [PATCH 114/153] Remove duplicated token ids and use a shared inline-spl crate (#456) --- Cargo.lock | 15 ++++++ Cargo.toml | 2 + accounts-cluster-bench/Cargo.toml | 3 +- accounts-cluster-bench/src/main.rs | 15 +++--- accounts-db/Cargo.toml | 1 + accounts-db/src/accounts_db.rs | 8 +-- accounts-db/src/accounts_index.rs | 49 +++++++++++-------- accounts-db/src/lib.rs | 2 - inline-spl/Cargo.toml | 26 ++++++++++ .../src/associated_token_account.rs | 2 +- inline-spl/src/lib.rs | 3 ++ .../src/token.rs | 2 +- .../src/token_2022.rs | 6 +-- program-test/Cargo.toml | 1 + program-test/src/programs.rs | 16 ++---- program-test/tests/compute_units.rs | 3 +- program-test/tests/realloc.rs | 2 +- program-test/tests/spl.rs | 4 +- programs/sbf/Cargo.lock | 13 +++++ rpc/Cargo.toml | 1 + rpc/src/rpc.rs | 41 ++++++++-------- rpc/src/rpc_subscription_tracker.rs | 7 ++- runtime/Cargo.toml | 1 + runtime/src/bank/tests.rs | 13 ++--- runtime/src/genesis_utils.rs | 10 ++-- runtime/src/lib.rs | 1 - runtime/src/static_ids.rs | 13 +++-- validator/Cargo.toml | 1 + validator/src/admin_rpc_service.rs | 17 ++++--- 29 files changed, 165 insertions(+), 113 deletions(-) create mode 100644 inline-spl/Cargo.toml rename runtime/src/inline_spl_associated_token_account.rs => inline-spl/src/associated_token_account.rs (89%) create mode 100644 inline-spl/src/lib.rs rename accounts-db/src/inline_spl_token.rs => inline-spl/src/token.rs (97%) rename accounts-db/src/inline_spl_token_2022.rs => inline-spl/src/token_2022.rs (72%) diff --git a/Cargo.lock b/Cargo.lock index afb8ac3b16d29c..576352d8de1e5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -227,6 +227,7 @@ dependencies = [ "solana-genesis-utils", "solana-geyser-plugin-manager", "solana-gossip", + "solana-inline-spl", "solana-ledger", "solana-logger", "solana-metrics", @@ -5365,6 +5366,7 @@ dependencies = [ "solana-core", "solana-faucet", "solana-gossip", + "solana-inline-spl", "solana-local-cluster", "solana-logger", "solana-measure", @@ -5418,6 +5420,7 @@ dependencies = [ "solana-bucket-map", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-inline-spl", "solana-logger", "solana-measure", "solana-metrics", @@ -6293,6 +6296,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-inline-spl" +version = "2.0.0" +dependencies = [ + "bytemuck", + "rustc_version 0.4.0", + "solana-sdk", +] + [[package]] name = "solana-keygen" version = "2.0.0" @@ -6725,6 +6737,7 @@ dependencies = [ "solana-banks-interface", "solana-banks-server", "solana-bpf-loader-program", + "solana-inline-spl", "solana-logger", "solana-program-runtime", "solana-runtime", @@ -6846,6 +6859,7 @@ dependencies = [ "solana-entry", "solana-faucet", "solana-gossip", + "solana-inline-spl", "solana-ledger", "solana-measure", "solana-metrics", @@ -7024,6 +7038,7 @@ dependencies = [ "solana-cost-model", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-inline-spl", "solana-loader-v4-program", "solana-logger", "solana-measure", diff --git a/Cargo.toml b/Cargo.toml index 348dfc2847f591..d9144de2ff59ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ members = [ "geyser-plugin-interface", "geyser-plugin-manager", "gossip", + "inline-spl", "install", "keygen", "ledger", @@ -338,6 +339,7 @@ solana-genesis-utils = { path = "genesis-utils", version = "=2.0.0" } agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=2.0.0" } solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.0.0" } solana-gossip = { path = "gossip", version = 
"=2.0.0" } +solana-inline-spl = { path = "inline-spl", version = "=2.0.0" } solana-ledger = { path = "ledger", version = "=2.0.0" } solana-loader-v4-program = { path = "programs/loader-v4", version = "=2.0.0" } solana-local-cluster = { path = "local-cluster", version = "=2.0.0" } diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index 54a455753831fd..3d8c8c721ca375 100644 --- a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -14,12 +14,12 @@ log = { workspace = true } rand = { workspace = true } rayon = { workspace = true } solana-account-decoder = { workspace = true } -solana-accounts-db = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } solana-client = { workspace = true } solana-faucet = { workspace = true } solana-gossip = { workspace = true } +solana-inline-spl = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-net-utils = { workspace = true } @@ -32,6 +32,7 @@ solana-version = { workspace = true } spl-token = { workspace = true, features = ["no-entrypoint"] } [dev-dependencies] +solana-accounts-db = { workspace = true } solana-core = { workspace = true } solana-local-cluster = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs index 1a945090b39777..9e84456011e023 100644 --- a/accounts-cluster-bench/src/main.rs +++ b/accounts-cluster-bench/src/main.rs @@ -4,13 +4,13 @@ use { log::*, rand::{thread_rng, Rng}, rayon::prelude::*, - solana_accounts_db::inline_spl_token, solana_clap_utils::{ hidden_unless_forced, input_parsers::pubkey_of, input_validators::is_url_or_moniker, }, solana_cli_config::{ConfigInput, CONFIG_FILE}, solana_client::{rpc_request::TokenAccountsFilter, transaction_executor::TransactionExecutor}, solana_gossip::gossip_service::discover, + solana_inline_spl::token, solana_measure::measure::Measure, solana_rpc_client::rpc_client::RpcClient, solana_sdk::{ @@ -143,7 +143,7 @@ fn make_create_message( let instructions: Vec<_> = (0..num_instructions) .flat_map(|_| { let program_id = if mint.is_some() { - inline_spl_token::id() + token::id() } else { system_program::id() }; @@ -190,7 +190,7 @@ fn make_close_message( let instructions: Vec<_> = (0..num_instructions) .filter_map(|_| { let program_id = if spl_token { - inline_spl_token::id() + token::id() } else { system_program::id() }; @@ -465,7 +465,7 @@ fn make_rpc_bench_threads( num_rpc_bench_threads: usize, ) -> Vec> { let program_id = if mint.is_some() { - inline_spl_token::id() + token::id() } else { system_program::id() }; @@ -1055,10 +1055,7 @@ fn main() { pub mod test { use { super::*, - solana_accounts_db::{ - accounts_index::{AccountIndex, AccountSecondaryIndexes}, - inline_spl_token, - }, + solana_accounts_db::accounts_index::{AccountIndex, AccountSecondaryIndexes}, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_local_cluster::{ @@ -1230,7 +1227,7 @@ pub mod test { &spl_mint_keypair.pubkey(), spl_mint_rent, spl_mint_len as u64, - &inline_spl_token::id(), + &token::id(), ), spl_token::instruction::initialize_mint( &spl_token::id(), diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index ff38118806c3c9..1a3abb6c04ea88 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -37,6 +37,7 @@ smallvec = { workspace = true, features = ["const_generics"] } 
solana-bucket-map = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } +solana-inline-spl = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-nohash-hasher = { workspace = true } diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 54a8082af3dccc..4819838b4c2fa1 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9477,7 +9477,6 @@ pub mod tests { ancient_append_vecs, append_vec::{test_utils::TempFile, AppendVecStoredAccountMeta}, cache_hash_data::CacheHashDataFile, - inline_spl_token, }, assert_matches::assert_matches, itertools::Itertools, @@ -11597,14 +11596,15 @@ pub mod tests { // Set up account to be added to secondary index let mint_key = Pubkey::new_unique(); - let mut account_data_with_mint = vec![0; inline_spl_token::Account::get_packed_len()]; + let mut account_data_with_mint = + vec![0; solana_inline_spl::token::Account::get_packed_len()]; account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.to_bytes())); let mut normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); - normal_account.set_owner(inline_spl_token::id()); + normal_account.set_owner(solana_inline_spl::token::id()); normal_account.set_data(account_data_with_mint.clone()); let mut zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); - zero_account.set_owner(inline_spl_token::id()); + zero_account.set_owner(solana_inline_spl::token::id()); zero_account.set_data(account_data_with_mint); //store an account diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 04426251f79c2e..a0eac93c995b25 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -6,8 +6,6 @@ use { ancestors::Ancestors, bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, contains::Contains, - inline_spl_token::{self, GenericTokenAccount}, - inline_spl_token_2022, pubkey_bins::PubkeyBinCalculator24, rolling_bit_field::RollingBitField, secondary_index::*, @@ -1468,7 +1466,7 @@ impl + Into> AccountsIndex { max_root } - fn update_spl_token_secondary_indexes( + fn update_spl_token_secondary_indexes( &self, token_id: &Pubkey, pubkey: &Pubkey, @@ -1559,15 +1557,15 @@ impl + Into> AccountsIndex { // (as persisted tombstone for snapshots). This will then ultimately be // filtered out by post-scan filters, like in `get_filtered_spl_token_accounts_by_owner()`. 
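
The two call sites below instantiate one generic update routine per token
program. As a minimal, self-contained sketch of that GenericTokenAccount
dispatch, assuming the 165-byte packed account layout and the token-2022
account-type byte (2) that this patch relies on elsewhere; the names here
are illustrative, not the crate's exact API:

// Sketch only: mirrors the shape of the inline-spl validity checks.
const PACKED_TOKEN_ACCOUNT_LEN: usize = 165; // spl-token Account packed length
const ACCOUNTTYPE_ACCOUNT: u8 = 2; // token-2022 account-type discriminant

trait GenericTokenAccount {
    fn valid_account_data(account_data: &[u8]) -> bool;
}

struct TokenAccount; // legacy spl-token layout
struct Token2022Account; // spl-token-2022 layout

impl GenericTokenAccount for TokenAccount {
    // A legacy token account is exactly the packed base layout.
    fn valid_account_data(account_data: &[u8]) -> bool {
        account_data.len() == PACKED_TOKEN_ACCOUNT_LEN
    }
}

impl GenericTokenAccount for Token2022Account {
    // A token-2022 account is the base layout, optionally followed by an
    // account-type byte and extension data.
    fn valid_account_data(account_data: &[u8]) -> bool {
        TokenAccount::valid_account_data(account_data)
            || account_data.get(PACKED_TOKEN_ACCOUNT_LEN) == Some(&ACCOUNTTYPE_ACCOUNT)
    }
}

// Written once, monomorphized per token program:
fn should_index<G: GenericTokenAccount>(account_data: &[u8]) -> bool {
    G::valid_account_data(account_data)
}
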
- self.update_spl_token_secondary_indexes::( - &inline_spl_token::id(), + self.update_spl_token_secondary_indexes::( + &solana_inline_spl::token::id(), pubkey, account_owner, account_data, account_indexes, ); - self.update_spl_token_secondary_indexes::( - &inline_spl_token_2022::id(), + self.update_spl_token_secondary_indexes::( + &solana_inline_spl::token_2022::id(), pubkey, account_owner, account_data, @@ -2016,7 +2014,7 @@ impl + Into> AccountsIndex { pub mod tests { use { super::*, - crate::inline_spl_token::*, + solana_inline_spl::token::SPL_TOKEN_ACCOUNT_OWNER_OFFSET, solana_sdk::{ account::{AccountSharedData, WritableAccount}, pubkey::PUBKEY_BYTES, @@ -2024,6 +2022,11 @@ pub mod tests { std::ops::RangeInclusive, }; + const SPL_TOKENS: &[Pubkey] = &[ + solana_inline_spl::token::id(), + solana_inline_spl::token_2022::id(), + ]; + pub enum SecondaryIndexTypes<'a> { RwLock(&'a SecondaryIndex), DashMap(&'a SecondaryIndex), @@ -3362,6 +3365,10 @@ pub mod tests { ); } + fn make_empty_token_account_data() -> Vec { + vec![0; solana_inline_spl::token::Account::get_packed_len()] + } + fn run_test_purge_exact_secondary_index< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( @@ -3376,7 +3383,7 @@ pub mod tests { let index_key = Pubkey::new_unique(); let account_key = Pubkey::new_unique(); - let mut account_data = vec![0; inline_spl_token::Account::get_packed_len()]; + let mut account_data = make_empty_token_account_data(); account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Insert slots into secondary index @@ -3389,7 +3396,7 @@ pub mod tests { &AccountSharedData::create( 0, account_data.to_vec(), - inline_spl_token::id(), + solana_inline_spl::token::id(), false, 0, ), @@ -3558,7 +3565,7 @@ pub mod tests { let mut secondary_indexes = secondary_indexes.clone(); let account_key = Pubkey::new_unique(); let index_key = Pubkey::new_unique(); - let mut account_data = vec![0; inline_spl_token::Account::get_packed_len()]; + let mut account_data = make_empty_token_account_data(); account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Wrong program id @@ -3650,9 +3657,9 @@ pub mod tests { fn test_dashmap_secondary_index() { let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::::default_for_tests(); - for token_id in [inline_spl_token::id(), inline_spl_token_2022::id()] { + for token_id in SPL_TOKENS { run_test_spl_token_secondary_indexes( - &token_id, + token_id, &index, &index.spl_token_mint_index, key_start, @@ -3666,9 +3673,9 @@ pub mod tests { fn test_rwlock_secondary_index() { let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::::default_for_tests(); - for token_id in [inline_spl_token::id(), inline_spl_token_2022::id()] { + for token_id in SPL_TOKENS { run_test_spl_token_secondary_indexes( - &token_id, + token_id, &index, &index.spl_token_owner_index, key_start, @@ -3692,10 +3699,10 @@ pub mod tests { let secondary_key1 = Pubkey::new_unique(); let secondary_key2 = Pubkey::new_unique(); let slot = 1; - let mut account_data1 = vec![0; inline_spl_token::Account::get_packed_len()]; + let mut account_data1 = make_empty_token_account_data(); account_data1[index_key_start..index_key_end] .clone_from_slice(&(secondary_key1.to_bytes())); - let mut account_data2 = vec![0; inline_spl_token::Account::get_packed_len()]; + let mut account_data2 = make_empty_token_account_data(); 
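
These tests poke index keys directly into raw account bytes, continuing just
below. For reference, a sketch of the fixed offsets involved, assuming the
standard SPL token account layout (mint pubkey at bytes 0..32, owner at
bytes 32..64, 165 bytes packed) that the RPC memcmp filters later in this
patch also hard-code:

const PUBKEY_BYTES: usize = 32;
const SPL_TOKEN_ACCOUNT_MINT_OFFSET: usize = 0;
const SPL_TOKEN_ACCOUNT_OWNER_OFFSET: usize = 32;
const PACKED_TOKEN_ACCOUNT_LEN: usize = 165;

// Zeroed data the size of a packed token account, as in the tests above.
fn make_empty_token_account_data() -> Vec<u8> {
    vec![0; PACKED_TOKEN_ACCOUNT_LEN]
}

fn main() {
    // Stand-ins for Pubkey::new_unique().to_bytes().
    let mint = [3u8; PUBKEY_BYTES];
    let owner = [7u8; PUBKEY_BYTES];
    let mut data = make_empty_token_account_data();
    data[SPL_TOKEN_ACCOUNT_MINT_OFFSET..SPL_TOKEN_ACCOUNT_MINT_OFFSET + PUBKEY_BYTES]
        .clone_from_slice(&mint);
    data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET..SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES]
        .clone_from_slice(&owner);
    // A secondary index keyed on the owner offset now sees this account.
    assert_eq!(
        &data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET..SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES],
        owner.as_slice()
    );
}
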
account_data2[index_key_start..index_key_end] .clone_from_slice(&(secondary_key2.to_bytes())); @@ -3771,9 +3778,9 @@ pub mod tests { fn test_dashmap_secondary_index_same_slot_and_forks() { let (key_start, key_end, account_index) = create_dashmap_secondary_index_state(); let index = AccountsIndex::::default_for_tests(); - for token_id in [inline_spl_token::id(), inline_spl_token_2022::id()] { + for token_id in SPL_TOKENS { run_test_secondary_indexes_same_slot_and_forks( - &token_id, + token_id, &index, &index.spl_token_mint_index, key_start, @@ -3787,9 +3794,9 @@ pub mod tests { fn test_rwlock_secondary_index_same_slot_and_forks() { let (key_start, key_end, account_index) = create_rwlock_secondary_index_state(); let index = AccountsIndex::::default_for_tests(); - for token_id in [inline_spl_token::id(), inline_spl_token_2022::id()] { + for token_id in SPL_TOKENS { run_test_secondary_indexes_same_slot_and_forks( - &token_id, + token_id, &index, &index.spl_token_owner_index, key_start, diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 7883f852d1e3f2..6a50eb1b0ec3fb 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -27,8 +27,6 @@ pub mod cache_hash_data_stats; pub mod contains; pub mod epoch_accounts_hash; pub mod hardened_unpack; -pub mod inline_spl_token; -pub mod inline_spl_token_2022; pub mod partitioned_rewards; mod pubkey_bins; mod read_only_accounts_cache; diff --git a/inline-spl/Cargo.toml b/inline-spl/Cargo.toml new file mode 100644 index 00000000000000..e3ca8d06357981 --- /dev/null +++ b/inline-spl/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "solana-inline-spl" +description = "Limited types and ids from the Solana Program Library" +documentation = "https://docs.rs/solana-inline-spl" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bytemuck = { workspace = true } +solana-sdk = { workspace = true } + +[lib] +crate-type = ["lib"] +name = "solana_inline_spl" + +[dev-dependencies] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +rustc_version = { workspace = true } diff --git a/runtime/src/inline_spl_associated_token_account.rs b/inline-spl/src/associated_token_account.rs similarity index 89% rename from runtime/src/inline_spl_associated_token_account.rs rename to inline-spl/src/associated_token_account.rs index 6052d9a700f140..9057bee3840ee2 100644 --- a/runtime/src/inline_spl_associated_token_account.rs +++ b/inline-spl/src/associated_token_account.rs @@ -1,6 +1,6 @@ // Partial SPL Associated Token Account declarations inlined to avoid an external dependency on the spl-associated-token-account crate solana_sdk::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"); -pub(crate) mod program_v1_1_0 { +pub mod program_v1_1_0 { solana_sdk::declare_id!("NatA1Zyo48dJ7yuwR7cGURwhskKA8ywUyxb9GvG7mTC"); } diff --git a/inline-spl/src/lib.rs b/inline-spl/src/lib.rs new file mode 100644 index 00000000000000..4cceddeaf36179 --- /dev/null +++ b/inline-spl/src/lib.rs @@ -0,0 +1,3 @@ +pub mod associated_token_account; +pub mod token; +pub mod token_2022; diff --git a/accounts-db/src/inline_spl_token.rs b/inline-spl/src/token.rs similarity index 97% rename from accounts-db/src/inline_spl_token.rs rename to inline-spl/src/token.rs index 0e936c5f403e6d..a15822932c8f7c 100644 --- a/accounts-db/src/inline_spl_token.rs +++ b/inline-spl/src/token.rs @@ -38,7 
+38,7 @@ pub trait GenericTokenAccount { // Call after account length has already been verified fn unpack_pubkey_unchecked(account_data: &[u8], offset: usize) -> &Pubkey { - bytemuck::from_bytes(&account_data[offset..offset + PUBKEY_BYTES]) + bytemuck::from_bytes(&account_data[offset..offset.wrapping_add(PUBKEY_BYTES)]) } fn unpack_account_owner(account_data: &[u8]) -> Option<&Pubkey> { diff --git a/accounts-db/src/inline_spl_token_2022.rs b/inline-spl/src/token_2022.rs similarity index 72% rename from accounts-db/src/inline_spl_token_2022.rs rename to inline-spl/src/token_2022.rs index a16dccc8934175..1787fa817645f9 100644 --- a/accounts-db/src/inline_spl_token_2022.rs +++ b/inline-spl/src/token_2022.rs @@ -1,5 +1,5 @@ /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token-2022 crate -use crate::inline_spl_token::{self, GenericTokenAccount}; +use crate::token::{self, GenericTokenAccount}; solana_sdk::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb"); @@ -9,10 +9,10 @@ pub const ACCOUNTTYPE_ACCOUNT: u8 = 2; pub struct Account; impl GenericTokenAccount for Account { fn valid_account_data(account_data: &[u8]) -> bool { - inline_spl_token::Account::valid_account_data(account_data) + token::Account::valid_account_data(account_data) || ACCOUNTTYPE_ACCOUNT == *account_data - .get(inline_spl_token::Account::get_packed_len()) + .get(token::Account::get_packed_len()) .unwrap_or(&0) } } diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index b8b4fcdb332a09..1456d0dcca8018 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -22,6 +22,7 @@ solana-banks-client = { workspace = true } solana-banks-interface = { workspace = true } solana-banks-server = { workspace = true } solana-bpf-loader-program = { workspace = true } +solana-inline-spl = { workspace = true } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } diff --git a/program-test/src/programs.rs b/program-test/src/programs.rs index 8d9a42790f7af2..3046a713a35e76 100644 --- a/program-test/src/programs.rs +++ b/program-test/src/programs.rs @@ -5,30 +5,20 @@ use solana_sdk::{ rent::Rent, }; -mod spl_token { - solana_sdk::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA"); -} -mod spl_token_2022 { - solana_sdk::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb"); -} mod spl_memo_1_0 { solana_sdk::declare_id!("Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo"); } mod spl_memo_3_0 { solana_sdk::declare_id!("MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr"); } -mod spl_associated_token_account { - solana_sdk::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"); -} - static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[ ( - spl_token::ID, + solana_inline_spl::token::ID, solana_sdk::bpf_loader::ID, include_bytes!("programs/spl_token-3.5.0.so"), ), ( - spl_token_2022::ID, + solana_inline_spl::token_2022::ID, solana_sdk::bpf_loader_upgradeable::ID, include_bytes!("programs/spl_token_2022-1.0.0.so"), ), @@ -43,7 +33,7 @@ static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[ include_bytes!("programs/spl_memo-3.0.0.so"), ), ( - spl_associated_token_account::ID, + solana_inline_spl::associated_token_account::ID, solana_sdk::bpf_loader::ID, include_bytes!("programs/spl_associated_token_account-1.1.1.so"), ), diff --git a/program-test/tests/compute_units.rs b/program-test/tests/compute_units.rs index 750f7eecfc6b99..bcbdd0b252f76b 100644 --- a/program-test/tests/compute_units.rs +++ 
b/program-test/tests/compute_units.rs @@ -2,7 +2,6 @@ use { solana_program_test::ProgramTest, solana_sdk::{ instruction::{AccountMeta, Instruction}, - pubkey::Pubkey, signature::{Keypair, Signer}, system_instruction, sysvar::rent, @@ -26,7 +25,7 @@ async fn max_compute_units() { // Invalid compute unit maximums are only triggered by BPF programs, so send // a valid instruction into a BPF program to make sure the issue doesn't // manifest. - let token_2022_id = Pubkey::try_from("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(); + let token_2022_id = solana_inline_spl::token_2022::id(); let mint = Keypair::new(); let rent = context.banks_client.get_rent().await.unwrap(); let space = 82; diff --git a/program-test/tests/realloc.rs b/program-test/tests/realloc.rs index 00e2d768f71acd..14a93439e826f1 100644 --- a/program-test/tests/realloc.rs +++ b/program-test/tests/realloc.rs @@ -52,7 +52,7 @@ async fn realloc_smaller_in_cpi() { ); let mut context = program_test.start_with_context().await; - let token_2022_id = Pubkey::try_from("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(); + let token_2022_id = solana_inline_spl::token_2022::id(); let mint = Keypair::new(); let account = Keypair::new(); let rent = context.banks_client.get_rent().await.unwrap(); diff --git a/program-test/tests/spl.rs b/program-test/tests/spl.rs index a97cdc51bd2656..fc6deff7ca61c4 100644 --- a/program-test/tests/spl.rs +++ b/program-test/tests/spl.rs @@ -16,7 +16,7 @@ use { async fn programs_present() { let (mut banks_client, _, _) = ProgramTest::default().start().await; let rent = banks_client.get_rent().await.unwrap(); - let token_2022_id = Pubkey::try_from("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(); + let token_2022_id = solana_inline_spl::token_2022::id(); let (token_2022_programdata_id, _) = Pubkey::find_program_address(&[token_2022_id.as_ref()], &bpf_loader_upgradeable::id()); @@ -34,7 +34,7 @@ async fn programs_present() { async fn token_2022() { let (mut banks_client, payer, recent_blockhash) = ProgramTest::default().start().await; - let token_2022_id = Pubkey::try_from("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(); + let token_2022_id = solana_inline_spl::token_2022::id(); let mint = Keypair::new(); let rent = banks_client.get_rent().await.unwrap(); let space = 82; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 414dd6b0adcae4..6ce4b5570cfa47 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4657,6 +4657,7 @@ dependencies = [ "solana-bucket-map", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-inline-spl", "solana-measure", "solana-metrics", "solana-nohash-hasher", @@ -5182,6 +5183,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-inline-spl" +version = "2.0.0" +dependencies = [ + "bytemuck", + "rustc_version", + "solana-sdk", +] + [[package]] name = "solana-ledger" version = "2.0.0" @@ -5462,6 +5472,7 @@ dependencies = [ "solana-banks-interface", "solana-banks-server", "solana-bpf-loader-program", + "solana-inline-spl", "solana-logger", "solana-program-runtime", "solana-runtime", @@ -5575,6 +5586,7 @@ dependencies = [ "solana-entry", "solana-faucet", "solana-gossip", + "solana-inline-spl", "solana-ledger", "solana-measure", "solana-metrics", @@ -5707,6 +5719,7 @@ dependencies = [ "solana-cost-model", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-inline-spl", "solana-loader-v4-program", "solana-measure", "solana-metrics", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 
d4f2648b6b1078..2a0c5c480da1b0 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -35,6 +35,7 @@ solana-client = { workspace = true } solana-entry = { workspace = true } solana-faucet = { workspace = true } solana-gossip = { workspace = true } +solana-inline-spl = { workspace = true } solana-ledger = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index f0856aeae4b1a2..4c8bacfa953c7c 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -16,13 +16,15 @@ use { solana_accounts_db::{ accounts::AccountAddressFilter, accounts_index::{AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig}, - inline_spl_token::{SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, - inline_spl_token_2022::{self, ACCOUNTTYPE_ACCOUNT}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, solana_entry::entry::Entry, solana_faucet::faucet::request_airdrop_transaction, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, + solana_inline_spl::{ + token::{SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, + token_2022::{self, ACCOUNTTYPE_ACCOUNT}, + }, solana_ledger::{ blockstore::{Blockstore, SignatureInfosForAddress}, blockstore_db::BlockstoreError, @@ -2388,7 +2390,7 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> offset, bytes: MemcmpEncodedBytes::Bytes(bytes), .. - }) if *offset == account_packed_len && *program_id == inline_spl_token_2022::id() => { + }) if *offset == account_packed_len && *program_id == token_2022::id() => { memcmp_filter = Some(bytes) } #[allow(deprecated)] @@ -2446,7 +2448,7 @@ fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> offset, bytes: MemcmpEncodedBytes::Bytes(bytes), .. 
- }) if *offset == account_packed_len && *program_id == inline_spl_token_2022::id() => { + }) if *offset == account_packed_len && *program_id == token_2022::id() => { memcmp_filter = Some(bytes) } #[allow(deprecated)] @@ -4720,7 +4722,6 @@ pub mod tests { jsonrpc_core::{futures, ErrorCode, MetaIoHandler, Output, Response, Value}, jsonrpc_core_client::transports::local, serde::de::DeserializeOwned, - solana_accounts_db::{inline_spl_token, inline_spl_token_2022}, solana_entry::entry::next_versioned_entry, solana_gossip::socketaddr, solana_ledger::{ @@ -7891,7 +7892,7 @@ pub mod tests { let token_account_pubkey = solana_sdk::pubkey::new_rand(); let token_with_different_mint_pubkey = solana_sdk::pubkey::new_rand(); let new_mint = SplTokenPubkey::new_from_array([5; 32]); - if program_id == inline_spl_token_2022::id() { + if program_id == solana_inline_spl::token_2022::id() { // Add the token account let account_base = TokenAccount { mint, @@ -8148,7 +8149,7 @@ pub mod tests { .expect("actual response deserialization"); let accounts: Vec = serde_json::from_value(result["result"].clone()).unwrap(); - if program_id == inline_spl_token::id() { + if program_id == solana_inline_spl::token::id() { // native mint is included for token-v3 assert_eq!(accounts.len(), 4); } else { @@ -8386,7 +8387,7 @@ pub mod tests { let delegate = SplTokenPubkey::new_from_array([4; 32]); let token_account_pubkey = solana_sdk::pubkey::new_rand(); let (program_name, account_size, mint_size) = if program_id - == inline_spl_token_2022::id() + == solana_inline_spl::token_2022::id() { let account_base = TokenAccount { mint, @@ -8539,7 +8540,7 @@ pub mod tests { } } }); - if program_id == inline_spl_token_2022::id() { + if program_id == solana_inline_spl::token_2022::id() { expected_value["parsed"]["info"]["extensions"] = json!([ { "extension": "immutableOwner" @@ -8575,7 +8576,7 @@ pub mod tests { } } }); - if program_id == inline_spl_token_2022::id() { + if program_id == solana_inline_spl::token_2022::id() { expected_value["parsed"]["info"]["extensions"] = json!([ { "extension": "mintCloseAuthority", @@ -8595,7 +8596,7 @@ pub mod tests { let owner = Pubkey::new_unique(); assert_eq!( get_spl_token_owner_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &spl_token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::DataSize(165) @@ -8608,7 +8609,7 @@ pub mod tests { // Filtering on token-2022 account type assert_eq!( get_spl_token_owner_filter( - &Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(), + &token_2022::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), @@ -8621,7 +8622,7 @@ pub mod tests { // Filtering on token account state assert_eq!( get_spl_token_owner_filter( - &Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(), + &token_2022::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::TokenAccountState, @@ -8633,7 +8634,7 @@ pub mod tests { // Can't filter on account type for token-v3 assert!(get_spl_token_owner_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &solana_inline_spl::token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, owner.to_bytes().to_vec())), RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), @@ -8643,7 +8644,7 @@ pub mod tests { // 
Filtering on mint instead of owner assert!(get_spl_token_owner_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &solana_inline_spl::token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, owner.to_bytes().to_vec())), RpcFilterType::DataSize(165) @@ -8676,7 +8677,7 @@ pub mod tests { let mint = Pubkey::new_unique(); assert_eq!( get_spl_token_mint_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &solana_inline_spl::token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::DataSize(165) @@ -8689,7 +8690,7 @@ pub mod tests { // Filtering on token-2022 account type assert_eq!( get_spl_token_mint_filter( - &Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap(), + &solana_inline_spl::token_2022::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), @@ -8702,7 +8703,7 @@ pub mod tests { // Filtering on token account state assert_eq!( get_spl_token_mint_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &solana_inline_spl::token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::TokenAccountState, @@ -8714,7 +8715,7 @@ pub mod tests { // Can't filter on account type for token-v3 assert!(get_spl_token_mint_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &solana_inline_spl::token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(0, mint.to_bytes().to_vec())), RpcFilterType::Memcmp(Memcmp::new_raw_bytes(165, vec![ACCOUNTTYPE_ACCOUNT])), @@ -8724,7 +8725,7 @@ pub mod tests { // Filtering on owner instead of mint assert!(get_spl_token_mint_filter( - &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + &solana_inline_spl::token::id(), &[ RpcFilterType::Memcmp(Memcmp::new_raw_bytes(32, mint.to_bytes().to_vec())), RpcFilterType::DataSize(165) diff --git a/rpc/src/rpc_subscription_tracker.rs b/rpc/src/rpc_subscription_tracker.rs index 97ceb576636465..0d6c7a0c1020e2 100644 --- a/rpc/src/rpc_subscription_tracker.rs +++ b/rpc/src/rpc_subscription_tracker.rs @@ -595,7 +595,6 @@ mod tests { crate::rpc_pubsub_service::PubSubConfig, solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_runtime::bank::Bank, - std::str::FromStr, }; struct ControlWrapper { @@ -715,7 +714,7 @@ mod tests { assert_eq!(*info.last_notified_slot.read().unwrap(), 0); let account_params = SubscriptionParams::Account(AccountSubscriptionParams { - pubkey: Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + pubkey: solana_inline_spl::token::id(), commitment: CommitmentConfig::finalized(), encoding: UiAccountEncoding::Base64Zstd, data_slice: None, @@ -755,7 +754,7 @@ mod tests { assert_eq!(counts(&tracker), (0, 0, 0, 0)); let account_params = SubscriptionParams::Account(AccountSubscriptionParams { - pubkey: Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + pubkey: solana_inline_spl::token::id(), commitment: CommitmentConfig::finalized(), encoding: UiAccountEncoding::Base64Zstd, data_slice: None, @@ -766,7 +765,7 @@ mod tests { assert_eq!(counts(&tracker), (0, 0, 0, 0)); let account_params2 = SubscriptionParams::Account(AccountSubscriptionParams { - pubkey: Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), + pubkey: 
solana_inline_spl::token::id(), commitment: CommitmentConfig::confirmed(), encoding: UiAccountEncoding::Base64Zstd, data_slice: None, diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index d4e554a5a8fbe6..f5ec09fa678856 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -55,6 +55,7 @@ solana-config-program = { workspace = true } solana-cost-model = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } +solana-inline-spl = { workspace = true } solana-loader-v4-program = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 0fdfb968149bbd..dc3a3121558ea3 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -33,9 +33,9 @@ use { }, accounts_partition::{self, PartitionIndex, RentPayingAccountsByPartition}, ancestors::Ancestors, - inline_spl_token, partitioned_rewards::TestPartitionedEpochRewards, }, + solana_inline_spl::token, solana_logger, solana_program_runtime::{ compute_budget::ComputeBudget, @@ -7099,15 +7099,10 @@ fn test_reconfigure_token2_native_mint() { let genesis_config = create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config; let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - assert_eq!( - bank.get_balance(&inline_spl_token::native_mint::id()), - 1000000000 - ); - let native_mint_account = bank - .get_account(&inline_spl_token::native_mint::id()) - .unwrap(); + assert_eq!(bank.get_balance(&token::native_mint::id()), 1000000000); + let native_mint_account = bank.get_account(&token::native_mint::id()).unwrap(); assert_eq!(native_mint_account.data().len(), 82); - assert_eq!(native_mint_account.owner(), &inline_spl_token::id()); + assert_eq!(native_mint_account.owner(), &token::id()); } #[test] diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 81cbf2c19813b8..379750b1743381 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -1,5 +1,4 @@ use { - solana_accounts_db::inline_spl_token, solana_sdk::{ account::{Account, AccountSharedData}, feature::{self, Feature}, @@ -256,13 +255,16 @@ pub fn create_genesis_config_with_leader_ex( initial_accounts.push((*validator_stake_account_pubkey, validator_stake_account)); let native_mint_account = solana_sdk::account::AccountSharedData::from(Account { - owner: inline_spl_token::id(), - data: inline_spl_token::native_mint::ACCOUNT_DATA.to_vec(), + owner: solana_inline_spl::token::id(), + data: solana_inline_spl::token::native_mint::ACCOUNT_DATA.to_vec(), lamports: sol_to_lamports(1.), executable: false, rent_epoch: 1, }); - initial_accounts.push((inline_spl_token::native_mint::id(), native_mint_account)); + initial_accounts.push(( + solana_inline_spl::token::native_mint::id(), + native_mint_account, + )); let mut genesis_config = GenesisConfig { accounts: initial_accounts diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 57936c2c7e6bac..12eab54a41cf0e 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -14,7 +14,6 @@ pub mod compute_budget_details; mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; -pub mod inline_spl_associated_token_account; pub mod installed_scheduler_pool; pub mod loader_utils; pub mod non_circulating_supply; diff --git a/runtime/src/static_ids.rs b/runtime/src/static_ids.rs index 4cedf3b847d3a8..b7a8a5b5e2bf28 100644 --- a/runtime/src/static_ids.rs +++ b/runtime/src/static_ids.rs @@ -1,16 +1,15 
@@ use { - crate::inline_spl_associated_token_account, - solana_accounts_db::{inline_spl_token, inline_spl_token_2022}, + solana_inline_spl::{associated_token_account, token, token_2022}, solana_sdk::pubkey::Pubkey, }; lazy_static! { /// Vector of static token & mint IDs pub static ref STATIC_IDS: Vec = vec![ - inline_spl_associated_token_account::id(), - inline_spl_associated_token_account::program_v1_1_0::id(), - inline_spl_token::id(), - inline_spl_token::native_mint::id(), - inline_spl_token_2022::id(), + associated_token_account::id(), + associated_token_account::program_v1_1_0::id(), + token::id(), + token::native_mint::id(), + token_2022::id(), ]; } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 0a6324f454e2b2..6222435906a31d 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -70,6 +70,7 @@ tokio = { workspace = true } [dev-dependencies] solana-account-decoder = { workspace = true } +solana-inline-spl = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index b6d65e3ec4a4df..99ef4b53a0b94d 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -860,9 +860,10 @@ mod tests { use { super::*, serde_json::Value, - solana_accounts_db::{accounts_index::AccountSecondaryIndexes, inline_spl_token}, + solana_accounts_db::accounts_index::AccountSecondaryIndexes, solana_core::consensus::tower_storage::NullTowerStorage, solana_gossip::cluster_info::ClusterInfo, + solana_inline_spl::token, solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_rpc::rpc::create_validator_exit, solana_runtime::{ @@ -1020,7 +1021,7 @@ mod tests { // Count SPL Token Program Default Accounts let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSecondaryIndexKeySize","params":["{}"]}}"#, - inline_spl_token::id(), + token::id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) @@ -1075,7 +1076,7 @@ mod tests { let token_account1 = AccountSharedData::from(Account { lamports: 111, data: account1_data.to_vec(), - owner: inline_spl_token::id(), + owner: token::id(), ..Account::default() }); bank.store_account(&token_account1_pubkey, &token_account1); @@ -1093,7 +1094,7 @@ mod tests { let mint_account1 = AccountSharedData::from(Account { lamports: 222, data: mint1_data.to_vec(), - owner: inline_spl_token::id(), + owner: token::id(), ..Account::default() }); bank.store_account(&mint1_pubkey, &mint_account1); @@ -1114,7 +1115,7 @@ mod tests { let token_account2 = AccountSharedData::from(Account { lamports: 333, data: account2_data.to_vec(), - owner: inline_spl_token::id(), + owner: token::id(), ..Account::default() }); bank.store_account(&token_account2_pubkey, &token_account2); @@ -1135,7 +1136,7 @@ mod tests { let token_account3 = AccountSharedData::from(Account { lamports: 444, data: account3_data.to_vec(), - owner: inline_spl_token::id(), + owner: token::id(), ..Account::default() }); bank.store_account(&token_account3_pubkey, &token_account3); @@ -1153,7 +1154,7 @@ mod tests { let mint_account2 = AccountSharedData::from(Account { lamports: 555, data: mint2_data.to_vec(), - owner: inline_spl_token::id(), + owner: token::id(), ..Account::default() }); bank.store_account(&mint2_pubkey, &mint_account2); @@ -1233,7 +1234,7 @@ mod tests { // 5) SPL Token 
Program Owns 6 Accounts - 1 Default, 5 created above. let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSecondaryIndexKeySize","params":["{}"]}}"#, - inline_spl_token::id(), + token::id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) From 92b09e67914ad60b8f97dd58f4174bdd9b55080e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 21:03:07 +0800 Subject: [PATCH 115/153] build(deps): bump anyhow from 1.0.80 to 1.0.81 (#516) * build(deps): bump anyhow from 1.0.80 to 1.0.81 Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.80 to 1.0.81. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.80...1.0.81) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 576352d8de1e5c..7841faa2d87d14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -370,9 +370,9 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "aquamarine" diff --git a/Cargo.toml b/Cargo.toml index d9144de2ff59ce..afc700ef015e7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -140,7 +140,7 @@ Inflector = "0.11.4" aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" ahash = "0.8.10" -anyhow = "1.0.80" +anyhow = "1.0.81" arbitrary = "1.3.2" ark-bn254 = "0.4.0" ark-ec = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 6ce4b5570cfa47..3a24fed647a1a4 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -221,9 +221,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "aquamarine" From 288d5ba121179f66404a41aac833bdbd44bf337a Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 1 Apr 2024 09:51:59 -0400 Subject: [PATCH 116/153] Moves where modules are included in accounts_db.rs (#515) --- accounts-db/src/accounts_db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4819838b4c2fa1..1b4a7662be0b60 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -18,6 +18,8 @@ //! tracks the number of commits to the entire data store. So the latest //! commit for each slot entry would be indexed. 
+mod geyser_plugin_utils; + #[cfg(feature = "dev-context-only-utils")] use qualifier_attr::qualifiers; use { @@ -804,8 +806,6 @@ pub enum LoadedAccountAccessor<'a> { Cached(Option>), } -mod geyser_plugin_utils; - impl<'a> LoadedAccountAccessor<'a> { fn check_and_get_loaded_account(&mut self) -> LoadedAccount { // all of these following .expect() and .unwrap() are like serious logic errors, From 2d82a947e347c6788f40d38e3e3c0f47885ac578 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 1 Apr 2024 08:18:46 -0700 Subject: [PATCH 117/153] Scheduler: Add leader bank detection metrics (#405) --- .../scheduler_controller.rs | 10 ++- .../scheduler_metrics.rs | 65 +++++++++++++++++++ 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 0b10f613e64cd6..21c1548f373653 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -5,7 +5,9 @@ use { super::{ prio_graph_scheduler::PrioGraphScheduler, scheduler_error::SchedulerError, - scheduler_metrics::{SchedulerCountMetrics, SchedulerTimingMetrics}, + scheduler_metrics::{ + SchedulerCountMetrics, SchedulerLeaderDetectionMetrics, SchedulerTimingMetrics, + }, transaction_id_generator::TransactionIdGenerator, transaction_state::SanitizedTransactionTTL, transaction_state_container::TransactionStateContainer, @@ -54,6 +56,8 @@ pub(crate) struct SchedulerController { container: TransactionStateContainer, /// State for scheduling and communicating with worker threads. scheduler: PrioGraphScheduler, + /// Metrics tracking time for leader bank detection. + leader_detection_metrics: SchedulerLeaderDetectionMetrics, /// Metrics tracking counts on transactions in different states /// over an interval and during a leader slot. 
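
The new leader_detection_metrics field above feeds the reporting code added
further down in this patch. The heart of that measurement, sketched on its
own with plain Instants (datapoint plumbing omitted; the field names follow
the diff, the rest is an assumption):

use std::time::Instant;

struct LeaderDetection {
    bank_creation_time: Instant,
    bank_detected_time: Instant,
}

impl LeaderDetection {
    // Record the moment the scheduler first observes the leader bank.
    fn detect(bank_creation_time: Instant) -> Self {
        Self {
            bank_creation_time,
            bank_detected_time: Instant::now(),
        }
    }

    // How long after the bank was created did the scheduler notice it?
    // The conversion saturates to i64::MAX instead of panicking.
    fn bank_detected_delay_us(&self) -> i64 {
        self.bank_detected_time
            .duration_since(self.bank_creation_time)
            .as_micros()
            .try_into()
            .unwrap_or(i64::MAX)
    }

    // How long has the bank been held since detection?
    fn bank_detected_to_now_us(&self) -> i64 {
        self.bank_detected_time
            .elapsed()
            .as_micros()
            .try_into()
            .unwrap_or(i64::MAX)
    }
}
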
count_metrics: SchedulerCountMetrics, @@ -79,6 +83,7 @@ impl SchedulerController { transaction_id_generator: TransactionIdGenerator::default(), container: TransactionStateContainer::with_capacity(TOTAL_BUFFERED_PACKETS), scheduler, + leader_detection_metrics: SchedulerLeaderDetectionMetrics::default(), count_metrics: SchedulerCountMetrics::default(), timing_metrics: SchedulerTimingMetrics::default(), worker_metrics, @@ -102,8 +107,9 @@ impl SchedulerController { self.timing_metrics.update(|timing_metrics| { saturating_add_assign!(timing_metrics.decision_time_us, decision_time_us); }); - let new_leader_slot = decision.bank_start().map(|b| b.working_bank.slot()); + self.leader_detection_metrics + .update_and_maybe_report(decision.bank_start()); self.count_metrics .maybe_report_and_reset_slot(new_leader_slot); self.timing_metrics diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index 9ad7195c3d3b52..1f1650248740fe 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -1,6 +1,8 @@ use { itertools::MinMaxResult, + solana_poh::poh_recorder::BankStart, solana_sdk::{clock::Slot, timing::AtomicInterval}, + std::time::Instant, }; #[derive(Default)] @@ -332,3 +334,66 @@ impl SchedulerTimingMetricsInner { self.receive_completed_time_us = 0; } } + +#[derive(Default)] +pub struct SchedulerLeaderDetectionMetrics { + inner: Option, +} + +struct SchedulerLeaderDetectionMetricsInner { + slot: Slot, + bank_creation_time: Instant, + bank_detected_time: Instant, +} + +impl SchedulerLeaderDetectionMetrics { + pub fn update_and_maybe_report(&mut self, bank_start: Option<&BankStart>) { + match (&self.inner, bank_start) { + (None, Some(bank_start)) => self.initialize_inner(bank_start), + (Some(_inner), None) => self.report_and_reset(), + (Some(inner), Some(bank_start)) if inner.slot != bank_start.working_bank.slot() => { + self.report_and_reset(); + self.initialize_inner(bank_start); + } + _ => {} + } + } + + fn initialize_inner(&mut self, bank_start: &BankStart) { + let bank_detected_time = Instant::now(); + self.inner = Some(SchedulerLeaderDetectionMetricsInner { + slot: bank_start.working_bank.slot(), + bank_creation_time: *bank_start.bank_creation_time, + bank_detected_time, + }); + } + + fn report_and_reset(&mut self) { + let SchedulerLeaderDetectionMetricsInner { + slot, + bank_creation_time, + bank_detected_time, + } = self.inner.take().expect("inner must be present"); + + let bank_detected_delay_us = bank_detected_time + .duration_since(bank_creation_time) + .as_micros() + .try_into() + .unwrap_or(i64::MAX); + let bank_detected_to_slot_end_detected_us = bank_detected_time + .elapsed() + .as_micros() + .try_into() + .unwrap_or(i64::MAX); + datapoint_info!( + "banking_stage_scheduler_leader_detection", + ("slot", slot, i64), + ("bank_detected_delay_us", bank_detected_delay_us, i64), + ( + "bank_detected_to_slot_end_detected_us", + bank_detected_to_slot_end_detected_us, + i64 + ), + ); + } +} From 92c9b454791753cb919169a67ed583ccb053a86a Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 1 Apr 2024 08:18:59 -0700 Subject: [PATCH 118/153] Scheduler: Use single hashmap for read and write locks (#458) --- .../thread_aware_account_locks.rs | 215 ++++++++++-------- 1 file changed, 122 insertions(+), 93 deletions(-) diff --git a/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs 
b/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs index 4a9cfd2df9edcf..5ceda39c03858b 100644 --- a/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs +++ b/core/src/banking_stage/transaction_scheduler/thread_aware_account_locks.rs @@ -28,6 +28,17 @@ struct AccountReadLocks { lock_counts: [LockCount; MAX_THREADS], } +/// Account locks. +/// Write Locks - only one thread can hold a write lock at a time. +/// Contains how many write locks are held by the thread. +/// Read Locks - multiple threads can hold a read lock at a time. +/// Contains thread-set for easily checking which threads are scheduled. +#[derive(Default)] +struct AccountLocks { + pub write_locks: Option, + pub read_locks: Option, +} + /// Thread-aware account locks which allows for scheduling on threads /// that already hold locks on the account. This is useful for allowing /// queued transactions to be scheduled on a thread while the transaction @@ -35,13 +46,9 @@ struct AccountReadLocks { pub(crate) struct ThreadAwareAccountLocks { /// Number of threads. num_threads: usize, // 0..MAX_THREADS - /// Write locks - only one thread can hold a write lock at a time. - /// Contains how many write locks are held by the thread. - write_locks: HashMap, - /// Read locks - multiple threads can hold a read lock at a time. - /// Contains thread-set for easily checking which threads are scheduled. - /// Contains how many read locks are held by each thread. - read_locks: HashMap, + /// Locks for each account. An account should only have an entry if there + /// is at least one lock. + locks: HashMap, } impl ThreadAwareAccountLocks { @@ -55,8 +62,7 @@ impl ThreadAwareAccountLocks { Self { num_threads, - write_locks: HashMap::new(), - read_locks: HashMap::new(), + locks: HashMap::new(), } } @@ -144,9 +150,12 @@ impl ThreadAwareAccountLocks { /// holds all read locks. Otherwise, no threads are write-schedulable. /// If only read-locked, all threads are read-schedulable. fn schedulable_threads(&self, account: &Pubkey) -> ThreadSet { - match (self.write_locks.get(account), self.read_locks.get(account)) { - (None, None) => ThreadSet::any(self.num_threads), - (None, Some(read_locks)) => { + match self.locks.get(account) { + None => ThreadSet::any(self.num_threads), + Some(AccountLocks { + write_locks: None, + read_locks: Some(read_locks), + }) => { if WRITE { read_locks .thread_set @@ -157,14 +166,24 @@ impl ThreadAwareAccountLocks { ThreadSet::any(self.num_threads) } } - (Some(write_locks), None) => ThreadSet::only(write_locks.thread_id), - (Some(write_locks), Some(read_locks)) => { + Some(AccountLocks { + write_locks: Some(write_locks), + read_locks: None, + }) => ThreadSet::only(write_locks.thread_id), + Some(AccountLocks { + write_locks: Some(write_locks), + read_locks: Some(read_locks), + }) => { assert_eq!( read_locks.thread_set.only_one_contained(), Some(write_locks.thread_id) ); read_locks.thread_set } + Some(AccountLocks { + write_locks: None, + read_locks: None, + }) => unreachable!(), } } @@ -191,57 +210,61 @@ impl ThreadAwareAccountLocks { /// Locks the given `account` for writing on `thread_id`. /// Panics if the account is already locked for writing on another thread. 
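
The match above reduces scheduling decisions to a handful of ThreadSet
operations. A compact sketch of the semantics this code assumes (the u64
bitset representation and the MAX_THREADS <= 64 bound are assumptions; the
method names come from the diff):

// Bitset of thread ids; one bit per scheduler thread.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct ThreadSet(u64);

impl ThreadSet {
    fn none() -> Self {
        Self(0)
    }

    // All threads 0..num_threads (num_threads < 64 assumed).
    fn any(num_threads: usize) -> Self {
        Self((1u64 << num_threads) - 1)
    }

    fn only(thread_id: usize) -> Self {
        Self(1u64 << thread_id)
    }

    fn contains(&self, thread_id: usize) -> bool {
        self.0 & (1u64 << thread_id) != 0
    }

    // Some(id) iff exactly one thread is in the set.
    fn only_one_contained(&self) -> Option<usize> {
        (self.0.count_ones() == 1).then(|| self.0.trailing_zeros() as usize)
    }
}

// The write-schedulability rule from schedulable_threads: with outstanding
// read locks, a write is only schedulable on the single thread, if any,
// that holds all of them.
fn write_schedulable(read_lock_holders: ThreadSet) -> ThreadSet {
    read_lock_holders
        .only_one_contained()
        .map(ThreadSet::only)
        .unwrap_or_else(ThreadSet::none)
}

The fixed-width bitset works because lock_counts arrays in the diff are
already capped at MAX_THREADS.
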
fn write_lock_account(&mut self, account: &Pubkey, thread_id: ThreadId) { - match self.write_locks.entry(*account) { - Entry::Occupied(mut entry) => { - let AccountWriteLocks { - thread_id: lock_thread_id, - lock_count, - } = entry.get_mut(); - assert_eq!( - *lock_thread_id, thread_id, - "outstanding write lock must be on same thread" - ); + let entry = self.locks.entry(*account).or_default(); - *lock_count += 1; - } - Entry::Vacant(entry) => { - entry.insert(AccountWriteLocks { - thread_id, - lock_count: 1, - }); - } - } + let AccountLocks { + write_locks, + read_locks, + } = entry; - // Check for outstanding read-locks - if let Some(read_locks) = self.read_locks.get(account) { + if let Some(read_locks) = read_locks { assert_eq!( - read_locks.thread_set, - ThreadSet::only(thread_id), + read_locks.thread_set.only_one_contained(), + Some(thread_id), "outstanding read lock must be on same thread" ); } + + if let Some(write_locks) = write_locks { + assert_eq!( + write_locks.thread_id, thread_id, + "outstanding write lock must be on same thread" + ); + write_locks.lock_count += 1; + } else { + *write_locks = Some(AccountWriteLocks { + thread_id, + lock_count: 1, + }); + } } /// Unlocks the given `account` for writing on `thread_id`. /// Panics if the account is not locked for writing on `thread_id`. fn write_unlock_account(&mut self, account: &Pubkey, thread_id: ThreadId) { - match self.write_locks.entry(*account) { - Entry::Occupied(mut entry) => { - let AccountWriteLocks { - thread_id: lock_thread_id, - lock_count, - } = entry.get_mut(); - assert_eq!( - *lock_thread_id, thread_id, - "outstanding write lock must be on same thread" - ); - *lock_count -= 1; - if *lock_count == 0 { - entry.remove(); - } - } - Entry::Vacant(_) => { - panic!("write lock must exist for account: {account}"); + let Entry::Occupied(mut entry) = self.locks.entry(*account) else { + panic!("write lock must exist for account: {account}"); + }; + + let AccountLocks { + write_locks: maybe_write_locks, + read_locks, + } = entry.get_mut(); + + let Some(write_locks) = maybe_write_locks else { + panic!("write lock must exist for account: {account}"); + }; + + assert_eq!( + write_locks.thread_id, thread_id, + "outstanding write lock must be on same thread" + ); + + write_locks.lock_count -= 1; + if write_locks.lock_count == 0 { + *maybe_write_locks = None; + if read_locks.is_none() { + entry.remove(); } } } @@ -249,58 +272,64 @@ impl ThreadAwareAccountLocks { /// Locks the given `account` for reading on `thread_id`. /// Panics if the account is already locked for writing on another thread. 
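
write_unlock_account above shows the pattern this refactor leans on
throughout: both lock halves live in one map entry, and the entry is
removed only once both halves are gone. The same idea reduced to a toy
pair-valued map (u64 keys and plain counters standing in for the real lock
types):

use std::collections::{hash_map::Entry, HashMap};

// (write_lock_count, read_lock_count) per key; a fully released half is
// modeled as None, so an entry exists only while some lock is held.
type Locks = HashMap<u64, (Option<u32>, Option<u32>)>;

fn write_unlock(locks: &mut Locks, key: u64) {
    let Entry::Occupied(mut entry) = locks.entry(key) else {
        panic!("write lock must exist for key: {key}");
    };
    let (write_half, read_half) = entry.get_mut();
    let count = write_half.as_mut().expect("write lock must exist");
    *count -= 1;
    if *count == 0 {
        *write_half = None;
        // Drop the whole entry only when no read locks remain either, so
        // the map never keeps empty placeholder entries around.
        if read_half.is_none() {
            entry.remove();
        }
    }
}
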
fn read_lock_account(&mut self, account: &Pubkey, thread_id: ThreadId) { - match self.read_locks.entry(*account) { - Entry::Occupied(mut entry) => { - let AccountReadLocks { - thread_set, - lock_counts, - } = entry.get_mut(); - thread_set.insert(thread_id); - lock_counts[thread_id] += 1; + let AccountLocks { + write_locks, + read_locks, + } = self.locks.entry(*account).or_default(); + + if let Some(write_locks) = write_locks { + assert_eq!( + write_locks.thread_id, thread_id, + "outstanding write lock must be on same thread" + ); + } + + match read_locks { + Some(read_locks) => { + read_locks.thread_set.insert(thread_id); + read_locks.lock_counts[thread_id] += 1; } - Entry::Vacant(entry) => { + None => { let mut lock_counts = [0; MAX_THREADS]; lock_counts[thread_id] = 1; - entry.insert(AccountReadLocks { + *read_locks = Some(AccountReadLocks { thread_set: ThreadSet::only(thread_id), lock_counts, }); } } - - // Check for outstanding write-locks - if let Some(write_locks) = self.write_locks.get(account) { - assert_eq!( - write_locks.thread_id, thread_id, - "outstanding write lock must be on same thread" - ); - } } /// Unlocks the given `account` for reading on `thread_id`. /// Panics if the account is not locked for reading on `thread_id`. fn read_unlock_account(&mut self, account: &Pubkey, thread_id: ThreadId) { - match self.read_locks.entry(*account) { - Entry::Occupied(mut entry) => { - let AccountReadLocks { - thread_set, - lock_counts, - } = entry.get_mut(); - assert!( - thread_set.contains(thread_id), - "outstanding read lock must be on same thread" - ); - lock_counts[thread_id] -= 1; - if lock_counts[thread_id] == 0 { - thread_set.remove(thread_id); - if thread_set.is_empty() { - entry.remove(); - } + let Entry::Occupied(mut entry) = self.locks.entry(*account) else { + panic!("read lock must exist for account: {account}"); + }; + + let AccountLocks { + write_locks, + read_locks: maybe_read_locks, + } = entry.get_mut(); + + let Some(read_locks) = maybe_read_locks else { + panic!("read lock must exist for account: {account}"); + }; + + assert!( + read_locks.thread_set.contains(thread_id), + "outstanding read lock must be on same thread" + ); + + read_locks.lock_counts[thread_id] -= 1; + if read_locks.lock_counts[thread_id] == 0 { + read_locks.thread_set.remove(thread_id); + if read_locks.thread_set.is_empty() { + *maybe_read_locks = None; + if write_locks.is_none() { + entry.remove(); } } - Entry::Vacant(_) => { - panic!("read lock must exist for account: {account}"); - } } } } @@ -641,7 +670,7 @@ mod tests { locks.write_lock_account(&pk1, 1); locks.write_unlock_account(&pk1, 1); locks.write_unlock_account(&pk1, 1); - assert!(locks.write_locks.is_empty()); + assert!(locks.locks.is_empty()); } #[test] @@ -652,7 +681,7 @@ mod tests { locks.read_lock_account(&pk1, 1); locks.read_unlock_account(&pk1, 1); locks.read_unlock_account(&pk1, 1); - assert!(locks.read_locks.is_empty()); + assert!(locks.locks.is_empty()); } #[test] From 79e316eb56a27bd65d5097e9b33225541a03b744 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 1 Apr 2024 10:24:59 -0500 Subject: [PATCH 119/153] Reduce the default number of IP echo server threads (#354) The IP echo server currently spins up a worker thread for every thread on the machine. 
Observing some data for nodes:
- MNB validators and RPC nodes look to get several hundred of these
  requests per day
- MNB entrypoint nodes look to get 2-3 requests per second on average

In both instances, the current threadpool is severely overprovisioned,
which is a waste of resources.

This PR plumbs a flag to control the number of worker threads for this
pool and sets a default of two threads for this server. Two threads
allow one thread to always listen on the TCP port while the other
thread processes requests.
---
 Cargo.lock                             |  1 +
 core/src/validator.rs                  |  3 +++
 gossip/src/gossip_service.rs           | 11 +++++++++--
 local-cluster/src/validator_configs.rs |  1 +
 net-utils/Cargo.toml                   |  1 +
 net-utils/src/bin/ip_address_server.rs |  7 ++++++-
 net-utils/src/ip_echo_server.rs        | 11 +++++++++++
 net-utils/src/lib.rs                   | 17 ++++++++++++++---
 programs/sbf/Cargo.lock                |  1 +
 validator/src/cli/thread_args.rs       | 23 +++++++++++++++++++++++
 validator/src/main.rs                  |  2 ++
 11 files changed, 72 insertions(+), 6 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7841faa2d87d14..3c92a68f9bfb0e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6542,6 +6542,7 @@ dependencies = [
  "solana-logger",
  "solana-sdk",
  "solana-version",
+ "static_assertions",
  "tokio",
  "url 2.5.0",
 ]
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 98a267aeafc71a..32b165f77246c9 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -269,6 +269,7 @@ pub struct ValidatorConfig {
     pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup,
     pub wen_restart_proto_path: Option<PathBuf>,
     pub unified_scheduler_handler_threads: Option<usize>,
+    pub ip_echo_server_threads: NonZeroUsize,
     pub replay_forks_threads: NonZeroUsize,
     pub replay_transactions_threads: NonZeroUsize,
 }
@@ -338,6 +339,7 @@ impl Default for ValidatorConfig {
             use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(),
             wen_restart_proto_path: None,
             unified_scheduler_handler_threads: None,
+            ip_echo_server_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
             replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
             replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
         }
@@ -1079,6 +1081,7 @@ impl Validator {
             None => None,
             Some(tcp_listener) => Some(solana_net_utils::ip_echo_server(
                 tcp_listener,
+                config.ip_echo_server_threads,
                 Some(node.info.shred_version()),
             )),
         };
diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs
index d1c726051e6558..76ab14f27a6b4a 100644
--- a/gossip/src/gossip_service.rs
+++ b/gossip/src/gossip_service.rs
@@ -7,6 +7,7 @@ use {
     solana_client::{
         connection_cache::ConnectionCache, rpc_client::RpcClient, tpu_client::TpuClientWrapper,
     },
+    solana_net_utils::DEFAULT_IP_ECHO_SERVER_THREADS,
     solana_perf::recycler::Recycler,
     solana_runtime::bank_forks::BankForks,
     solana_sdk::{
@@ -159,8 +160,14 @@ pub fn discover(
     if let Some(my_gossip_addr) = my_gossip_addr {
         info!("Gossip Address: {:?}", my_gossip_addr);
     }
-    let _ip_echo_server = ip_echo
-        .map(|tcp_listener| solana_net_utils::ip_echo_server(tcp_listener, Some(my_shred_version)));
+
+    let _ip_echo_server = ip_echo.map(|tcp_listener| {
+        solana_net_utils::ip_echo_server(
+            tcp_listener,
+            DEFAULT_IP_ECHO_SERVER_THREADS,
+            Some(my_shred_version),
+        )
+    });
     let (met_criteria, elapsed, all_peers, tvu_peers) = spy(
         spy_ref.clone(),
         num_nodes,
diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs
index 45045203412a73..0e4ee5a9af31ff 100644
--- a/local-cluster/src/validator_configs.rs
+++ b/local-cluster/src/validator_configs.rs
@@ -68,6 +68,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
         use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup,
         wen_restart_proto_path: config.wen_restart_proto_path.clone(),
         unified_scheduler_handler_threads: config.unified_scheduler_handler_threads,
+        ip_echo_server_threads: config.ip_echo_server_threads,
         replay_forks_threads: config.replay_forks_threads,
         replay_transactions_threads: config.replay_transactions_threads,
     }
diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml
index 0d8a82f7a994cd..3486b30bbb9cda 100644
--- a/net-utils/Cargo.toml
+++ b/net-utils/Cargo.toml
@@ -22,6 +22,7 @@ socket2 = { workspace = true }
 solana-logger = { workspace = true }
 solana-sdk = { workspace = true }
 solana-version = { workspace = true }
+static_assertions = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
 url = { workspace = true }

diff --git a/net-utils/src/bin/ip_address_server.rs b/net-utils/src/bin/ip_address_server.rs
index a194ad2c5cf2b8..6d5b7939ce4782 100644
--- a/net-utils/src/bin/ip_address_server.rs
+++ b/net-utils/src/bin/ip_address_server.rs
@@ -1,5 +1,6 @@
 use {
     clap::{Arg, Command},
+    solana_net_utils::DEFAULT_IP_ECHO_SERVER_THREADS,
     std::net::{Ipv4Addr, SocketAddr, TcpListener},
 };

@@ -21,7 +22,11 @@ fn main() {
         .unwrap_or_else(|_| panic!("Unable to parse {port}"));
     let bind_addr = SocketAddr::from((Ipv4Addr::UNSPECIFIED, port));
     let tcp_listener = TcpListener::bind(bind_addr).expect("unable to start tcp listener");
-    let _runtime = solana_net_utils::ip_echo_server(tcp_listener, /*shred_version=*/ None);
+    let _runtime = solana_net_utils::ip_echo_server(
+        tcp_listener,
+        DEFAULT_IP_ECHO_SERVER_THREADS,
+        /*shred_version=*/ None,
+    );
     loop {
         std::thread::park();
     }
diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs
index 64fbedadc7acf9..2d5782dcae1cdc 100644
--- a/net-utils/src/ip_echo_server.rs
+++ b/net-utils/src/ip_echo_server.rs
@@ -6,6 +6,7 @@ use {
     std::{
         io,
         net::{IpAddr, SocketAddr},
+        num::NonZeroUsize,
         time::Duration,
     },
     tokio::{
@@ -18,6 +19,14 @@ use {

 pub type IpEchoServer = Runtime;

+// Enforce a minimum of two threads:
+// - One thread to monitor the TcpListener and spawn async tasks
+// - One thread to service the spawned tasks
+// The unsafe is safe because we're using a fixed, known non-zero value
+pub const MINIMUM_IP_ECHO_SERVER_THREADS: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(2) };
+// IP echo requests require little computation and come in fairly infrequently,
+// so keep the number of server workers small to avoid overhead
+pub const DEFAULT_IP_ECHO_SERVER_THREADS: NonZeroUsize = MINIMUM_IP_ECHO_SERVER_THREADS;
 pub const MAX_PORT_COUNT_PER_MESSAGE: usize = 4;

 const IO_TIMEOUT: Duration = Duration::from_secs(5);
@@ -168,6 +177,7 @@ async fn run_echo_server(tcp_listener: std::net::TcpListener, shred_version: Opt
 /// connects. Used by |get_public_ip_addr|
 pub fn ip_echo_server(
     tcp_listener: std::net::TcpListener,
+    num_server_threads: NonZeroUsize,
     // Cluster shred-version of the node running the server.
     shred_version: Option<u16>,
 ) -> IpEchoServer {
@@ -175,6 +185,7 @@ pub fn ip_echo_server(
     let runtime = tokio::runtime::Builder::new_multi_thread()
         .thread_name("solIpEchoSrvrRt")
+        .worker_threads(num_server_threads.get())
         .enable_all()
         .build()
         .expect("new tokio runtime");
diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs
index 1ff48173def0da..2d1b6249f3fcb1 100644
--- a/net-utils/src/lib.rs
+++ b/net-utils/src/lib.rs
@@ -16,7 +16,10 @@ use {
 };

 mod ip_echo_server;
-pub use ip_echo_server::{ip_echo_server, IpEchoServer, MAX_PORT_COUNT_PER_MESSAGE};
+pub use ip_echo_server::{
+    ip_echo_server, IpEchoServer, DEFAULT_IP_ECHO_SERVER_THREADS, MAX_PORT_COUNT_PER_MESSAGE,
+    MINIMUM_IP_ECHO_SERVER_THREADS,
+};
 use ip_echo_server::{IpEchoServerMessage, IpEchoServerResponse};

 /// A data type representing a public Udp socket
@@ -744,7 +747,11 @@ mod tests {
         let (_server_port, (server_udp_socket, server_tcp_listener)) =
             bind_common_in_range(ip_addr, (3200, 3250)).unwrap();

-        let _runtime = ip_echo_server(server_tcp_listener, /*shred_version=*/ Some(42));
+        let _runtime = ip_echo_server(
+            server_tcp_listener,
+            DEFAULT_IP_ECHO_SERVER_THREADS,
+            /*shred_version=*/ Some(42),
+        );

         let server_ip_echo_addr = server_udp_socket.local_addr().unwrap();
         assert_eq!(
@@ -764,7 +771,11 @@ mod tests {
         let (client_port, (client_udp_socket, client_tcp_listener)) =
             bind_common_in_range(ip_addr, (3200, 3250)).unwrap();

-        let _runtime = ip_echo_server(server_tcp_listener, /*shred_version=*/ Some(65535));
+        let _runtime = ip_echo_server(
+            server_tcp_listener,
+            DEFAULT_IP_ECHO_SERVER_THREADS,
+            /*shred_version=*/ Some(65535),
+        );

         let ip_echo_server_addr = server_udp_socket.local_addr().unwrap();
         assert_eq!(
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 3a24fed647a1a4..3696f9ba5cb30d 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -5323,6 +5323,7 @@ dependencies = [
  "solana-logger",
  "solana-sdk",
  "solana-version",
+ "static_assertions",
  "tokio",
  "url 2.5.0",
 ]
diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs
index 53d8cf15d984a0..4c3221f9e661fe 100644
--- a/validator/src/cli/thread_args.rs
+++ b/validator/src/cli/thread_args.rs
@@ -9,6 +9,7 @@ use {

 // Need this struct to provide &str whose lifetime matches that of the CLAP Arg's
 pub struct DefaultThreadArgs {
+    pub ip_echo_server_threads: String,
     pub replay_forks_threads: String,
     pub replay_transactions_threads: String,
 }
@@ -16,6 +17,7 @@ impl Default for DefaultThreadArgs {
     fn default() -> Self {
         Self {
+            ip_echo_server_threads: IpEchoServerThreadsArg::default().to_string(),
             replay_forks_threads: ReplayForksThreadsArg::default().to_string(),
             replay_transactions_threads: ReplayTransactionsThreadsArg::default().to_string(),
         }
@@ -24,6 +26,7 @@ pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec<Arg<'_, 'a>> {
     vec![
+        new_thread_arg::<IpEchoServerThreadsArg>(&defaults.ip_echo_server_threads),
         new_thread_arg::<ReplayForksThreadsArg>(&defaults.replay_forks_threads),
         new_thread_arg::<ReplayTransactionsThreadsArg>(&defaults.replay_transactions_threads),
     ]
@@ -41,12 +44,18 @@ fn new_thread_arg<'a, T: ThreadArg>(default: &str) -> Arg<'_, 'a> {
 }

 pub struct NumThreadConfig {
+    pub ip_echo_server_threads: NonZeroUsize,
     pub replay_forks_threads: NonZeroUsize,
     pub replay_transactions_threads: NonZeroUsize,
 }

 pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig {
     NumThreadConfig {
+        ip_echo_server_threads: value_t_or_exit!(
+            matches,
+            IpEchoServerThreadsArg::NAME,
+            NonZeroUsize
+        ),
         replay_forks_threads: if matches.is_present("replay_slots_concurrently") {
             NonZeroUsize::new(4).expect("4 is non-zero")
         } else {
@@ -86,6 +95,20 @@ trait ThreadArg {
     }
 }

+struct IpEchoServerThreadsArg;
+impl ThreadArg for IpEchoServerThreadsArg {
+    const NAME: &'static str = "ip_echo_server_threads";
+    const LONG_NAME: &'static str = "ip-echo-server-threads";
+    const HELP: &'static str = "Number of threads to use for the IP echo server";
+
+    fn default() -> usize {
+        solana_net_utils::DEFAULT_IP_ECHO_SERVER_THREADS.get()
+    }
+    fn min() -> usize {
+        solana_net_utils::MINIMUM_IP_ECHO_SERVER_THREADS.get()
+    }
+}
+
 struct ReplayForksThreadsArg;
 impl ThreadArg for ReplayForksThreadsArg {
     const NAME: &'static str = "replay_forks_threads";
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 56050031975a52..151281bc8ae874 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1332,6 +1332,7 @@ pub fn main() {
     let full_api = matches.is_present("full_rpc_api");

     let cli::thread_args::NumThreadConfig {
+        ip_echo_server_threads,
         replay_forks_threads,
         replay_transactions_threads,
     } = cli::thread_args::parse_num_threads_args(&matches);
@@ -1474,6 +1475,7 @@ pub fn main() {
             use_snapshot_archives_at_startup::cli::NAME,
             UseSnapshotArchivesAtStartup
         ),
+        ip_echo_server_threads,
         replay_forks_threads,
         replay_transactions_threads,
         ..ValidatorConfig::default()
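For reference, a minimal standalone sketch of the bounded-runtime pattern this patch applies; the constant value and thread name mirror the diff above, while `build_echo_runtime` is a hypothetical wrapper, not a function from the patch:

    use std::num::NonZeroUsize;

    // Two workers suffice: one can block accepting on the TcpListener while
    // the other services spawned tasks (mirrors MINIMUM_IP_ECHO_SERVER_THREADS).
    const MIN_SERVER_THREADS: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(2) };

    fn build_echo_runtime(num_server_threads: NonZeroUsize) -> tokio::runtime::Runtime {
        tokio::runtime::Builder::new_multi_thread()
            .thread_name("solIpEchoSrvrRt")
            .worker_threads(num_server_threads.get())
            .enable_all()
            .build()
            .expect("new tokio runtime")
    }

Because `NonZeroUsize` is used end to end, a zero thread count is unrepresentable, so the CLI minimum and the runtime builder can never disagree.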
From 620f5658dab50e3b7406efcfeed8955fa8e0b9d9 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Mon, 1 Apr 2024 12:07:41 -0500
Subject: [PATCH 120/153] introduce scan_pubkeys for clean storage iteration
 (#507)

* introduce pubkey_iter for clean storage iteration

* rename scan_pubkeys

* pr feedback

* pr feedback
---
 accounts-db/src/accounts_db.rs   | 16 +++++++---
 accounts-db/src/accounts_file.rs |  8 +++++
 accounts-db/src/append_vec.rs    | 50 ++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 1b4a7662be0b60..ff6bf4a9aed4ea 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -2904,8 +2904,8 @@ impl AccountsDb {
                     dirty_ancient_stores.fetch_add(1, Ordering::Relaxed);
                 }
                 oldest_dirty_slot = oldest_dirty_slot.min(*slot);
-                store.accounts.account_iter().for_each(|account| {
-                    pubkeys.insert(*account.pubkey());
+                store.accounts.scan_pubkeys(|k| {
+                    pubkeys.insert(*k);
                 });
             });
             oldest_dirty_slot
@@ -16664,11 +16664,19 @@ pub mod tests {
     ) -> Vec<(Pubkey, AccountSharedData)> {
         storages
             .flat_map(|storage| {
-                storage
+                let vec = storage
                     .accounts
                     .account_iter()
                     .map(|account| (*account.pubkey(), account.to_account_shared_data()))
-                    .collect::<Vec<_>>()
+                    .collect::<Vec<_>>();
+                // make sure scan_pubkeys results match
+                // Note that we assume traversals are both in the same order, but this doesn't have to be true.
+                let mut compare = Vec::default();
+                storage.accounts.scan_pubkeys(|k| {
+                    compare.push(*k);
+                });
+                assert_eq!(compare, vec.iter().map(|(k, _)| *k).collect::<Vec<_>>());
+                vec
             })
             .collect::<Vec<_>>()
     }
diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs
index e962c87331738b..0cd11296ce50b3 100644
--- a/accounts-db/src/accounts_file.rs
+++ b/accounts-db/src/accounts_file.rs
@@ -156,6 +156,14 @@ impl AccountsFile {
         AccountsFileIter::new(self)
     }

+    /// iterate over all pubkeys
+    pub(crate) fn scan_pubkeys(&self, callback: impl FnMut(&Pubkey)) {
+        match self {
+            Self::AppendVec(av) => av.scan_pubkeys(callback),
+            Self::TieredStorage(_) => unimplemented!(),
+        }
+    }
+
     /// Return a vector of account metadata for each account, starting from `offset`.
     pub fn accounts(&self, offset: usize) -> Vec<StoredAccountMeta> {
         match self {
diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs
index b2499cb2cb0352..5e26bf1849b832 100644
--- a/accounts-db/src/append_vec.rs
+++ b/accounts-db/src/append_vec.rs
@@ -197,6 +197,15 @@ impl<'append_vec> ReadableAccount for AppendVecStoredAccountMeta<'append_vec> {
     }
 }

+/// offsets to help navigate the persisted format of `AppendVec`
+#[derive(Debug)]
+struct AccountOffsets {
+    /// offset to the end of the &[u8] data
+    offset_to_end_of_data: usize,
+    /// offset to the next account. This will be aligned.
+    next_account_offset: usize,
+}
+
 /// A thread-safe, file-backed block of memory used to store `Account` instances. Append operations
 /// are serialized such that only one thread updates the internal `append_lock` at a time. No
 /// restrictions are placed on reading. That is, one may read items from one thread while another
@@ -552,6 +561,47 @@ impl AppendVec {
         self.path.clone()
     }

+    /// help with the math of offsets when navigating the on-disk layout in an AppendVec.
+    /// data is at the end of each account and is variable sized
+    /// the next account is then aligned on a 64 bit boundary.
+    /// With these helpers, we can skip over reading some of the data depending on what the caller wants.
+    fn next_account_offset(start_offset: usize, stored_meta: &StoredMeta) -> AccountOffsets {
+        let start_of_data = start_offset
+            + std::mem::size_of::<StoredMeta>()
+            + std::mem::size_of::<AccountMeta>()
+            + std::mem::size_of::<AccountHash>();
+        let aligned_data_len = u64_align!(stored_meta.data_len as usize);
+        let next_account_offset = start_of_data + aligned_data_len;
+        let offset_to_end_of_data = start_of_data + stored_meta.data_len as usize;
+
+        AccountOffsets {
+            next_account_offset,
+            offset_to_end_of_data,
+        }
+    }
+
+    /// iterate over all pubkeys and call `callback`.
+    /// This iteration does not deserialize and populate each field in `StoredAccountMeta`.
+    /// `data` is completely ignored, for example.
+    /// Also, no references have to be maintained/returned from an iterator function.
+    /// This fn can operate on a batch of data at once.
+    pub(crate) fn scan_pubkeys(&self, mut callback: impl FnMut(&Pubkey)) {
+        let mut offset = 0;
+        loop {
+            let Some((stored_meta, _)) = self.get_type::<StoredMeta>(offset) else {
+                // eof
+                break;
+            };
+            let next = Self::next_account_offset(offset, stored_meta);
+            if next.offset_to_end_of_data > self.len() {
+                // data doesn't fit, so don't include this pubkey
+                break;
+            }
+            callback(&stored_meta.pubkey);
+            offset = next.next_account_offset;
+        }
+    }
+
     /// Return iterator for account metadata
     pub fn account_iter(&self) -> AppendVecAccountsIter {
         AppendVecAccountsIter::new(self)
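For intuition, a self-contained sketch of the offset arithmetic `next_account_offset` relies on. The header size and data length below are made-up illustrative numbers, not the real sizes of `StoredMeta`, `AccountMeta`, and `AccountHash`; only the alignment rule matches the patch:

    // Round a length up to the next 8-byte (u64) boundary, as u64_align! does.
    fn u64_align(len: usize) -> usize {
        (len + 7) & !7
    }

    // Skip the fixed header plus the variable-length data, landing on the
    // aligned start of the next record.
    fn next_offset(start: usize, header_size: usize, data_len: usize) -> usize {
        let start_of_data = start + header_size;
        start_of_data + u64_align(data_len)
    }

    fn main() {
        // With a hypothetical 136-byte header and 13 bytes of data, the next
        // account begins at 136 + 16, since 13 rounds up to 16.
        assert_eq!(next_offset(0, 136, 13), 152);
    }

This is why the scan can ignore account data entirely: each record's position is computable from the previous record's metadata alone.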
From bc7b27472f2457bb5a13a3e980b7b5ca498f4e31 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Mon, 1 Apr 2024 13:27:29 -0400
Subject: [PATCH 121/153] Refactors storage flushing when taking a snapshot
 (#485)

---
 runtime/src/snapshot_bank_utils.rs | 7 +++++++
 runtime/src/snapshot_utils.rs      | 9 +++------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs
index 6db2747089d30c..188816a9f5682d 100644
--- a/runtime/src/snapshot_bank_utils.rs
+++ b/runtime/src/snapshot_bank_utils.rs
@@ -99,6 +99,12 @@ pub fn add_bank_snapshot(
             bank_snapshot_path.display(),
         );

+        let (_, measure_flush) = measure!(for storage in snapshot_storages {
+            storage
+                .flush()
+                .map_err(|err| AddBankSnapshotError::FlushStorage(err, storage.get_path()))?;
+        });
+
         // We are constructing the snapshot directory to contain the full snapshot state information to allow
         // constructing a bank from this directory.  It acts like an archive to include the full state.
         // The set of the account storages files is the necessary part of this snapshot state.  Hard-link them
@@ -157,6 +163,7 @@ pub fn add_bank_snapshot(
             ("slot", slot, i64),
             ("bank_size", bank_snapshot_consumed_size, i64),
             ("status_cache_size", status_cache_consumed_size, i64),
+            ("flush_storages_us", measure_flush.as_us(), i64),
             ("hard_link_storages_us", measure_hard_linking.as_us(), i64),
             ("bank_serialize_us", bank_serialize.as_us(), i64),
             (
diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 89c489945f0cb0..bb4891e67802fa 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -418,6 +418,9 @@ pub enum AddBankSnapshotError {
     #[error("failed to create snapshot dir '{1}': {0}")]
     CreateSnapshotDir(#[source] IoError, PathBuf),

+    #[error("failed to flush storage '{1}': {0}")]
+    FlushStorage(#[source] AccountsFileError, PathBuf),
+
     #[error("failed to hard link storages: {0}")]
     HardLinkStorages(#[source] HardLinkStoragesToSnapshotError),

@@ -507,9 +510,6 @@ pub enum HardLinkStoragesToSnapshotError {
     #[error("failed to create accounts hard links dir '{1}': {0}")]
     CreateAccountsHardLinksDir(#[source] IoError, PathBuf),

-    #[error("failed to flush storage: {0}")]
-    FlushStorage(#[source] AccountsFileError),
-
     #[error("failed to get the snapshot's accounts hard link dir: {0}")]
     GetSnapshotHardLinksDir(#[from] GetSnapshotAccountsHardLinkDirError),

@@ -1259,9 +1259,6 @@ pub fn hard_link_storages_to_snapshot(
     let mut account_paths: HashSet<PathBuf> = HashSet::new();
     for storage in snapshot_storages {
-        storage
-            .flush()
-            .map_err(HardLinkStoragesToSnapshotError::FlushStorage)?;
         let storage_path = storage.accounts.get_path();
         let snapshot_hardlink_dir = get_snapshot_accounts_hardlink_dir(
             &storage_path,

From d9dfe0f8cae67b0aa9c9473fece24ca4a455c14d Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Mon, 1 Apr 2024 14:48:17 -0500
Subject: [PATCH 122/153] rework account load with
 check_and_get_loaded_account_shared_data (#506)

---
 accounts-db/src/accounts_db.rs | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index ff6bf4a9aed4ea..943e1152348818 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -807,6 +807,26 @@ pub enum LoadedAccountAccessor<'a> {
 }

 impl<'a> LoadedAccountAccessor<'a> {
+    fn check_and_get_loaded_account_shared_data(&mut self) -> AccountSharedData {
+        // all of these following .expect() and .unwrap() are like serious logic errors,
+        // ideal for representing this as rust type system....
+
+        match self {
+            LoadedAccountAccessor::Stored(Some((maybe_storage_entry, offset))) => {
+                // If we do find the storage entry, we can guarantee that the storage entry is
+                // safe to read from because we grabbed a reference to the storage entry while it
+                // was still in the storage map. This means even if the storage entry is removed
+                // from the storage map after we grabbed the storage entry, the recycler should not
+                // reset the storage entry until we drop the reference to the storage entry.
+                maybe_storage_entry
+                    .get_stored_account_meta(*offset)
+                    .map(|account| account.to_account_shared_data())
+                    .expect("If a storage entry was found in the storage map, it must not have been reset yet")
+            }
+            _ => self.check_and_get_loaded_account().take_account(),
+        }
+    }
+
     fn check_and_get_loaded_account(&mut self) -> LoadedAccount {
         // all of these following .expect() and .unwrap() are like serious logic errors,
         // ideal for representing this as rust type system....
@@ -5343,9 +5363,10 @@ impl AccountsDb {
             max_root,
             load_hint,
         )?;
+        // note that the account being in the cache could be different now than it was previously
+        // since the cache could be flushed in between the 2 calls.
         let in_write_cache = matches!(account_accessor, LoadedAccountAccessor::Cached(_));
-        let loaded_account = account_accessor.check_and_get_loaded_account();
-        let account = loaded_account.take_account();
+        let account = account_accessor.check_and_get_loaded_account_shared_data();
         if matches!(load_zero_lamports, LoadZeroLamports::None) && account.is_zero_lamport() {
             return None;
         }

From a5aee4872253407f1e3066c17e7301567516e509 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Mon, 1 Apr 2024 13:54:14 -0700
Subject: [PATCH 123/153] Rename append vec to accounts file for
 CurrentAncientAppendVec (#447)

---
 accounts-db/src/accounts_db.rs      | 170 ++++++++++++++--------------
 runtime/src/serde_snapshot/tests.rs |   8 +-
 2 files changed, 90 insertions(+), 88 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 943e1152348818..9360d93df57553 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -330,21 +330,21 @@ pub enum StoreReclaims {
 /// If a caller uses it before initializing it, it will be a runtime unwrap() error, similar to an assert.
 /// That condition is an illegal use pattern and is justifiably an assertable condition.
 #[derive(Default)]
-struct CurrentAncientAppendVec {
-    slot_and_append_vec: Option<(Slot, Arc<AccountStorageEntry>)>,
+struct CurrentAncientAccountsFile {
+    slot_and_accounts_file: Option<(Slot, Arc<AccountStorageEntry>)>,
 }

-impl CurrentAncientAppendVec {
-    fn new(slot: Slot, append_vec: Arc<AccountStorageEntry>) -> CurrentAncientAppendVec {
+impl CurrentAncientAccountsFile {
+    fn new(slot: Slot, append_vec: Arc<AccountStorageEntry>) -> CurrentAncientAccountsFile {
         Self {
-            slot_and_append_vec: Some((slot, append_vec)),
+            slot_and_accounts_file: Some((slot, append_vec)),
         }
     }

-    /// Create ancient append vec for a slot
-    ///   min_bytes: the new append vec needs to have at least this capacity
+    /// Create ancient accounts file for a slot
+    ///   min_bytes: the new accounts file needs to have at least this capacity
     #[must_use]
-    fn create_ancient_append_vec<'a>(
+    fn create_ancient_accounts_file<'a>(
         &mut self,
         slot: Slot,
         db: &'a AccountsDb,
@@ -362,21 +362,21 @@ impl CurrentAncientAppendVec {
         db: &'a AccountsDb,
         min_bytes: usize,
     ) -> Option<ShrinkInProgress<'a>> {
-        if self.slot_and_append_vec.is_none() {
-            Some(self.create_ancient_append_vec(slot, db, min_bytes))
+        if self.slot_and_accounts_file.is_none() {
+            Some(self.create_ancient_accounts_file(slot, db, min_bytes))
         } else {
             None
         }
     }

-    /// note this requires that 'slot_and_append_vec' is Some
+    /// note this requires that 'slot_and_accounts_file' is Some
     fn slot(&self) -> Slot {
-        self.slot_and_append_vec.as_ref().unwrap().0
+        self.slot_and_accounts_file.as_ref().unwrap().0
     }

-    /// note this requires that 'slot_and_append_vec' is Some
-    fn append_vec(&self) -> &Arc<AccountStorageEntry> {
-        &self.slot_and_append_vec.as_ref().unwrap().1
+    /// note this requires that 'slot_and_accounts_file' is Some
+    fn accounts_file(&self) -> &Arc<AccountStorageEntry> {
+        &self.slot_and_accounts_file.as_ref().unwrap().1
     }

     /// helper function to cleanup call to 'store_accounts_frozen'
@@ -389,16 +389,16 @@ impl CurrentAncientAccountsFile {
     ) -> (StoreAccountsTiming, u64) {
         let accounts = accounts_to_store.get(storage_selector);

-        let previous_available = self.append_vec().accounts.remaining_bytes();
+        let previous_available = self.accounts_file().accounts.remaining_bytes();
         let timing = db.store_accounts_frozen(
             (self.slot(), accounts, accounts_to_store.slot()),
             None::<Vec<AccountHash>>,
-            self.append_vec(),
+            self.accounts_file(),
             None,
             StoreReclaims::Ignore,
         );
         let bytes_written =
-            previous_available.saturating_sub(self.append_vec().accounts.remaining_bytes());
+            previous_available.saturating_sub(self.accounts_file().accounts.remaining_bytes());
         assert_eq!(
             bytes_written,
             u64_align!(accounts_to_store.get_bytes(storage_selector)) as u64
@@ -444,7 +444,7 @@ impl AncientSlotPubkeys {
         &mut self,
         slot: Slot,
         db: &AccountsDb,
-        current_ancient: &CurrentAncientAppendVec,
+        current_ancient: &CurrentAncientAccountsFile,
         to_store: &AccountsToStore,
     ) {
         if slot != current_ancient.slot() {
             let accounts = to_store.get(StorageSelector::Primary);
             if Some(current_ancient.slot()) != self.inner.as_ref().map(|ap| ap.slot) {
                 let pubkeys = current_ancient
-                    .append_vec()
+                    .accounts_file()
                     .accounts
                     .account_iter()
                     .map(|account| *account.pubkey())
@@ -4292,16 +4292,16 @@ impl AccountsDb {
     /// get the storage from 'slot' to squash
     /// or None if this slot should be skipped
     /// side effect could be updating 'current_ancient'
-    fn get_storage_to_move_to_ancient_append_vec(
+    fn get_storage_to_move_to_ancient_accounts_file(
        &self,
         slot: Slot,
-        current_ancient: &mut CurrentAncientAppendVec,
+        current_ancient: &mut CurrentAncientAccountsFile,
         can_randomly_shrink: bool,
     ) -> Option<Arc<AccountStorageEntry>> {
         self.storage
             .get_slot_storage_entry(slot)
             .and_then(|storage| {
-                self.should_move_to_ancient_append_vec(
+                self.should_move_to_ancient_accounts_file(
                     &storage,
                     current_ancient,
                     slot,
@@ -4317,10 +4317,10 @@ impl AccountsDb {
     /// can_randomly_shrink: true if ancient append vecs that otherwise don't qualify to be shrunk can be randomly shrunk
     ///  this is convenient for a running system
     ///  this is not useful for testing
-    fn should_move_to_ancient_append_vec(
+    fn should_move_to_ancient_accounts_file(
         &self,
         storage: &Arc<AccountStorageEntry>,
-        current_ancient: &mut CurrentAncientAppendVec,
+        current_ancient: &mut CurrentAncientAccountsFile,
         slot: Slot,
         can_randomly_shrink: bool,
     ) -> bool {
@@ -4363,7 +4363,7 @@ impl AccountsDb {
                 return true;
             }
             // this slot is ancient and can become the 'current' ancient for other slots to be squashed into
-            *current_ancient = CurrentAncientAppendVec::new(slot, Arc::clone(storage));
+            *current_ancient = CurrentAncientAccountsFile::new(slot, Arc::clone(storage));
             return false; // we're done with this slot - this slot IS the ancient append vec
         }

@@ -4382,7 +4382,7 @@ impl AccountsDb {
         let mut guard = None;

         // the ancient append vec currently being written to
-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();
         let mut dropped_roots = vec![];

         // we have to keep track of what pubkeys exist in the current ancient append vec so we can unref correctly
         let len = sorted_slots.len();
         for slot in sorted_slots {
-            let Some(old_storage) = self.get_storage_to_move_to_ancient_append_vec(
+            let Some(old_storage) = self.get_storage_to_move_to_ancient_accounts_file(
                 slot,
                 &mut current_ancient,
                 can_randomly_shrink,
@@ -4436,7 +4436,7 @@ impl AccountsDb {
         &self,
         slot: Slot,
         old_storage: &Arc<AccountStorageEntry>,
-        current_ancient: &mut CurrentAncientAppendVec,
+        current_ancient: &mut CurrentAncientAccountsFile,
         ancient_slot_pubkeys: &mut AncientSlotPubkeys,
         dropped_roots: &mut Vec<Slot>,
     ) {
@@ -4461,7 +4461,7 @@ impl AccountsDb {
             current_ancient.create_if_necessary(slot, self, shrink_collect.alive_total_bytes)
         );
         stats_sub.create_and_insert_store_elapsed_us = create_and_insert_store_elapsed_us;
-        let available_bytes = current_ancient.append_vec().accounts.remaining_bytes();
+        let available_bytes = current_ancient.accounts_file().accounts.remaining_bytes();
         // split accounts in 'slot' into:
         // 'Primary', which can fit in 'current_ancient'
         // 'Overflow', which will have to go into a new ancient append vec at 'slot'
@@ -4495,7 +4495,7 @@ impl AccountsDb {
             // Now we create an ancient append vec at `slot` to store the overflows.
             let (shrink_in_progress_overflow, time_us) = measure_us!(current_ancient
-                .create_ancient_append_vec(
+                .create_ancient_accounts_file(
                     slot,
                     self,
                     to_store.get_bytes(StorageSelector::Overflow)
@@ -9401,7 +9401,7 @@ impl AccountsDb {
         self.flush_accounts_cache(true, Some(root));
     }

-    pub fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
+    pub fn all_account_count_in_accounts_file(&self, slot: Slot) -> usize {
         let store = self.storage.get_slot_storage_entry(slot);
         if let Some(store) = store {
             let count = store.all_accounts().len();
@@ -9597,10 +9597,10 @@ pub mod tests {
         }
     }

-    impl CurrentAncientAppendVec {
-        /// note this requires that 'slot_and_append_vec' is Some
+    impl CurrentAncientAccountsFile {
+        /// note this requires that 'slot_and_accounts_file' is Some
         fn append_vec_id(&self) -> AccountsFileId {
-            self.append_vec().append_vec_id()
+            self.accounts_file().append_vec_id()
         }
     }

@@ -9672,7 +9672,7 @@ pub mod tests {
     }

     #[test]
-    fn test_create_ancient_append_vec() {
+    fn test_create_ancient_accounts_file() {
         let ancient_append_vec_size = ancient_append_vecs::get_ancient_append_vec_capacity();
         let db = AccountsDb::new_single_for_tests();

         {
             // create an ancient appendvec from a small appendvec, the size of
             // the ancient appendvec should be the size of the ideal ancient
             // appendvec size.
-            let mut current_ancient = CurrentAncientAppendVec::default();
+            let mut current_ancient = CurrentAncientAccountsFile::default();
             let slot0 = 0;
             // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
             let _existing_append_vec = db.create_and_insert_store(slot0, 1000, "test");
-            let _ = current_ancient.create_ancient_append_vec(slot0, &db, 0);
+            let _ = current_ancient.create_ancient_accounts_file(slot0, &db, 0);
             assert_eq!(
-                current_ancient.append_vec().capacity(),
+                current_ancient.accounts_file().capacity(),
                 ancient_append_vec_size
             );
         }

         {
             // create an ancient appendvec from a large appendvec (bigger than
             // current ancient_append_vec_size), the ancient appendvec should be
             // the size of the bigger ancient appendvec size.
-            let mut current_ancient = CurrentAncientAppendVec::default();
+            let mut current_ancient = CurrentAncientAccountsFile::default();
             let slot1 = 1;
             // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
             let _existing_append_vec = db.create_and_insert_store(slot1, 1000, "test");
-            let _ = current_ancient.create_ancient_append_vec(
+            let _ = current_ancient.create_ancient_accounts_file(
                 slot1,
                 &db,
                 2 * ancient_append_vec_size as usize,
             );
             assert_eq!(
-                current_ancient.append_vec().capacity(),
+                current_ancient.accounts_file().capacity(),
                 2 * ancient_append_vec_size
             );
         }
     }

         let slot0 = 0;
         let slot1 = 1;
         let available_bytes = 1_000_000;
-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();

         // setup 'to_store'
         let pubkey = Pubkey::from([1; 32]);

         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot0, 1000, "test");
         {
-            let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot0, &db, 0);
+            let _shrink_in_progress = current_ancient.create_ancient_accounts_file(slot0, &db, 0);
         }
         let mut ancient_slot_pubkeys = AncientSlotPubkeys::default();
         assert!(ancient_slot_pubkeys.inner.is_none());

         // different slot than current_ancient, so update 'ancient_slot_pubkeys'
         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot1, 1000, "test");
-        let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot1, &db, 0);
+        let _shrink_in_progress = current_ancient.create_ancient_accounts_file(slot1, &db, 0);
         let slot2 = 2;
         ancient_slot_pubkeys.maybe_unref_accounts_already_in_ancient(
             slot2,
@@ -12811,7 +12811,7 @@ pub mod tests {

         assert_eq!(
             pubkey_count,
-            accounts.all_account_count_in_append_vec(shrink_slot)
+            accounts.all_account_count_in_accounts_file(shrink_slot)
         );

         // Only, try to shrink stale slots, nothing happens because shrink ratio
         accounts.shrink_candidate_slots(&EpochSchedule::default());
         assert_eq!(
             pubkey_count,
-            accounts.all_account_count_in_append_vec(shrink_slot)
+            accounts.all_account_count_in_accounts_file(shrink_slot)
         );

         // Now, do full-shrink.
         accounts.shrink_all_slots(false, None, &EpochSchedule::default());
         assert_eq!(
             pubkey_count_after_shrink,
-            accounts.all_account_count_in_append_vec(shrink_slot)
+            accounts.all_account_count_in_accounts_file(shrink_slot)
         );
     }

     #[test]
     #[should_panic(expected = "called `Option::unwrap()` on a `None` value")]
     fn test_current_ancient_slot_assert() {
-        let current_ancient = CurrentAncientAppendVec::default();
+        let current_ancient = CurrentAncientAccountsFile::default();
         _ = current_ancient.slot();
     }

     #[test]
     #[should_panic(expected = "called `Option::unwrap()` on a `None` value")]
     fn test_current_ancient_append_vec_assert() {
-        let current_ancient = CurrentAncientAppendVec::default();
-        _ = current_ancient.append_vec();
+        let current_ancient = CurrentAncientAccountsFile::default();
+        _ = current_ancient.accounts_file();
     }

     #[test]
     fn test_current_ancient_simple() {
         let db = AccountsDb::new_single_for_tests();
         let size = 1000;
         let append_vec = db.create_and_insert_store(slot, size, "test");
-        let mut current_ancient = CurrentAncientAppendVec::new(slot, append_vec.clone());
+        let mut current_ancient = CurrentAncientAccountsFile::new(slot, append_vec.clone());
         assert_eq!(current_ancient.slot(), slot);
         assert_eq!(current_ancient.append_vec_id(), append_vec.append_vec_id());
         assert_eq!(
-            current_ancient.append_vec().append_vec_id(),
+            current_ancient.accounts_file().append_vec_id(),
             append_vec.append_vec_id()
         );

         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot2, 1000, "test");

-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();
         let mut _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0);
         let id = current_ancient.append_vec_id();
         assert_eq!(current_ancient.slot(), slot2);
-        assert!(is_ancient(&current_ancient.append_vec().accounts));
+        assert!(is_ancient(&current_ancient.accounts_file().accounts));
         let slot3 = 3;
         // should do nothing
         let _shrink_in_progress = current_ancient.create_if_necessary(slot3, &db, 0);
         assert_eq!(current_ancient.slot(), slot2);
         assert_eq!(current_ancient.append_vec_id(), id);
-        assert!(is_ancient(&current_ancient.append_vec().accounts));
+        assert!(is_ancient(&current_ancient.accounts_file().accounts));
     }

     {
         // create_ancient_append_vec
         let db = AccountsDb::new_single_for_tests();
-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();
         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot2, 1000, "test");
         {
-            let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot2, &db, 0);
+            let _shrink_in_progress =
+                current_ancient.create_ancient_accounts_file(slot2, &db, 0);
         }
         let id = current_ancient.append_vec_id();
         assert_eq!(current_ancient.slot(), slot2);
-        assert!(is_ancient(&current_ancient.append_vec().accounts));
+        assert!(is_ancient(&current_ancient.accounts_file().accounts));

         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot3, 1000, "test");
-        let mut _shrink_in_progress = current_ancient.create_ancient_append_vec(slot3, &db, 0);
+        let mut _shrink_in_progress =
             current_ancient.create_ancient_accounts_file(slot3, &db, 0);
         assert_eq!(current_ancient.slot(), slot3);
-        assert!(is_ancient(&current_ancient.append_vec().accounts));
+        assert!(is_ancient(&current_ancient.accounts_file().accounts));
         assert_ne!(current_ancient.append_vec_id(), id);
     }
 }
@@ -17021,7 +17023,7 @@ pub mod tests {
         );

         // now, shrink the second ancient append vec into the first one
-        let mut current_ancient = CurrentAncientAppendVec::new(
+        let mut current_ancient = CurrentAncientAccountsFile::new(
             ancient_slot,
             db.get_storage_for_slot(ancient_slot).unwrap(),
         );
@@ -17407,7 +17409,7 @@ pub mod tests {
     }

     #[test]
-    fn test_should_move_to_ancient_append_vec() {
+    fn test_should_move_to_ancient_accounts_file() {
         solana_logger::setup();
         let db = AccountsDb::new_single_for_tests();
         let slot5 = 5;

         let write_version1 = 0;
         let pubkey1 = solana_sdk::pubkey::new_rand();
         let storage = sample_storage_with_entries(&tf, write_version1, slot5, &pubkey1, false);
-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();

-        let should_move = db.should_move_to_ancient_append_vec(
+        let should_move = db.should_move_to_ancient_accounts_file(
             &storage,
             &mut current_ancient,
             slot5,
             CAN_RANDOMLY_SHRINK_FALSE,
         );
-        assert!(current_ancient.slot_and_append_vec.is_none());
+        assert!(current_ancient.slot_and_accounts_file.is_none());
         // slot is not ancient, so it is good to move
         assert!(should_move);

-        current_ancient = CurrentAncientAppendVec::new(slot5, Arc::clone(&storage)); // just 'some', contents don't matter
-        let should_move = db.should_move_to_ancient_append_vec(
+        current_ancient = CurrentAncientAccountsFile::new(slot5, Arc::clone(&storage)); // just 'some', contents don't matter
+        let should_move = db.should_move_to_ancient_accounts_file(
             &storage,
             &mut current_ancient,
             slot5,
             CAN_RANDOMLY_SHRINK_FALSE,
         );
         // should have kept the same 'current_ancient'
         assert_eq!(current_ancient.slot(), slot5);
-        assert_eq!(current_ancient.append_vec().slot(), slot5);
+        assert_eq!(current_ancient.accounts_file().slot(), slot5);
         assert_eq!(current_ancient.append_vec_id(), storage.append_vec_id());

         // slot is not ancient, so it is good to move
         assert!(should_move);

         // now, create an ancient slot and make sure that it does NOT think it needs to be moved and that it becomes the ancient append vec to use
-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();
         let slot1_ancient = 1;
         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot1_ancient, 1000, "test");
         let ancient1 = db
             .get_store_for_shrink(slot1_ancient, get_ancient_append_vec_capacity())
             .new_storage()
             .clone();
-        let should_move = db.should_move_to_ancient_append_vec(
+        let should_move = db.should_move_to_ancient_accounts_file(
             &ancient1,
             &mut current_ancient,
             slot1_ancient,
             CAN_RANDOMLY_SHRINK_FALSE,
         );

         // try to move ancient2
         // current should become ancient2
         let slot2_ancient = 2;
-        let mut current_ancient = CurrentAncientAppendVec::new(slot1_ancient, ancient1.clone());
+        let mut current_ancient = CurrentAncientAccountsFile::new(slot1_ancient, ancient1.clone());
         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec =
             db.create_and_insert_store(slot2_ancient, 1000, "test");
         let ancient2 = db
             .get_store_for_shrink(slot2_ancient, get_ancient_append_vec_capacity())
             .new_storage()
             .clone();
-        let should_move = db.should_move_to_ancient_append_vec(
+        let should_move = db.should_move_to_ancient_accounts_file(
             &ancient2,
             &mut current_ancient,
             slot2_ancient,
             CAN_RANDOMLY_SHRINK_FALSE,
         );

         // now try a full ancient append vec
         // current is None
         let slot3_full_ancient = 3;
-        let mut current_ancient = CurrentAncientAppendVec::default();
+        let mut current_ancient = CurrentAncientAccountsFile::default();
         // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense
         let _existing_append_vec = db.create_and_insert_store(slot3_full_ancient, 1000, "test");
-        let full_ancient_3 = make_full_ancient_append_vec(&db, slot3_full_ancient, false);
-        let should_move = db.should_move_to_ancient_append_vec(
+        let full_ancient_3 = make_full_ancient_accounts_file(&db, slot3_full_ancient, false);
+        let should_move = db.should_move_to_ancient_accounts_file(
             &full_ancient_3.new_storage().clone(),
             &mut current_ancient,
             slot3_full_ancient,
             CAN_RANDOMLY_SHRINK_FALSE,
         );
         assert_eq!(current_ancient.slot(), slot3_full_ancient);

         // now set current_ancient to something
-        let mut current_ancient = CurrentAncientAppendVec::new(slot1_ancient, ancient1.clone());
-        let should_move = db.should_move_to_ancient_append_vec(
+        let mut current_ancient = CurrentAncientAccountsFile::new(slot1_ancient, ancient1.clone());
+        let should_move = db.should_move_to_ancient_accounts_file(
             &full_ancient_3.new_storage().clone(),
             &mut current_ancient,
             slot3_full_ancient,
             CAN_RANDOMLY_SHRINK_FALSE,
         );

         adjust_alive_bytes(full_ancient_3.new_storage(), 0);

         // should shrink here, returning none for current
-        let mut current_ancient = CurrentAncientAppendVec::default();
-        let should_move = db.should_move_to_ancient_append_vec(
+        let mut current_ancient = CurrentAncientAccountsFile::default();
+        let should_move = db.should_move_to_ancient_accounts_file(
             &full_ancient_3.new_storage().clone(),
             &mut current_ancient,
             slot3_full_ancient,
             CAN_RANDOMLY_SHRINK_FALSE,
         );
         assert!(should_move);
-        assert!(current_ancient.slot_and_append_vec.is_none());
+        assert!(current_ancient.slot_and_accounts_file.is_none());

         // should return true here, returning current from prior
         // now set current_ancient to something and see if it still goes to None
-        let mut current_ancient = CurrentAncientAppendVec::new(slot1_ancient, ancient1.clone());
-        let should_move = db.should_move_to_ancient_append_vec(
+        let mut current_ancient = CurrentAncientAccountsFile::new(slot1_ancient, ancient1.clone());
+        let should_move = db.should_move_to_ancient_accounts_file(
             &Arc::clone(full_ancient_3.new_storage()),
             &mut current_ancient,
             slot3_full_ancient,
             CAN_RANDOMLY_SHRINK_FALSE,
         );

         adjust_alive_bytes(ancient, ancient.capacity() as usize);
     }

-    fn make_full_ancient_append_vec(
+    fn make_full_ancient_accounts_file(
         db: &AccountsDb,
         slot: Slot,
         mark_alive: bool,
diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs
index 2ac97392b45e42..ea5957a6be07ec 100644
--- a/runtime/src/serde_snapshot/tests.rs
+++ b/runtime/src/serde_snapshot/tests.rs
@@ -814,12 +814,12 @@ mod serde_snapshot_tests {

         assert_eq!(
             pubkey_count,
-            accounts.all_account_count_in_append_vec(shrink_slot)
+            accounts.all_account_count_in_accounts_file(shrink_slot)
         );
         accounts.shrink_all_slots(*startup, None, &EpochSchedule::default());
         assert_eq!(
             pubkey_count_after_shrink,
-            accounts.all_account_count_in_append_vec(shrink_slot)
+            accounts.all_account_count_in_accounts_file(shrink_slot)
         );

         let no_ancestors = Ancestors::default();

         accounts.shrink_all_slots(*startup, None, &epoch_schedule);
         assert_eq!(
             pubkey_count_after_shrink,
-            accounts.all_account_count_in_append_vec(shrink_slot)
+            accounts.all_account_count_in_accounts_file(shrink_slot)
         );
     }
 }

         let tmp = tempfile::tempdir().unwrap();
         let original_path = tmp.path().join("123.456");

-        // In remap_append_vec() we want to handle EEXIST (collisions), but we want to return all
+        // In remap_accounts_file() we want to handle EEXIST (collisions), but we want to return all
         // other errors
         let next_append_vec_id = AtomicAccountsFileId::new(457);
         let num_collisions = AtomicUsize::new(0);

From 8153c529389ed6d11d692f2021a45e1323d6cbd4 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Mon, 1 Apr 2024 13:54:45 -0700
Subject: [PATCH 124/153] [TieredStorage] enum-based AccountsFileProvider
 (#457)

* [TieredStorage] enum-based AccountsFileProvider

* derive traits

* remove refs

---------

Co-authored-by: jeff washington
---
 accounts-db/src/account_storage.rs |  5 +-
 accounts-db/src/accounts_db.rs     | 80 ++++++++++++++++++++++++------
 accounts-db/src/accounts_file.rs   | 18 +++++++
 accounts-db/src/sorted_storages.rs | 10 +++-
 4 files changed, 96 insertions(+), 17 deletions(-)

diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs
index e1fb4f092ad623..185a3324094ee4 100644
--- a/accounts-db/src/account_storage.rs
+++ b/accounts-db/src/account_storage.rs
@@ -276,7 +276,7 @@ impl Default for AccountStorageStatus {

 #[cfg(test)]
 pub(crate) mod tests {
-    use {super::*, std::path::Path};
+    use {super::*, crate::accounts_file::AccountsFileProvider, std::path::Path};

     #[test]
     fn test_shrink_in_progress() {
             slot,
             id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));
         let entry2 = Arc::new(AccountStorageEntry::new(
             common_store_path,
             slot,
             id,
             store_file_size2,
+            AccountsFileProvider::AppendVec,
         ));
         storage
             .map
@@ -353,6 +355,7 @@ pub(crate) mod tests {
                 slot,
                 id,
                 store_file_size,
+                AccountsFileProvider::AppendVec,
             ))
         }
         fn get_test_storage(&self) -> Arc<AccountStorageEntry> {
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 9360d93df57553..4b5f027bf38fcf 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -34,7 +34,8 @@ use {
         },
         accounts_cache::{AccountsCache, CachedAccount, SlotCache},
         accounts_file::{
-            AccountsFile, AccountsFileError, MatchAccountOwnerError, ALIGN_BOUNDARY_OFFSET,
+            AccountsFile, AccountsFileError, AccountsFileProvider, MatchAccountOwnerError,
+            ALIGN_BOUNDARY_OFFSET,
         },
         accounts_hash::{
             AccountHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, AccountsHasher,
@@ -57,9 +58,7 @@ use {
         ancient_append_vecs::{
             get_ancient_append_vec_capacity, is_ancient, AccountsToStore, StorageSelector,
         },
-        append_vec::{
-            aligned_stored_size, AppendVec, APPEND_VEC_MMAPPED_FILES_OPEN, STORE_META_OVERHEAD,
-        },
+        append_vec::{aligned_stored_size, APPEND_VEC_MMAPPED_FILES_OPEN, STORE_META_OVERHEAD},
         cache_hash_data::{
             CacheHashData, CacheHashDataFileReference, DeletionPolicy as CacheHashDeletionPolicy,
         },
@@ -1053,10 +1052,16 @@ pub struct AccountStorageEntry {
 }

 impl AccountStorageEntry {
-    pub fn new(path: &Path, slot: Slot, id: AccountsFileId, file_size: u64) -> Self {
+    pub fn new(
+        path: &Path,
+        slot: Slot,
+        id: AccountsFileId,
+        file_size: u64,
+        provider: AccountsFileProvider,
+    ) -> Self {
         let tail = AccountsFile::file_name(slot, id);
         let path = Path::new(path).join(tail);
-        let accounts = AccountsFile::AppendVec(AppendVec::new(path, true, file_size as usize));
+        let accounts = provider.new_writable(path, file_size);

         Self {
             id,
@@ -2536,7 +2541,13 @@ impl AccountsDb {
     }

     fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry {
-        AccountStorageEntry::new(path, slot, self.next_id(), size)
+        AccountStorageEntry::new(
+            path,
+            slot,
+            self.next_id(),
+            size,
+            AccountsFileProvider::AppendVec,
+        )
     }

     pub fn expected_cluster_type(&self) -> ClusterType {
@@ -9496,7 +9507,7 @@ pub mod tests {
         accounts_hash::MERKLE_FANOUT,
         accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude},
         ancient_append_vecs,
-        append_vec::{test_utils::TempFile, AppendVecStoredAccountMeta},
+        append_vec::{test_utils::TempFile, AppendVec, AppendVecStoredAccountMeta},
         cache_hash_data::CacheHashDataFile,
     },
     assert_matches::assert_matches,
@@ -10507,7 +10518,13 @@ pub mod tests {
         let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
         let slot_expected: Slot = 0;
         let size: usize = 123;
-        let data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
+        let data = AccountStorageEntry::new(
+            &paths[0],
+            slot_expected,
+            0,
+            size as u64,
+            AccountsFileProvider::AppendVec,
+        );
         let arc = Arc::new(data);
         let storages = vec![arc];
@@ -10558,7 +10575,13 @@ pub mod tests {
         let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
         let slot_expected: Slot = 0;
         let size: usize = 123;
-        let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
+        let mut data = AccountStorageEntry::new(
+            &paths[0],
+            slot_expected,
+            0,
+            size as u64,
+            AccountsFileProvider::AppendVec,
+        );
         let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024));
         data.accounts = av;
@@ -10674,7 +10697,13 @@ pub mod tests {
         let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
         let slot_expected: Slot = 0;
         let size: usize = 123;
-        let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
+        let mut data = AccountStorageEntry::new(
+            &paths[0],
+            slot_expected,
+            0,
+            size as u64,
+            AccountsFileProvider::AppendVec,
+        );
         let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024));
         data.accounts = av;
@@ -10753,7 +10782,13 @@ pub mod tests {
         let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
         let file_size = account_data_size.unwrap_or(123) * 100 / fill_percentage;
         let size_aligned: usize = aligned_stored_size(file_size as usize);
-        let mut data = AccountStorageEntry::new(&paths[0], slot, id, size_aligned as u64);
+        let mut data = AccountStorageEntry::new(
+            &paths[0],
+            slot,
+            id,
+            size_aligned as u64,
+            AccountsFileProvider::AppendVec,
+        );
         let av = AccountsFile::AppendVec(AppendVec::new(
             &tf.path,
             true,
@@ -12863,6 +12898,7 @@ pub mod tests {
             slot_id_1,
             store1_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));
         store1.alive_bytes.store(0, Ordering::Release);

@@ -12876,6 +12912,7 @@ pub mod tests {
             slot_id_2,
             store2_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));

         // The store2's alive_ratio is 0.5: as its page aligned alive size is 1 page.
@@ -12892,6 +12929,7 @@ pub mod tests {
             slot_id_3,
             store3_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));

         db.storage.insert(slot_id_1, Arc::clone(&store1));
@@ -12936,6 +12974,7 @@ pub mod tests {
             slot_id_1,
             store1_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));
         store1.alive_bytes.store(0, Ordering::Release);
         db.storage.insert(slot_id_1, Arc::clone(&store1));
@@ -12948,6 +12987,7 @@ pub mod tests {
             slot_id_2,
             store2_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));
         db.storage.insert(slot_id_2, Arc::clone(&store2));
@@ -12965,6 +13005,7 @@ pub mod tests {
             slot_id_3,
             store3_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));

         // The store3's alive ratio is 1.0 as its page-aligned alive size is 2 pages
@@ -13002,6 +13043,7 @@ pub mod tests {
             slot1,
             store1_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));

         // store1 has 1 page-aligned alive bytes, its alive ratio is 1/4: 0.25
@@ -13020,6 +13062,7 @@ pub mod tests {
             slot2,
             store2_id,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));

         // store2 has 2 page-aligned bytes, its alive ratio is 2/4: 0.5
@@ -15027,11 +15070,18 @@ pub mod tests {
     #[test]
     fn test_shrink_productive() {
         solana_logger::setup();
-        let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, 1024);
+        let s1 =
+            AccountStorageEntry::new(Path::new("."), 0, 0, 1024, AccountsFileProvider::AppendVec);
         let store = Arc::new(s1);
         assert!(!AccountsDb::is_shrinking_productive(0, &store));

-        let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, PAGE_SIZE * 4);
+        let s1 = AccountStorageEntry::new(
+            Path::new("."),
+            0,
+            0,
+            PAGE_SIZE * 4,
+            AccountsFileProvider::AppendVec,
+        );
         let store = Arc::new(s1);
         store.add_account((3 * PAGE_SIZE as usize) - 1);
         store.add_account(10);
@@ -15054,6 +15104,7 @@ pub mod tests {
             0,
             1,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));
         match accounts.shrink_ratio {
             AccountShrinkThreshold::TotalSpace { shrink_ratio } => {
@@ -17400,6 +17451,7 @@ pub mod tests {
             0,
             1,
             store_file_size,
+            AccountsFileProvider::AppendVec,
         ));
         let db = AccountsDb::new_single_for_tests();
         let slot0 = 0;
diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs
index 0cd11296ce50b3..251aedcd1de562 100644
--- a/accounts-db/src/accounts_file.rs
+++ b/accounts-db/src/accounts_file.rs
@@ -231,6 +231,24 @@ impl<'a> Iterator for AccountsFileIter<'a> {
     }
 }

+/// An enum that creates an AccountsFile instance with the specified format.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum AccountsFileProvider {
+    AppendVec,
+    HotStorage,
+}
+
+impl AccountsFileProvider {
+    pub fn new_writable(&self, path: impl Into<PathBuf>, file_size: u64) -> AccountsFile {
+        match self {
+            Self::AppendVec => {
+                AccountsFile::AppendVec(AppendVec::new(path, true, file_size as usize))
+            }
+            Self::HotStorage => AccountsFile::TieredStorage(TieredStorage::new_writable(path)),
+        }
+    }
+}
+
 #[cfg(test)]
 pub mod tests {
     use crate::accounts_file::AccountsFile;
diff --git a/accounts-db/src/sorted_storages.rs b/accounts-db/src/sorted_storages.rs
index 72cf084a0896f7..f47dadb249a077 100644
--- a/accounts-db/src/sorted_storages.rs
+++ b/accounts-db/src/sorted_storages.rs
@@ -196,7 +196,7 @@ mod tests {
         super::*,
         crate::{
             accounts_db::{AccountStorageEntry, AccountsFileId},
-            accounts_file::AccountsFile,
+            accounts_file::{AccountsFile, AccountsFileProvider},
             append_vec::AppendVec,
         },
         std::sync::Arc,
@@ -445,7 +445,13 @@ mod tests {
         let (_temp_dirs, paths) = crate::accounts_db::get_temp_accounts_paths(1).unwrap();
         let size: usize = 123;
         let slot = 0;
-        let mut data = AccountStorageEntry::new(&paths[0], slot, id, size as u64);
+        let mut data = AccountStorageEntry::new(
+            &paths[0],
+            slot,
+            id,
+            size as u64,
+            AccountsFileProvider::AppendVec,
+        );
         let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024));
         data.accounts = av;

From b47a4ec74d85dae0b6d5dd24a13a8923240e03af Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Mon, 1 Apr 2024 14:26:41 -0700
Subject: [PATCH 125/153] [TieredStorage] Make TieredStorage's Offset
 compatible with AccountsDB's offset (#74)

* [TieredStorage] Handles reduced-offset and offset conversion

* fix comment

---------

Co-authored-by: jeff washington
---
 accounts-db/src/account_info.rs  |  4 +--
 accounts-db/src/accounts_file.rs | 50 ++++++++++++++++++++++++--------
 2 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs
index cbec32be6499ef..5e4cb01b0c8af4 100644
--- a/accounts-db/src/account_info.rs
+++ b/accounts-db/src/account_info.rs
@@ -156,7 +156,7 @@ impl AccountInfo {
         }
     }

-    fn get_reduced_offset(offset: usize) -> OffsetReduced {
+    pub fn get_reduced_offset(offset: usize) -> OffsetReduced {
         (offset / ALIGN_BOUNDARY_OFFSET) as OffsetReduced
     }

@@ -174,7 +174,7 @@ impl AccountInfo {
         )
     }

-    fn reduced_offset_to_offset(reduced_offset: OffsetReduced) -> Offset {
+    pub fn reduced_offset_to_offset(reduced_offset: OffsetReduced) -> Offset {
         (reduced_offset as Offset) * ALIGN_BOUNDARY_OFFSET
     }

diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs
index 251aedcd1de562..e77ef9eb4568f9 100644
--- a/accounts-db/src/accounts_file.rs
+++ b/accounts-db/src/accounts_file.rs
@@ -1,5 +1,6 @@
 use {
     crate::{
+        account_info::AccountInfo,
         account_storage::meta::{
             StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta,
         },
@@ -114,16 +115,21 @@ impl AccountsFile {
     }

     /// Return (account metadata, next_index) pair for the account at the
-    /// specified `index` if any.  Otherwise return None.   Also return the
+    /// specified `offset` if any.  Otherwise return None.   Also return the
     /// index of the next entry.
-    pub fn get_account(&self, index: usize) -> Option<(StoredAccountMeta<'_>, usize)> {
+    pub fn get_account(&self, offset: usize) -> Option<(StoredAccountMeta<'_>, usize)> {
         match self {
-            Self::AppendVec(av) => av.get_account(index),
+            Self::AppendVec(av) => av.get_account(offset),
+            // Note: The conversion here is needed as the AccountsDB currently
+            // assumes all offsets are multiple of 8 while TieredStorage uses
+            // IndexOffset that is equivalent to AccountInfo::reduced_offset.
             Self::TieredStorage(ts) => ts
                 .reader()?
-                .get_account(IndexOffset(index as u32))
+                .get_account(IndexOffset(AccountInfo::get_reduced_offset(offset)))
                 .ok()?
-                .map(|(metas, index_offset)| (metas, index_offset.0 as usize)),
+                .map(|(metas, index_offset)| {
+                    (metas, AccountInfo::reduced_offset_to_offset(index_offset.0))
+                }),
         }
     }

     ) -> std::result::Result<usize, MatchAccountOwnerError> {
         match self {
             Self::AppendVec(av) => av.account_matches_owners(offset, owners),
+            // Note: The conversion here is needed as the AccountsDB currently
+            // assumes all offsets are multiple of 8 while TieredStorage uses
+            // IndexOffset that is equivalent to AccountInfo::reduced_offset.
             Self::TieredStorage(ts) => {
                 let Some(reader) = ts.reader() else {
                     return Err(MatchAccountOwnerError::UnableToLoad);
                 };
-                reader.account_matches_owners(IndexOffset(offset as u32), owners)
+                reader.account_matches_owners(
+                    IndexOffset(AccountInfo::get_reduced_offset(offset)),
+                    owners,
+                )
             }
         }
     }

     pub fn accounts(&self, offset: usize) -> Vec<StoredAccountMeta> {
         match self {
             Self::AppendVec(av) => av.accounts(offset),
+            // Note: The conversion here is needed as the AccountsDB currently
+            // assumes all offsets are multiple of 8 while TieredStorage uses
+            // IndexOffset that is equivalent to AccountInfo::reduced_offset.
             Self::TieredStorage(ts) => ts
                 .reader()
-                .and_then(|reader| reader.accounts(IndexOffset(offset as u32)).ok())
+                .and_then(|reader| {
+                    reader
+                        .accounts(IndexOffset(AccountInfo::get_reduced_offset(offset)))
+                        .ok()
+                })
                 .unwrap_or_default(),
         }
     }

     ) -> Option<Vec<StoredAccountInfo>> {
         match self {
             Self::AppendVec(av) => av.append_accounts(accounts, skip),
-            // Currently we only support HOT_FORMAT. If we later want to use
-            // a different format, then we will need a way to pass-in it.
-            // TODO: consider adding function like write_accounts_to_hot_storage() or something
-            // to hide implementation detail.
-            Self::TieredStorage(ts) => ts.write_accounts(accounts, skip, &HOT_FORMAT).ok(),
+            // Note: The conversion here is needed as the AccountsDB currently
+            // assumes all offsets are multiple of 8 while TieredStorage uses
+            // IndexOffset that is equivalent to AccountInfo::reduced_offset.
+ Self::TieredStorage(ts) => ts + .write_accounts(accounts, skip, &HOT_FORMAT) + .map(|mut infos| { + infos.iter_mut().for_each(|info| { + info.offset = AccountInfo::reduced_offset_to_offset(info.offset as u32); + }); + infos + }) + .ok(), } } } From 3f17532b1173bc72b72f4a5a53da77cba0fb01ee Mon Sep 17 00:00:00 2001 From: Joe C Date: Mon, 1 Apr 2024 16:30:23 -0500 Subject: [PATCH 126/153] Runtime: Core BPF Migration: Path to migrate a builtin to Core BPF (#408) * runtime: core_bpf_migration: add migration path * runtime: core_bpf_migration: add tests for migration path * comments * function name * rent lamports * bank operations ordering * make migration method a bank method * update deployment slot to current slot * invoke loader deploy directly --- programs/bpf_loader/src/lib.rs | 23 + .../bank/builtins/core_bpf_migration/error.rs | 8 +- .../bank/builtins/core_bpf_migration/mod.rs | 547 ++++++++++++++++++ 3 files changed, 577 insertions(+), 1 deletion(-) diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 41af14aab8121d..dfb27ec2eb97b1 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -168,6 +168,29 @@ macro_rules! deploy_program { }}; } +/// Directly deploy a program using a provided invoke context. +/// This function should only be invoked from the runtime, since it does not +/// provide any account loads or checks. +pub fn direct_deploy_program( + invoke_context: &mut InvokeContext, + program_id: &Pubkey, + loader_key: &Pubkey, + account_size: usize, + elf: &[u8], + slot: Slot, +) -> Result<(), InstructionError> { + deploy_program!( + invoke_context, + *program_id, + loader_key, + account_size, + slot, + {}, + elf, + ); + Ok(()) +} + fn write_program_data( program_data_offset: usize, bytes: &[u8], diff --git a/runtime/src/bank/builtins/core_bpf_migration/error.rs b/runtime/src/bank/builtins/core_bpf_migration/error.rs index 2bc54a46dabb74..0009781d2125d8 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/error.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/error.rs @@ -1,8 +1,14 @@ -use {solana_sdk::pubkey::Pubkey, thiserror::Error}; +use { + solana_sdk::{instruction::InstructionError, pubkey::Pubkey}, + thiserror::Error, +}; /// Errors returned by a Core BPF migration. 
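The new `InstructionError` variant added just below carries thiserror's `#[from]` attribute, which derives a `From` impl so the migration code later in this patch can apply `?` directly to loader calls that return `InstructionError`. A minimal sketch of that pattern, assuming the thiserror crate and using a stand-in error type rather than the real solana_sdk one:

    use thiserror::Error;

    // Stand-in for solana_sdk::instruction::InstructionError in this sketch.
    #[derive(Debug, Error)]
    #[error("invalid account data")]
    struct InstructionError;

    #[derive(Debug, Error)]
    enum CoreBpfMigrationError {
        // `#[from]` derives From<InstructionError>, so `?` converts for free.
        #[error("Solana instruction error: {0:?}")]
        InstructionError(#[from] InstructionError),
    }

    fn deploy() -> Result<(), InstructionError> {
        Err(InstructionError)
    }

    fn migrate() -> Result<(), CoreBpfMigrationError> {
        deploy()?; // converted via the derived From impl
        Ok(())
    }

    fn main() {
        assert!(matches!(
            migrate(),
            Err(CoreBpfMigrationError::InstructionError(_))
        ));
    }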
#[derive(Debug, Error)] pub enum CoreBpfMigrationError { + /// Solana instruction error + #[error("Solana instruction error: {0:?}")] + InstructionError(#[from] InstructionError), /// Bincode serialization error #[error("Bincode serialization error: {0:?}")] BincodeError(#[from] bincode::Error), diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 6e1af6b6c17184..4f2ebf56f5000d 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -3,7 +3,554 @@ pub(crate) mod error; mod source_upgradeable_bpf; mod target_builtin; +use { + crate::bank::Bank, + error::CoreBpfMigrationError, + solana_program_runtime::{ + invoke_context::InvokeContext, loaded_programs::LoadedProgramsForTxBatch, + sysvar_cache::SysvarCache, + }, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + clock::Slot, + hash::Hash, + instruction::InstructionError, + pubkey::Pubkey, + transaction_context::TransactionContext, + }, + source_upgradeable_bpf::SourceUpgradeableBpf, + std::sync::atomic::Ordering::Relaxed, + target_builtin::TargetBuiltin, +}; + +/// Identifies the type of built-in program targeted for Core BPF migration. +/// The type of target determines whether the program should have a program +/// account or not, which is checked before migration. +#[derive(Debug)] pub(crate) enum CoreBpfMigrationTargetType { + /// A standard (stateful) builtin program must have a program account. Builtin, + /// A stateless builtin must not have a program account. Stateless, } + +/// Configuration for migrating a built-in program to Core BPF. +#[derive(Debug)] +pub(crate) struct CoreBpfMigrationConfig { + /// The program ID of the source program to be used to replace the builtin. + pub source_program_id: Pubkey, + /// The feature gate to trigger the migration to Core BPF. + /// Note: This feature gate should never be the same as any builtin's + /// `enable_feature_id`. It should always be a feature gate that will be + /// activated after the builtin is already enabled. + pub feature_id: Pubkey, + /// The type of target to replace. + pub migration_target: CoreBpfMigrationTargetType, + /// Static message used to emit datapoint logging. + /// This is used to identify the migration in the logs. + /// Should be unique to the migration, ie: + /// "migrate_{builtin/stateless}_to_core_bpf_{program_name}". + pub datapoint_name: &'static str, +} + +fn checked_add(a: usize, b: usize) -> Result { + a.checked_add(b) + .ok_or(CoreBpfMigrationError::ArithmeticOverflow) +} + +/// Create an `AccountSharedData` with data initialized to +/// `UpgradeableLoaderState::Program` populated with the target's new data +/// account address. +/// +/// Note that the account's data is initialized manually, but the rest of the +/// account's fields are inherited from the source program account, including +/// the lamports. +fn new_target_program_account( + target: &TargetBuiltin, + source: &SourceUpgradeableBpf, +) -> Result { + let state = UpgradeableLoaderState::Program { + programdata_address: target.program_data_address, + }; + let data = bincode::serialize(&state)?; + // The source program account has the same state, so it should already have + // a sufficient lamports balance to cover rent for this state. 
+ // Out of an abundance of caution, first ensure the source program + // account's data is the same length as the serialized state. + if source.program_account.data().len() != data.len() { + return Err(CoreBpfMigrationError::InvalidProgramAccount( + source.program_address, + )); + } + // Then copy the source account's contents and overwrite the data with the + // newly created target program account data. + let mut account = source.program_account.clone(); + account.set_data_from_slice(&data); + Ok(account) +} + +/// Create an `AccountSharedData` with data initialized to +/// `UpgradeableLoaderState::ProgramData` populated with the current slot, as +/// well as the source program data account's upgrade authority and ELF. +/// +/// Note that the account's data is initialized manually, but the rest of the +/// account's fields are inherited from the source program account, including +/// the lamports. +fn new_target_program_data_account( + source: &SourceUpgradeableBpf, + slot: Slot, +) -> Result { + let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); + // Deserialize the program data metadata to get the upgrade authority. + if let UpgradeableLoaderState::ProgramData { + upgrade_authority_address, + .. + } = bincode::deserialize(&source.program_data_account.data()[..programdata_data_offset])? + { + let mut account = source.program_data_account.clone(); + // This account's data was just partially deserialized into + // `UpgradeableLoaderState`, so it's guaranteed to have at least enough + // space for the same type to be serialized in. + // The ELF should remain untouched, since it follows the + // `UpgradeableLoaderState`. + // + // Serialize the new `UpgradeableLoaderState` with the bank's current + // slot and the deserialized upgrade authority. + bincode::serialize_into( + account.data_as_mut_slice(), + &UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address, + }, + )?; + return Ok(account); + } + Err(CoreBpfMigrationError::InvalidProgramDataAccount( + source.program_data_address, + )) +} + +impl Bank { + /// In order to properly update the newly migrated Core BPF program in + /// the program cache, the migration must directly invoke the BPF + /// Upgradeable Loader's deployment functionality for validating the ELF + /// bytes against the current environment, as well as updating the program + /// cache. + /// + /// Invoking the loader's `direct_deploy_program` function will update the + /// program cache in the currently executing context (ie. `programs_loaded` + /// and `programs_modified`), but the runtime must also propagate those + /// updates to the currently active cache. + fn directly_invoke_loader_v3_deploy( + &self, + builtin_program_id: &Pubkey, + program_data_account: &AccountSharedData, + ) -> Result<(), InstructionError> { + let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); + let data_len = program_data_account.data().len(); + let elf = program_data_account + .data() + .get(programdata_data_offset..) + .ok_or(InstructionError::InvalidAccountData)?; + + // Set up the two `LoadedProgramsForTxBatch` instances, as if + // processing a new transaction batch. 
+ let programs_loaded = LoadedProgramsForTxBatch::new_from_cache( + self.slot, + self.epoch, + &self.transaction_processor.program_cache.read().unwrap(), + ); + let mut programs_modified = LoadedProgramsForTxBatch::new( + self.slot, + programs_loaded.environments.clone(), + programs_loaded.upcoming_environments.clone(), + programs_loaded.latest_root_epoch, + ); + + // Configure a dummy `InvokeContext` from the runtime's current + // environment, as well as the two `LoadedProgramsForTxBatch` + // instances configured above, then invoke the loader. + { + let compute_budget = self.runtime_config.compute_budget.unwrap_or_default(); + let mut sysvar_cache = SysvarCache::default(); + sysvar_cache.fill_missing_entries(|pubkey, set_sysvar| { + if let Some(account) = self.get_account(pubkey) { + set_sysvar(account.data()); + } + }); + + let mut dummy_transaction_context = TransactionContext::new( + vec![], + self.rent_collector.rent.clone(), + compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_trace_length, + ); + + let mut dummy_invoke_context = InvokeContext::new( + &mut dummy_transaction_context, + &sysvar_cache, + None, + compute_budget, + &programs_loaded, + &mut programs_modified, + self.feature_set.clone(), + Hash::default(), + 0, + ); + + solana_bpf_loader_program::direct_deploy_program( + &mut dummy_invoke_context, + builtin_program_id, + &bpf_loader_upgradeable::id(), + data_len, + elf, + self.slot, + )? + } + + // Update the program cache by merging with `programs_modified`, which + // should have been updated by the deploy function. + self.transaction_processor + .program_cache + .write() + .unwrap() + .merge(&programs_modified); + + Ok(()) + } + + pub(crate) fn migrate_builtin_to_core_bpf( + &mut self, + builtin_program_id: &Pubkey, + config: &CoreBpfMigrationConfig, + ) -> Result<(), CoreBpfMigrationError> { + datapoint_info!(config.datapoint_name, ("slot", self.slot, i64)); + + let target = + TargetBuiltin::new_checked(self, builtin_program_id, &config.migration_target)?; + let source = SourceUpgradeableBpf::new_checked(self, &config.source_program_id)?; + + // Attempt serialization first before modifying the bank. + let new_target_program_account = new_target_program_account(&target, &source)?; + let new_target_program_data_account = new_target_program_data_account(&source, self.slot)?; + + // Gather old and new account data sizes, for updating the bank's + // accounts data size delta off-chain. + // The old data size is the total size of all accounts involved. + // The new data size is the total size of the source program accounts, + // since the target program account is replaced with a new program + // account of the same size as the source program account, and the + // source program data account is copied to the target program data + // account before both source program accounts are cleared. + let target_program_len = target.program_account.data().len(); + let source_program_len = source.program_account.data().len(); + let source_program_data_len = source.program_data_account.data().len(); + let old_data_size = checked_add( + target_program_len, + checked_add(source_program_len, source_program_data_len)?, + )?; + let new_data_size = checked_add(source_program_len, source_program_data_len)?; + + // Deploy the new target Core BPF program. + // This step will validate the program ELF against the current runtime + // environment, as well as update the program cache. 
+ self.directly_invoke_loader_v3_deploy( + &target.program_address, + &source.program_data_account, + )?; + + // Burn lamports from the target program account, since it will be + // replaced. + self.capitalization + .fetch_sub(target.program_account.lamports(), Relaxed); + + // Replace the target builtin account with the + // `new_target_program_account` and clear the source program account. + self.store_account(&target.program_address, &new_target_program_account); + self.store_account(&source.program_address, &AccountSharedData::default()); + + // Copy the source program data account into the account at the target + // builtin program's data address, which was verified to be empty by + // `TargetBuiltin::new_checked`, then clear the source program data + // account. + self.store_account( + &target.program_data_address, + &new_target_program_data_account, + ); + self.store_account(&source.program_data_address, &AccountSharedData::default()); + + // Remove the built-in program from the bank's list of built-ins. + self.builtin_program_ids.remove(&target.program_address); + + // Update the account data size delta. + self.calculate_and_update_accounts_data_size_delta_off_chain(old_data_size, new_data_size); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::bank::tests::create_simple_test_bank, + assert_matches::assert_matches, + solana_program_runtime::loaded_programs::{LoadedProgram, LoadedProgramType}, + solana_sdk::{ + account_utils::StateMut, + bpf_loader_upgradeable::{self, get_program_data_address}, + native_loader, + }, + }; + + const TEST_ELF: &[u8] = + include_bytes!("../../../../../programs/bpf_loader/test_elfs/out/noop_aligned.so"); + + const PROGRAM_DATA_OFFSET: usize = UpgradeableLoaderState::size_of_programdata_metadata(); + + struct TestContext { + builtin_id: Pubkey, + source_program_id: Pubkey, + upgrade_authority_address: Option, + elf: Vec, + } + impl TestContext { + // Initialize some test values and set up the source BPF upgradeable + // program in the bank. + fn new(bank: &Bank) -> Self { + let builtin_id = Pubkey::new_unique(); + let source_program_id = Pubkey::new_unique(); + let upgrade_authority_address = Some(Pubkey::new_unique()); + let elf = TEST_ELF.to_vec(); + + let source_program_data_address = get_program_data_address(&source_program_id); + + let source_program_account = { + let data = bincode::serialize(&UpgradeableLoaderState::Program { + programdata_address: source_program_data_address, + }) + .unwrap(); + + let data_len = data.len(); + let lamports = bank.get_minimum_balance_for_rent_exemption(data_len); + + let mut account = + AccountSharedData::new(lamports, data_len, &bpf_loader_upgradeable::id()); + account.set_data(data); + account + }; + + let source_program_data_account = { + let mut data = bincode::serialize(&UpgradeableLoaderState::ProgramData { + slot: 99, // Arbitrary slot for testing. 
+ upgrade_authority_address, + }) + .unwrap(); + data.extend_from_slice(&elf); + + let data_len = data.len(); + let lamports = bank.get_minimum_balance_for_rent_exemption(data_len); + + let mut account = + AccountSharedData::new(lamports, data_len, &bpf_loader_upgradeable::id()); + account.set_data(data); + account + }; + + bank.store_account_and_update_capitalization( + &source_program_id, + &source_program_account, + ); + bank.store_account_and_update_capitalization( + &source_program_data_address, + &source_program_data_account, + ); + + Self { + builtin_id, + source_program_id, + upgrade_authority_address, + elf, + } + } + + // Evaluate the account state of the builtin and source post-migration. + // Ensure the builtin program account is now a BPF upgradeable program, + // the source program account and data account have been cleared, and + // the bank's builtin IDs and cache have been updated. + fn run_program_checks_post_migration(&self, bank: &Bank) { + // Verify both the source program account and source program data + // account have been cleared. + assert!(bank.get_account(&self.source_program_id).is_none()); + assert!(bank + .get_account(&get_program_data_address(&self.source_program_id)) + .is_none()); + + let program_account = bank.get_account(&self.builtin_id).unwrap(); + let program_data_address = get_program_data_address(&self.builtin_id); + + // Program account is owned by the upgradeable loader. + assert_eq!(program_account.owner(), &bpf_loader_upgradeable::id()); + + // Program account has the correct state, with a pointer to its program + // data address. + let program_account_state: UpgradeableLoaderState = program_account.state().unwrap(); + assert_eq!( + program_account_state, + UpgradeableLoaderState::Program { + programdata_address: program_data_address + } + ); + + let program_data_account = bank.get_account(&program_data_address).unwrap(); + + // Program data account is owned by the upgradeable loader. + assert_eq!(program_data_account.owner(), &bpf_loader_upgradeable::id()); + + // Program data account has the correct state. + // It should exactly match the original, including upgrade authority + // and slot. + let program_data_account_state_metadata: UpgradeableLoaderState = + bincode::deserialize(&program_data_account.data()[..PROGRAM_DATA_OFFSET]).unwrap(); + assert_eq!( + program_data_account_state_metadata, + UpgradeableLoaderState::ProgramData { + slot: bank.slot, // _Not_ the original deployment slot + upgrade_authority_address: self.upgrade_authority_address // Preserved + }, + ); + assert_eq!( + &program_data_account.data()[PROGRAM_DATA_OFFSET..], + &self.elf, + ); + + // The bank's builtins should no longer contain the builtin + // program ID. + assert!(!bank.builtin_program_ids.contains(&self.builtin_id)); + + // The cache should contain the target program. + let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let entries = program_cache.get_flattened_entries(true, true); + let target_entry = entries + .iter() + .find(|(program_id, _)| program_id == &self.builtin_id) + .map(|(_, entry)| entry) + .unwrap(); + + // The target program entry should be updated. + assert_eq!(target_entry.account_size, program_data_account.data().len()); + assert_eq!(target_entry.deployment_slot, bank.slot()); + assert_eq!(target_entry.effective_slot, bank.slot() + 1); + assert_eq!(target_entry.latest_access_slot.load(Relaxed), bank.slot()); + + // The target program entry should now be a BPF program. 
+ assert_matches!(target_entry.program, LoadedProgramType::LegacyV1(..)); + } + } + + #[test] + fn test_migrate_builtin() { + let mut bank = create_simple_test_bank(0); + + let test_context = TestContext::new(&bank); + + let TestContext { + builtin_id, + source_program_id, + .. + } = test_context; + + // This will be checked by `TargetBuiltinProgram::new_checked`, but set + // up the mock builtin and ensure it exists as configured. + let builtin_account = { + let builtin_name = String::from("test_builtin"); + let account = + AccountSharedData::new_data(1, &builtin_name, &native_loader::id()).unwrap(); + bank.store_account_and_update_capitalization(&builtin_id, &account); + bank.add_builtin(builtin_id, builtin_name, LoadedProgram::default()); + account + }; + assert_eq!(&bank.get_account(&builtin_id).unwrap(), &builtin_account); + + let core_bpf_migration_config = CoreBpfMigrationConfig { + source_program_id, + feature_id: Pubkey::new_unique(), + migration_target: CoreBpfMigrationTargetType::Builtin, + datapoint_name: "test_migrate_builtin", + }; + + // Gather bank information to check later. + let bank_pre_migration_capitalization = bank.capitalization(); + let bank_pre_migration_accounts_data_size_delta_off_chain = + bank.accounts_data_size_delta_off_chain.load(Relaxed); + + // Perform the migration. + bank.migrate_builtin_to_core_bpf(&builtin_id, &core_bpf_migration_config) + .unwrap(); + + // Run the post-migration program checks. + test_context.run_program_checks_post_migration(&bank); + + // The bank's capitalization should reflect the burned lamports + // from the replaced builtin program account. + assert_eq!( + bank.capitalization(), + bank_pre_migration_capitalization - builtin_account.lamports() + ); + + // The bank's accounts data size delta off-chain should reflect the + // change in data size from the replaced builtin program account. + assert_eq!( + bank.accounts_data_size_delta_off_chain.load(Relaxed), + bank_pre_migration_accounts_data_size_delta_off_chain + - builtin_account.data().len() as i64, + ); + } + + #[test] + fn test_migrate_stateless_builtin() { + let mut bank = create_simple_test_bank(0); + + let test_context = TestContext::new(&bank); + + let TestContext { + builtin_id, + source_program_id, + .. + } = test_context; + + // This will be checked by `TargetBuiltinProgram::new_checked`, but + // assert the stateless builtin account doesn't exist. + assert!(bank.get_account(&builtin_id).is_none()); + + let core_bpf_migration_config = CoreBpfMigrationConfig { + source_program_id, + feature_id: Pubkey::new_unique(), + migration_target: CoreBpfMigrationTargetType::Stateless, + datapoint_name: "test_migrate_stateless_builtin", + }; + + // Gather bank information to check later. + let bank_pre_migration_capitalization = bank.capitalization(); + let bank_pre_migration_accounts_data_size_delta_off_chain = + bank.accounts_data_size_delta_off_chain.load(Relaxed); + + // Perform the migration. + bank.migrate_builtin_to_core_bpf(&builtin_id, &core_bpf_migration_config) + .unwrap(); + + // Run the post-migration program checks. + test_context.run_program_checks_post_migration(&bank); + + // The bank's capitalization should be exactly the same. + assert_eq!(bank.capitalization(), bank_pre_migration_capitalization); + + // The bank's accounts data size delta off-chain should be exactly the + // same. 
+ assert_eq!( + bank.accounts_data_size_delta_off_chain.load(Relaxed), + bank_pre_migration_accounts_data_size_delta_off_chain, + ); + } +} From c29a2392fce5385c67c27dcf109faa2b76021b2e Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 1 Apr 2024 18:05:57 -0600 Subject: [PATCH 127/153] Reorganize partitioned epoch rewards runtime code, 1 of 5 (#511) * Add bank::partitioned_epoch_rewards submodule * Move helper structs and types to submodule * Move partitioned-rewards-specific Bank methods to submodule * Move unit tests into submodule * Update BankAbiTestWrapperNewer frozen_abi hash --- runtime/src/bank.rs | 174 +-------- .../src/bank/partitioned_epoch_rewards/mod.rs | 369 ++++++++++++++++++ runtime/src/bank/serde_snapshot.rs | 6 +- runtime/src/bank/tests.rs | 169 +------- runtime/src/epoch_rewards_hasher.rs | 2 +- runtime/src/serde_snapshot/newer.rs | 2 +- 6 files changed, 382 insertions(+), 340 deletions(-) create mode 100644 runtime/src/bank/partitioned_epoch_rewards/mod.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f9a785eb9b5a57..47a63b9fc98962 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -41,6 +41,11 @@ use { bank::{ builtins::{BuiltinPrototype, BUILTINS}, metrics::*, + partitioned_epoch_rewards::{ + CalculateRewardsAndDistributeVoteRewardsResult, EpochRewardCalculateParamInfo, + EpochRewardStatus, PartitionedRewardsCalculation, RewardInterval, + StakeRewardCalculationPartitioned, StakeRewards, VoteRewardsAccounts, + }, }, bank_forks::BankForks, epoch_rewards_hasher::hash_rewards_into_partitions, @@ -83,7 +88,6 @@ use { ancestors::{Ancestors, AncestorsForSerialization}, blockhash_queue::BlockhashQueue, epoch_accounts_hash::EpochAccountsHash, - partitioned_rewards::PartitionedEpochRewardsConfig, sorted_storages::SortedStorages, stake_rewards::StakeReward, storable_accounts::StorableAccounts, @@ -173,7 +177,7 @@ use { }, }, solana_system_program::{get_system_account_kind, SystemAccountKind}, - solana_vote::vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, + solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, std::{ borrow::Cow, @@ -220,6 +224,7 @@ pub mod builtins; pub mod epoch_accounts_hash_utils; mod fee_distribution; mod metrics; +pub(crate) mod partitioned_epoch_rewards; mod serde_snapshot; mod sysvar_cache; #[cfg(test)] @@ -642,27 +647,6 @@ impl AbiExample for OptionalDropCallback { } } -#[derive(AbiExample, Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) struct StartBlockHeightAndRewards { - /// the block height of the slot at which rewards distribution began - pub(crate) start_block_height: u64, - /// calculated epoch rewards pending distribution, outer Vec is by partition (one partition per block) - pub(crate) stake_rewards_by_partition: Arc>, -} - -/// Represent whether bank is in the reward phase or not. -#[derive(AbiExample, AbiEnumVisitor, Debug, Clone, PartialEq, Serialize, Deserialize, Default)] -pub(crate) enum EpochRewardStatus { - /// this bank is in the reward phase. - /// Contents are the start point for epoch reward calculation, - /// i.e. parent_slot and parent_block height for the starting - /// block of the current epoch. - Active(StartBlockHeightAndRewards), - /// this bank is outside of the rewarding phase. - #[default] - Inactive, -} - /// Manager for the state of all accounts and programs after processing its entries. 
/// AbiExample is needed even without Serialize/Deserialize; actual (de-)serialization /// are implemented elsewhere for versioning @@ -865,56 +849,6 @@ struct VoteReward { } type VoteRewards = DashMap; -#[derive(Debug, Default)] -struct VoteRewardsAccounts { - /// reward info for each vote account pubkey. - /// This type is used by `update_reward_history()` - rewards: Vec<(Pubkey, RewardInfo)>, - /// corresponds to pubkey in `rewards` - /// Some if account is to be stored. - /// None if to be skipped. - accounts_to_store: Vec>, -} - -/// hold reward calc info to avoid recalculation across functions -struct EpochRewardCalculateParamInfo<'a> { - stake_history: StakeHistory, - stake_delegations: Vec<(&'a Pubkey, &'a StakeAccount)>, - cached_vote_accounts: &'a VoteAccounts, -} - -/// Hold all results from calculating the rewards for partitioned distribution. -/// This struct exists so we can have a function which does all the calculation with no -/// side effects. -struct PartitionedRewardsCalculation { - vote_account_rewards: VoteRewardsAccounts, - stake_rewards_by_partition: StakeRewardCalculationPartitioned, - old_vote_balance_and_staked: u64, - validator_rewards: u64, - validator_rate: f64, - foundation_rate: f64, - prev_epoch_duration_in_years: f64, - capitalization: u64, -} - -/// result of calculating the stake rewards at beginning of new epoch -struct StakeRewardCalculationPartitioned { - /// each individual stake account to reward, grouped by partition - stake_rewards_by_partition: Vec, - /// total lamports across all `stake_rewards` - total_stake_rewards_lamports: u64, -} - -struct CalculateRewardsAndDistributeVoteRewardsResult { - /// total rewards for the epoch (including both vote rewards and stake rewards) - total_rewards: u64, - /// distributed vote rewards - distributed_rewards: u64, - /// stake rewards that still need to be distributed, grouped by partition - stake_rewards_by_partition: Vec, -} - -pub(crate) type StakeRewards = Vec; #[derive(Debug, Default)] pub struct NewBankOptions { @@ -950,14 +884,6 @@ struct StakeRewardCalculation { total_stake_rewards_lamports: u64, } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub(super) enum RewardInterval { - /// the slot within the epoch is INSIDE the reward distribution interval - InsideInterval, - /// the slot within the epoch is OUTSIDE the reward distribution interval - OutsideInterval, -} - impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { @@ -1144,76 +1070,6 @@ impl Bank { rent_collector.clone_with_epoch(epoch) } - fn is_partitioned_rewards_feature_enabled(&self) -> bool { - self.feature_set - .is_active(&feature_set::enable_partitioned_epoch_reward::id()) - } - - pub(crate) fn set_epoch_reward_status_active( - &mut self, - stake_rewards_by_partition: Vec, - ) { - self.epoch_reward_status = EpochRewardStatus::Active(StartBlockHeightAndRewards { - start_block_height: self.block_height, - stake_rewards_by_partition: Arc::new(stake_rewards_by_partition), - }); - } - - fn partitioned_epoch_rewards_config(&self) -> &PartitionedEpochRewardsConfig { - &self - .rc - .accounts - .accounts_db - .partitioned_epoch_rewards_config - } - - /// # stake accounts to store in one block during partitioned reward interval - fn partitioned_rewards_stake_account_stores_per_block(&self) -> u64 { - self.partitioned_epoch_rewards_config() - .stake_account_stores_per_block - } - - /// reward calculation happens synchronously during the first block of the epoch boundary. 
- /// So, # blocks for reward calculation is 1. - fn get_reward_calculation_num_blocks(&self) -> Slot { - self.partitioned_epoch_rewards_config() - .reward_calculation_num_blocks - } - - /// Calculate the number of blocks required to distribute rewards to all stake accounts. - fn get_reward_distribution_num_blocks(&self, rewards: &StakeRewards) -> u64 { - let total_stake_accounts = rewards.len(); - if self.epoch_schedule.warmup && self.epoch < self.first_normal_epoch() { - 1 - } else { - const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10; - let num_chunks = solana_accounts_db::accounts_hash::AccountsHasher::div_ceil( - total_stake_accounts, - self.partitioned_rewards_stake_account_stores_per_block() as usize, - ) as u64; - - // Limit the reward credit interval to 10% of the total number of slots in a epoch - num_chunks.clamp( - 1, - (self.epoch_schedule.slots_per_epoch / MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH).max(1), - ) - } - } - - /// Return `RewardInterval` enum for current bank - fn get_reward_interval(&self) -> RewardInterval { - if matches!(self.epoch_reward_status, EpochRewardStatus::Active(_)) { - RewardInterval::InsideInterval - } else { - RewardInterval::OutsideInterval - } - } - - /// For testing only - pub fn force_reward_interval_end_for_tests(&mut self) { - self.epoch_reward_status = EpochRewardStatus::Inactive; - } - fn _new_from_parent( parent: Arc, collector_id: &Pubkey, @@ -1642,13 +1498,6 @@ impl Bank { } } - fn force_partition_rewards_in_first_block_of_epoch(&self) -> bool { - self.partitioned_epoch_rewards_config() - .test_enable_partitioned_rewards - && self.get_reward_calculation_num_blocks() == 0 - && self.partitioned_rewards_stake_account_stores_per_block() == u64::MAX - } - /// Begin the process of calculating and distributing rewards. /// This process can take multiple slots. fn begin_partitioned_rewards( @@ -3591,15 +3440,6 @@ impl Bank { report_partitioned_reward_metrics(self, metrics); } - /// true if it is ok to run partitioned rewards code. - /// This means the feature is activated or certain testing situations. 
- fn is_partitioned_rewards_code_enabled(&self) -> bool { - self.is_partitioned_rewards_feature_enabled() - || self - .partitioned_epoch_rewards_config() - .test_enable_partitioned_rewards - } - /// Helper fn to log epoch_rewards sysvar fn log_epoch_rewards_sysvar(&self, prefix: &str) { if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs new file mode 100644 index 00000000000000..21f4fdcabf79ed --- /dev/null +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -0,0 +1,369 @@ +use { + super::Bank, + crate::{stake_account::StakeAccount, stake_history::StakeHistory}, + solana_accounts_db::{ + partitioned_rewards::PartitionedEpochRewardsConfig, stake_rewards::StakeReward, + }, + solana_sdk::{ + account::AccountSharedData, clock::Slot, feature_set, pubkey::Pubkey, + reward_info::RewardInfo, stake::state::Delegation, + }, + solana_vote::vote_account::VoteAccounts, + std::sync::Arc, +}; + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub(super) enum RewardInterval { + /// the slot within the epoch is INSIDE the reward distribution interval + InsideInterval, + /// the slot within the epoch is OUTSIDE the reward distribution interval + OutsideInterval, +} + +#[derive(AbiExample, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub(crate) struct StartBlockHeightAndRewards { + /// the block height of the slot at which rewards distribution began + pub(crate) start_block_height: u64, + /// calculated epoch rewards pending distribution, outer Vec is by partition (one partition per block) + pub(crate) stake_rewards_by_partition: Arc>, +} + +/// Represent whether bank is in the reward phase or not. +#[derive(AbiExample, AbiEnumVisitor, Debug, Clone, PartialEq, Serialize, Deserialize, Default)] +pub(crate) enum EpochRewardStatus { + /// this bank is in the reward phase. + /// Contents are the start point for epoch reward calculation, + /// i.e. parent_slot and parent_block height for the starting + /// block of the current epoch. + Active(StartBlockHeightAndRewards), + /// this bank is outside of the rewarding phase. + #[default] + Inactive, +} + +#[derive(Debug, Default)] +pub(super) struct VoteRewardsAccounts { + /// reward info for each vote account pubkey. + /// This type is used by `update_reward_history()` + pub(super) rewards: Vec<(Pubkey, RewardInfo)>, + /// corresponds to pubkey in `rewards` + /// Some if account is to be stored. + /// None if to be skipped. + pub(super) accounts_to_store: Vec>, +} + +/// hold reward calc info to avoid recalculation across functions +pub(super) struct EpochRewardCalculateParamInfo<'a> { + pub(super) stake_history: StakeHistory, + pub(super) stake_delegations: Vec<(&'a Pubkey, &'a StakeAccount)>, + pub(super) cached_vote_accounts: &'a VoteAccounts, +} + +/// Hold all results from calculating the rewards for partitioned distribution. +/// This struct exists so we can have a function which does all the calculation with no +/// side effects. 
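The struct that follows, like most items moved into the new submodule, leans on Rust's scoped visibility: `pub(super)` keeps an item visible to the parent `bank` module only, while `pub(in crate::bank)` also reaches sibling submodules of `bank`. A minimal sketch of the two scopes, with hypothetical placeholder names:

    mod bank {
        pub mod partitioned_epoch_rewards {
            // `pub(super)` items are visible to the parent module (`bank`) only.
            pub(super) struct RewardsCalcSketch;

            // `pub(in crate::bank)` items are visible anywhere under
            // `crate::bank`, including sibling submodules.
            pub(in crate::bank) fn create_sysvar_sketch() {}
        }

        pub fn run() {
            let _ = partitioned_epoch_rewards::RewardsCalcSketch;
            partitioned_epoch_rewards::create_sysvar_sketch();
        }
    }

    fn main() {
        bank::run();
    }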
+pub(super) struct PartitionedRewardsCalculation {
+    pub(super) vote_account_rewards: VoteRewardsAccounts,
+    pub(super) stake_rewards_by_partition: StakeRewardCalculationPartitioned,
+    pub(super) old_vote_balance_and_staked: u64,
+    pub(super) validator_rewards: u64,
+    pub(super) validator_rate: f64,
+    pub(super) foundation_rate: f64,
+    pub(super) prev_epoch_duration_in_years: f64,
+    pub(super) capitalization: u64,
+}
+
+/// result of calculating the stake rewards at beginning of new epoch
+pub(super) struct StakeRewardCalculationPartitioned {
+    /// each individual stake account to reward, grouped by partition
+    pub(super) stake_rewards_by_partition: Vec<StakeRewards>,
+    /// total lamports across all `stake_rewards`
+    pub(super) total_stake_rewards_lamports: u64,
+}
+
+pub(super) struct CalculateRewardsAndDistributeVoteRewardsResult {
+    /// total rewards for the epoch (including both vote rewards and stake rewards)
+    pub(super) total_rewards: u64,
+    /// distributed vote rewards
+    pub(super) distributed_rewards: u64,
+    /// stake rewards that still need to be distributed, grouped by partition
+    pub(super) stake_rewards_by_partition: Vec<StakeRewards>,
+}
+
+pub(crate) type StakeRewards = Vec<StakeReward>;
+
+impl Bank {
+    pub(super) fn is_partitioned_rewards_feature_enabled(&self) -> bool {
+        self.feature_set
+            .is_active(&feature_set::enable_partitioned_epoch_reward::id())
+    }
+
+    pub(crate) fn set_epoch_reward_status_active(
+        &mut self,
+        stake_rewards_by_partition: Vec<StakeRewards>,
+    ) {
+        self.epoch_reward_status = EpochRewardStatus::Active(StartBlockHeightAndRewards {
+            start_block_height: self.block_height,
+            stake_rewards_by_partition: Arc::new(stake_rewards_by_partition),
+        });
+    }
+
+    pub(super) fn partitioned_epoch_rewards_config(&self) -> &PartitionedEpochRewardsConfig {
+        &self
+            .rc
+            .accounts
+            .accounts_db
+            .partitioned_epoch_rewards_config
+    }
+
+    /// # stake accounts to store in one block during partitioned reward interval
+    pub(super) fn partitioned_rewards_stake_account_stores_per_block(&self) -> u64 {
+        self.partitioned_epoch_rewards_config()
+            .stake_account_stores_per_block
+    }
+
+    /// reward calculation happens synchronously during the first block of the epoch boundary.
+    /// So, # blocks for reward calculation is 1.
+    pub(super) fn get_reward_calculation_num_blocks(&self) -> Slot {
+        self.partitioned_epoch_rewards_config()
+            .reward_calculation_num_blocks
+    }
+
+    /// Calculate the number of blocks required to distribute rewards to all stake accounts.
+    pub(super) fn get_reward_distribution_num_blocks(&self, rewards: &StakeRewards) -> u64 {
+        let total_stake_accounts = rewards.len();
+        if self.epoch_schedule.warmup && self.epoch < self.first_normal_epoch() {
+            1
+        } else {
+            const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10;
+            let num_chunks = solana_accounts_db::accounts_hash::AccountsHasher::div_ceil(
+                total_stake_accounts,
+                self.partitioned_rewards_stake_account_stores_per_block() as usize,
+            ) as u64;
+
+            // Limit the reward credit interval to 10% of the total number of slots in an epoch
+            num_chunks.clamp(
+                1,
+                (self.epoch_schedule.slots_per_epoch / MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH).max(1),
+            )
+        }
+    }
+
+    /// Return `RewardInterval` enum for current bank
+    pub(super) fn get_reward_interval(&self) -> RewardInterval {
+        if matches!(self.epoch_reward_status, EpochRewardStatus::Active(_)) {
+            RewardInterval::InsideInterval
+        } else {
+            RewardInterval::OutsideInterval
+        }
+    }
+
+    /// true if it is ok to run partitioned rewards code.
+    /// This means the feature is activated or certain testing situations. 
+    pub(super) fn is_partitioned_rewards_code_enabled(&self) -> bool {
+        self.is_partitioned_rewards_feature_enabled()
+            || self
+                .partitioned_epoch_rewards_config()
+                .test_enable_partitioned_rewards
+    }
+
+    /// For testing only
+    pub fn force_reward_interval_end_for_tests(&mut self) {
+        self.epoch_reward_status = EpochRewardStatus::Inactive;
+    }
+
+    pub(super) fn force_partition_rewards_in_first_block_of_epoch(&self) -> bool {
+        self.partitioned_epoch_rewards_config()
+            .test_enable_partitioned_rewards
+            && self.get_reward_calculation_num_blocks() == 0
+            && self.partitioned_rewards_stake_account_stores_per_block() == u64::MAX
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::bank::tests::create_genesis_config,
+        solana_accounts_db::{
+            accounts_db::{
+                AccountShrinkThreshold, AccountsDbConfig, ACCOUNTS_DB_CONFIG_FOR_TESTING,
+            },
+            accounts_index::AccountSecondaryIndexes,
+            partitioned_rewards::TestPartitionedEpochRewards,
+        },
+        solana_program_runtime::runtime_config::RuntimeConfig,
+        solana_sdk::{epoch_schedule::EpochSchedule, native_token::LAMPORTS_PER_SOL},
+    };
+
+    impl Bank {
+        /// Return the total number of blocks in reward interval (including both calculation and crediting).
+        pub(in crate::bank) fn get_reward_total_num_blocks(&self, rewards: &StakeRewards) -> u64 {
+            self.get_reward_calculation_num_blocks()
+                + self.get_reward_distribution_num_blocks(rewards)
+        }
+    }
+
+    #[test]
+    fn test_force_reward_interval_end() {
+        let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL);
+        let mut bank = Bank::new_for_tests(&genesis_config);
+
+        let expected_num = 100;
+
+        let stake_rewards = (0..expected_num)
+            .map(|_| StakeReward::new_random())
+            .collect::<Vec<_>>();
+
+        bank.set_epoch_reward_status_active(vec![stake_rewards]);
+        assert!(bank.get_reward_interval() == RewardInterval::InsideInterval);
+
+        bank.force_reward_interval_end_for_tests();
+        assert!(bank.get_reward_interval() == RewardInterval::OutsideInterval);
+    }
+
+    #[test]
+    fn test_is_partitioned_reward_feature_enable() {
+        let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL);
+
+        let mut bank = Bank::new_for_tests(&genesis_config);
+        assert!(!bank.is_partitioned_rewards_feature_enabled());
+        bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id());
+        assert!(bank.is_partitioned_rewards_feature_enabled());
+    }
+
+    /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during small epoch
+    /// The num_credit_blocks should be capped at 10% of the total number of blocks in the epoch. 
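The cap just described comes from the `clamp` in `get_reward_distribution_num_blocks`. A standalone sketch of that arithmetic with `div_ceil` inlined, omitting the warmup-epoch branch that always returns 1; the constants mirror the short-epoch test that follows (10 stake-account stores per block, 32-slot epoch):

    const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10;

    fn reward_distribution_num_blocks(
        total_stake_accounts: u64,
        stores_per_block: u64,
        slots_per_epoch: u64,
    ) -> u64 {
        // div_ceil: number of store-sized chunks needed to cover every account.
        let num_chunks = (total_stake_accounts + stores_per_block - 1) / stores_per_block;
        // Cap the distribution interval at 10% of the epoch, but never below 1.
        num_chunks.clamp(
            1,
            (slots_per_epoch / MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH).max(1),
        )
    }

    fn main() {
        // Values mirror the short-epoch test: 32-slot epoch, 10 stores/block.
        assert_eq!(reward_distribution_num_blocks(0, 10, 32), 1);
        assert_eq!(reward_distribution_num_blocks(25, 10, 32), 3);
        assert_eq!(reward_distribution_num_blocks(50, 10, 32), 3); // capped at 32/10
    }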
+ #[test] + fn test_get_reward_distribution_num_blocks_cap() { + let (mut genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(32, 32, false); + + // Config stake reward distribution to be 10 per block + let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); + accounts_db_config.test_partitioned_epoch_rewards = + TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { + reward_calculation_num_blocks: 1, + stake_account_stores_per_block: 10, + }; + + let bank = Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::new(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config), + None, + Some(Pubkey::new_unique()), + Arc::default(), + ); + + let stake_account_stores_per_block = + bank.partitioned_rewards_stake_account_stores_per_block(); + assert_eq!(stake_account_stores_per_block, 10); + + let check_num_reward_distribution_blocks = + |num_stakes: u64, + expected_num_reward_distribution_blocks: u64, + expected_num_reward_computation_blocks: u64| { + // Given the short epoch, i.e. 32 slots, we should cap the number of reward distribution blocks to 32/10 = 3. + let stake_rewards = (0..num_stakes) + .map(|_| StakeReward::new_random()) + .collect::>(); + + assert_eq!( + bank.get_reward_distribution_num_blocks(&stake_rewards), + expected_num_reward_distribution_blocks + ); + assert_eq!( + bank.get_reward_calculation_num_blocks(), + expected_num_reward_computation_blocks + ); + assert_eq!( + bank.get_reward_total_num_blocks(&stake_rewards), + bank.get_reward_distribution_num_blocks(&stake_rewards) + + bank.get_reward_calculation_num_blocks(), + ); + }; + + for test_record in [ + // num_stakes, expected_num_reward_distribution_blocks, expected_num_reward_computation_blocks + (0, 1, 1), + (1, 1, 1), + (stake_account_stores_per_block, 1, 1), + (2 * stake_account_stores_per_block - 1, 2, 1), + (2 * stake_account_stores_per_block, 2, 1), + (3 * stake_account_stores_per_block - 1, 3, 1), + (3 * stake_account_stores_per_block, 3, 1), + (4 * stake_account_stores_per_block, 3, 1), // cap at 3 + (5 * stake_account_stores_per_block, 3, 1), //cap at 3 + ] { + check_num_reward_distribution_blocks(test_record.0, test_record.1, test_record.2); + } + } + + /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during normal epoch gives the expected result + #[test] + fn test_get_reward_distribution_num_blocks_normal() { + solana_logger::setup(); + let (mut genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); + + let bank = Bank::new_for_tests(&genesis_config); + + // Given 8k rewards, it will take 2 blocks to credit all the rewards + let expected_num = 8192; + let stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + assert_eq!(bank.get_reward_distribution_num_blocks(&stake_rewards), 2); + assert_eq!(bank.get_reward_calculation_num_blocks(), 1); + assert_eq!( + bank.get_reward_total_num_blocks(&stake_rewards), + bank.get_reward_distribution_num_blocks(&stake_rewards) + + bank.get_reward_calculation_num_blocks(), + ); + } + + /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during warm up epoch gives the expected 
result. + /// The num_credit_blocks should be 1 during warm up epoch. + #[test] + fn test_get_reward_distribution_num_blocks_warmup() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + + let bank = Bank::new_for_tests(&genesis_config); + let rewards = vec![]; + assert_eq!(bank.get_reward_distribution_num_blocks(&rewards), 1); + assert_eq!(bank.get_reward_calculation_num_blocks(), 1); + assert_eq!( + bank.get_reward_total_num_blocks(&rewards), + bank.get_reward_distribution_num_blocks(&rewards) + + bank.get_reward_calculation_num_blocks(), + ); + } +} diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 372baec2e4aee0..2ddaf8adf5ba92 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -3,8 +3,8 @@ mod tests { use { crate::{ bank::{ - epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, EpochRewardStatus, - StartBlockHeightAndRewards, + epoch_accounts_hash_utils, partitioned_epoch_rewards::StartBlockHeightAndRewards, + test_utils as bank_test_utils, Bank, EpochRewardStatus, }, genesis_utils::activate_all_features, serde_snapshot::{ @@ -605,7 +605,7 @@ mod tests { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "7BH2s2Y1yKy396c3ixC4TTyvvpkyenAvWDSiZvY5yb7P")] + #[frozen_abi(digest = "8BVfyLYrPt1ranknjF4sLePjZaZjpKXXrHt4wKf47g3W")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index dc3a3121558ea3..0b9f2757deba74 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -191,7 +191,7 @@ fn create_genesis_config_no_tx_fee(lamports: u64) -> (GenesisConfig, Keypair) { (genesis_config, mint_keypair) } -fn create_genesis_config(lamports: u64) -> (GenesisConfig, Keypair) { +pub(in crate::bank) fn create_genesis_config(lamports: u64) -> (GenesisConfig, Keypair) { solana_sdk::genesis_config::create_genesis_config(lamports) } @@ -12225,34 +12225,6 @@ fn test_rewards_point_calculation_empty() { assert!(point_value.is_none()); } -#[test] -fn test_force_reward_interval_end() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - - let expected_num = 100; - - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - bank.set_epoch_reward_status_active(vec![stake_rewards]); - assert!(bank.get_reward_interval() == RewardInterval::InsideInterval); - - bank.force_reward_interval_end_for_tests(); - assert!(bank.get_reward_interval() == RewardInterval::OutsideInterval); -} - -#[test] -fn test_is_partitioned_reward_feature_enable() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - - let mut bank = Bank::new_for_tests(&genesis_config); - assert!(!bank.is_partitioned_rewards_feature_enabled()); - bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); - assert!(bank.is_partitioned_rewards_feature_enabled()); -} - /// Test that reward partition range panics when passing out of range partition index #[test] #[should_panic(expected = "index out of bounds: the len is 10 but the index is 15")] @@ -12276,24 +12248,6 @@ fn test_get_stake_rewards_partition_range_panic() { let _range = &stake_rewards_bucket[15]; } -#[test] -fn 
test_deactivate_epoch_reward_status() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - - let expected_num = 100; - - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - bank.set_epoch_reward_status_active(vec![stake_rewards]); - - assert!(bank.get_reward_interval() == RewardInterval::InsideInterval); - bank.deactivate_epoch_reward_status(); - assert!(bank.get_reward_interval() == RewardInterval::OutsideInterval); -} - #[test] fn test_distribute_partitioned_epoch_rewards() { let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); @@ -13286,127 +13240,6 @@ fn test_calc_vote_accounts_to_store_normal() { } } -impl Bank { - /// Return the total number of blocks in reward interval (including both calculation and crediting). - fn get_reward_total_num_blocks(&self, rewards: &StakeRewards) -> u64 { - self.get_reward_calculation_num_blocks() + self.get_reward_distribution_num_blocks(rewards) - } -} - -/// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during normal epoch gives the expected result -#[test] -fn test_get_reward_distribution_num_blocks_normal() { - solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); - - let bank = Bank::new_for_tests(&genesis_config); - - // Given 8k rewards, it will take 2 blocks to credit all the rewards - let expected_num = 8192; - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - assert_eq!(bank.get_reward_distribution_num_blocks(&stake_rewards), 2); - assert_eq!(bank.get_reward_calculation_num_blocks(), 1); - assert_eq!( - bank.get_reward_total_num_blocks(&stake_rewards), - bank.get_reward_distribution_num_blocks(&stake_rewards) - + bank.get_reward_calculation_num_blocks(), - ); -} - -/// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during small epoch -/// The num_credit_blocks should be cap to 10% of the total number of blocks in the epoch. -#[test] -fn test_get_reward_distribution_num_blocks_cap() { - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config.epoch_schedule = EpochSchedule::custom(32, 32, false); - - // Config stake reward distribution to be 10 per block - let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); - accounts_db_config.test_partitioned_epoch_rewards = - TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { - reward_calculation_num_blocks: 1, - stake_account_stores_per_block: 10, - }; - - let bank = Bank::new_with_paths( - &genesis_config, - Arc::new(RuntimeConfig::default()), - Vec::new(), - None, - None, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - false, - Some(accounts_db_config), - None, - Some(Pubkey::new_unique()), - Arc::default(), - ); - - let stake_account_stores_per_block = bank.partitioned_rewards_stake_account_stores_per_block(); - assert_eq!(stake_account_stores_per_block, 10); - - let check_num_reward_distribution_blocks = - |num_stakes: u64, - expected_num_reward_distribution_blocks: u64, - expected_num_reward_computation_blocks: u64| { - // Given the short epoch, i.e. 
32 slots, we should cap the number of reward distribution blocks to 32/10 = 3. - let stake_rewards = (0..num_stakes) - .map(|_| StakeReward::new_random()) - .collect::>(); - - assert_eq!( - bank.get_reward_distribution_num_blocks(&stake_rewards), - expected_num_reward_distribution_blocks - ); - assert_eq!( - bank.get_reward_calculation_num_blocks(), - expected_num_reward_computation_blocks - ); - assert_eq!( - bank.get_reward_total_num_blocks(&stake_rewards), - bank.get_reward_distribution_num_blocks(&stake_rewards) - + bank.get_reward_calculation_num_blocks(), - ); - }; - - for test_record in [ - // num_stakes, expected_num_reward_distribution_blocks, expected_num_reward_computation_blocks - (0, 1, 1), - (1, 1, 1), - (stake_account_stores_per_block, 1, 1), - (2 * stake_account_stores_per_block - 1, 2, 1), - (2 * stake_account_stores_per_block, 2, 1), - (3 * stake_account_stores_per_block - 1, 3, 1), - (3 * stake_account_stores_per_block, 3, 1), - (4 * stake_account_stores_per_block, 3, 1), // cap at 3 - (5 * stake_account_stores_per_block, 3, 1), //cap at 3 - ] { - check_num_reward_distribution_blocks(test_record.0, test_record.1, test_record.2); - } -} - -/// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during warm up epoch gives the expected result. -/// The num_credit_blocks should be 1 during warm up epoch. -#[test] -fn test_get_reward_distribution_num_blocks_warmup() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - - let bank = Bank::new_for_tests(&genesis_config); - let rewards = vec![]; - assert_eq!(bank.get_reward_distribution_num_blocks(&rewards), 1); - assert_eq!(bank.get_reward_calculation_num_blocks(), 1); - assert_eq!( - bank.get_reward_total_num_blocks(&rewards), - bank.get_reward_distribution_num_blocks(&rewards) - + bank.get_reward_calculation_num_blocks(), - ); -} - #[test] fn test_calculate_stake_vote_rewards() { solana_logger::setup(); diff --git a/runtime/src/epoch_rewards_hasher.rs b/runtime/src/epoch_rewards_hasher.rs index b594b05a5cfe3b..ddf45a9095a3e8 100644 --- a/runtime/src/epoch_rewards_hasher.rs +++ b/runtime/src/epoch_rewards_hasher.rs @@ -1,5 +1,5 @@ use { - crate::bank::StakeRewards, + crate::bank::partitioned_epoch_rewards::StakeRewards, solana_sdk::{epoch_rewards_hasher::EpochRewardsHasher, hash::Hash}, }; diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 004e6e61d54868..d9c73d04422199 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -5,7 +5,7 @@ use { *, }, crate::{ - bank::EpochRewardStatus, + bank::partitioned_epoch_rewards::EpochRewardStatus, stakes::{serde_stakes_enum_compat, StakesEnum}, }, solana_accounts_db::{accounts_hash::AccountsHash, ancestors::AncestorsForSerialization}, From 16c684ef9f682501c831fc75510e519d1de3657b Mon Sep 17 00:00:00 2001 From: Tyera Date: Mon, 1 Apr 2024 19:41:48 -0600 Subject: [PATCH 128/153] Move sysvar submodule; reorg partitioned epoch rewards runtime code, 2 of 5 (#520) * Add sysvar sub-submodule * Move sysvar methods to sub-submodule * Move unit test to sysvar sub-submodule * Add new partitioned_epoch_rewards::sysvar method * Remove superfluous method --- runtime/src/bank.rs | 93 +---------- .../src/bank/partitioned_epoch_rewards/mod.rs | 20 +-- .../bank/partitioned_epoch_rewards/sysvar.rs | 154 ++++++++++++++++++ runtime/src/bank/tests.rs | 51 ------ 4 files changed, 162 insertions(+), 156 deletions(-) create mode 100644 
runtime/src/bank/partitioned_epoch_rewards/sysvar.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 47a63b9fc98962..29c01da7e66dad 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1477,27 +1477,6 @@ impl Bank { ); } - /// partitioned reward distribution is complete. - /// So, deactivate the epoch rewards sysvar. - fn deactivate_epoch_reward_status(&mut self) { - assert!(matches!( - self.epoch_reward_status, - EpochRewardStatus::Active(_) - )); - self.epoch_reward_status = EpochRewardStatus::Inactive; - if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { - if account.lamports() > 0 { - info!( - "burning {} extra lamports in EpochRewards sysvar account at slot {}", - account.lamports(), - self.slot() - ); - self.log_epoch_rewards_sysvar("burn"); - self.burn_and_purge_account(&sysvar::epoch_rewards::id(), account); - } - } - } - /// Begin the process of calculating and distributing rewards. /// This process can take multiple slots. fn begin_partitioned_rewards( @@ -1571,7 +1550,12 @@ impl Bank { ("start_block_height", start_block_height, i64), ); - self.deactivate_epoch_reward_status(); + assert!(matches!( + self.epoch_reward_status, + EpochRewardStatus::Active(_) + )); + self.epoch_reward_status = EpochRewardStatus::Inactive; + self.destroy_epoch_rewards_sysvar(); } } @@ -3440,71 +3424,6 @@ impl Bank { report_partitioned_reward_metrics(self, metrics); } - /// Helper fn to log epoch_rewards sysvar - fn log_epoch_rewards_sysvar(&self, prefix: &str) { - if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { - let epoch_rewards: sysvar::epoch_rewards::EpochRewards = - from_account(&account).unwrap(); - info!( - "{prefix} epoch_rewards sysvar: {:?}", - (account.lamports(), epoch_rewards) - ); - } else { - info!("{prefix} epoch_rewards sysvar: none"); - } - } - - /// Create EpochRewards sysvar with calculated rewards - fn create_epoch_rewards_sysvar( - &self, - total_rewards: u64, - distributed_rewards: u64, - distribution_starting_block_height: u64, - ) { - assert!(self.is_partitioned_rewards_code_enabled()); - - let epoch_rewards = sysvar::epoch_rewards::EpochRewards { - total_rewards, - distributed_rewards, - distribution_starting_block_height, - active: true, - ..sysvar::epoch_rewards::EpochRewards::default() - }; - - self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| { - let mut inherited_account_fields = - self.inherit_specially_retained_account_fields(account); - - assert!(total_rewards >= distributed_rewards); - // set the account lamports to the undistributed rewards - inherited_account_fields.0 = total_rewards - distributed_rewards; - create_account(&epoch_rewards, inherited_account_fields) - }); - - self.log_epoch_rewards_sysvar("create"); - } - - /// Update EpochRewards sysvar with distributed rewards - fn update_epoch_rewards_sysvar(&self, distributed: u64) { - assert!(self.is_partitioned_rewards_code_enabled()); - - let mut epoch_rewards: sysvar::epoch_rewards::EpochRewards = - from_account(&self.get_account(&sysvar::epoch_rewards::id()).unwrap()).unwrap(); - epoch_rewards.distribute(distributed); - - self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| { - let mut inherited_account_fields = - self.inherit_specially_retained_account_fields(account); - - let lamports = inherited_account_fields.0; - assert!(lamports >= distributed); - inherited_account_fields.0 = lamports - distributed; - create_account(&epoch_rewards, inherited_account_fields) - }); - - 
self.log_epoch_rewards_sysvar("update"); - } - fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index 21f4fdcabf79ed..5d7f405e34ac9b 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -1,3 +1,5 @@ +mod sysvar; + use { super::Bank, crate::{stake_account::StakeAccount, stake_history::StakeHistory}, @@ -232,24 +234,6 @@ mod tests { assert!(bank.is_partitioned_rewards_feature_enabled()); } - #[test] - fn test_deactivate_epoch_reward_status() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - - let expected_num = 100; - - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - bank.set_epoch_reward_status_active(vec![stake_rewards]); - - assert!(bank.get_reward_interval() == RewardInterval::InsideInterval); - bank.deactivate_epoch_reward_status(); - assert!(bank.get_reward_interval() == RewardInterval::OutsideInterval); - } - /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during small epoch /// The num_credit_blocks should be cap to 10% of the total number of blocks in the epoch. #[test] diff --git a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs new file mode 100644 index 00000000000000..b540dc2bec0fcd --- /dev/null +++ b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs @@ -0,0 +1,154 @@ +use { + super::Bank, + log::info, + solana_sdk::{ + account::{ + create_account_shared_data_with_fields as create_account, from_account, ReadableAccount, + }, + sysvar, + }, +}; + +impl Bank { + /// Helper fn to log epoch_rewards sysvar + fn log_epoch_rewards_sysvar(&self, prefix: &str) { + if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { + let epoch_rewards: sysvar::epoch_rewards::EpochRewards = + from_account(&account).unwrap(); + info!( + "{prefix} epoch_rewards sysvar: {:?}", + (account.lamports(), epoch_rewards) + ); + } else { + info!("{prefix} epoch_rewards sysvar: none"); + } + } + + /// Create EpochRewards sysvar with calculated rewards + pub(in crate::bank) fn create_epoch_rewards_sysvar( + &self, + total_rewards: u64, + distributed_rewards: u64, + distribution_starting_block_height: u64, + ) { + assert!(self.is_partitioned_rewards_code_enabled()); + + let epoch_rewards = sysvar::epoch_rewards::EpochRewards { + total_rewards, + distributed_rewards, + distribution_starting_block_height, + active: true, + ..sysvar::epoch_rewards::EpochRewards::default() + }; + + self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| { + let mut inherited_account_fields = + self.inherit_specially_retained_account_fields(account); + + assert!(total_rewards >= distributed_rewards); + // set the account lamports to the undistributed rewards + inherited_account_fields.0 = total_rewards - distributed_rewards; + create_account(&epoch_rewards, inherited_account_fields) + }); + + self.log_epoch_rewards_sysvar("create"); + } + + /// Update EpochRewards sysvar with distributed rewards + pub(in crate::bank) fn update_epoch_rewards_sysvar(&self, distributed: u64) { + assert!(self.is_partitioned_rewards_code_enabled()); + 
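// The invariant these helpers maintain: the sysvar account's lamports always
// equal total_rewards - distributed_rewards, so creation seeds the account
// with the undistributed remainder and every update debits exactly what was
// handed out. A minimal standalone sketch of that arithmetic follows
// (MockEpochRewards is illustrative only, not the real sysvar type):
struct MockEpochRewards {
    total_rewards: u64,
    distributed_rewards: u64,
}

impl MockEpochRewards {
    // Lamports the sysvar account should hold: the undistributed remainder.
    fn account_lamports(&self) -> u64 {
        self.total_rewards - self.distributed_rewards
    }

    fn distribute(&mut self, amount: u64) {
        // Mirrors the assert!(lamports >= distributed) guard just below.
        assert!(self.account_lamports() >= amount);
        self.distributed_rewards += amount;
    }
}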
+ let mut epoch_rewards: sysvar::epoch_rewards::EpochRewards = + from_account(&self.get_account(&sysvar::epoch_rewards::id()).unwrap()).unwrap(); + epoch_rewards.distribute(distributed); + + self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| { + let mut inherited_account_fields = + self.inherit_specially_retained_account_fields(account); + + let lamports = inherited_account_fields.0; + assert!(lamports >= distributed); + inherited_account_fields.0 = lamports - distributed; + create_account(&epoch_rewards, inherited_account_fields) + }); + + self.log_epoch_rewards_sysvar("update"); + } + + pub(in crate::bank) fn destroy_epoch_rewards_sysvar(&self) { + if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { + if account.lamports() > 0 { + info!( + "burning {} extra lamports in EpochRewards sysvar account at slot {}", + account.lamports(), + self.slot() + ); + self.log_epoch_rewards_sysvar("burn"); + self.burn_and_purge_account(&sysvar::epoch_rewards::id(), account); + } + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::bank::tests::create_genesis_config, + solana_sdk::{ + epoch_schedule::EpochSchedule, feature_set, hash::Hash, native_token::LAMPORTS_PER_SOL, + }, + }; + + /// Test `EpochRewards` sysvar creation, distribution, and burning. + /// This test covers the following epoch_rewards_sysvar bank member functions, i.e. + /// `create_epoch_rewards_sysvar`, `update_epoch_rewards_sysvar`, `burn_and_purge_account`. + #[test] + fn test_epoch_rewards_sysvar() { + let (mut genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + + let total_rewards = 1_000_000_000; // a large rewards so that the sysvar account is rent-exempted. 
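// Why a 1-SOL-scale figure keeps this account rent-exempt: the sysvar's
// lamports track the undistributed remainder, which must stay at or above the
// rent-exempt minimum for the account's data length. A hedged sanity check,
// where Rent::default() and the 200-byte length are stand-ins for the
// cluster's actual rent parameters and the real serialized sysvar size:
let assumed_sysvar_len = 200;
let rent_exempt_min =
    solana_sdk::rent::Rent::default().minimum_balance(assumed_sysvar_len);
assert!(total_rewards - 20 >= rent_exempt_min); // 20 lamports get distributed below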
+ + // create epoch rewards sysvar + let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { + distribution_starting_block_height: 42, + num_partitions: 0, + parent_blockhash: Hash::default(), + total_points: 0, + total_rewards, + distributed_rewards: 10, + active: true, + }; + + bank.create_epoch_rewards_sysvar(total_rewards, 10, 42); + let account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); + assert_eq!(account.lamports(), total_rewards - 10); + let epoch_rewards: sysvar::epoch_rewards::EpochRewards = from_account(&account).unwrap(); + assert_eq!(epoch_rewards, expected_epoch_rewards); + + // make a distribution from epoch rewards sysvar + bank.update_epoch_rewards_sysvar(10); + let account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); + assert_eq!(account.lamports(), total_rewards - 20); + let epoch_rewards: sysvar::epoch_rewards::EpochRewards = from_account(&account).unwrap(); + let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { + distribution_starting_block_height: 42, + num_partitions: 0, + parent_blockhash: Hash::default(), + total_points: 0, + total_rewards, + distributed_rewards: 20, + active: true, + }; + assert_eq!(epoch_rewards, expected_epoch_rewards); + + // burn epoch rewards sysvar + bank.burn_and_purge_account(&sysvar::epoch_rewards::id(), account); + let account = bank.get_account(&sysvar::epoch_rewards::id()); + assert!(account.is_none()); + } +} diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 0b9f2757deba74..5b89de28688e94 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12650,57 +12650,6 @@ fn test_rewards_computation_and_partitioned_distribution_two_blocks() { } } -/// Test `EpochRewards` sysvar creation, distribution, and burning. -/// This test covers the following epoch_rewards_sysvar bank member functions, i.e. -/// `create_epoch_rewards_sysvar`, `update_epoch_rewards_sysvar`, `burn_and_purge_account`. -#[test] -fn test_epoch_rewards_sysvar() { - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); - - let total_rewards = 1_000_000_000; // a large rewards so that the sysvar account is rent-exempted. 
- - // create epoch rewards sysvar - let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { - distribution_starting_block_height: 42, - num_partitions: 0, - parent_blockhash: Hash::default(), - total_points: 0, - total_rewards, - distributed_rewards: 10, - active: true, - }; - - bank.create_epoch_rewards_sysvar(total_rewards, 10, 42); - let account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); - assert_eq!(account.lamports(), total_rewards - 10); - let epoch_rewards: sysvar::epoch_rewards::EpochRewards = from_account(&account).unwrap(); - assert_eq!(epoch_rewards, expected_epoch_rewards); - - // make a distribution from epoch rewards sysvar - bank.update_epoch_rewards_sysvar(10); - let account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); - assert_eq!(account.lamports(), total_rewards - 20); - let epoch_rewards: sysvar::epoch_rewards::EpochRewards = from_account(&account).unwrap(); - let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { - distribution_starting_block_height: 42, - num_partitions: 0, - parent_blockhash: Hash::default(), - total_points: 0, - total_rewards, - distributed_rewards: 20, - active: true, - }; - assert_eq!(epoch_rewards, expected_epoch_rewards); - - // burn epoch rewards sysvar - bank.burn_and_purge_account(&sysvar::epoch_rewards::id(), account); - let account = bank.get_account(&sysvar::epoch_rewards::id()); - assert!(account.is_none()); -} - /// Test that program execution that involves stake accounts should fail during reward period. /// Any programs, which result in stake account changes, will throw `ProgramExecutionTemporarilyRestricted` error when /// in reward period. From 025ed451c597df8abb7e271f1ec80cf4998dd0fb Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Tue, 2 Apr 2024 11:11:05 +0900 Subject: [PATCH 129/153] [keygen] Remove deprecated functions from the `grind` command (#490) * disable deprecated functions * replace `is_present` with `try_contains_id` * use `try_get_word_count` and `try_get_language` * replace `multiple_occurrences` with arg actions * add `grind_parser` * remove `values_of_t_or_exit` * remove `deprecated` feature * Update keygen/src/keygen.rs Co-authored-by: Tyera * change `GrindType::StartsEnds` to `GrindType::StartsAndEnds` * comment on why `args.pop()` is safe --------- Co-authored-by: Tyera --- keygen/src/keygen.rs | 157 ++++++++++++++++++++++--------------------- 1 file changed, 80 insertions(+), 77 deletions(-) diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 4d85941a0578c5..af1bff3f87f81a 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -1,8 +1,10 @@ #![allow(clippy::arithmetic_side_effects)] -#![allow(deprecated)] use { bip39::{Mnemonic, MnemonicType, Seed}, - clap::{crate_description, crate_name, value_parser, Arg, ArgMatches, Command}, + clap::{ + builder::ValueParser, crate_description, crate_name, value_parser, Arg, ArgAction, + ArgMatches, Command, + }, solana_clap_v3_utils::{ input_parsers::{ signer::{SignerSource, SignerSourceParserBuilder}, @@ -12,8 +14,8 @@ use { check_for_overwrite, derivation_path::{acquire_derivation_path, derivation_path_arg}, mnemonic::{ - acquire_language, acquire_passphrase_and_message, no_passphrase_and_message, - try_get_language, try_get_word_count, WORD_COUNT_ARG, + acquire_passphrase_and_message, no_passphrase_and_message, try_get_language, + try_get_word_count, }, no_outfile_arg, KeyGenerationCommonArgs, NO_OUTFILE_ARG, }, @@ -65,6 +67,39 @@ struct GrindMatch { count: AtomicU64, } +#[derive(Debug, Clone)] 
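/// Which grind pattern grammar to enforce; consumed by `grind_parser` below
/// to decide how many ':'-separated fields a value must carry.
///
/// A hedged usage sketch (the builder wiring here is illustrative shorthand
/// for the real `app()` definition further down, not the exact code):
///
/// ```ignore
/// let arg = Arg::with_name("starts_with")
///     .value_parser(grind_parser(GrindType::Starts));
/// // accepts "sol:4"; rejects "sol:", "sol:0", and non-base58 prefixes
/// ```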
+enum GrindType { + Starts, + Ends, + StartsAndEnds, +} + +fn grind_parser(grind_type: GrindType) -> ValueParser { + ValueParser::from(move |v: &str| -> Result { + let (required_div_count, prefix_suffix) = match grind_type { + GrindType::Starts => (1, "PREFIX"), + GrindType::Ends => (1, "SUFFIX"), + GrindType::StartsAndEnds => (2, "PREFIX and SUFFIX"), + }; + if v.matches(':').count() != required_div_count || (v.starts_with(':') || v.ends_with(':')) + { + return Err(format!("Expected : between {} and COUNT", prefix_suffix)); + } + // `args` is guaranteed to have length at least 1 by the previous if statement + let mut args: Vec<&str> = v.split(':').collect(); + let count = args.pop().unwrap().parse::(); + for arg in args.iter() { + bs58::decode(arg) + .into_vec() + .map_err(|err| format!("{}: {:?}", args[0], err))?; + } + if count.is_err() || count.unwrap() == 0 { + return Err(String::from("Expected COUNT to be of type u64")); + } + Ok(v.to_string()) + }) +} + fn get_keypair_from_matches( matches: &ArgMatches, config: Config, @@ -100,56 +135,6 @@ fn output_keypair( Ok(()) } -fn grind_validator_starts_with(v: &str) -> Result<(), String> { - if v.matches(':').count() != 1 || (v.starts_with(':') || v.ends_with(':')) { - return Err(String::from("Expected : between PREFIX and COUNT")); - } - let args: Vec<&str> = v.split(':').collect(); - bs58::decode(&args[0]) - .into_vec() - .map_err(|err| format!("{}: {:?}", args[0], err))?; - let count = args[1].parse::(); - if count.is_err() || count.unwrap() == 0 { - return Err(String::from("Expected COUNT to be of type u64")); - } - Ok(()) -} - -fn grind_validator_ends_with(v: &str) -> Result<(), String> { - if v.matches(':').count() != 1 || (v.starts_with(':') || v.ends_with(':')) { - return Err(String::from("Expected : between SUFFIX and COUNT")); - } - let args: Vec<&str> = v.split(':').collect(); - bs58::decode(&args[0]) - .into_vec() - .map_err(|err| format!("{}: {:?}", args[0], err))?; - let count = args[1].parse::(); - if count.is_err() || count.unwrap() == 0 { - return Err(String::from("Expected COUNT to be of type u64")); - } - Ok(()) -} - -fn grind_validator_starts_and_ends_with(v: &str) -> Result<(), String> { - if v.matches(':').count() != 2 || (v.starts_with(':') || v.ends_with(':')) { - return Err(String::from( - "Expected : between PREFIX and SUFFIX and COUNT", - )); - } - let args: Vec<&str> = v.split(':').collect(); - bs58::decode(&args[0]) - .into_vec() - .map_err(|err| format!("{}: {:?}", args[0], err))?; - bs58::decode(&args[1]) - .into_vec() - .map_err(|err| format!("{}: {:?}", args[1], err))?; - let count = args[2].parse::(); - if count.is_err() || count.unwrap() == 0 { - return Err(String::from("Expected COUNT to be a u64")); - } - Ok(()) -} - fn grind_print_info(grind_matches: &[GrindMatch], num_threads: usize) { println!("Searching with {num_threads} threads for:"); for gm in grind_matches { @@ -316,9 +301,9 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .value_name("PREFIX:COUNT") .number_of_values(1) .takes_value(true) - .multiple_occurrences(true) + .action(ArgAction::Append) .multiple_values(true) - .validator(grind_validator_starts_with) + .value_parser(grind_parser(GrindType::Starts)) .help("Saves specified number of keypairs whos public key starts with the indicated prefix\nExample: --starts-with sol:4\nPREFIX type is Base58\nCOUNT type is u64"), ) .arg( @@ -327,9 +312,9 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .value_name("SUFFIX:COUNT") 
.number_of_values(1) .takes_value(true) - .multiple_occurrences(true) + .action(ArgAction::Append) .multiple_values(true) - .validator(grind_validator_ends_with) + .value_parser(grind_parser(GrindType::Ends)) .help("Saves specified number of keypairs whos public key ends with the indicated suffix\nExample: --ends-with ana:4\nSUFFIX type is Base58\nCOUNT type is u64"), ) .arg( @@ -338,9 +323,9 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .value_name("PREFIX:SUFFIX:COUNT") .number_of_values(1) .takes_value(true) - .multiple_occurrences(true) + .action(ArgAction::Append) .multiple_values(true) - .validator(grind_validator_starts_and_ends_with) + .value_parser(grind_parser(GrindType::StartsAndEnds)) .help("Saves specified number of keypairs whos public key starts and ends with the indicated perfix and suffix\nExample: --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is Base58\nCOUNT type is u64"), ) .arg( @@ -547,31 +532,49 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { output_keypair(&keypair, outfile, "recovered")?; } ("grind", matches) => { - let ignore_case = matches.is_present("ignore_case"); + let ignore_case = matches.try_contains_id("ignore_case")?; - let starts_with_args = if matches.is_present("starts_with") { + let starts_with_args = if matches.try_contains_id("starts_with")? { matches - .values_of_t_or_exit::("starts_with") - .into_iter() - .map(|s| if ignore_case { s.to_lowercase() } else { s }) + .get_many::("starts_with") + .unwrap() + .map(|s| { + if ignore_case { + s.to_lowercase() + } else { + s.to_owned() + } + }) .collect() } else { HashSet::new() }; - let ends_with_args = if matches.is_present("ends_with") { + let ends_with_args = if matches.try_contains_id("ends_with")? { matches - .values_of_t_or_exit::("ends_with") - .into_iter() - .map(|s| if ignore_case { s.to_lowercase() } else { s }) + .get_many::("ends_with") + .unwrap() + .map(|s| { + if ignore_case { + s.to_lowercase() + } else { + s.to_owned() + } + }) .collect() } else { HashSet::new() }; - let starts_and_ends_with_args = if matches.is_present("starts_and_ends_with") { + let starts_and_ends_with_args = if matches.try_contains_id("starts_and_ends_with")? { matches - .values_of_t_or_exit::("starts_and_ends_with") - .into_iter() - .map(|s| if ignore_case { s.to_lowercase() } else { s }) + .get_many::("starts_and_ends_with") + .unwrap() + .map(|s| { + if ignore_case { + s.to_lowercase() + } else { + s.to_owned() + } + }) .collect() } else { HashSet::new() @@ -596,20 +599,20 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { num_threads, ); - let use_mnemonic = matches.is_present("use_mnemonic"); + let use_mnemonic = matches.try_contains_id("use_mnemonic")?; let derivation_path = acquire_derivation_path(matches)?; - let word_count: usize = matches.value_of_t(WORD_COUNT_ARG.name).unwrap(); + let word_count = try_get_word_count(matches)?.unwrap(); let mnemonic_type = MnemonicType::for_word_count(word_count)?; - let language = acquire_language(matches); + let language = try_get_language(matches)?.unwrap(); let (passphrase, passphrase_message) = if use_mnemonic { acquire_passphrase_and_message(matches).unwrap() } else { no_passphrase_and_message() }; - let no_outfile = matches.is_present(NO_OUTFILE_ARG.name); + let no_outfile = matches.try_contains_id(NO_OUTFILE_ARG.name)?; // The vast majority of base58 encoded public keys have length 44, but // these only encapsulate prefixes 1-9 and A-H. 
If the user is searching From 005c1375f8892764600110d5475ee5c477fb135f Mon Sep 17 00:00:00 2001 From: Joe C Date: Tue, 2 Apr 2024 05:06:13 -0500 Subject: [PATCH 130/153] Program Runtime: Refactor `InvokeContext` out of `MessageProcessor::process_message` (#509) --- program-runtime/src/message_processor.rs | 145 ++++++++++++----------- svm/src/transaction_processor.rs | 23 ++-- 2 files changed, 89 insertions(+), 79 deletions(-) diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index e307609e096501..10917ab0715e55 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -1,26 +1,19 @@ use { crate::{ - compute_budget::ComputeBudget, invoke_context::InvokeContext, - loaded_programs::LoadedProgramsForTxBatch, - log_collector::LogCollector, - sysvar_cache::SysvarCache, timings::{ExecuteDetailsTimings, ExecuteTimings}, }, serde::{Deserialize, Serialize}, solana_measure::measure::Measure, solana_sdk::{ account::WritableAccount, - feature_set::FeatureSet, - hash::Hash, message::SanitizedMessage, precompiles::is_precompile, saturating_add_assign, sysvar::instructions, transaction::TransactionError, - transaction_context::{IndexOfAccount, InstructionAccount, TransactionContext}, + transaction_context::{IndexOfAccount, InstructionAccount}, }, - std::{cell::RefCell, rc::Rc, sync::Arc}, }; #[derive(Debug, Default, Clone, Deserialize, Serialize)] @@ -45,30 +38,10 @@ impl MessageProcessor { pub fn process_message( message: &SanitizedMessage, program_indices: &[Vec], - transaction_context: &mut TransactionContext, - log_collector: Option>>, - programs_loaded_for_tx_batch: &LoadedProgramsForTxBatch, - programs_modified_by_tx: &mut LoadedProgramsForTxBatch, - feature_set: Arc, - compute_budget: ComputeBudget, + invoke_context: &mut InvokeContext, timings: &mut ExecuteTimings, - sysvar_cache: &SysvarCache, - blockhash: Hash, - lamports_per_signature: u64, accumulated_consumed_units: &mut u64, ) -> Result<(), TransactionError> { - let mut invoke_context = InvokeContext::new( - transaction_context, - sysvar_cache, - log_collector, - compute_budget, - programs_loaded_for_tx_batch, - programs_modified_by_tx, - feature_set, - blockhash, - lamports_per_signature, - ); - debug_assert_eq!(program_indices.len(), message.instructions().len()); for (instruction_index, ((program_id, instruction), program_indices)) in message .program_instructions_iter() @@ -174,11 +147,16 @@ mod tests { use { super::*, crate::{ - declare_process_instruction, loaded_programs::LoadedProgram, + compute_budget::ComputeBudget, + declare_process_instruction, + loaded_programs::{LoadedProgram, LoadedProgramsForTxBatch}, message_processor::MessageProcessor, + sysvar_cache::SysvarCache, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, + feature_set::FeatureSet, + hash::Hash, instruction::{AccountMeta, Instruction, InstructionError}, message::{AccountKeys, Message}, native_loader::{self, create_loadable_account_for_test}, @@ -186,7 +164,9 @@ mod tests { rent::Rent, secp256k1_instruction::new_secp256k1_instruction, secp256k1_program, system_program, + transaction_context::TransactionContext, }, + std::sync::Arc, }; #[derive(Debug, Serialize, Deserialize)] @@ -292,19 +272,22 @@ mod tests { )); let sysvar_cache = SysvarCache::default(); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &program_indices, + let mut invoke_context = InvokeContext::new( &mut 
transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &program_indices, + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); assert!(result.is_ok()); @@ -340,19 +323,22 @@ mod tests { ]), )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &program_indices, + let mut invoke_context = InvokeContext::new( &mut transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &program_indices, + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); assert_eq!( @@ -378,19 +364,22 @@ mod tests { ]), )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &program_indices, + let mut invoke_context = InvokeContext::new( &mut transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &program_indices, + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); assert_eq!( @@ -507,19 +496,22 @@ mod tests { )); let sysvar_cache = SysvarCache::default(); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &program_indices, + let mut invoke_context = InvokeContext::new( &mut transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &program_indices, + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); assert_eq!( @@ -540,19 +532,22 @@ mod tests { Some(transaction_context.get_key_of_account_at_index(0).unwrap()), )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &program_indices, + let mut invoke_context = InvokeContext::new( &mut transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &program_indices, + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); assert!(result.is_ok()); @@ -570,19 +565,22 @@ mod tests { Some(transaction_context.get_key_of_account_at_index(0).unwrap()), )); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &program_indices, 
+ let mut invoke_context = InvokeContext::new( &mut transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &program_indices, + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); assert!(result.is_ok()); @@ -661,19 +659,22 @@ mod tests { Arc::new(LoadedProgram::new_builtin(0, 0, MockBuiltin::vm)), ); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let result = MessageProcessor::process_message( - &message, - &[vec![1], vec![2]], + let mut invoke_context = InvokeContext::new( &mut transaction_context, + &sysvar_cache, None, + ComputeBudget::default(), &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, Arc::new(FeatureSet::all_enabled()), - ComputeBudget::default(), - &mut ExecuteTimings::default(), - &sysvar_cache, Hash::default(), 0, + ); + let result = MessageProcessor::process_message( + &message, + &[vec![1], vec![2]], + &mut invoke_context, + &mut ExecuteTimings::default(), &mut 0, ); diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 5426cf0fce9b16..0bc056e426113a 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -15,6 +15,7 @@ use { solana_measure::measure::Measure, solana_program_runtime::{ compute_budget::ComputeBudget, + invoke_context::InvokeContext, loaded_programs::{ ForkGraph, LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedProgramsForTxBatch, ProgramCache, ProgramRuntimeEnvironment, @@ -635,24 +636,32 @@ impl TransactionBatchProcessor { programs_loaded_for_tx_batch.upcoming_environments.clone(), programs_loaded_for_tx_batch.latest_root_epoch, ); - let mut process_message_time = Measure::start("process_message_time"); - let process_result = MessageProcessor::process_message( - tx.message(), - &loaded_transaction.program_indices, + let sysvar_cache = &self.sysvar_cache.read().unwrap(); + + let mut invoke_context = InvokeContext::new( &mut transaction_context, + sysvar_cache, log_collector.clone(), + compute_budget, programs_loaded_for_tx_batch, &mut programs_modified_by_tx, callback.get_feature_set(), - compute_budget, - timings, - &self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, + ); + + let mut process_message_time = Measure::start("process_message_time"); + let process_result = MessageProcessor::process_message( + tx.message(), + &loaded_transaction.program_indices, + &mut invoke_context, + timings, &mut executed_units, ); process_message_time.stop(); + drop(invoke_context); + saturating_add_assign!( timings.execute_accessories.process_message_us, process_message_time.as_us() From 798cb561e15d34ecf9891263572472365215d0c6 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 2 Apr 2024 19:25:51 +0900 Subject: [PATCH 131/153] Support running miri in ci (#534) * Support running miri in ci * fail fast... * wait miri is still not stable? 
* clean up * fmt * Move to Dockerfile --- ci/buildkite-pipeline.sh | 1 + ci/docker/Dockerfile | 1 + ci/test-miri.sh | 8 ++++++++ 3 files changed, 10 insertions(+) create mode 100755 ci/test-miri.sh diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 4ae00c9feab586..e0b51ef9e4acdb 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -143,6 +143,7 @@ all_test_steps() { command_step checks1 "ci/docker-run-default-image.sh ci/test-checks.sh" 20 check command_step checks2 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-bins" 15 check command_step checks3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-all-targets" 15 check + command_step miri "ci/docker-run-default-image.sh ci/test-miri.sh" 5 check wait_step # Full test suite diff --git a/ci/docker/Dockerfile b/ci/docker/Dockerfile index cee80877c6db5d..4e792f1368a1ad 100644 --- a/ci/docker/Dockerfile +++ b/ci/docker/Dockerfile @@ -73,6 +73,7 @@ RUN \ rustup install $RUST_NIGHTLY_VERSION && \ rustup component add clippy --toolchain=$RUST_NIGHTLY_VERSION && \ rustup component add rustfmt --toolchain=$RUST_NIGHTLY_VERSION && \ + rustup component add miri --toolchain=$RUST_NIGHTLY_VERSION && \ rustup target add wasm32-unknown-unknown && \ cargo install cargo-audit && \ cargo install cargo-hack && \ diff --git a/ci/test-miri.sh b/ci/test-miri.sh new file mode 100755 index 00000000000000..407d48c34106a2 --- /dev/null +++ b/ci/test-miri.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -eo pipefail + +source ci/rust-version.sh nightly + +# miri is very slow; so only run very few of selective tests! +cargo "+${rust_nightly}" miri test -p solana-program -- hash:: account_info:: From 2643ae85c3262f5bee62931bee8a51e11090c72a Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 2 Apr 2024 15:29:38 +0200 Subject: [PATCH 132/153] cli: Customize max sign attempts for deploy and write-buffer (#526) * cli: Customize max sign attempts for deploy and write-buffer * Update changelog * Improve help message * Fixup line break --- CHANGELOG.md | 2 + cli/src/program.rs | 157 +++++++++++++++++++++++++++++++++--- cli/tests/program.rs | 35 ++++++++ transaction-dos/src/main.rs | 1 + 4 files changed, 182 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09d411471323d7..b304c367fcabc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ Release channels have their own copy of this changelog: * `central-scheduler` as default option for `--block-production-method` (#34891) * `solana-rpc-client-api`: `RpcFilterError` depends on `base64` version 0.22, so users may need to upgrade to `base64` version 0.22 * Changed default value for `--health-check-slot-distance` from 150 to 128 + * CLI: Can specify `--with-compute-unit-price` and `--max-sign-attempts` during program deployment ## [1.18.0] * Changes @@ -39,6 +40,7 @@ Release channels have their own copy of this changelog: double the size. Program accounts must be extended with `solana program extend` before an upgrade if they need to accommodate larger programs. * Interface for `gossip_service::get_client()` has changed. `gossip_service::get_multi_client()` has been removed. + * CLI: Can specify `--with-compute-unit-price` and `--max-sign-attempts` during program deployment * Upgrade Notes * `solana-program` and `solana-sdk` default to support for Borsh v1, with limited backward compatibility for v0.10 and v0.9. Please upgrade to Borsh v1. 
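For context on the mechanism `--max-sign-attempts` bounds in the diff below: transactions are signed against a recent blockhash that stays valid for roughly 60 seconds, and anything still unconfirmed when it expires is resigned with a fresh blockhash and resent. A minimal sketch of that loop; every name here is a stand-in, not the actual `send_and_confirm_transactions_in_parallel` implementation:

struct PendingMessage {
    id: u64,
}

fn sign_and_send(batch: &[PendingMessage]) -> Vec<u64> {
    // Stand-in: sign each message with a fresh recent blockhash, send it,
    // and return the ids confirmed before that blockhash expired (~60s).
    batch.iter().map(|m| m.id).collect()
}

fn send_with_resign(
    mut pending: Vec<PendingMessage>,
    max_sign_attempts: usize,
) -> Result<(), String> {
    for _attempt in 0..max_sign_attempts {
        let confirmed = sign_and_send(&pending);
        pending.retain(|m| !confirmed.contains(&m.id));
        if pending.is_empty() {
            return Ok(()); // everything landed; stop early
        }
        // Blockhash expired with work left: loop around, resign the
        // remainder against a new recent blockhash, and resend.
    }
    Err(format!("{} transactions remain unconfirmed", pending.len()))
}

fn main() {
    let messages: Vec<PendingMessage> = (0..3).map(|id| PendingMessage { id }).collect();
    assert!(send_with_resign(messages, 5).is_ok());
}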
diff --git a/cli/src/program.rs b/cli/src/program.rs index 0aec785fa445ea..ca7547eaaae6c2 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -98,6 +98,7 @@ pub enum ProgramCliCommand { allow_excessive_balance: bool, skip_fee_check: bool, compute_unit_price: Option, + max_sign_attempts: usize, }, Upgrade { fee_payer_signer_index: SignerIndex, @@ -117,6 +118,7 @@ pub enum ProgramCliCommand { max_len: Option, skip_fee_check: bool, compute_unit_price: Option, + max_sign_attempts: usize, }, SetBufferAuthority { buffer_pubkey: Pubkey, @@ -246,6 +248,26 @@ impl ProgramSubCommands for App<'_, '_> { holds a large balance of SOL", ), ) + .arg( + Arg::with_name("max_sign_attempts") + .long("max-sign-attempts") + .takes_value(true) + .validator(is_parsable::) + .default_value("5") + .help( + "Maximum number of attempts to sign or resign transactions \ + after blockhash expiration. \ + If any transactions sent during the program deploy are still \ + unconfirmed after the initially chosen recent blockhash \ + expires, those transactions will be resigned with a new \ + recent blockhash and resent. Use this setting to adjust \ + the maximum number of transaction signing iterations. Each \ + blockhash is valid for about 60 seconds, which means using \ + the default value of 5 will lead to sending transactions \ + for at least 5 minutes or until all transactions are confirmed,\ + whichever comes first.", + ), + ) .arg(compute_unit_price_arg()), ) .subcommand( @@ -319,6 +341,26 @@ impl ProgramSubCommands for App<'_, '_> { [default: the length of the original deployed program]", ), ) + .arg( + Arg::with_name("max_sign_attempts") + .long("max-sign-attempts") + .takes_value(true) + .validator(is_parsable::) + .default_value("5") + .help( + "Maximum number of attempts to sign or resign transactions \ + after blockhash expiration. \ + If any transactions sent during the program deploy are still \ + unconfirmed after the initially chosen recent blockhash \ + expires, those transactions will be resigned with a new \ + recent blockhash and resent. Use this setting to adjust \ + the maximum number of transaction signing iterations. 
Each \ + blockhash is valid for about 60 seconds, which means using \ + the default value of 5 will lead to sending transactions \ + for at least 5 minutes or until all transactions are confirmed,\ + whichever comes first.", + ), + ) .arg(compute_unit_price_arg()), ) .subcommand( @@ -613,6 +655,7 @@ pub fn parse_program_subcommand( default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; let compute_unit_price = value_of(matches, "compute_unit_price"); + let max_sign_attempts = value_of(matches, "max_sign_attempts").unwrap(); CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { @@ -630,6 +673,7 @@ pub fn parse_program_subcommand( allow_excessive_balance: matches.is_present("allow_excessive_balance"), skip_fee_check, compute_unit_price, + max_sign_attempts, }), signers: signer_info.signers, } @@ -702,6 +746,7 @@ pub fn parse_program_subcommand( default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; let compute_unit_price = value_of(matches, "compute_unit_price"); + let max_sign_attempts = value_of(matches, "max_sign_attempts").unwrap(); CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { @@ -715,6 +760,7 @@ pub fn parse_program_subcommand( max_len, skip_fee_check, compute_unit_price, + max_sign_attempts, }), signers: signer_info.signers, } @@ -917,6 +963,7 @@ pub fn process_program_subcommand( allow_excessive_balance, skip_fee_check, compute_unit_price, + max_sign_attempts, } => process_program_deploy( rpc_client, config, @@ -932,6 +979,7 @@ pub fn process_program_subcommand( *allow_excessive_balance, *skip_fee_check, *compute_unit_price, + *max_sign_attempts, ), ProgramCliCommand::Upgrade { fee_payer_signer_index, @@ -961,6 +1009,7 @@ pub fn process_program_subcommand( max_len, skip_fee_check, compute_unit_price, + max_sign_attempts, } => process_write_buffer( rpc_client, config, @@ -972,6 +1021,7 @@ pub fn process_program_subcommand( *max_len, *skip_fee_check, *compute_unit_price, + *max_sign_attempts, ), ProgramCliCommand::SetBufferAuthority { buffer_pubkey, @@ -1104,6 +1154,7 @@ fn process_program_deploy( allow_excessive_balance: bool, skip_fee_check: bool, compute_unit_price: Option, + max_sign_attempts: usize, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; @@ -1244,6 +1295,7 @@ fn process_program_deploy( allow_excessive_balance, skip_fee_check, compute_unit_price, + max_sign_attempts, ) } else { do_process_program_upgrade( @@ -1259,6 +1311,7 @@ fn process_program_deploy( buffer_signer, skip_fee_check, compute_unit_price, + max_sign_attempts, ) }; if result.is_ok() && is_final { @@ -1408,6 +1461,7 @@ fn process_write_buffer( max_len: Option, skip_fee_check: bool, compute_unit_price: Option, + max_sign_attempts: usize, ) -> ProcessResult { let fee_payer_signer = config.signers[fee_payer_signer_index]; let buffer_authority = config.signers[buffer_authority_signer_index]; @@ -1474,6 +1528,7 @@ fn process_write_buffer( true, skip_fee_check, compute_unit_price, + max_sign_attempts, ); if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() { report_ephemeral_mnemonic(words, mnemonic); @@ -2228,6 +2283,7 @@ fn do_process_program_write_and_deploy( allow_excessive_balance: bool, skip_fee_check: bool, compute_unit_price: Option, + max_sign_attempts: usize, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; @@ -2366,6 +2422,7 @@ fn 
do_process_program_write_and_deploy( buffer_signer, Some(buffer_authority_signer), program_signers, + max_sign_attempts, )?; if let Some(program_signers) = program_signers { @@ -2396,6 +2453,7 @@ fn do_process_program_upgrade( buffer_signer: Option<&dyn Signer>, skip_fee_check: bool, compute_unit_price: Option, + max_sign_attempts: usize, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; @@ -2513,6 +2571,7 @@ fn do_process_program_upgrade( buffer_signer, Some(upgrade_authority), Some(&[upgrade_authority]), + max_sign_attempts, )?; let program_id = CliProgramId { @@ -2696,6 +2755,7 @@ fn simulate_and_update_compute_unit_limit( )) } +#[allow(clippy::too_many_arguments)] fn send_deploy_messages( rpc_client: Arc, config: &CliConfig, @@ -2706,6 +2766,7 @@ fn send_deploy_messages( initial_signer: Option<&dyn Signer>, write_signer: Option<&dyn Signer>, final_signers: Option<&[&dyn Signer]>, + max_sign_attempts: usize, ) -> Result, Box> { if let Some(message) = initial_message { if let Some(initial_signer) = initial_signer { @@ -2793,7 +2854,7 @@ fn send_deploy_messages( &write_messages, &[fee_payer_signer, write_signer], SendAndConfirmConfig { - resign_txs_count: Some(5), + resign_txs_count: Some(max_sign_attempts), with_spinner: true, }, ) @@ -2942,7 +3003,8 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -2971,7 +3033,8 @@ mod tests { max_len: Some(42), allow_excessive_balance: false, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3002,7 +3065,8 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3035,7 +3099,8 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3067,7 +3132,8 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3102,7 +3168,8 @@ mod tests { max_len: None, allow_excessive_balance: false, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3133,7 +3200,38 @@ mod tests { max_len: None, skip_fee_check: false, allow_excessive_balance: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, + }), + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], + } + ); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program", + "deploy", + "/Users/test/program.so", + "--max-sign-attempts", + "1", + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, + buffer_signer_index: None, + buffer_pubkey: 
None, + program_signer_index: None, + program_pubkey: None, + upgrade_authority_signer_index: 0, + is_final: false, + max_len: None, + allow_excessive_balance: false, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 1, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3168,7 +3266,8 @@ mod tests { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3194,7 +3293,8 @@ mod tests { buffer_authority_signer_index: 0, max_len: Some(42), skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], } @@ -3223,7 +3323,8 @@ mod tests { buffer_authority_signer_index: 0, max_len: None, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3255,7 +3356,8 @@ mod tests { buffer_authority_signer_index: 1, max_len: None, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3292,7 +3394,8 @@ mod tests { buffer_authority_signer_index: 2, max_len: None, skip_fee_check: false, - compute_unit_price: None + compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), @@ -3301,6 +3404,33 @@ mod tests { ], } ); + + // specify max sign attempts + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program", + "write-buffer", + "/Users/test/program.so", + "--max-sign-attempts", + "10", + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::WriteBuffer { + program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, + buffer_signer_index: None, + buffer_pubkey: None, + buffer_authority_signer_index: 0, + max_len: None, + skip_fee_check: false, + compute_unit_price: None, + max_sign_attempts: 10, + }), + signers: vec![Box::new(read_keypair_file(&keypair_file).unwrap())], + } + ); } #[test] @@ -3852,6 +3982,7 @@ mod tests { allow_excessive_balance: false, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }), signers: vec![&default_keypair], output_format: OutputFormat::JsonCompact, diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 240a01567a409e..545751cf53b5c2 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -98,6 +98,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -145,6 +146,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); let account1 = rpc_client @@ -201,6 +203,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); let err = process_command(&config).unwrap_err(); assert_eq!( @@ -225,6 +228,7 @@ fn test_cli_program_deploy_non_upgradeable() { max_len: None, skip_fee_check: 
false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap_err(); } @@ -287,6 +291,7 @@ fn test_cli_program_deploy_no_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -315,6 +320,7 @@ fn test_cli_program_deploy_no_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap_err(); } @@ -378,6 +384,7 @@ fn test_cli_program_deploy_with_authority() { max_len: Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -428,6 +435,7 @@ fn test_cli_program_deploy_with_authority() { max_len: Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -472,6 +480,7 @@ fn test_cli_program_deploy_with_authority() { max_len: Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); @@ -548,6 +557,7 @@ fn test_cli_program_deploy_with_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); @@ -628,6 +638,7 @@ fn test_cli_program_deploy_with_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap_err(); @@ -646,6 +657,7 @@ fn test_cli_program_deploy_with_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -751,6 +763,7 @@ fn test_cli_program_close_program() { max_len: Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -862,6 +875,7 @@ fn test_cli_program_extend_program() { max_len: None, // Use None to check that it defaults to the max length skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -910,6 +924,7 @@ fn test_cli_program_extend_program() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap_err(); @@ -943,6 +958,7 @@ fn test_cli_program_extend_program() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); } @@ -1008,6 +1024,7 @@ fn test_cli_program_write_buffer() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -1045,6 +1062,7 @@ fn test_cli_program_write_buffer() { max_len: Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1109,6 +1127,7 @@ fn 
test_cli_program_write_buffer() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1149,6 +1168,7 @@ fn test_cli_program_write_buffer() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); @@ -1225,6 +1245,7 @@ fn test_cli_program_write_buffer() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); @@ -1268,6 +1289,7 @@ fn test_cli_program_write_buffer() { max_len: None, //Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); config.signers = vec![&keypair, &buffer_keypair]; @@ -1284,6 +1306,7 @@ fn test_cli_program_write_buffer() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let error = process_command(&config).unwrap_err(); @@ -1344,6 +1367,7 @@ fn test_cli_program_set_buffer_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1397,6 +1421,7 @@ fn test_cli_program_set_buffer_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap_err(); @@ -1443,6 +1468,7 @@ fn test_cli_program_set_buffer_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1500,6 +1526,7 @@ fn test_cli_program_mismatch_buffer_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1525,6 +1552,7 @@ fn test_cli_program_mismatch_buffer_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap_err(); @@ -1543,6 +1571,7 @@ fn test_cli_program_mismatch_buffer_authority() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); } @@ -1627,6 +1656,7 @@ fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: max_len: Some(max_program_data_len), // allows for larger program size with future upgrades skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; process_command(&config).unwrap(); @@ -1795,6 +1825,7 @@ fn test_cli_program_show() { max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); @@ -1857,6 +1888,7 @@ fn test_cli_program_show() { max_len: Some(max_len), skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let min_slot = rpc_client.get_slot().unwrap(); @@ -1986,6 +2018,7 @@ fn test_cli_program_dump() { max_len: None, 
skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(&config).unwrap(); @@ -2030,6 +2063,7 @@ fn create_buffer_with_offline_authority<'a>( max_len: None, skip_fee_check: false, compute_unit_price: None, + max_sign_attempts: 5, }); process_command(config).unwrap(); let buffer_account = rpc_client.get_account(&buffer_signer.pubkey()).unwrap(); @@ -2125,6 +2159,7 @@ fn cli_program_deploy_with_args(compute_unit_price: Option) { max_len: Some(max_len), skip_fee_check: false, compute_unit_price, + max_sign_attempts: 5, }); config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index 3cf835c578382e..8fab7612560585 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -248,6 +248,7 @@ fn run_transactions_dos( is_final: true, max_len: None, compute_unit_price: None, + max_sign_attempts: 5, skip_fee_check: true, // skip_fee_check }); From 8cb700097393655f9411086a857c865f8da71d7e Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Tue, 2 Apr 2024 06:31:24 -0700 Subject: [PATCH 133/153] loaded-programs: Use `Measure::end_as_us()` (#81) It is a bit nicer API, compared to the `stop()/as_us()` pair. Does not require the value to be `mut`. --- program-runtime/src/invoke_context.rs | 5 ++--- program-runtime/src/loaded_programs.rs | 15 ++++++--------- program-runtime/src/message_processor.rs | 8 ++++---- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 8259c2ed2bcc7a..9f5b40aa7088dc 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -457,7 +457,7 @@ impl<'a> InvokeContext<'a> { timings: &mut ExecuteTimings, ) -> Result<(), InstructionError> { let instruction_context = self.transaction_context.get_current_instruction_context()?; - let mut process_executable_chain_time = Measure::start("process_executable_chain_time"); + let process_executable_chain_time = Measure::start("process_executable_chain_time"); let builtin_id = { let borrowed_root_account = instruction_context @@ -539,13 +539,12 @@ impl<'a> InvokeContext<'a> { return Err(InstructionError::BuiltinProgramsMustConsumeComputeUnits); } - process_executable_chain_time.stop(); saturating_add_assign!( timings .execute_accessories .process_instructions .process_executable_chain_us, - process_executable_chain_time.as_us() + process_executable_chain_time.end_as_us() ); result } diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 6a49c6e3a2dcf6..99c77bf9430ba6 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -335,27 +335,24 @@ impl LoadedProgram { metrics: &mut LoadProgramMetrics, reloading: bool, ) -> Result> { - let mut load_elf_time = Measure::start("load_elf_time"); + let load_elf_time = Measure::start("load_elf_time"); // The following unused_mut exception is needed for architectures that do not // support JIT compilation. 
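// An aside on the API shape this patch standardizes on: a stand-in
// MiniMeasure (not the real solana_measure::Measure, whose exact signature
// may differ) shows why end_as_us(self) lets these bindings drop `mut`:
struct MiniMeasure(std::time::Instant);

impl MiniMeasure {
    fn start(_name: &str) -> Self {
        MiniMeasure(std::time::Instant::now())
    }

    // Consumes the measure and reports elapsed microseconds in one call,
    // replacing the mutable stop()-then-as_us() two-step.
    fn end_as_us(self) -> u64 {
        self.0.elapsed().as_micros() as u64
    }
}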
#[allow(unused_mut)] let mut executable = Executable::load(elf_bytes, program_runtime_environment.clone())?; - load_elf_time.stop(); - metrics.load_elf_us = load_elf_time.as_us(); + metrics.load_elf_us = load_elf_time.end_as_us(); if !reloading { - let mut verify_code_time = Measure::start("verify_code_time"); + let verify_code_time = Measure::start("verify_code_time"); executable.verify::()?; - verify_code_time.stop(); - metrics.verify_code_us = verify_code_time.as_us(); + metrics.verify_code_us = verify_code_time.end_as_us(); } #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] { - let mut jit_compile_time = Measure::start("jit_compile_time"); + let jit_compile_time = Measure::start("jit_compile_time"); executable.jit_compile()?; - jit_compile_time.stop(); - metrics.jit_compile_us = jit_compile_time.as_us(); + metrics.jit_compile_us = jit_compile_time.end_as_us(); } let program = if bpf_loader_deprecated::check_id(loader_key) { diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index 10917ab0715e55..f3035820ac041c 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -106,7 +106,7 @@ impl MessageProcessor { invoke_context.transaction_context.pop() }) } else { - let mut time = Measure::start("execute_instruction"); + let time = Measure::start("execute_instruction"); let mut compute_units_consumed = 0; let result = invoke_context.process_instruction( &instruction.data, @@ -115,12 +115,12 @@ impl MessageProcessor { &mut compute_units_consumed, timings, ); - time.stop(); + let time = time.end_as_us(); *accumulated_consumed_units = accumulated_consumed_units.saturating_add(compute_units_consumed); timings.details.accumulate_program( program_id, - time.as_us(), + time, compute_units_consumed, result.is_err(), ); @@ -130,7 +130,7 @@ impl MessageProcessor { }; saturating_add_assign!( timings.execute_accessories.process_instructions.total_us, - time.as_us() + time ); result }; From a37b85c210527e52ca6ba46630f295f87cb6c161 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 2 Apr 2024 09:45:34 -0400 Subject: [PATCH 134/153] Reverts incorrectly renamed comment (#537) --- runtime/src/serde_snapshot/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index ea5957a6be07ec..220f2cec03b181 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -889,7 +889,7 @@ mod serde_snapshot_tests { let tmp = tempfile::tempdir().unwrap(); let original_path = tmp.path().join("123.456"); - // In remap_accounts_file() we want to handle EEXIST (collisions), but we want to return all + // In remap_append_vec_file() we want to handle EEXIST (collisions), but we want to return all // other errors let next_append_vec_id = AtomicAccountsFileId::new(457); let num_collisions = AtomicUsize::new(0); From e8159251e953d4a51dc7306d8ffdbe6f817e1230 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Tue, 2 Apr 2024 06:58:13 -0700 Subject: [PATCH 135/153] Enable accountsdb_scan_account_storage_no_bank tests for hot storage (#344) Enable accountsdb_scan_account_storage_no_bank tests for hot storage. 
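Mechanically, this leans on the `test_case` attribute macro the diff below relies on: one shared test body, one generated `#[test]` per attribute. A small hedged sketch with a stand-in enum:

use test_case::test_case;

enum Provider {
    AppendVec,
    HotStorage,
}

// Each attribute expands into its own #[test] function invoking this body,
// so both storage backends run the same scan logic as separate test results.
#[test_case(Provider::AppendVec)]
#[test_case(Provider::HotStorage)]
fn scan_runs_for_provider(provider: Provider) {
    assert!(matches!(provider, Provider::AppendVec | Provider::HotStorage));
}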
--- accounts-db/src/accounts_db.rs | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4b5f027bf38fcf..ad3e9707f5cb2d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9504,6 +9504,7 @@ pub mod tests { crate::{ account_info::StoredSize, account_storage::meta::{AccountMeta, StoredMeta}, + accounts_file::AccountsFileProvider, accounts_hash::MERKLE_FANOUT, accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, ancient_append_vecs, @@ -10564,8 +10565,9 @@ pub mod tests { } } - #[test] - fn test_accountsdb_scan_account_storage_no_bank() { + #[test_case(AccountsFileProvider::AppendVec)] + #[test_case(AccountsFileProvider::HotStorage)] + fn test_accountsdb_scan_account_storage_no_bank(accounts_file_provider: AccountsFileProvider) { solana_logger::setup(); let expected = 1; @@ -10580,7 +10582,7 @@ pub mod tests { slot_expected, 0, size as u64, - AccountsFileProvider::AppendVec, + accounts_file_provider, ); let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024)); data.accounts = av; @@ -10686,13 +10688,16 @@ pub mod tests { } } - #[test] - fn test_accountsdb_scan_account_storage_no_bank_one_slot() { + #[test_case(AccountsFileProvider::AppendVec)] + #[test_case(AccountsFileProvider::HotStorage)] + fn test_accountsdb_scan_account_storage_no_bank_one_slot( + accounts_file_provider: AccountsFileProvider, + ) { solana_logger::setup(); let expected = 1; let tf = crate::append_vec::test_utils::get_append_vec_path( - "test_accountsdb_scan_account_storage_no_bank", + "test_accountsdb_scan_account_storage_no_bank_one_slot", ); let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); let slot_expected: Slot = 0; @@ -10702,7 +10707,7 @@ pub mod tests { slot_expected, 0, size as u64, - AccountsFileProvider::AppendVec, + accounts_file_provider, ); let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024)); data.accounts = av; From 65f54bec88a70407da475563214cee7808954662 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 2 Apr 2024 10:03:55 -0500 Subject: [PATCH 136/153] Add ProgramCacheUs to execute timings (#473) --- program-runtime/src/timings.rs | 8 ++++++++ svm/src/transaction_processor.rs | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs index 8eeb9c5a005cde..e70c15e8d67782 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -51,6 +51,7 @@ pub enum ExecuteTimingType { CollectLogsUs, TotalBatchesLen, UpdateTransactionStatuses, + ProgramCacheUs, } pub struct Metrics([u64; ExecuteTimingType::CARDINALITY]); @@ -94,6 +95,13 @@ eager_macro_rules! 
{ $eager_1 .index(ExecuteTimingType::CheckUs), i64 ), + ( + "program_cache_us", + *$self + .metrics + .index(ExecuteTimingType::ProgramCacheUs), + i64 + ), ( "load_us", *$self diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 0bc056e426113a..e25c2ccfb24907 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -211,6 +211,7 @@ impl TransactionBatchProcessor { log_messages_bytes_limit: Option, limit_to_load_programs: bool, ) -> LoadAndExecuteSanitizedTransactionsOutput { + let mut program_cache_time = Measure::start("program_cache"); let mut program_accounts_map = Self::filter_executable_program_accounts( callbacks, sanitized_txs, @@ -234,6 +235,7 @@ impl TransactionBatchProcessor { execution_results: vec![], }; } + program_cache_time.stop(); let mut load_time = Measure::start("accounts_load"); let mut loaded_transactions = load_accounts( @@ -328,6 +330,10 @@ impl TransactionBatchProcessor { sanitized_txs.len(), ); + timings.saturating_add_in_place( + ExecuteTimingType::ProgramCacheUs, + program_cache_time.as_us(), + ); timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); From 01855eda4368157da56ace5dbbaf31ccb5b2b44e Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 2 Apr 2024 11:27:23 -0400 Subject: [PATCH 137/153] Sets write version to 0 when storing accounts (#476) --- accounts-db/src/accounts_db.rs | 74 ++++++++------------------ accounts-db/src/ancient_append_vecs.rs | 1 - runtime/src/snapshot_minimizer.rs | 3 -- 3 files changed, 22 insertions(+), 56 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ad3e9707f5cb2d..a5db465c029f1c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -26,10 +26,7 @@ use { crate::{ account_info::{AccountInfo, StorageLocation}, account_storage::{ - meta::{ - StorableAccountsWithHashesAndWriteVersions, StoredAccountMeta, - StoredMetaWriteVersion, - }, + meta::{StorableAccountsWithHashesAndWriteVersions, StoredAccountMeta}, AccountStorage, AccountStorageStatus, ShrinkInProgress, }, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, @@ -393,7 +390,6 @@ impl CurrentAncientAccountsFile { (self.slot(), accounts, accounts_to_store.slot()), None::>, self.accounts_file(), - None, StoreReclaims::Ignore, ); let bytes_written = @@ -4004,7 +4000,6 @@ impl AccountsDb { (slot, &shrink_collect.alive_accounts.alive_accounts()[..]), None::>, shrink_in_progress.new_storage(), - None, StoreReclaims::Ignore, ); @@ -5976,11 +5971,6 @@ impl AccountsDb { AccountHash(Hash::new_from_array(hasher.finalize().into())) } - fn bulk_assign_write_version(&self, count: usize) -> StoredMetaWriteVersion { - self.write_version - .fetch_add(count as StoredMetaWriteVersion, Ordering::AcqRel) - } - fn write_accounts_to_storage< 'a, 'b, @@ -6322,7 +6312,6 @@ impl AccountsDb { (slot, &accounts[..]), Some(hashes), &flushed_store, - None, StoreReclaims::Default, )); store_accounts_timing = store_accounts_timing_inner; @@ -6403,16 +6392,26 @@ impl AccountsDb { } } - fn write_accounts_to_cache<'a, 'b, T: ReadableAccount + Sync, P>( + fn write_accounts_to_cache<'a, 'b, T: ReadableAccount + Sync>( &self, slot: Slot, accounts_and_meta_to_store: &impl StorableAccounts<'b, T>, txn_iter: Box> + 'a>, - mut write_version_producer: P, - ) -> Vec - where - P: Iterator, - { + ) -> Vec { + let mut write_version_producer: Box> = + if 
self.accounts_update_notifier.is_some() { + let mut current_version = self + .write_version + .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel); + Box::new(std::iter::from_fn(move || { + let ret = current_version; + current_version += 1; + Some(ret) + })) + } else { + Box::new(std::iter::empty()) + }; + txn_iter .enumerate() .map(|(i, txn)| { @@ -6445,17 +6444,10 @@ impl AccountsDb { .collect() } - fn store_accounts_to< - 'a: 'c, - 'b, - 'c, - P: Iterator, - T: ReadableAccount + Sync + ZeroLamport + 'b, - >( + fn store_accounts_to<'a: 'c, 'b, 'c, T: ReadableAccount + Sync + ZeroLamport + 'b>( &self, accounts: &'c impl StorableAccounts<'b, T>, hashes: Option>>, - mut write_version_producer: P, store_to: &StoreTo, transactions: Option<&[Option<&'a SanitizedTransaction>]>, ) -> Vec { @@ -6489,7 +6481,7 @@ impl AccountsDb { None => Box::new(std::iter::repeat(&None).take(accounts.len())), }; - self.write_accounts_to_cache(slot, accounts, txn_iter, write_version_producer) + self.write_accounts_to_cache(slot, accounts, txn_iter) } StoreTo::Storage(storage) => { if accounts.has_hash_and_write_version() { @@ -6501,9 +6493,7 @@ impl AccountsDb { ), ) } else { - let write_versions = (0..accounts.len()) - .map(|_| write_version_producer.next().unwrap()) - .collect::>(); + let write_versions = vec![0; accounts.len()]; match hashes { Some(hashes) => self.write_accounts_to_storage( slot, @@ -8402,7 +8392,6 @@ impl AccountsDb { self.store_accounts_custom( accounts, hashes, - None::>>, store_to, reset_accounts, transactions, @@ -8416,7 +8405,6 @@ impl AccountsDb { accounts: impl StorableAccounts<'a, T>, hashes: Option>>, storage: &Arc, - write_version_producer: Option>>, reclaim: StoreReclaims, ) -> StoreAccountsTiming { // stores on a frozen slot should not reset @@ -8426,7 +8414,6 @@ impl AccountsDb { self.store_accounts_custom( accounts, hashes, - write_version_producer, &StoreTo::Storage(storage), reset_accounts, None, @@ -8439,34 +8426,17 @@ impl AccountsDb { &self, accounts: impl StorableAccounts<'a, T>, hashes: Option>>, - write_version_producer: Option>>, store_to: &StoreTo, reset_accounts: bool, transactions: Option<&[Option<&SanitizedTransaction>]>, reclaim: StoreReclaims, update_index_thread_selection: UpdateIndexThreadSelection, ) -> StoreAccountsTiming { - let write_version_producer: Box> = write_version_producer - .unwrap_or_else(|| { - let mut current_version = self.bulk_assign_write_version(accounts.len()); - Box::new(std::iter::from_fn(move || { - let ret = current_version; - current_version += 1; - Some(ret) - })) - }); - self.stats .store_num_accounts .fetch_add(accounts.len() as u64, Ordering::Relaxed); let mut store_accounts_time = Measure::start("store_accounts"); - let infos = self.store_accounts_to( - &accounts, - hashes, - write_version_producer, - store_to, - transactions, - ); + let infos = self.store_accounts_to(&accounts, hashes, store_to, transactions); store_accounts_time.stop(); self.stats .store_accounts @@ -9503,7 +9473,7 @@ pub mod tests { super::*, crate::{ account_info::StoredSize, - account_storage::meta::{AccountMeta, StoredMeta}, + account_storage::meta::{AccountMeta, StoredMeta, StoredMetaWriteVersion}, accounts_file::AccountsFileProvider, accounts_hash::MERKLE_FANOUT, accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index f83f16e121a734..c4df48f0447593 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ 
b/accounts-db/src/ancient_append_vecs.rs @@ -438,7 +438,6 @@ impl AccountsDb { accounts_to_write, None::>, shrink_in_progress.new_storage(), - None, StoreReclaims::Ignore, )); diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 009444b962ed48..e39c298fb1aaaf 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -359,12 +359,10 @@ impl<'a> SnapshotMinimizer<'a> { if aligned_total > 0 { let mut accounts = Vec::with_capacity(keep_accounts.len()); let mut hashes = Vec::with_capacity(keep_accounts.len()); - let mut write_versions = Vec::with_capacity(keep_accounts.len()); for alive_account in keep_accounts { accounts.push(alive_account); hashes.push(alive_account.hash()); - write_versions.push(alive_account.write_version()); } shrink_in_progress = Some(self.accounts_db().get_store_for_shrink(slot, aligned_total)); @@ -373,7 +371,6 @@ impl<'a> SnapshotMinimizer<'a> { (slot, &accounts[..]), Some(hashes), new_storage, - Some(Box::new(write_versions.into_iter())), StoreReclaims::Ignore, ); From 90999017a49521ea57777ec6a692e5c5f90498ec Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 2 Apr 2024 11:22:49 -0500 Subject: [PATCH 138/153] no need to call handle_reclaims if reclaims was not populated (#527) --- accounts-db/src/accounts_db.rs | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index a5db465c029f1c..f1e01421e413fb 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8487,23 +8487,29 @@ impl AccountsDb { // // From 1) and 2) we guarantee passing `no_purge_stats` == None, which is // equivalent to asserting there will be no dead slots, is safe. 
- let mut handle_reclaims_time = Measure::start("handle_reclaims"); - self.handle_reclaims( - (!reclaims.is_empty()).then(|| reclaims.iter()), - expected_single_dead_slot, - None, - reset_accounts, - &HashSet::default(), - ); - handle_reclaims_time.stop(); - self.stats - .store_handle_reclaims - .fetch_add(handle_reclaims_time.as_us(), Ordering::Relaxed); + let mut handle_reclaims_elapsed = 0; + if reclaim == UpsertReclaim::PopulateReclaims { + let mut handle_reclaims_time = Measure::start("handle_reclaims"); + self.handle_reclaims( + (!reclaims.is_empty()).then(|| reclaims.iter()), + expected_single_dead_slot, + None, + reset_accounts, + &HashSet::default(), + ); + handle_reclaims_time.stop(); + handle_reclaims_elapsed = handle_reclaims_time.as_us(); + self.stats + .store_handle_reclaims + .fetch_add(handle_reclaims_elapsed, Ordering::Relaxed); + } else { + assert!(reclaims.is_empty()); + } StoreAccountsTiming { store_accounts_elapsed: store_accounts_time.as_us(), update_index_elapsed: update_index_time.as_us(), - handle_reclaims_elapsed: handle_reclaims_time.as_us(), + handle_reclaims_elapsed, } } From c59143b980f85f1386f82df3172419c0b06d2599 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 2 Apr 2024 11:39:11 -0500 Subject: [PATCH 139/153] add get_stored_account to append vec (#508) * add get_stored_account to append vec * Update accounts-db/src/append_vec.rs Co-authored-by: Brooks * renames * accountshash -> accounthash --------- Co-authored-by: Brooks --- accounts-db/src/accounts_db.rs | 7 +++- accounts-db/src/accounts_file.rs | 14 ++++++- accounts-db/src/append_vec.rs | 67 +++++++++++++++++++++++++++++++- 3 files changed, 83 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f1e01421e413fb..2684c44b9373b9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -814,8 +814,7 @@ impl<'a> LoadedAccountAccessor<'a> { // from the storage map after we grabbed the storage entry, the recycler should not // reset the storage entry until we drop the reference to the storage entry. maybe_storage_entry - .get_stored_account_meta(*offset) - .map(|account| account.to_account_shared_data()) + .get_stored_account(*offset) .expect("If a storage entry was found in the storage map, it must not have been reset yet") } _ => self.check_and_get_loaded_account().take_account(), @@ -1150,6 +1149,10 @@ impl AccountStorageEntry { Some(self.accounts.get_account(offset)?.0) } + fn get_stored_account(&self, offset: usize) -> Option { + self.accounts.get_stored_account(offset) + } + fn add_account(&self, num_bytes: usize) { let mut count_and_status = self.count_and_status.lock_write(); *count_and_status = (count_and_status.0 + 1, count_and_status.1); diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index e77ef9eb4568f9..5b34698c2a733c 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -12,7 +12,11 @@ use { error::TieredStorageError, hot::HOT_FORMAT, index::IndexOffset, TieredStorage, }, }, - solana_sdk::{account::ReadableAccount, clock::Slot, pubkey::Pubkey}, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + clock::Slot, + pubkey::Pubkey, + }, std::{borrow::Borrow, mem, path::PathBuf}, thiserror::Error, }; @@ -133,6 +137,14 @@ impl AccountsFile { } } + /// return an `AccountSharedData` for an account at `offset`, if any. Otherwise return None. 
+ pub(crate) fn get_stored_account(&self, offset: usize) -> Option<AccountSharedData> { + match self { + Self::AppendVec(av) => av.get_stored_account(offset), + Self::TieredStorage(_) => unimplemented!(), + } + } + pub fn account_matches_owners( &self, offset: usize, diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 5e26bf1849b832..125bd1925190f0 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -18,7 +18,7 @@ use { log::*, memmap2::MmapMut, solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, + account::{AccountSharedData, ReadableAccount, WritableAccount}, clock::Slot, pubkey::Pubkey, stake_history::Epoch, @@ -516,6 +516,24 @@ impl AppendVec { )) } + /// return an `AccountSharedData` for an account at `offset`. + /// This fn can efficiently return exactly what is needed by a caller. + pub(crate) fn get_stored_account(&self, offset: usize) -> Option<AccountSharedData> { + let (meta, next) = self.get_type::<StoredMeta>(offset)?; + let (account_meta, next) = self.get_type::<AccountMeta>(next)?; + let next = next + std::mem::size_of::<AccountHash>(); + let (data, _next) = self.get_slice(next, meta.data_len as usize)?; + Some(AccountSharedData::create( + account_meta.lamports, + data.to_vec(), + account_meta.owner, + account_meta.executable, + account_meta.rent_epoch, + )) + } + + /// note this fn can return account meta for an account whose fields have been truncated (i.e. if `len` isn't long enough.) + /// This fn doesn't even load the data_len field, so this fn does not know how big `len` needs to be. fn get_account_meta(&self, offset: usize) -> Option<&AccountMeta> { // Skip over StoredMeta data in the account let offset = offset.checked_add(mem::size_of::<StoredMeta>())?; @@ -552,7 +570,21 @@ impl AppendVec { &self, offset: usize, ) -> Option<(StoredMeta, solana_sdk::account::AccountSharedData)> { - let (stored_account, _) = self.get_account(offset)?; + let r1 = self.get_account(offset); + let r2 = self.get_stored_account(offset); + let r3 = self.get_account_meta(offset); + if r1.is_some() { + // r3 can return Some when r1 and r2 do not + assert!(r3.is_some()); + } + if let Some(r2) = r2.as_ref() { + let meta = r3.unwrap(); + assert_eq!(meta.executable, r2.executable()); + assert_eq!(meta.owner, *r2.owner()); + assert_eq!(meta.lamports, r2.lamports()); + assert_eq!(meta.rent_epoch, r2.rent_epoch()); + } + let (stored_account, _) = r1?; + let meta = stored_account.meta().clone(); Some((meta, stored_account.to_account_shared_data())) } @@ -1044,6 +1076,37 @@ pub mod tests { let account = create_test_account(0); let index = av.append_account_test(&account).unwrap(); assert_eq!(av.get_account_test(index).unwrap(), account); + truncate_and_test(av, index); } + /// truncate `av` and make sure that we fail to get an account. This verifies that the eof + /// code is working correctly. + fn truncate_and_test(av: AppendVec, index: usize) { + // truncate the hash, 1 byte at a time + let hash = std::mem::size_of::<AccountHash>(); + for _ in 0..hash { + av.current_len.fetch_sub(1, Ordering::Relaxed); + assert_eq!(av.get_account_test(index), None); + } + // truncate 1 byte into the AccountMeta + av.current_len.fetch_sub(1, Ordering::Relaxed); + assert_eq!(av.get_account_test(index), None); + } + + #[test] + fn test_append_vec_one_with_data() { + let path = get_append_vec_path("test_append"); + let av = AppendVec::new(&path.path, true, 1024 * 1024); + let data_len = 1; + let account = create_test_account(data_len); + let index = av.append_account_test(&account).unwrap(); + // make the append vec 1 byte too short.
we should get `None` since the append vec was truncated + assert_eq!( + STORE_META_OVERHEAD + data_len, + av.current_len.load(Ordering::Relaxed) + ); + assert_eq!(av.get_account_test(index).unwrap(), account); + truncate_and_test(av, index); } #[test] From 64765bf817d220c13c7d9939c645779c1d8e2ba2 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 2 Apr 2024 11:59:03 -0500 Subject: [PATCH 140/153] Introduce NodeConfig for parameters to Node type (#533) The parameter list is already kind of long, so squash the parameters into a config struct --- gossip/src/cluster_info.rs | 60 ++++++++++++++++++++++---------------- validator/src/main.rs | 22 ++++++++------ 2 files changed, 48 insertions(+), 34 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 783f8a067d7614..7d737d313eeae7 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2786,6 +2786,14 @@ pub struct Sockets { pub tpu_forwards_quic: UdpSocket, } +pub struct NodeConfig { + pub gossip_addr: SocketAddr, + pub port_range: PortRange, + pub bind_ip_addr: IpAddr, + pub public_tpu_addr: Option, + pub public_tpu_forwards_addr: Option, +} + #[derive(Debug)] pub struct Node { pub info: ContactInfo, @@ -2978,16 +2986,17 @@ impl Node { } } - pub fn new_with_external_ip( - pubkey: &Pubkey, - gossip_addr: &SocketAddr, - port_range: PortRange, - bind_ip_addr: IpAddr, - public_tpu_addr: Option, - public_tpu_forwards_addr: Option, - ) -> Node { + pub fn new_with_external_ip(pubkey: &Pubkey, config: NodeConfig) -> Node { + let NodeConfig { + gossip_addr, + port_range, + bind_ip_addr, + public_tpu_addr, + public_tpu_forwards_addr, + } = config; + let (gossip_port, (gossip, ip_echo)) = - Self::get_gossip_port(gossip_addr, port_range, bind_ip_addr); + Self::get_gossip_port(&gossip_addr, port_range, bind_ip_addr); let (tvu_port, tvu_sockets) = multi_bind_in_range(bind_ip_addr, port_range, 8).expect("tvu multi_bind"); @@ -3593,14 +3602,15 @@ mod tests { #[test] fn new_with_external_ip_test_random() { let ip = Ipv4Addr::LOCALHOST; - let node = Node::new_with_external_ip( - &solana_sdk::pubkey::new_rand(), - &socketaddr!(ip, 0), - VALIDATOR_PORT_RANGE, - IpAddr::V4(ip), - None, - None, - ); + let config = NodeConfig { + gossip_addr: socketaddr!(ip, 0), + port_range: VALIDATOR_PORT_RANGE, + bind_ip_addr: IpAddr::V4(ip), + public_tpu_addr: None, + public_tpu_forwards_addr: None, + }; + + let node = Node::new_with_external_ip(&solana_sdk::pubkey::new_rand(), config); check_node_sockets(&node, IpAddr::V4(ip), VALIDATOR_PORT_RANGE); } @@ -3613,17 +3623,17 @@ mod tests { VALIDATOR_PORT_RANGE.1 + MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE.1 + (2 * MINIMUM_VALIDATOR_PORT_RANGE_WIDTH), ); - let ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let port = bind_in_range(ip, port_range).expect("Failed to bind").0; - let node = Node::new_with_external_ip( - &solana_sdk::pubkey::new_rand(), - &socketaddr!(Ipv4Addr::LOCALHOST, port), + let config = NodeConfig { + gossip_addr: socketaddr!(Ipv4Addr::LOCALHOST, port), port_range, - ip, - None, - None, - ); + bind_ip_addr: ip, + public_tpu_addr: None, + public_tpu_forwards_addr: None, + }; + + let node = Node::new_with_external_ip(&solana_sdk::pubkey::new_rand(), config); check_node_sockets(&node, ip, port_range); diff --git a/validator/src/main.rs b/validator/src/main.rs index 151281bc8ae874..c8494221d614b0 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -36,7 +36,10 @@ use { ValidatorConfig, ValidatorStartProgress, }, }, - 
solana_gossip::{cluster_info::Node, legacy_contact_info::LegacyContactInfo as ContactInfo}, + solana_gossip::{ + cluster_info::{Node, NodeConfig}, + legacy_contact_info::LegacyContactInfo as ContactInfo, + }, solana_ledger::{ blockstore_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS}, blockstore_options::{ @@ -1844,19 +1847,20 @@ pub fn main() { }) }); + let node_config = NodeConfig { + gossip_addr, + port_range: dynamic_port_range, + bind_ip_addr: bind_address, + public_tpu_addr, + public_tpu_forwards_addr, + }; + let cluster_entrypoints = entrypoint_addrs .iter() .map(ContactInfo::new_gossip_entry_point) .collect::>(); - let mut node = Node::new_with_external_ip( - &identity_keypair.pubkey(), - &gossip_addr, - dynamic_port_range, - bind_address, - public_tpu_addr, - public_tpu_forwards_addr, - ); + let mut node = Node::new_with_external_ip(&identity_keypair.pubkey(), node_config); if restricted_repair_only_mode { // When in --restricted_repair_only_mode is enabled only the gossip and repair ports From a468ff299949a9e15fb1afed317d2f86c8a27762 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 2 Apr 2024 10:02:10 -0700 Subject: [PATCH 141/153] vote: add TowerSync ix (#365) * vote: add TowerSync ix * fork_id -> block_id --- core/src/consensus.rs | 13 ++- core/src/consensus/tower1_14_11.rs | 2 +- programs/vote/src/vote_processor.rs | 13 ++- programs/vote/src/vote_state/mod.rs | 18 ++- sdk/program/src/vote/instruction.rs | 63 ++++++++++- sdk/program/src/vote/state/mod.rs | 157 +++++++++++++++++++++++++++ sdk/src/feature_set.rs | 5 + transaction-status/src/parse_vote.rs | 37 +++++++ vote/src/vote_parser.rs | 4 + vote/src/vote_transaction.rs | 14 ++- 10 files changed, 318 insertions(+), 8 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index d4f2345aa14ab8..b9a65160328d56 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -97,6 +97,9 @@ impl SwitchForkDecision { v, )) } + (SwitchForkDecision::SameFork, VoteTransaction::TowerSync(t)) => Some( + vote_instruction::tower_sync(vote_account_pubkey, authorized_voter_pubkey, t), + ), (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::Vote(v)) => { Some(vote_instruction::vote_switch( vote_account_pubkey, @@ -114,6 +117,14 @@ impl SwitchForkDecision { v, *switch_proof_hash, )), + (SwitchForkDecision::SwitchProof(switch_proof_hash), VoteTransaction::TowerSync(t)) => { + Some(vote_instruction::tower_sync_switch( + vote_account_pubkey, + authorized_voter_pubkey, + t, + *switch_proof_hash, + )) + } (SwitchForkDecision::SameFork, VoteTransaction::CompactVoteStateUpdate(v)) => { Some(vote_instruction::compact_update_vote_state( vote_account_pubkey, @@ -221,7 +232,7 @@ pub(crate) enum BlockhashStatus { Blockhash(Hash), } -#[frozen_abi(digest = "iZi6s9BvytU3HbRsibrAD71jwMLvrqHdCjVk6qKcVvd")] +#[frozen_abi(digest = "679XkZ4upGc389SwqAsjs5tr2qB4wisqjbwtei7fGhxC")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower { pub node_pubkey: Pubkey, diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs index 22c396e0975e59..fe9a5b40b20517 100644 --- a/core/src/consensus/tower1_14_11.rs +++ b/core/src/consensus/tower1_14_11.rs @@ -6,7 +6,7 @@ use { }, }; -#[frozen_abi(digest = "F83xHQA1wxoFDy25MTKXXmFXTc9Jbp6SXRXEPcehtKbQ")] +#[frozen_abi(digest = "4LayQwoKrE2jPhbNtg3TSpKrtEtjcPiwsVPJN7aCavri")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower1_14_11 { pub(crate) node_pubkey: 
Pubkey, diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 443aeb391b8c13..72309a26ca2b55 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -9,6 +9,7 @@ use { sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ + feature_set, instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, @@ -192,7 +193,17 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &invoke_context.feature_set, ) } - + VoteInstruction::TowerSync(_tower_sync) + | VoteInstruction::TowerSyncSwitch(_tower_sync, _) => { + if !invoke_context + .feature_set + .is_active(&feature_set::enable_tower_sync_ix::id()) + { + return Err(InstructionError::InvalidInstructionData); + } + // TODO: will fill in future PR + return Err(InstructionError::InvalidInstructionData); + } VoteInstruction::Withdraw(lamports) => { instruction_context.check_number_of_instruction_accounts(2)?; let rent_sysvar = invoke_context.get_sysvar_cache().get_rent()?; diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index f5901374d9b6d9..b600ed5528c799 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -28,13 +28,15 @@ use { }, }; -#[frozen_abi(digest = "2AuJFjx7SYrJ2ugCfH1jFh3Lr9UHMEPfKwwk1NcjqND1")] +#[frozen_abi(digest = "EcS3xgfomytEAQ1eVd8R76ZejwyHp2Ed8dHqQWh6zi5v")] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, AbiEnumVisitor, AbiExample)] pub enum VoteTransaction { Vote(Vote), VoteStateUpdate(VoteStateUpdate), #[serde(with = "serde_compact_vote_state_update")] CompactVoteStateUpdate(VoteStateUpdate), + #[serde(with = "serde_tower_sync")] + TowerSync(TowerSync), } impl VoteTransaction { @@ -43,6 +45,7 @@ impl VoteTransaction { VoteTransaction::Vote(vote) => vote.slots.clone(), VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.slots(), VoteTransaction::CompactVoteStateUpdate(vote_state_update) => vote_state_update.slots(), + VoteTransaction::TowerSync(tower_sync) => tower_sync.slots(), } } @@ -53,6 +56,7 @@ impl VoteTransaction { | VoteTransaction::CompactVoteStateUpdate(vote_state_update) => { vote_state_update.lockouts[i].slot() } + VoteTransaction::TowerSync(tower_sync) => tower_sync.lockouts[i].slot(), } } @@ -63,6 +67,7 @@ impl VoteTransaction { | VoteTransaction::CompactVoteStateUpdate(vote_state_update) => { vote_state_update.lockouts.len() } + VoteTransaction::TowerSync(tower_sync) => tower_sync.lockouts.len(), } } @@ -73,6 +78,7 @@ impl VoteTransaction { | VoteTransaction::CompactVoteStateUpdate(vote_state_update) => { vote_state_update.lockouts.is_empty() } + VoteTransaction::TowerSync(tower_sync) => tower_sync.lockouts.is_empty(), } } @@ -81,6 +87,7 @@ impl VoteTransaction { VoteTransaction::Vote(vote) => vote.hash, VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.hash, VoteTransaction::CompactVoteStateUpdate(vote_state_update) => vote_state_update.hash, + VoteTransaction::TowerSync(tower_sync) => tower_sync.hash, } } @@ -91,6 +98,7 @@ impl VoteTransaction { | VoteTransaction::CompactVoteStateUpdate(vote_state_update) => { vote_state_update.timestamp } + VoteTransaction::TowerSync(tower_sync) => tower_sync.timestamp, } } @@ -101,6 +109,7 @@ impl VoteTransaction { | VoteTransaction::CompactVoteStateUpdate(vote_state_update) => { vote_state_update.timestamp = ts } + VoteTransaction::TowerSync(tower_sync) => tower_sync.timestamp = ts, } } @@ -111,6 +120,7 @@ 
impl VoteTransaction { | VoteTransaction::CompactVoteStateUpdate(vote_state_update) => { vote_state_update.last_voted_slot() } + VoteTransaction::TowerSync(tower_sync) => tower_sync.last_voted_slot(), } } @@ -131,6 +141,12 @@ impl From for VoteTransaction { } } +impl From for VoteTransaction { + fn from(tower_sync: TowerSync) -> Self { + VoteTransaction::TowerSync(tower_sync) + } +} + // utility function, used by Stakes, tests pub fn from(account: &T) -> Option { VoteState::deserialize(account.data()).ok() diff --git a/sdk/program/src/vote/instruction.rs b/sdk/program/src/vote/instruction.rs index 568472ed5313e7..2c4cb4157f5721 100644 --- a/sdk/program/src/vote/instruction.rs +++ b/sdk/program/src/vote/instruction.rs @@ -1,6 +1,7 @@ //! Vote program instructions use { + super::state::TowerSync, crate::{ clock::{Slot, UnixTimestamp}, hash::Hash, @@ -10,7 +11,7 @@ use { vote::{ program::id, state::{ - serde_compact_vote_state_update, Vote, VoteAuthorize, + serde_compact_vote_state_update, serde_tower_sync, Vote, VoteAuthorize, VoteAuthorizeCheckedWithSeedArgs, VoteAuthorizeWithSeedArgs, VoteInit, VoteStateUpdate, VoteStateVersions, }, @@ -146,6 +147,21 @@ pub enum VoteInstruction { #[serde(with = "serde_compact_vote_state_update")] VoteStateUpdate, Hash, ), + + /// Sync the onchain vote state with local tower + /// + /// # Account references + /// 0. `[Write]` Vote account to vote with + /// 1. `[SIGNER]` Vote authority + #[serde(with = "serde_tower_sync")] + TowerSync(TowerSync), + + /// Sync the onchain vote state with local tower along with a switching proof + /// + /// # Account references + /// 0. `[Write]` Vote account to vote with + /// 1. `[SIGNER]` Vote authority + TowerSyncSwitch(#[serde(with = "serde_tower_sync")] TowerSync, Hash), } impl VoteInstruction { @@ -157,7 +173,9 @@ impl VoteInstruction { | Self::UpdateVoteState(_) | Self::UpdateVoteStateSwitch(_, _) | Self::CompactUpdateVoteState(_) - | Self::CompactUpdateVoteStateSwitch(_, _), + | Self::CompactUpdateVoteStateSwitch(_, _) + | Self::TowerSync(_) + | Self::TowerSyncSwitch(_, _), ) } @@ -167,7 +185,9 @@ impl VoteInstruction { Self::UpdateVoteState(_) | Self::UpdateVoteStateSwitch(_, _) | Self::CompactUpdateVoteState(_) - | Self::CompactUpdateVoteStateSwitch(_, _), + | Self::CompactUpdateVoteStateSwitch(_, _) + | Self::TowerSync(_) + | Self::TowerSyncSwitch(_, _), ) } @@ -182,6 +202,9 @@ impl VoteInstruction { | Self::CompactUpdateVoteStateSwitch(vote_state_update, _) => { vote_state_update.last_voted_slot() } + Self::TowerSync(tower_sync) | Self::TowerSyncSwitch(tower_sync, _) => { + tower_sync.last_voted_slot() + } _ => panic!("Tried to get slot on non simple vote instruction"), } } @@ -197,6 +220,9 @@ impl VoteInstruction { | Self::CompactUpdateVoteStateSwitch(vote_state_update, _) => { vote_state_update.timestamp } + Self::TowerSync(tower_sync) | Self::TowerSyncSwitch(tower_sync, _) => { + tower_sync.timestamp + } _ => panic!("Tried to get timestamp on non simple vote instruction"), } } @@ -514,6 +540,37 @@ pub fn compact_update_vote_state_switch( ) } +pub fn tower_sync( + vote_pubkey: &Pubkey, + authorized_voter_pubkey: &Pubkey, + tower_sync: TowerSync, +) -> Instruction { + let account_metas = vec![ + AccountMeta::new(*vote_pubkey, false), + AccountMeta::new_readonly(*authorized_voter_pubkey, true), + ]; + + Instruction::new_with_bincode(id(), &VoteInstruction::TowerSync(tower_sync), account_metas) +} + +pub fn tower_sync_switch( + vote_pubkey: &Pubkey, + authorized_voter_pubkey: &Pubkey, + tower_sync: TowerSync, + 
proof_hash: Hash, +) -> Instruction { + let account_metas = vec![ + AccountMeta::new(*vote_pubkey, false), + AccountMeta::new_readonly(*authorized_voter_pubkey, true), + ]; + + Instruction::new_with_bincode( + id(), + &VoteInstruction::TowerSyncSwitch(tower_sync, proof_hash), + account_metas, + ) +} + pub fn withdraw( vote_pubkey: &Pubkey, authorized_withdrawer_pubkey: &Pubkey, diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index d22d5814c2ebd2..9f7bf19eadc98c 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -207,6 +207,66 @@ impl VoteStateUpdate { } } +#[frozen_abi(digest = "5VUusSTenF9vZ9eHiCprVe9ABJUHCubeDNCCDxykybZY")] +#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] +pub struct TowerSync { + /// The proposed tower + pub lockouts: VecDeque, + /// The proposed root + pub root: Option, + /// signature of the bank's state at the last slot + pub hash: Hash, + /// processing timestamp of last slot + pub timestamp: Option, + /// the unique identifier for the chain up to and + /// including this block. Does not require replaying + /// in order to compute. + pub block_id: Hash, +} + +impl From> for TowerSync { + fn from(recent_slots: Vec<(Slot, u32)>) -> Self { + let lockouts: VecDeque = recent_slots + .into_iter() + .map(|(slot, confirmation_count)| { + Lockout::new_with_confirmation_count(slot, confirmation_count) + }) + .collect(); + Self { + lockouts, + root: None, + hash: Hash::default(), + timestamp: None, + block_id: Hash::default(), + } + } +} + +impl TowerSync { + pub fn new( + lockouts: VecDeque, + root: Option, + hash: Hash, + block_id: Hash, + ) -> Self { + Self { + lockouts, + root, + hash, + timestamp: None, + block_id, + } + } + + pub fn slots(&self) -> Vec { + self.lockouts.iter().map(|lockout| lockout.slot()).collect() + } + + pub fn last_voted_slot(&self) -> Option { + self.lockouts.back().map(|l| l.slot()) + } +} + #[derive(Default, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] pub struct VoteInit { pub node_pubkey: Pubkey, @@ -904,6 +964,103 @@ pub mod serde_compact_vote_state_update { } } +pub mod serde_tower_sync { + use { + super::*, + crate::{ + clock::{Slot, UnixTimestamp}, + serde_varint, short_vec, + vote::state::Lockout, + }, + serde::{Deserialize, Deserializer, Serialize, Serializer}, + }; + + #[derive(Deserialize, Serialize, AbiExample)] + struct LockoutOffset { + #[serde(with = "serde_varint")] + offset: Slot, + confirmation_count: u8, + } + + #[derive(Deserialize, Serialize)] + struct CompactTowerSync { + root: Slot, + #[serde(with = "short_vec")] + lockout_offsets: Vec, + hash: Hash, + timestamp: Option, + block_id: Hash, + } + + pub fn serialize(tower_sync: &TowerSync, serializer: S) -> Result + where + S: Serializer, + { + let lockout_offsets = tower_sync.lockouts.iter().scan( + tower_sync.root.unwrap_or_default(), + |slot, lockout| { + let Some(offset) = lockout.slot().checked_sub(*slot) else { + return Some(Err(serde::ser::Error::custom("Invalid vote lockout"))); + }; + let Ok(confirmation_count) = u8::try_from(lockout.confirmation_count()) else { + return Some(Err(serde::ser::Error::custom("Invalid confirmation count"))); + }; + let lockout_offset = LockoutOffset { + offset, + confirmation_count, + }; + *slot = lockout.slot(); + Some(Ok(lockout_offset)) + }, + ); + let compact_tower_sync = CompactTowerSync { + root: tower_sync.root.unwrap_or(Slot::MAX), + lockout_offsets: lockout_offsets.collect::>()?, + hash: tower_sync.hash, + 
timestamp: tower_sync.timestamp, + block_id: tower_sync.block_id, + }; + compact_tower_sync.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let CompactTowerSync { + root, + lockout_offsets, + hash, + timestamp, + block_id, + } = CompactTowerSync::deserialize(deserializer)?; + let root = (root != Slot::MAX).then_some(root); + let lockouts = + lockout_offsets + .iter() + .scan(root.unwrap_or_default(), |slot, lockout_offset| { + *slot = match slot.checked_add(lockout_offset.offset) { + None => { + return Some(Err(serde::de::Error::custom("Invalid lockout offset"))) + } + Some(slot) => slot, + }; + let lockout = Lockout::new_with_confirmation_count( + *slot, + u32::from(lockout_offset.confirmation_count), + ); + Some(Ok(lockout)) + }); + Ok(TowerSync { + root, + lockouts: lockouts.collect::>()?, + hash, + timestamp, + block_id, + }) + } +} + #[cfg(test)] mod tests { use {super::*, itertools::Itertools, rand::Rng}; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index cd60ee536ea552..a162ea852aebf7 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -781,6 +781,10 @@ pub mod remove_rounding_in_fee_calculation { solana_sdk::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); } +pub mod enable_tower_sync_ix { + solana_sdk::declare_id!("tSynMCspg4xFiCj1v3TDb4c7crMR5tSBhLz4sF7rrNA"); +} + pub mod deprecate_unused_legacy_vote_plumbing { solana_sdk::declare_id!("6Uf8S75PVh91MYgPQSHnjRAPQq6an5BDv9vomrCwDqLe"); } @@ -976,6 +980,7 @@ lazy_static! { (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"), (remove_rounding_in_fee_calculation::id(), "Removing unwanted rounding in fee calculation #34982"), (deprecate_unused_legacy_vote_plumbing::id(), "Deprecate unused legacy vote tx plumbing"), + (enable_tower_sync_ix::id(), "Enable tower sync vote instruction"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/transaction-status/src/parse_vote.rs b/transaction-status/src/parse_vote.rs index b84a7dab636d4d..8416d0f279ae61 100644 --- a/transaction-status/src/parse_vote.rs +++ b/transaction-status/src/parse_vote.rs @@ -171,6 +171,43 @@ pub fn parse_vote( }), }) } + VoteInstruction::TowerSync(tower_sync) => { + check_num_vote_accounts(&instruction.accounts, 2)?; + let tower_sync = json!({ + "lockouts": tower_sync.lockouts, + "root": tower_sync.root, + "hash": tower_sync.hash.to_string(), + "timestamp": tower_sync.timestamp, + "blockId": tower_sync.block_id, + }); + Ok(ParsedInstructionEnum { + instruction_type: "towersync".to_string(), + info: json!({ + "voteAccount": account_keys[instruction.accounts[0] as usize].to_string(), + "voteAuthority": account_keys[instruction.accounts[1] as usize].to_string(), + "towerSync": tower_sync, + }), + }) + } + VoteInstruction::TowerSyncSwitch(tower_sync, hash) => { + check_num_vote_accounts(&instruction.accounts, 2)?; + let tower_sync = json!({ + "lockouts": tower_sync.lockouts, + "root": tower_sync.root, + "hash": tower_sync.hash.to_string(), + "timestamp": tower_sync.timestamp, + "blockId": tower_sync.block_id, + }); + Ok(ParsedInstructionEnum { + instruction_type: "towersyncswitch".to_string(), + info: json!({ + "voteAccount": account_keys[instruction.accounts[0] as usize].to_string(), + "voteAuthority": account_keys[instruction.accounts[1] as usize].to_string(), + "towerSync": tower_sync, + "hash": hash.to_string(), + }), + }) + } VoteInstruction::Withdraw(lamports) => { 
check_num_vote_accounts(&instruction.accounts, 3)?; Ok(ParsedInstructionEnum { diff --git a/vote/src/vote_parser.rs b/vote/src/vote_parser.rs index 5ca00fa9445ed1..318d01564e8fdd 100644 --- a/vote/src/vote_parser.rs +++ b/vote/src/vote_parser.rs @@ -62,6 +62,10 @@ fn parse_vote_instruction_data( VoteInstruction::CompactUpdateVoteStateSwitch(vote_state_update, hash) => { Some((VoteTransaction::from(vote_state_update), Some(hash))) } + VoteInstruction::TowerSync(tower_sync) => Some((VoteTransaction::from(tower_sync), None)), + VoteInstruction::TowerSyncSwitch(tower_sync, hash) => { + Some((VoteTransaction::from(tower_sync), Some(hash))) + } VoteInstruction::Authorize(_, _) | VoteInstruction::AuthorizeChecked(_) | VoteInstruction::AuthorizeWithSeed(_) diff --git a/vote/src/vote_transaction.rs b/vote/src/vote_transaction.rs index fed2d730a0a177..c9ff76c7f84ee8 100644 --- a/vote/src/vote_transaction.rs +++ b/vote/src/vote_transaction.rs @@ -3,13 +3,14 @@ use { clock::{Slot, UnixTimestamp}, hash::Hash, }, - solana_vote_program::vote_state::{Vote, VoteStateUpdate}, + solana_vote_program::vote_state::{TowerSync, Vote, VoteStateUpdate}, }; #[derive(Debug, PartialEq, Eq, Clone)] pub enum VoteTransaction { Vote(Vote), VoteStateUpdate(VoteStateUpdate), + TowerSync(TowerSync), } impl VoteTransaction { @@ -21,6 +22,7 @@ impl VoteTransaction { .iter() .map(|lockout| lockout.slot()) .collect(), + VoteTransaction::TowerSync(tower_sync) => tower_sync.slots(), } } @@ -30,6 +32,7 @@ impl VoteTransaction { VoteTransaction::VoteStateUpdate(vote_state_update) => { vote_state_update.lockouts.is_empty() } + VoteTransaction::TowerSync(tower_sync) => tower_sync.lockouts.is_empty(), } } @@ -37,6 +40,7 @@ impl VoteTransaction { match self { VoteTransaction::Vote(vote) => vote.hash, VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.hash, + VoteTransaction::TowerSync(tower_sync) => tower_sync.hash, } } @@ -44,6 +48,7 @@ impl VoteTransaction { match self { VoteTransaction::Vote(vote) => vote.timestamp, VoteTransaction::VoteStateUpdate(vote_state_update) => vote_state_update.timestamp, + VoteTransaction::TowerSync(tower_sync) => tower_sync.timestamp, } } @@ -53,6 +58,7 @@ impl VoteTransaction { VoteTransaction::VoteStateUpdate(vote_state_update) => { Some(vote_state_update.lockouts.back()?.slot()) } + VoteTransaction::TowerSync(tower_sync) => tower_sync.last_voted_slot(), } } @@ -72,3 +78,9 @@ impl From for VoteTransaction { VoteTransaction::VoteStateUpdate(vote_state_update) } } + +impl From for VoteTransaction { + fn from(tower_sync: TowerSync) -> Self { + VoteTransaction::TowerSync(tower_sync) + } +} From 9f8a478c519e5d28d51f7d845a4e405f8b753f06 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Tue, 2 Apr 2024 14:36:13 -0300 Subject: [PATCH 142/153] Use `&str` instead of `String` in `add_builtin` (#539) --- program-test/src/lib.rs | 10 +++++----- programs/sbf/tests/programs.rs | 2 +- runtime/src/bank.rs | 12 ++++++------ runtime/src/bank/builtins/core_bpf_migration/mod.rs | 2 +- runtime/src/bank/tests.rs | 6 +++--- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 8b786aa7962694..ee9b4fdef5c37c 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -463,7 +463,7 @@ pub fn read_file>(path: P) -> Vec { pub struct ProgramTest { accounts: Vec<(Pubkey, AccountSharedData)>, - builtin_programs: Vec<(Pubkey, String, LoadedProgram)>, + builtin_programs: Vec<(Pubkey, 
&'static str, LoadedProgram)>, compute_max_units: Option, prefer_bpf: bool, deactivate_feature_set: HashSet, @@ -513,7 +513,7 @@ impl ProgramTest { /// [`default`]: #method.default /// [`add_program`]: #method.add_program pub fn new( - program_name: &str, + program_name: &'static str, program_id: Pubkey, builtin_function: Option, ) -> Self { @@ -613,7 +613,7 @@ impl ProgramTest { /// SBF shared object depending on the `BPF_OUT_DIR` environment variable. pub fn add_program( &mut self, - program_name: &str, + program_name: &'static str, program_id: Pubkey, builtin_function: Option, ) { @@ -720,14 +720,14 @@ impl ProgramTest { /// Note that builtin programs are responsible for their own `stable_log` output. pub fn add_builtin_program( &mut self, - program_name: &str, + program_name: &'static str, program_id: Pubkey, builtin_function: BuiltinFunctionWithContext, ) { info!("\"{}\" builtin program", program_name); self.builtin_programs.push(( program_id, - program_name.to_string(), + program_name, LoadedProgram::new_builtin(0, program_name.len(), builtin_function), )); } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 22969bc482a28e..187c06e31dba9a 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -4316,7 +4316,7 @@ fn test_cpi_change_account_data_memory_allocation() { let builtin_program_id = Pubkey::new_unique(); bank.add_builtin( builtin_program_id, - "test_cpi_change_account_data_memory_allocation_builtin".to_string(), + "test_cpi_change_account_data_memory_allocation_builtin", LoadedProgram::new_builtin(0, 42, MockBuiltin::vm), ); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 29c01da7e66dad..1b8c6701957196 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5865,7 +5865,7 @@ impl Bank { if builtin.enable_feature_id.is_none() { self.add_builtin( builtin.program_id, - builtin.name.to_string(), + builtin.name, LoadedProgram::new_builtin(0, builtin.name.len(), builtin.entrypoint), ); } @@ -6952,15 +6952,15 @@ impl Bank { ) { self.add_builtin( program_id, - "mockup".to_string(), + "mockup", LoadedProgram::new_builtin(self.slot, 0, builtin_function), ); } /// Add a built-in program - pub fn add_builtin(&mut self, program_id: Pubkey, name: String, builtin: LoadedProgram) { + pub fn add_builtin(&mut self, program_id: Pubkey, name: &str, builtin: LoadedProgram) { debug!("Adding program {} under {:?}", name, program_id); - self.add_builtin_account(name.as_str(), &program_id, false); + self.add_builtin_account(name, &program_id, false); self.builtin_program_ids.insert(program_id); self.transaction_processor .program_cache @@ -6971,7 +6971,7 @@ impl Bank { } /// Remove a built-in instruction processor - pub fn remove_builtin(&mut self, program_id: Pubkey, name: String) { + pub fn remove_builtin(&mut self, program_id: Pubkey, name: &str) { debug!("Removing program {}", program_id); // Don't remove the account since the bank expects the account state to // be idempotent @@ -7223,7 +7223,7 @@ impl Bank { if should_apply_action_for_feature_transition { self.add_builtin( builtin.program_id, - builtin.name.to_string(), + builtin.name, LoadedProgram::new_builtin( self.feature_set.activated_slot(&feature_id).unwrap_or(0), builtin.name.len(), diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 4f2ebf56f5000d..261dc4f65b9183 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs 
@@ -468,7 +468,7 @@ mod tests { let account = AccountSharedData::new_data(1, &builtin_name, &native_loader::id()).unwrap(); bank.store_account_and_update_capitalization(&builtin_id, &account); - bank.add_builtin(builtin_id, builtin_name, LoadedProgram::default()); + bank.add_builtin(builtin_id, builtin_name.as_str(), LoadedProgram::default()); account }; assert_eq!(&bank.get_account(&builtin_id).unwrap(), &builtin_account); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 5b89de28688e94..16114f7b7593b3 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -4709,12 +4709,12 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { bank.add_builtin( vote_id, - "mock_program1".to_string(), + "mock_program1", LoadedProgram::new_builtin(0, 0, MockBuiltin::vm), ); bank.add_builtin( stake_id, - "mock_program2".to_string(), + "mock_program2", LoadedProgram::new_builtin(0, 0, MockBuiltin::vm), ); { @@ -6303,7 +6303,7 @@ fn test_fuzz_instructions() { let name = format!("program{i:?}"); bank.add_builtin( key, - name.clone(), + name.as_str(), LoadedProgram::new_builtin(0, 0, MockBuiltin::vm), ); (key, name.as_bytes().to_vec()) From 9ea627c16d7719227d50eb5ecf93f8698c688d4f Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 2 Apr 2024 13:10:18 -0500 Subject: [PATCH 143/153] Recompute hash on load if default hash is stored for the account (#519) recompute hash on load if default hash is stored for the account Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2684c44b9373b9..53b8d1346ba2ec 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7607,7 +7607,10 @@ impl AccountsDb { Some((*loaded_account.pubkey(), loaded_account.loaded_hash())) }, |accum: &DashMap, loaded_account: LoadedAccount| { - let loaded_hash = loaded_account.loaded_hash(); + let mut loaded_hash = loaded_account.loaded_hash(); + if loaded_hash == AccountHash(Hash::default()) { + loaded_hash = Self::hash_account(&loaded_account, loaded_account.pubkey()) + } accum.insert(*loaded_account.pubkey(), loaded_hash); }, ); @@ -7639,9 +7642,13 @@ impl AccountsDb { |accum: &DashMap, loaded_account: LoadedAccount| { // Storage may have duplicates so only keep the latest version for each key + let mut loaded_hash = loaded_account.loaded_hash(); + if loaded_hash == AccountHash(Hash::default()) { + loaded_hash = Self::hash_account(&loaded_account, loaded_account.pubkey()) + } accum.insert( *loaded_account.pubkey(), - (loaded_account.loaded_hash(), loaded_account.take_account()), + (loaded_hash, loaded_account.take_account()), ); }, ); From ccb09866b59917bb01918299b7c4d2adfa313781 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 2 Apr 2024 13:25:43 -0500 Subject: [PATCH 144/153] stop requiring data allocation to check for rent (#543) --- accounts-db/src/accounts_db.rs | 41 +++++++++++++++++++++++----------- runtime/src/bank.rs | 16 ++++++++++--- sdk/src/rent_collector.rs | 27 +++++++++++++--------- svm/src/account_loader.rs | 6 ++++- 4 files changed, 62 insertions(+), 28 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 53b8d1346ba2ec..3e9eaa144545f9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8608,20 +8608,25 @@ impl AccountsDb { } /// return Some(lamports_to_top_off) if 'account' would collect rent - fn 
stats_for_rent_payers( + fn stats_for_rent_payers( pubkey: &Pubkey, - account: &T, + lamports: u64, + account_data_len: usize, + account_rent_epoch: Epoch, + executable: bool, rent_collector: &RentCollector, ) -> Option { - if account.lamports() == 0 { + if lamports == 0 { return None; } - (rent_collector.should_collect_rent(pubkey, account) - && !rent_collector.get_rent_due(account).is_exempt()) + (rent_collector.should_collect_rent(pubkey, executable) + && !rent_collector + .get_rent_due(lamports, account_data_len, account_rent_epoch) + .is_exempt()) .then(|| { - let min_balance = rent_collector.rent.minimum_balance(account.data().len()); + let min_balance = rent_collector.rent.minimum_balance(account_data_len); // return lamports required to top off this account to make it rent exempt - min_balance.saturating_sub(account.lamports()) + min_balance.saturating_sub(lamports) }) } @@ -8661,9 +8666,14 @@ impl AccountsDb { accounts_data_len += stored_account.data().len() as u64; } - if let Some(amount_to_top_off_rent_this_account) = - Self::stats_for_rent_payers(pubkey, &stored_account, rent_collector) - { + if let Some(amount_to_top_off_rent_this_account) = Self::stats_for_rent_payers( + pubkey, + stored_account.lamports(), + stored_account.data().len(), + stored_account.rent_epoch(), + stored_account.executable(), + rent_collector, + ) { amount_to_top_off_rent += amount_to_top_off_rent_this_account; num_accounts_rent_paying += 1; // remember this rent-paying account pubkey @@ -9090,9 +9100,14 @@ impl AccountsDb { ); let loaded_account = accessor.check_and_get_loaded_account(); accounts_data_len_from_duplicates += loaded_account.data().len(); - if let Some(lamports_to_top_off) = - Self::stats_for_rent_payers(pubkey, &loaded_account, rent_collector) - { + if let Some(lamports_to_top_off) = Self::stats_for_rent_payers( + pubkey, + loaded_account.lamports(), + loaded_account.data().len(), + loaded_account.rent_epoch(), + loaded_account.executable(), + rent_collector, + ) { removed_rent_paying += 1; removed_top_off += lamports_to_top_off; } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1b8c6701957196..97816cb9ed2be2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5165,7 +5165,11 @@ impl Bank { // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. 
if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && self.rent_collector.get_rent_due(account) == RentDue::Exempt + && self.rent_collector.get_rent_due( + account.lamports(), + account.data().len(), + account.rent_epoch(), + ) == RentDue::Exempt { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } @@ -7803,8 +7807,14 @@ impl TotalAccountsStats { self.executable_data_len += data_len; } - if !rent_collector.should_collect_rent(address, account) - || rent_collector.get_rent_due(account).is_exempt() + if !rent_collector.should_collect_rent(address, account.executable()) + || rent_collector + .get_rent_due( + account.lamports(), + account.data().len(), + account.rent_epoch(), + ) + .is_exempt() { self.num_rent_exempt_accounts += 1; } else { diff --git a/sdk/src/rent_collector.rs b/sdk/src/rent_collector.rs index 1de6ce19950dbd..1dd8c4a0292bdb 100644 --- a/sdk/src/rent_collector.rs +++ b/sdk/src/rent_collector.rs @@ -73,21 +73,22 @@ impl RentCollector { } /// true if it is easy to determine this account should consider having rent collected from it - pub fn should_collect_rent(&self, address: &Pubkey, account: &impl ReadableAccount) -> bool { - !(account.executable() // executable accounts must be rent-exempt balance + pub fn should_collect_rent(&self, address: &Pubkey, executable: bool) -> bool { + !(executable // executable accounts must be rent-exempt balance || *address == incinerator::id()) } /// given an account that 'should_collect_rent' /// returns (amount rent due, is_exempt_from_rent) - pub fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue { - if self - .rent - .is_exempt(account.lamports(), account.data().len()) - { + pub fn get_rent_due( + &self, + lamports: u64, + data_len: usize, + account_rent_epoch: Epoch, + ) -> RentDue { + if self.rent.is_exempt(lamports, data_len) { RentDue::Exempt } else { - let account_rent_epoch = account.rent_epoch(); let slots_elapsed: u64 = (account_rent_epoch..=self.epoch) .map(|epoch| { self.epoch_schedule @@ -103,7 +104,7 @@ impl RentCollector { }; // we know this account is not exempt - let due = self.rent.due_amount(account.data().len(), years_elapsed); + let due = self.rent.due_amount(data_len, years_elapsed); RentDue::Paying(due) } } @@ -158,11 +159,15 @@ impl RentCollector { // Maybe collect rent later, leave account alone for now. return RentResult::NoRentCollectionNow; } - if !self.should_collect_rent(address, account) { + if !self.should_collect_rent(address, account.executable()) { // easy to determine this account should not consider having rent collected from it return RentResult::Exempt; } - match self.get_rent_due(account) { + match self.get_rent_due( + account.lamports(), + account.data().len(), + account.rent_epoch(), + ) { // account will not have rent collected ever RentDue::Exempt => RentResult::Exempt, // potentially rent paying account diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 87c18b9717c9bc..dd47da3c6092cc 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -250,7 +250,11 @@ fn load_transaction_accounts( // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. 
if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && rent_collector.get_rent_due(&account) == RentDue::Exempt + && rent_collector.get_rent_due( + account.lamports(), + account.data().len(), + account.rent_epoch(), + ) == RentDue::Exempt { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } From 4247a8a546567242576c9a6696429a0edc53da5e Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 2 Apr 2024 14:43:38 -0400 Subject: [PATCH 145/153] Archives storages directly (#503) --- accounts-db/src/accounts_file.rs | 13 +++- accounts-db/src/append_vec.rs | 5 ++ accounts-db/src/tiered_storage/hot.rs | 5 ++ accounts-db/src/tiered_storage/readable.rs | 7 +++ runtime/src/snapshot_utils.rs | 70 +++++++--------------- 5 files changed, 49 insertions(+), 51 deletions(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 5b34698c2a733c..72f0373d95ecd3 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -17,7 +17,7 @@ use { clock::Slot, pubkey::Pubkey, }, - std::{borrow::Borrow, mem, path::PathBuf}, + std::{borrow::Borrow, io::Read, mem, path::PathBuf}, thiserror::Error, }; @@ -240,6 +240,17 @@ impl AccountsFile { .ok(), } } + + /// Returns a Read implementation suitable for use when archiving accounts files + pub fn data_for_archive(&self) -> impl Read + '_ { + match self { + Self::AppendVec(av) => av.data_for_archive(), + Self::TieredStorage(ts) => ts + .reader() + .expect("must be a reader when archiving") + .data_for_archive(), + } + } } pub struct AccountsFileIter<'a> { diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 125bd1925190f0..4b63b8c0e062e5 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -733,6 +733,11 @@ impl AppendVec { Some(rv) } } + + /// Returns a slice suitable for use when archiving append vecs + pub fn data_for_archive(&self) -> &[u8] { + self.map.as_ref() + } } #[cfg(test)] diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 260548897f66e2..d74c069f6a0033 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -553,6 +553,11 @@ impl HotStorageReader { } Ok(accounts) } + + /// Returns a slice suitable for use when archiving hot storages + pub fn data_for_archive(&self) -> &[u8] { + self.mmap.as_ref() + } } fn write_optional_fields( diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 0191dad4903578..afdc642131de57 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -108,4 +108,11 @@ impl TieredStorageReader { Self::Hot(hot) => hot.accounts(index_offset), } } + + /// Returns a slice suitable for use when archiving tiered storages + pub fn data_for_archive(&self) -> &[u8] { + match self { + Self::Hot(hot) => hot.data_for_archive(), + } + } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index bb4891e67802fa..c63b485a8b7550 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -20,7 +20,7 @@ use { solana_accounts_db::{ account_storage::AccountStorageMap, accounts_db::{AccountStorageEntry, AtomicAccountsFileId}, - accounts_file::AccountsFileError, + accounts_file::{AccountsFile, AccountsFileError}, append_vec::AppendVec, hardened_unpack::{self, ParallelSelector, UnpackError}, shared_buffer_reader::{SharedBuffer, SharedBufferReader}, @@ -446,9 +446,6 @@ pub enum ArchiveSnapshotPackageError { #[error("failed to 
create staging dir inside '{1}': {0}")] CreateStagingDir(#[source] IoError, PathBuf), - #[error("failed to create accounts staging dir '{1}': {0}")] - CreateAccountsStagingDir(#[source] IoError, PathBuf), - #[error("failed to create snapshot staging dir '{1}': {0}")] CreateSnapshotStagingDir(#[source] IoError, PathBuf), @@ -464,18 +461,6 @@ pub enum ArchiveSnapshotPackageError { #[error("failed to symlink version file from '{1}' to '{2}': {0}")] SymlinkVersionFile(#[source] IoError, PathBuf, PathBuf), - #[error("failed to flush account storage file '{1}': {0}")] - FlushAccountStorageFile(#[source] AccountsFileError, PathBuf), - - #[error("failed to canonicalize account storage file '{1}': {0}")] - CanonicalizeAccountStorageFile(#[source] IoError, PathBuf), - - #[error("failed to symlink account storage file from '{1}' to '{2}': {0}")] - SymlinkAccountStorageFile(#[source] IoError, PathBuf, PathBuf), - - #[error("account storage staging file is invalid '{0}'")] - InvalidAccountStorageStagingFile(PathBuf), - #[error("failed to create archive file '{1}': {0}")] CreateArchiveFile(#[source] IoError, PathBuf), @@ -485,8 +470,8 @@ pub enum ArchiveSnapshotPackageError { #[error("failed to archive snapshots dir: {0}")] ArchiveSnapshotsDir(#[source] IoError), - #[error("failed to archive accounts dir: {0}")] - ArchiveAccountsDir(#[source] IoError), + #[error("failed to archive account storage file '{1}': {0}")] + ArchiveAccountStorageFile(#[source] IoError, PathBuf), #[error("failed to archive snapshot: {0}")] FinishArchive(#[source] IoError), @@ -762,13 +747,7 @@ pub fn archive_snapshot_package( )) .tempdir_in(tar_dir) .map_err(|err| E::CreateStagingDir(err, tar_dir.to_path_buf()))?; - let staging_snapshots_dir = staging_dir.path().join(SNAPSHOTS_DIR); - let staging_accounts_dir = staging_dir.path().join(ACCOUNTS_DIR); - - // Create staging/accounts/ - fs::create_dir_all(&staging_accounts_dir) - .map_err(|err| E::CreateAccountsStagingDir(err, staging_accounts_dir.clone()))?; let slot_str = snapshot_package.slot().to_string(); let staging_snapshot_dir = staging_snapshots_dir.join(&slot_str); @@ -800,29 +779,6 @@ pub fn archive_snapshot_package( E::SymlinkVersionFile(err, src_version_file, staging_version_file.clone()) })?; - // Add the AppendVecs into the compressible list - for storage in snapshot_package.snapshot_storages.iter() { - let storage_path = storage.get_path(); - storage - .flush() - .map_err(|err| E::FlushAccountStorageFile(err, storage_path.clone()))?; - let staging_storage_path = staging_accounts_dir.join(AppendVec::file_name( - storage.slot(), - storage.append_vec_id(), - )); - - // `src_storage_path` - The file path where the AppendVec itself is located - // `staging_storage_path` - The file path where the AppendVec will be placed in the staging directory. 
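// Editor's sketch of what replaces the symlink staging being removed here; the
// real loop appears a little further below. Each storage is streamed straight
// into the tar archive under a synthesized GNU header sized to the storage's
// capacity, so no on-disk staging of account files is needed:
//
//     let mut header = tar::Header::new_gnu();
//     header.set_path(path_in_archive)?; // ACCOUNTS_DIR joined with the storage file name
//     header.set_size(storage.capacity());
//     header.set_cksum();
//     archive.append(&header, storage.accounts.data_for_archive())?;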
-            let src_storage_path = fs::canonicalize(&storage_path)
-                .map_err(|err| E::CanonicalizeAccountStorageFile(err, storage_path))?;
-            symlink::symlink_file(&src_storage_path, &staging_storage_path).map_err(|err| {
-                E::SymlinkAccountStorageFile(err, src_storage_path, staging_storage_path.clone())
-            })?;
-            if !staging_storage_path.is_file() {
-                return Err(E::InvalidAccountStorageStagingFile(staging_storage_path).into());
-            }
-        }
-
         // Tar the staging directory into the archive at `archive_path`
         let archive_path = tar_dir.join(format!(
             "{}{}.{}",
@@ -845,9 +801,23 @@ pub fn archive_snapshot_package(
         archive
             .append_dir_all(SNAPSHOTS_DIR, &staging_snapshots_dir)
             .map_err(E::ArchiveSnapshotsDir)?;
-        archive
-            .append_dir_all(ACCOUNTS_DIR, &staging_accounts_dir)
-            .map_err(E::ArchiveAccountsDir)?;
+
+        for storage in &snapshot_package.snapshot_storages {
+            let path_in_archive = Path::new(ACCOUNTS_DIR).join(AccountsFile::file_name(
+                storage.slot(),
+                storage.append_vec_id(),
+            ));
+            let mut header = tar::Header::new_gnu();
+            header
+                .set_path(path_in_archive)
+                .map_err(|err| E::ArchiveAccountStorageFile(err, storage.get_path()))?;
+            header.set_size(storage.capacity());
+            header.set_cksum();
+            archive
+                .append(&header, storage.accounts.data_for_archive())
+                .map_err(|err| E::ArchiveAccountStorageFile(err, storage.get_path()))?;
+        }
+
         archive.into_inner().map_err(E::FinishArchive)?;

         Ok(())
     };

From 85c14b4e4f003f2e354500ce7bdf56995f6eb4c7 Mon Sep 17 00:00:00 2001
From: samkim-crypto
Date: Wed, 3 Apr 2024 07:16:53 +0900
Subject: [PATCH 146/153] [clap-v3-utils] Fix deprecation message of
 `is_valid_pubkey` and `is_valid_signer` to `allow_all()` (#531)

fix deprecation message of `is_valid_pubkey` and `is_valid_signer` to
`allow_all()`
---
 clap-v3-utils/src/input_validators.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clap-v3-utils/src/input_validators.rs b/clap-v3-utils/src/input_validators.rs
index 0b3f75e1a6f334..5ffe8499b4cc56 100644
--- a/clap-v3-utils/src/input_validators.rs
+++ b/clap-v3-utils/src/input_validators.rs
@@ -150,7 +150,7 @@ where
 // produce a pubkey()
 #[deprecated(
     since = "1.18.0",
-    note = "please use `SignerSourceParserBuilder::default().allow_pubkey().allow_file_path().build()` instead"
+    note = "please use `SignerSourceParserBuilder::default().allow_all().build()` instead"
 )]
 #[allow(deprecated)]
 pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
 where
@@ -176,7 +176,7 @@ where
 // also provided and correct happens in parsing, not in validation.
 #[deprecated(
     since = "1.18.0",
-    note = "please use `SignerSourceParserBuilder::default().build()` instead"
+    note = "please use `SignerSourceParserBuilder::default().allow_all().build()` instead"
 )]
 #[allow(deprecated)]
 pub fn is_valid_signer<T>(string: T) -> Result<(), String>
 where

From 64260fc831d7459fa1fd45c67b4a8f5ba40f4be9 Mon Sep 17 00:00:00 2001
From: Emanuele Cesena
Date: Tue, 2 Apr 2024 19:09:54 -0500
Subject: [PATCH 147/153] Simd 129: alt_bn128 syscalls - simplified error code
 (#294)

* alt_bn128: simplify errors in syscalls (alt_bn128, compress, poseidon)

* add TODO for feature gate. remove validate from compress

* add feature gate

* fix one more error case

* all changes under feature gate

* revert removing from()

* return unexpected errors in lib

* add comment to remove error types, once the feature gate is activated

* remove unnecessary/impossible error

* fix misspelled comments
---
 programs/bpf_loader/src/syscalls/mod.rs  | 53 ++++++++++++++++++++----
 sdk/program/src/alt_bn128/compression.rs | 26 +++++++-----
 sdk/program/src/alt_bn128/mod.rs         | 11 +++--
 sdk/program/src/poseidon.rs              |  4 +-
 4 files changed, 72 insertions(+), 22 deletions(-)

diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index 0d51e599d9366c..63e15171a7af5d 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -1573,14 +1573,24 @@ declare_builtin_function!(
             }
         };

+        let simplify_alt_bn128_syscall_error_codes = invoke_context
+            .feature_set
+            .is_active(&feature_set::simplify_alt_bn128_syscall_error_codes::id());
+
         let result_point = match calculation(input) {
             Ok(result_point) => result_point,
             Err(e) => {
-                return Ok(e.into());
+                return if simplify_alt_bn128_syscall_error_codes {
+                    Ok(1)
+                } else {
+                    Ok(e.into())
+                };
             }
         };

-        if result_point.len() != output {
+        // This can never happen and should be removed when the
+        // simplify_alt_bn128_syscall_error_codes feature gets activated
+        if result_point.len() != output && !simplify_alt_bn128_syscall_error_codes {
             return Ok(AltBn128Error::SliceOutOfBounds.into());
         }

@@ -1720,10 +1730,19 @@ declare_builtin_function!(
                 )
             })
             .collect::<Result<Vec<_>, Error>>()?;
+
+        let simplify_alt_bn128_syscall_error_codes = invoke_context
+            .feature_set
+            .is_active(&feature_set::simplify_alt_bn128_syscall_error_codes::id());
+
         let hash = match poseidon::hashv(parameters, endianness, inputs.as_slice()) {
             Ok(hash) => hash,
             Err(e) => {
-                return Ok(e.into());
+                return if simplify_alt_bn128_syscall_error_codes {
+                    Ok(1)
+                } else {
+                    Ok(e.into())
+                };
             }
         };
         hash_result.copy_from_slice(&hash.to_bytes());
@@ -1807,12 +1826,20 @@ declare_builtin_function!(
             invoke_context.get_check_aligned(),
         )?;

+        let simplify_alt_bn128_syscall_error_codes = invoke_context
+            .feature_set
+            .is_active(&feature_set::simplify_alt_bn128_syscall_error_codes::id());
+
         match op {
             ALT_BN128_G1_COMPRESS => {
                 let result_point = match alt_bn128_g1_compress(input) {
                     Ok(result_point) => result_point,
                     Err(e) => {
-                        return Ok(e.into());
+                        return if simplify_alt_bn128_syscall_error_codes {
+                            Ok(1)
+                        } else {
+                            Ok(e.into())
+                        };
                     }
                 };
                 call_result.copy_from_slice(&result_point);
@@ -1822,7 +1849,11 @@ declare_builtin_function!(
                 let result_point = match alt_bn128_g1_decompress(input) {
                     Ok(result_point) => result_point,
                     Err(e) => {
-                        return Ok(e.into());
+                        return if simplify_alt_bn128_syscall_error_codes {
+                            Ok(1)
+                        } else {
+                            Ok(e.into())
+                        };
                     }
                 };
                 call_result.copy_from_slice(&result_point);
@@ -1832,7 +1863,11 @@ declare_builtin_function!(
                 let result_point = match alt_bn128_g2_compress(input) {
                     Ok(result_point) => result_point,
                     Err(e) => {
-                        return Ok(e.into());
+                        return if simplify_alt_bn128_syscall_error_codes {
+                            Ok(1)
+                        } else {
+                            Ok(e.into())
+                        };
                     }
                 };
                 call_result.copy_from_slice(&result_point);
@@ -1842,7 +1877,11 @@ declare_builtin_function!(
                 let result_point = match alt_bn128_g2_decompress(input) {
                     Ok(result_point) => result_point,
                     Err(e) => {
-                        return Ok(e.into());
+                        return if simplify_alt_bn128_syscall_error_codes {
+                            Ok(1)
+                        } else {
+                            Ok(e.into())
+                        };
                     }
                 };
                 call_result.copy_from_slice(&result_point);
diff --git a/sdk/program/src/alt_bn128/compression.rs b/sdk/program/src/alt_bn128/compression.rs
index 2791b8fd35f8f5..0b63b202d4d854 100644
--- a/sdk/program/src/alt_bn128/compression.rs
+++ b/sdk/program/src/alt_bn128/compression.rs
@@ -20,6 +20,8 @@ mod alt_bn128_compression_size {
     pub const G2_COMPRESSED: usize = 64;
 }

+// AltBn128CompressionError must be removed once the
+// simplify_alt_bn128_syscall_error_codes feature gets activated
 #[derive(Debug, Error, Clone, PartialEq, Eq)]
 pub enum AltBn128CompressionError {
     #[error("Unexpected error")]
@@ -51,13 +53,14 @@ impl From<u64> for AltBn128CompressionError {

 impl From<AltBn128CompressionError> for u64 {
     fn from(v: AltBn128CompressionError) -> u64 {
+        // note: should never return 0, as it risks being confused with syscall success
         match v {
             AltBn128CompressionError::G1DecompressionFailed => 1,
             AltBn128CompressionError::G2DecompressionFailed => 2,
             AltBn128CompressionError::G1CompressionFailed => 3,
             AltBn128CompressionError::G2CompressionFailed => 4,
             AltBn128CompressionError::InvalidInputSize => 5,
-            AltBn128CompressionError::UnexpectedError => 0,
+            AltBn128CompressionError::UnexpectedError => 6,
         }
     }
 }
@@ -118,7 +121,7 @@ mod target_arch {
             .map_err(|_| AltBn128CompressionError::G1CompressionFailed)?;
         let mut g1_bytes = [0u8; alt_bn128_compression_size::G1_COMPRESSED];
         G1::serialize_compressed(&g1, g1_bytes.as_mut_slice())
-            .map_err(|_| AltBn128CompressionError::G2CompressionFailed)?;
+            .map_err(|_| AltBn128CompressionError::G1CompressionFailed)?;
         Ok(convert_endianness::<32, 32>(&g1_bytes))
     }

@@ -131,9 +134,12 @@ mod target_arch {
         if g2_bytes == [0u8; alt_bn128_compression_size::G2_COMPRESSED] {
             return Ok([0u8; alt_bn128_compression_size::G2]);
         }
-        let decompressed_g2 =
-            G2::deserialize_compressed(convert_endianness::<64, 64>(&g2_bytes).as_slice())
-                .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?;
+        let decompressed_g2 = G2::deserialize_with_mode(
+            convert_endianness::<64, 64>(&g2_bytes).as_slice(),
+            Compress::Yes,
+            Validate::No,
+        )
+        .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?;
         let mut decompressed_g2_bytes = [0u8; alt_bn128_compression_size::G2];
         decompressed_g2
             .x
@@ -160,7 +166,7 @@ mod target_arch {
             Compress::No,
             Validate::No,
         )
-        .map_err(|_| AltBn128CompressionError::G2DecompressionFailed)?;
+        .map_err(|_| AltBn128CompressionError::G2CompressionFailed)?;
         let mut g2_bytes = [0u8; alt_bn128_compression_size::G2_COMPRESSED];
         G2::serialize_compressed(&g2, g2_bytes.as_mut_slice())
             .map_err(|_| AltBn128CompressionError::G2CompressionFailed)?;
@@ -205,7 +211,7 @@

         match result {
             0 => Ok(result_buffer),
-            error => Err(AltBn128CompressionError::from(error)),
+            _ => Err(AltBn128CompressionError::UnexpectedError),
         }
     }

@@ -222,7 +228,7 @@

         match result {
             0 => Ok(result_buffer),
-            error => Err(AltBn128CompressionError::from(error)),
+            _ => Err(AltBn128CompressionError::UnexpectedError),
         }
     }

@@ -241,7 +247,7 @@

         match result {
             0 => Ok(result_buffer),
-            error => Err(AltBn128CompressionError::from(error)),
+            _ => Err(AltBn128CompressionError::UnexpectedError),
         }
     }

@@ -260,7 +266,7 @@

         match result {
             0 => Ok(result_buffer),
-            error => Err(AltBn128CompressionError::from(error)),
+            _ => Err(AltBn128CompressionError::UnexpectedError),
         }
     }
 }
diff --git a/sdk/program/src/alt_bn128/mod.rs b/sdk/program/src/alt_bn128/mod.rs
index f8995e2a19c429..6ed0f964c83041 100644
--- a/sdk/program/src/alt_bn128/mod.rs
+++ b/sdk/program/src/alt_bn128/mod.rs
@@ -41,6 +41,8 @@ mod consts {
     pub const ALT_BN128_PAIRING: u64 = 3;
 }

+// AltBn128Error must be removed once the
+// simplify_alt_bn128_syscall_error_codes feature gets activated
 #[derive(Debug, Error, Clone, PartialEq, Eq)]
 pub enum AltBn128Error {
     #[error("The input data is invalid")]
@@ -72,13 +74,14 @@ impl From<u64> for AltBn128Error {

 impl From<AltBn128Error> for u64 {
     fn from(v: AltBn128Error) -> u64 {
+        // note: should never return 0, as it risks being confused with syscall success
         match v {
             AltBn128Error::InvalidInputData => 1,
             AltBn128Error::GroupError => 2,
             AltBn128Error::SliceOutOfBounds => 3,
             AltBn128Error::TryIntoVecError(_) => 4,
             AltBn128Error::ProjectiveToG1Failed => 5,
-            AltBn128Error::UnexpectedError => 0,
+            AltBn128Error::UnexpectedError => 6,
         }
     }
 }
@@ -319,7 +322,7 @@ mod target_arch {

         match result {
             0 => Ok(result_buffer.to_vec()),
-            error => Err(AltBn128Error::from(error)),
+            _ => Err(AltBn128Error::UnexpectedError),
         }
     }

@@ -339,7 +342,7 @@ mod target_arch {

         match result {
             0 => Ok(result_buffer.to_vec()),
-            error => Err(AltBn128Error::from(error)),
+            _ => Err(AltBn128Error::UnexpectedError),
         }
     }

@@ -363,7 +366,7 @@ mod target_arch {

         match result {
             0 => Ok(result_buffer.to_vec()),
-            error => Err(AltBn128Error::from(error)),
+            _ => Err(AltBn128Error::UnexpectedError),
         }
     }
 }
diff --git a/sdk/program/src/poseidon.rs b/sdk/program/src/poseidon.rs
index 9c02fe90bc8b50..9e782fa5e85fe7 100644
--- a/sdk/program/src/poseidon.rs
+++ b/sdk/program/src/poseidon.rs
@@ -7,6 +7,8 @@ use thiserror::Error;
 /// Length of Poseidon hash result.
 pub const HASH_BYTES: usize = 32;

+// PoseidonSyscallError must be removed once the
+// simplify_alt_bn128_syscall_error_codes feature gets activated
 #[derive(Error, Debug)]
 pub enum PoseidonSyscallError {
     #[error("Invalid parameters.")]
@@ -267,7 +269,7 @@ pub fn hashv(

         match result {
             0 => Ok(PoseidonHash::new(hash_result)),
-            e => Err(PoseidonSyscallError::from(e)),
+            _ => Err(PoseidonSyscallError::Unexpected),
         }
     }
 }

From 5d53389fe409833be8c85c79134ac30fc434108c Mon Sep 17 00:00:00 2001
From: Tyera
Date: Tue, 2 Apr 2024 19:13:39 -0600
Subject: [PATCH 148/153] Move distribution methods; partitioned epoch-rewards
 reorg, 3 of 5 (#528)

* Add distribution sub-submodule

* Move distribution methods to sub-submodule

* Move unit tests into distribution sub-submodule
---
 runtime/src/bank.rs                           | 123 ------
 .../partitioned_epoch_rewards/distribution.rs | 395 ++++++++++++++++++
 .../src/bank/partitioned_epoch_rewards/mod.rs |   1 +
 .../bank/partitioned_epoch_rewards/sysvar.rs  |   7 +-
 runtime/src/bank/tests.rs                     | 243 -----------
 5 files changed, 401 insertions(+), 368 deletions(-)
 create mode 100644 runtime/src/bank/partitioned_epoch_rewards/distribution.rs

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 97816cb9ed2be2..0be5848830dd15 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -1518,47 +1518,6 @@ impl Bank {
         );
     }

-    /// Process reward distribution for the block if it is inside reward interval.
- fn distribute_partitioned_epoch_rewards(&mut self) { - let EpochRewardStatus::Active(status) = &self.epoch_reward_status else { - return; - }; - - let height = self.block_height(); - let start_block_height = status.start_block_height; - let credit_start = start_block_height + self.get_reward_calculation_num_blocks(); - let credit_end_exclusive = credit_start + status.stake_rewards_by_partition.len() as u64; - assert!( - self.epoch_schedule.get_slots_in_epoch(self.epoch) - > credit_end_exclusive.saturating_sub(credit_start) - ); - - if height >= credit_start && height < credit_end_exclusive { - let partition_index = height - credit_start; - self.distribute_epoch_rewards_in_partition( - &status.stake_rewards_by_partition, - partition_index, - ); - } - - if height.saturating_add(1) >= credit_end_exclusive { - datapoint_info!( - "epoch-rewards-status-update", - ("slot", self.slot(), i64), - ("block_height", height, i64), - ("active", 0, i64), - ("start_block_height", start_block_height, i64), - ); - - assert!(matches!( - self.epoch_reward_status, - EpochRewardStatus::Active(_) - )); - self.epoch_reward_status = EpochRewardStatus::Inactive; - self.destroy_epoch_rewards_sysvar(); - } - } - pub fn byte_limit_for_scans(&self) -> Option { self.rc .accounts @@ -3208,39 +3167,6 @@ impl Bank { .fetch_add(now.elapsed().as_micros() as u64, Relaxed); } - /// store stake rewards in partition - /// return the sum of all the stored rewards - /// - /// Note: even if staker's reward is 0, the stake account still needs to be stored because - /// credits observed has changed - fn store_stake_accounts_in_partition(&self, stake_rewards: &[StakeReward]) -> u64 { - // Verify that stake account `lamports + reward_amount` matches what we have in the - // rewarded account. This code will have a performance hit - an extra load and compare of - // the stake accounts. This is for debugging. Once we are confident, we can disable the - // check. - const VERIFY_REWARD_LAMPORT: bool = true; - - if VERIFY_REWARD_LAMPORT { - for r in stake_rewards { - let stake_pubkey = r.stake_pubkey; - let reward_amount = r.get_stake_reward(); - let post_stake_account = &r.stake_account; - if let Some(curr_stake_account) = self.get_account_with_fixed_root(&stake_pubkey) { - let pre_lamport = curr_stake_account.lamports(); - let post_lamport = post_stake_account.lamports(); - assert_eq!(pre_lamport + u64::try_from(reward_amount).unwrap(), post_lamport, - "stake account balance has changed since the reward calculation! 
account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}"); - } - } - } - - self.store_accounts((self.slot(), stake_rewards)); - stake_rewards - .iter() - .map(|stake_reward| stake_reward.stake_reward_info.lamports) - .sum::() as u64 - } - fn store_vote_accounts_partitioned( &self, vote_account_rewards: VoteRewardsAccounts, @@ -3375,55 +3301,6 @@ impl Bank { .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); } - /// insert non-zero stake rewards to self.rewards - /// Return the number of rewards inserted - fn update_reward_history_in_partition(&self, stake_rewards: &[StakeReward]) -> usize { - let mut rewards = self.rewards.write().unwrap(); - rewards.reserve(stake_rewards.len()); - let initial_len = rewards.len(); - stake_rewards - .iter() - .filter(|x| x.get_stake_reward() > 0) - .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); - rewards.len().saturating_sub(initial_len) - } - - /// Process reward credits for a partition of rewards - /// Store the rewards to AccountsDB, update reward history record and total capitalization. - fn distribute_epoch_rewards_in_partition( - &self, - all_stake_rewards: &[Vec], - partition_index: u64, - ) { - let pre_capitalization = self.capitalization(); - let this_partition_stake_rewards = &all_stake_rewards[partition_index as usize]; - - let (total_rewards_in_lamports, store_stake_accounts_us) = - measure_us!(self.store_stake_accounts_in_partition(this_partition_stake_rewards)); - - // increase total capitalization by the distributed rewards - self.capitalization - .fetch_add(total_rewards_in_lamports, Relaxed); - - // decrease distributed capital from epoch rewards sysvar - self.update_epoch_rewards_sysvar(total_rewards_in_lamports); - - // update reward history for this partitioned distribution - self.update_reward_history_in_partition(this_partition_stake_rewards); - - let metrics = RewardsStoreMetrics { - pre_capitalization, - post_capitalization: self.capitalization(), - total_stake_accounts_count: all_stake_rewards.len(), - partition_index, - store_stake_accounts_us, - store_stake_accounts_count: this_partition_stake_rewards.len(), - distributed_rewards: total_rewards_in_lamports, - }; - - report_partitioned_reward_metrics(self, metrics); - } - fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { diff --git a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs new file mode 100644 index 00000000000000..79c73ed5b90af7 --- /dev/null +++ b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs @@ -0,0 +1,395 @@ +use { + super::{Bank, EpochRewardStatus}, + crate::bank::metrics::{report_partitioned_reward_metrics, RewardsStoreMetrics}, + solana_accounts_db::stake_rewards::StakeReward, + solana_measure::measure_us, + solana_sdk::account::ReadableAccount, + std::sync::atomic::Ordering::Relaxed, +}; + +impl Bank { + /// Process reward distribution for the block if it is inside reward interval. 
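// Editor's note on the window math in the function below, with illustrative
// numbers: if start_block_height = 100, get_reward_calculation_num_blocks() = 1,
// and there are two reward partitions, then credit_start = 101 and
// credit_end_exclusive = 103, so block heights 101 and 102 each distribute one
// partition:
//
//     let partition_index = height - credit_start; // 101 -> 0, 102 -> 1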
+ pub(in crate::bank) fn distribute_partitioned_epoch_rewards(&mut self) { + let EpochRewardStatus::Active(status) = &self.epoch_reward_status else { + return; + }; + + let height = self.block_height(); + let start_block_height = status.start_block_height; + let credit_start = start_block_height + self.get_reward_calculation_num_blocks(); + let credit_end_exclusive = credit_start + status.stake_rewards_by_partition.len() as u64; + assert!( + self.epoch_schedule.get_slots_in_epoch(self.epoch) + > credit_end_exclusive.saturating_sub(credit_start) + ); + + if height >= credit_start && height < credit_end_exclusive { + let partition_index = height - credit_start; + self.distribute_epoch_rewards_in_partition( + &status.stake_rewards_by_partition, + partition_index, + ); + } + + if height.saturating_add(1) >= credit_end_exclusive { + datapoint_info!( + "epoch-rewards-status-update", + ("slot", self.slot(), i64), + ("block_height", height, i64), + ("active", 0, i64), + ("start_block_height", start_block_height, i64), + ); + + assert!(matches!( + self.epoch_reward_status, + EpochRewardStatus::Active(_) + )); + self.epoch_reward_status = EpochRewardStatus::Inactive; + self.destroy_epoch_rewards_sysvar(); + } + } + + /// Process reward credits for a partition of rewards + /// Store the rewards to AccountsDB, update reward history record and total capitalization. + fn distribute_epoch_rewards_in_partition( + &self, + all_stake_rewards: &[Vec], + partition_index: u64, + ) { + let pre_capitalization = self.capitalization(); + let this_partition_stake_rewards = &all_stake_rewards[partition_index as usize]; + + let (total_rewards_in_lamports, store_stake_accounts_us) = + measure_us!(self.store_stake_accounts_in_partition(this_partition_stake_rewards)); + + // increase total capitalization by the distributed rewards + self.capitalization + .fetch_add(total_rewards_in_lamports, Relaxed); + + // decrease distributed capital from epoch rewards sysvar + self.update_epoch_rewards_sysvar(total_rewards_in_lamports); + + // update reward history for this partitioned distribution + self.update_reward_history_in_partition(this_partition_stake_rewards); + + let metrics = RewardsStoreMetrics { + pre_capitalization, + post_capitalization: self.capitalization(), + total_stake_accounts_count: all_stake_rewards.len(), + partition_index, + store_stake_accounts_us, + store_stake_accounts_count: this_partition_stake_rewards.len(), + distributed_rewards: total_rewards_in_lamports, + }; + + report_partitioned_reward_metrics(self, metrics); + } + + /// insert non-zero stake rewards to self.rewards + /// Return the number of rewards inserted + fn update_reward_history_in_partition(&self, stake_rewards: &[StakeReward]) -> usize { + let mut rewards = self.rewards.write().unwrap(); + rewards.reserve(stake_rewards.len()); + let initial_len = rewards.len(); + stake_rewards + .iter() + .filter(|x| x.get_stake_reward() > 0) + .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); + rewards.len().saturating_sub(initial_len) + } + + /// store stake rewards in partition + /// return the sum of all the stored rewards + /// + /// Note: even if staker's reward is 0, the stake account still needs to be stored because + /// credits observed has changed + fn store_stake_accounts_in_partition(&self, stake_rewards: &[StakeReward]) -> u64 { + // Verify that stake account `lamports + reward_amount` matches what we have in the + // rewarded account. 
This code will have a performance hit - an extra load and compare of + // the stake accounts. This is for debugging. Once we are confident, we can disable the + // check. + const VERIFY_REWARD_LAMPORT: bool = true; + + if VERIFY_REWARD_LAMPORT { + for r in stake_rewards { + let stake_pubkey = r.stake_pubkey; + let reward_amount = r.get_stake_reward(); + let post_stake_account = &r.stake_account; + if let Some(curr_stake_account) = self.get_account_with_fixed_root(&stake_pubkey) { + let pre_lamport = curr_stake_account.lamports(); + let post_lamport = post_stake_account.lamports(); + assert_eq!(pre_lamport + u64::try_from(reward_amount).unwrap(), post_lamport, + "stake account balance has changed since the reward calculation! account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}"); + } + } + } + + self.store_accounts((self.slot(), stake_rewards)); + stake_rewards + .iter() + .map(|stake_reward| stake_reward.stake_reward_info.lamports) + .sum::() as u64 + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + bank::tests::create_genesis_config, epoch_rewards_hasher::hash_rewards_into_partitions, + }, + rand::Rng, + solana_sdk::{ + account::from_account, epoch_schedule::EpochSchedule, feature_set, hash::Hash, + native_token::LAMPORTS_PER_SOL, sysvar, + }, + }; + + #[test] + fn test_distribute_partitioned_epoch_rewards() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let mut bank = Bank::new_for_tests(&genesis_config); + + let expected_num = 100; + + let stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + let stake_rewards = hash_rewards_into_partitions(stake_rewards, &Hash::new(&[1; 32]), 2); + + bank.set_epoch_reward_status_active(stake_rewards); + + bank.distribute_partitioned_epoch_rewards(); + } + + #[test] + #[should_panic(expected = "self.epoch_schedule.get_slots_in_epoch")] + fn test_distribute_partitioned_epoch_rewards_too_many_partitions() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let mut bank = Bank::new_for_tests(&genesis_config); + + let expected_num = 1; + + let stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + let stake_rewards = hash_rewards_into_partitions( + stake_rewards, + &Hash::new(&[1; 32]), + bank.epoch_schedule().slots_per_epoch as usize + 1, + ); + + bank.set_epoch_reward_status_active(stake_rewards); + + bank.distribute_partitioned_epoch_rewards(); + } + + #[test] + fn test_distribute_partitioned_epoch_rewards_empty() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let mut bank = Bank::new_for_tests(&genesis_config); + + bank.set_epoch_reward_status_active(vec![]); + + bank.distribute_partitioned_epoch_rewards(); + } + + /// Test distribute partitioned epoch rewards + #[test] + fn test_distribute_partitioned_epoch_rewards_bank_capital_and_sysvar_balance() { + let (mut genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + + // Set up epoch_rewards sysvar with rewards with 1e9 lamports to distribute. 
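// Editor's note: in sketch form, the invariant this test exercises. If the
// sysvar is funded with `total` lamports and a partition distributes `d`, then
// afterwards sysvar.lamports == total - d and distributed_rewards == d, while
// bank capitalization is unchanged: the capitalization credit for the
// distributed rewards is offset by the matching decrease in the sysvar
// account's balance.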
+ let total_rewards = 1_000_000_000; + bank.create_epoch_rewards_sysvar(total_rewards, 0, 42); + let pre_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); + assert_eq!(pre_epoch_rewards_account.lamports(), total_rewards); + + // Set up a partition of rewards to distribute + let expected_num = 100; + let mut stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + let mut rewards_to_distribute = 0; + for stake_reward in &mut stake_rewards { + stake_reward.credit(100); + rewards_to_distribute += 100; + } + let all_rewards = vec![stake_rewards]; + + // Distribute rewards + let pre_cap = bank.capitalization(); + bank.distribute_epoch_rewards_in_partition(&all_rewards, 0); + let post_cap = bank.capitalization(); + let post_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); + let expected_epoch_rewards_sysvar_lamports_remaining = + total_rewards - rewards_to_distribute; + + // Assert that epoch rewards sysvar lamports decreases by the distributed rewards + assert_eq!( + post_epoch_rewards_account.lamports(), + expected_epoch_rewards_sysvar_lamports_remaining + ); + + let epoch_rewards: sysvar::epoch_rewards::EpochRewards = + from_account(&post_epoch_rewards_account).unwrap(); + assert_eq!(epoch_rewards.total_rewards, total_rewards); + assert_eq!(epoch_rewards.distributed_rewards, rewards_to_distribute,); + + // Assert that the bank total capital didn't change + assert_eq!(pre_cap, post_cap); + } + + /// Test partitioned credits and reward history updates of epoch rewards do cover all the rewards + /// slice. + #[test] + fn test_epoch_credit_rewards_and_history_update() { + let (mut genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); + let mut bank = Bank::new_for_tests(&genesis_config); + + // setup the expected number of stake rewards + let expected_num = 12345; + + let mut stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + bank.store_accounts((bank.slot(), &stake_rewards[..])); + + // Simulate rewards + let mut expected_rewards = 0; + for stake_reward in &mut stake_rewards { + stake_reward.credit(1); + expected_rewards += 1; + } + + let stake_rewards_bucket = + hash_rewards_into_partitions(stake_rewards, &Hash::new(&[1; 32]), 100); + bank.set_epoch_reward_status_active(stake_rewards_bucket.clone()); + + // Test partitioned stores + let mut total_rewards = 0; + let mut total_num_updates = 0; + + let pre_update_history_len = bank.rewards.read().unwrap().len(); + + for stake_rewards in stake_rewards_bucket { + let total_rewards_in_lamports = bank.store_stake_accounts_in_partition(&stake_rewards); + let num_history_updates = bank.update_reward_history_in_partition(&stake_rewards); + assert_eq!(stake_rewards.len(), num_history_updates); + total_rewards += total_rewards_in_lamports; + total_num_updates += num_history_updates; + } + + let post_update_history_len = bank.rewards.read().unwrap().len(); + + // assert that all rewards are credited + assert_eq!(total_rewards, expected_rewards); + assert_eq!(total_num_updates, expected_num); + assert_eq!( + total_num_updates, + post_update_history_len - pre_update_history_len + ); + } + + #[test] + fn test_update_reward_history_in_partition() { + for zero_reward in [false, true] { + let (genesis_config, _mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = 
Bank::new_for_tests(&genesis_config); + + let mut expected_num = 100; + + let mut stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + let mut rng = rand::thread_rng(); + let i_zero = rng.gen_range(0..expected_num); + if zero_reward { + // pick one entry to have zero rewards so it gets ignored + stake_rewards[i_zero].stake_reward_info.lamports = 0; + } + + let num_in_history = bank.update_reward_history_in_partition(&stake_rewards); + + if zero_reward { + stake_rewards.remove(i_zero); + // -1 because one of them had zero rewards and was ignored + expected_num -= 1; + } + + bank.rewards + .read() + .unwrap() + .iter() + .zip(stake_rewards.iter()) + .for_each(|((k, reward_info), expected_stake_reward)| { + assert_eq!( + ( + &expected_stake_reward.stake_pubkey, + &expected_stake_reward.stake_reward_info + ), + (k, reward_info) + ); + }); + + assert_eq!(num_in_history, expected_num); + } + } + + #[test] + fn test_update_reward_history_in_partition_empty() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); + + let stake_rewards = vec![]; + + let num_in_history = bank.update_reward_history_in_partition(&stake_rewards); + assert_eq!(num_in_history, 0); + } + + /// Test rewards computation and partitioned rewards distribution at the epoch boundary + #[test] + fn test_store_stake_accounts_in_partition() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); + + let expected_num = 100; + + let stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::>(); + + let expected_total = stake_rewards + .iter() + .map(|stake_reward| stake_reward.stake_reward_info.lamports) + .sum::() as u64; + + let total_rewards_in_lamports = bank.store_stake_accounts_in_partition(&stake_rewards); + assert_eq!(expected_total, total_rewards_in_lamports); + } + + #[test] + fn test_store_stake_accounts_in_partition_empty() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); + + let stake_rewards = vec![]; + + let expected_total = 0; + + let total_rewards_in_lamports = bank.store_stake_accounts_in_partition(&stake_rewards); + assert_eq!(expected_total, total_rewards_in_lamports); + } +} diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index 5d7f405e34ac9b..8248f0c51bb387 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -1,3 +1,4 @@ +mod distribution; mod sysvar; use { diff --git a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs index b540dc2bec0fcd..23eb5c986c3512 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs @@ -55,7 +55,10 @@ impl Bank { } /// Update EpochRewards sysvar with distributed rewards - pub(in crate::bank) fn update_epoch_rewards_sysvar(&self, distributed: u64) { + pub(in crate::bank::partitioned_epoch_rewards) fn update_epoch_rewards_sysvar( + &self, + distributed: u64, + ) { assert!(self.is_partitioned_rewards_code_enabled()); let mut epoch_rewards: sysvar::epoch_rewards::EpochRewards = @@ -75,7 +78,7 @@ impl Bank { self.log_epoch_rewards_sysvar("update"); } - pub(in crate::bank) fn 
destroy_epoch_rewards_sysvar(&self) { + pub(in crate::bank::partitioned_epoch_rewards) fn destroy_epoch_rewards_sysvar(&self) { if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { if account.lamports() > 0 { info!( diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 16114f7b7593b3..1fb875be988751 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12248,158 +12248,6 @@ fn test_get_stake_rewards_partition_range_panic() { let _range = &stake_rewards_bucket[15]; } -#[test] -fn test_distribute_partitioned_epoch_rewards() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - - let expected_num = 100; - - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - let stake_rewards = hash_rewards_into_partitions(stake_rewards, &Hash::new(&[1; 32]), 2); - - bank.set_epoch_reward_status_active(stake_rewards); - - bank.distribute_partitioned_epoch_rewards(); -} - -#[test] -#[should_panic(expected = "self.epoch_schedule.get_slots_in_epoch")] -fn test_distribute_partitioned_epoch_rewards_too_many_partitions() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - - let expected_num = 1; - - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - let stake_rewards = hash_rewards_into_partitions( - stake_rewards, - &Hash::new(&[1; 32]), - bank.epoch_schedule().slots_per_epoch as usize + 1, - ); - - bank.set_epoch_reward_status_active(stake_rewards); - - bank.distribute_partitioned_epoch_rewards(); -} - -#[test] -fn test_distribute_partitioned_epoch_rewards_empty() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); - - bank.set_epoch_reward_status_active(vec![]); - - bank.distribute_partitioned_epoch_rewards(); -} - -/// Test partitioned credits and reward history updates of epoch rewards do cover all the rewards -/// slice. 
-#[test] -fn test_epoch_credit_rewards_and_history_update() { - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); - let mut bank = Bank::new_for_tests(&genesis_config); - - // setup the expected number of stake rewards - let expected_num = 12345; - - let mut stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - bank.store_accounts((bank.slot(), &stake_rewards[..])); - - // Simulate rewards - let mut expected_rewards = 0; - for stake_reward in &mut stake_rewards { - stake_reward.credit(1); - expected_rewards += 1; - } - - let stake_rewards_bucket = - hash_rewards_into_partitions(stake_rewards, &Hash::new(&[1; 32]), 100); - bank.set_epoch_reward_status_active(stake_rewards_bucket.clone()); - - // Test partitioned stores - let mut total_rewards = 0; - let mut total_num_updates = 0; - - let pre_update_history_len = bank.rewards.read().unwrap().len(); - - for stake_rewards in stake_rewards_bucket { - let total_rewards_in_lamports = bank.store_stake_accounts_in_partition(&stake_rewards); - let num_history_updates = bank.update_reward_history_in_partition(&stake_rewards); - assert_eq!(stake_rewards.len(), num_history_updates); - total_rewards += total_rewards_in_lamports; - total_num_updates += num_history_updates; - } - - let post_update_history_len = bank.rewards.read().unwrap().len(); - - // assert that all rewards are credited - assert_eq!(total_rewards, expected_rewards); - assert_eq!(total_num_updates, expected_num); - assert_eq!( - total_num_updates, - post_update_history_len - pre_update_history_len - ); -} - -/// Test distribute partitioned epoch rewards -#[test] -fn test_distribute_partitioned_epoch_rewards_bank_capital_and_sysvar_balance() { - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); - - // Set up epoch_rewards sysvar with rewards with 1e9 lamports to distribute. 
- let total_rewards = 1_000_000_000; - bank.create_epoch_rewards_sysvar(total_rewards, 0, 42); - let pre_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); - assert_eq!(pre_epoch_rewards_account.lamports(), total_rewards); - - // Set up a partition of rewards to distribute - let expected_num = 100; - let mut stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - let mut rewards_to_distribute = 0; - for stake_reward in &mut stake_rewards { - stake_reward.credit(100); - rewards_to_distribute += 100; - } - let all_rewards = vec![stake_rewards]; - - // Distribute rewards - let pre_cap = bank.capitalization(); - bank.distribute_epoch_rewards_in_partition(&all_rewards, 0); - let post_cap = bank.capitalization(); - let post_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); - let expected_epoch_rewards_sysvar_lamports_remaining = total_rewards - rewards_to_distribute; - - // Assert that epoch rewards sysvar lamports decreases by the distributed rewards - assert_eq!( - post_epoch_rewards_account.lamports(), - expected_epoch_rewards_sysvar_lamports_remaining - ); - - let epoch_rewards: sysvar::epoch_rewards::EpochRewards = - from_account(&post_epoch_rewards_account).unwrap(); - assert_eq!(epoch_rewards.total_rewards, total_rewards); - assert_eq!(epoch_rewards.distributed_rewards, rewards_to_distribute,); - - // Assert that the bank total capital didn't change - assert_eq!(pre_cap, post_cap); -} - #[test] /// Test rewards computation and partitioned rewards distribution at the epoch boundary fn test_rewards_computation() { @@ -12718,97 +12566,6 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { } } -/// Test rewards computation and partitioned rewards distribution at the epoch boundary -#[test] -fn test_store_stake_accounts_in_partition() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); - - let expected_num = 100; - - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - let expected_total = stake_rewards - .iter() - .map(|stake_reward| stake_reward.stake_reward_info.lamports) - .sum::() as u64; - - let total_rewards_in_lamports = bank.store_stake_accounts_in_partition(&stake_rewards); - assert_eq!(expected_total, total_rewards_in_lamports); -} - -#[test] -fn test_store_stake_accounts_in_partition_empty() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); - - let stake_rewards = vec![]; - - let expected_total = 0; - - let total_rewards_in_lamports = bank.store_stake_accounts_in_partition(&stake_rewards); - assert_eq!(expected_total, total_rewards_in_lamports); -} - -#[test] -fn test_update_reward_history_in_partition() { - for zero_reward in [false, true] { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); - - let mut expected_num = 100; - - let mut stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::>(); - - let mut rng = rand::thread_rng(); - let i_zero = rng.gen_range(0..expected_num); - if zero_reward { - // pick one entry to have zero rewards so it gets ignored - stake_rewards[i_zero].stake_reward_info.lamports = 0; - } - - let num_in_history = bank.update_reward_history_in_partition(&stake_rewards); - - if zero_reward 
{ - stake_rewards.remove(i_zero); - // -1 because one of them had zero rewards and was ignored - expected_num -= 1; - } - - bank.rewards - .read() - .unwrap() - .iter() - .zip(stake_rewards.iter()) - .for_each(|((k, reward_info), expected_stake_reward)| { - assert_eq!( - ( - &expected_stake_reward.stake_pubkey, - &expected_stake_reward.stake_reward_info - ), - (k, reward_info) - ); - }); - - assert_eq!(num_in_history, expected_num); - } -} - -#[test] -fn test_update_reward_history_in_partition_empty() { - let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); - - let stake_rewards = vec![]; - - let num_in_history = bank.update_reward_history_in_partition(&stake_rewards); - assert_eq!(num_in_history, 0); -} - #[test] fn test_store_vote_accounts_partitioned() { let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); From 0168e0ab69a631d5998507924debaba1caa566ba Mon Sep 17 00:00:00 2001 From: abcalphabet Date: Tue, 2 Apr 2024 22:50:37 -0300 Subject: [PATCH 149/153] add serialization to AeKey (#208) * add serialization to AeKey * make ElGamalKeypair/AeKey byte lengths public * switch from/to_bytes to from * tests for change logic * variable names * fix clippy on tests * fix nit Co-authored-by: samkim-crypto --------- Co-authored-by: samkim-crypto --- .../src/encryption/auth_encryption.rs | 51 ++++++++++++++++++- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/zk-token-sdk/src/encryption/auth_encryption.rs b/zk-token-sdk/src/encryption/auth_encryption.rs index 3bc5c657de103e..5961cb57f4c10f 100644 --- a/zk-token-sdk/src/encryption/auth_encryption.rs +++ b/zk-token-sdk/src/encryption/auth_encryption.rs @@ -32,7 +32,7 @@ use { }; /// Byte length of an authenticated encryption secret key -const AE_KEY_LEN: usize = 16; +pub const AE_KEY_LEN: usize = 16; /// Byte length of an authenticated encryption nonce component const NONCE_LEN: usize = 12; @@ -102,7 +102,7 @@ impl AuthenticatedEncryption { } } -#[derive(Debug, Zeroize)] +#[derive(Debug, Zeroize, Eq, PartialEq)] pub struct AeKey([u8; AE_KEY_LEN]); impl AeKey { /// Deterministically derives an authenticated encryption key from a Solana signer and a public @@ -210,6 +210,31 @@ impl SeedDerivable for AeKey { } } +impl From<[u8; AE_KEY_LEN]> for AeKey { + fn from(bytes: [u8; AE_KEY_LEN]) -> Self { + Self(bytes) + } +} + +impl From for [u8; AE_KEY_LEN] { + fn from(key: AeKey) -> Self { + key.0 + } +} + +impl TryFrom<&[u8]> for AeKey { + type Error = AuthenticatedEncryptionError; + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() != AE_KEY_LEN { + return Err(AuthenticatedEncryptionError::Deserialization); + } + bytes + .try_into() + .map(Self) + .map_err(|_| AuthenticatedEncryptionError::Deserialization) + } +} + /// For the purpose of encrypting balances for the spl token accounts, the nonce and ciphertext /// sizes should always be fixed. 
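// Editor's sketch of the byte round-trip this patch adds, mirroring the tests
// further below: `From<[u8; AE_KEY_LEN]>` is infallible, while `TryFrom<&[u8]>`
// rejects any slice whose length is not exactly AE_KEY_LEN (16 bytes):
//
//     let key = AeKey::from_seed(&[0; 32]).unwrap();
//     let bytes: [u8; AE_KEY_LEN] = AeKey::from_seed(&[0; 32]).unwrap().into();
//     assert_eq!(key, AeKey::from(bytes));
//     assert!(AeKey::try_from(&[0u8; 32][..]).is_err()); // 32 != AE_KEY_LEN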
type Nonce = [u8; NONCE_LEN]; @@ -298,4 +323,26 @@ mod tests { let too_long_seed = vec![0; 65536]; assert!(AeKey::from_seed(&too_long_seed).is_err()); } + + #[test] + fn test_aes_key_from() { + let key = AeKey::from_seed(&[0; 32]).unwrap(); + let key_bytes: [u8; AE_KEY_LEN] = AeKey::from_seed(&[0; 32]).unwrap().into(); + + assert_eq!(key, AeKey::from(key_bytes)); + } + + #[test] + fn test_aes_key_try_from() { + let key = AeKey::from_seed(&[0; 32]).unwrap(); + let key_bytes: [u8; AE_KEY_LEN] = AeKey::from_seed(&[0; 32]).unwrap().into(); + + assert_eq!(key, AeKey::try_from(key_bytes.as_slice()).unwrap()); + } + + #[test] + fn test_aes_key_try_from_error() { + let too_many_bytes = vec![0_u8; 32]; + assert!(AeKey::try_from(too_many_bytes.as_slice()).is_err()); + } } From 57572d59c8b21d303663ac5338aed597138a5247 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 3 Apr 2024 09:19:01 -0500 Subject: [PATCH 150/153] add scan_index for improving index generation (#524) * add scan_index for improving index generation * pr feedback * rework some stuff from pr feedback * get rid of redundant if * deal with rent correctly --- accounts-db/src/accounts_db.rs | 107 +++++++++++++++++++++---------- accounts-db/src/accounts_file.rs | 10 ++- accounts-db/src/append_vec.rs | 69 ++++++++++++++++++-- 3 files changed, 144 insertions(+), 42 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 3e9eaa144545f9..76e3d76a29c400 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8642,8 +8642,6 @@ impl AccountsDb { if accounts.next().is_none() { return SlotIndexGenerationInfo::default(); } - let accounts = storage.accounts.account_iter(); - let secondary = !self.account_indexes.is_empty(); let mut rent_paying_accounts_by_partition = Vec::default(); @@ -8652,46 +8650,87 @@ impl AccountsDb { let mut amount_to_top_off_rent = 0; let mut stored_size_alive = 0; - let items = accounts.map(|stored_account| { - stored_size_alive += stored_account.stored_size(); - let pubkey = stored_account.pubkey(); - if secondary { + let (dirty_pubkeys, insert_time_us, mut generate_index_results) = if !secondary { + let mut items_local = Vec::default(); + storage.accounts.scan_index(|info| { + stored_size_alive += info.stored_size_aligned; + if info.index_info.lamports > 0 { + accounts_data_len += info.index_info.data_len; + } + items_local.push(info.index_info); + }); + let items = items_local.into_iter().map(|info| { + if let Some(amount_to_top_off_rent_this_account) = Self::stats_for_rent_payers( + &info.pubkey, + info.lamports, + info.data_len as usize, + info.rent_epoch, + info.executable, + rent_collector, + ) { + amount_to_top_off_rent += amount_to_top_off_rent_this_account; + num_accounts_rent_paying += 1; + // remember this rent-paying account pubkey + rent_paying_accounts_by_partition.push(info.pubkey); + } + + ( + info.pubkey, + AccountInfo::new( + StorageLocation::AppendVec(store_id, info.offset), // will never be cached + info.lamports, + ), + ) + }); + self.accounts_index + .insert_new_if_missing_into_primary_index( + slot, + storage.approx_stored_count(), + items, + ) + } else { + let accounts = storage.accounts.account_iter(); + let items = accounts.map(|stored_account| { + stored_size_alive += stored_account.stored_size(); + let pubkey = stored_account.pubkey(); self.accounts_index.update_secondary_indexes( pubkey, &stored_account, &self.account_indexes, ); - } - if !stored_account.is_zero_lamport() { - accounts_data_len += 
stored_account.data().len() as u64; - } - - if let Some(amount_to_top_off_rent_this_account) = Self::stats_for_rent_payers( - pubkey, - stored_account.lamports(), - stored_account.data().len(), - stored_account.rent_epoch(), - stored_account.executable(), - rent_collector, - ) { - amount_to_top_off_rent += amount_to_top_off_rent_this_account; - num_accounts_rent_paying += 1; - // remember this rent-paying account pubkey - rent_paying_accounts_by_partition.push(*pubkey); - } + if !stored_account.is_zero_lamport() { + accounts_data_len += stored_account.data().len() as u64; + } - ( - *pubkey, - AccountInfo::new( - StorageLocation::AppendVec(store_id, stored_account.offset()), // will never be cached + if let Some(amount_to_top_off_rent_this_account) = Self::stats_for_rent_payers( + pubkey, stored_account.lamports(), - ), - ) - }); + stored_account.data().len(), + stored_account.rent_epoch(), + stored_account.executable(), + rent_collector, + ) { + amount_to_top_off_rent += amount_to_top_off_rent_this_account; + num_accounts_rent_paying += 1; + // remember this rent-paying account pubkey + rent_paying_accounts_by_partition.push(*pubkey); + } - let (dirty_pubkeys, insert_time_us, mut generate_index_results) = self - .accounts_index - .insert_new_if_missing_into_primary_index(slot, storage.approx_stored_count(), items); + ( + *pubkey, + AccountInfo::new( + StorageLocation::AppendVec(store_id, stored_account.offset()), // will never be cached + stored_account.lamports(), + ), + ) + }); + self.accounts_index + .insert_new_if_missing_into_primary_index( + slot, + storage.approx_stored_count(), + items, + ) + }; if let Some(duplicates_this_slot) = std::mem::take(&mut generate_index_results.duplicates) { // there were duplicate pubkeys in this same slot diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 72f0373d95ecd3..f8a1e5cce80834 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -6,7 +6,7 @@ use { }, accounts_db::AccountsFileId, accounts_hash::AccountHash, - append_vec::{AppendVec, AppendVecError}, + append_vec::{AppendVec, AppendVecError, IndexInfo}, storable_accounts::StorableAccounts, tiered_storage::{ error::TieredStorageError, hot::HOT_FORMAT, index::IndexOffset, TieredStorage, @@ -180,6 +180,14 @@ impl AccountsFile { AccountsFileIter::new(self) } + /// iterate over all entries to put in index + pub(crate) fn scan_index(&self, callback: impl FnMut(IndexInfo)) { + match self { + Self::AppendVec(av) => av.scan_index(callback), + Self::TieredStorage(_ts) => unimplemented!(), + } + } + /// iterate over all pubkeys pub(crate) fn scan_pubkeys(&self, callback: impl FnMut(&Pubkey)) { match self { diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 4b63b8c0e062e5..d0f9bde0d08645 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -197,6 +197,26 @@ impl<'append_vec> ReadableAccount for AppendVecStoredAccountMeta<'append_vec> { } } +/// info from an entry useful for building an index +pub(crate) struct IndexInfo { + /// size of entry, aligned to next u64 + /// This matches the return of `get_account` + pub stored_size_aligned: usize, + /// info on the entry + pub index_info: IndexInfoInner, +} + +/// info from an entry useful for building an index +pub(crate) struct IndexInfoInner { + /// offset to this entry + pub offset: usize, + pub pubkey: Pubkey, + pub lamports: u64, + pub rent_epoch: Epoch, + pub executable: bool, + pub data_len: u64, +} + /// offsets to help 
navigate the persisted format of `AppendVec` #[derive(Debug)] struct AccountOffsets { @@ -204,6 +224,8 @@ struct AccountOffsets { offset_to_end_of_data: usize, /// offset to the next account. This will be aligned. next_account_offset: usize, + /// # of bytes (aligned) to store this account, including variable sized data + stored_size_aligned: usize, } /// A thread-safe, file-backed block of memory used to store `Account` instances. Append operations @@ -598,17 +620,50 @@ impl AppendVec { /// the next account is then aligned on a 64 bit boundary. /// With these helpers, we can skip over reading some of the data depending on what the caller wants. fn next_account_offset(start_offset: usize, stored_meta: &StoredMeta) -> AccountOffsets { - let start_of_data = start_offset - + std::mem::size_of::() - + std::mem::size_of::() - + std::mem::size_of::(); - let aligned_data_len = u64_align!(stored_meta.data_len as usize); - let next_account_offset = start_of_data + aligned_data_len; - let offset_to_end_of_data = start_of_data + stored_meta.data_len as usize; + let stored_size_unaligned = STORE_META_OVERHEAD + stored_meta.data_len as usize; + let stored_size_aligned = u64_align!(stored_size_unaligned); + let offset_to_end_of_data = start_offset + stored_size_unaligned; + let next_account_offset = start_offset + stored_size_aligned; AccountOffsets { next_account_offset, offset_to_end_of_data, + stored_size_aligned, + } + } + + /// Iterate over all accounts and call `callback` with `IndexInfo` for each. + /// This fn can help generate an index of the data in this storage. + pub(crate) fn scan_index(&self, mut callback: impl FnMut(IndexInfo)) { + let mut offset = 0; + loop { + let Some((stored_meta, next)) = self.get_type::(offset) else { + // eof + break; + }; + let Some((account_meta, _)) = self.get_type::(next) else { + // eof + break; + }; + let next = Self::next_account_offset(offset, stored_meta); + if next.offset_to_end_of_data > self.len() { + // data doesn't fit, so don't include this account + break; + } + callback(IndexInfo { + index_info: { + IndexInfoInner { + pubkey: stored_meta.pubkey, + lamports: account_meta.lamports, + offset, + data_len: stored_meta.data_len, + executable: account_meta.executable, + rent_epoch: account_meta.rent_epoch, + } + }, + stored_size_aligned: next.stored_size_aligned, + }); + offset = next.next_account_offset; } } From ce1f41e547109f69385817371b91f82cb1aa2579 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 3 Apr 2024 10:21:11 -0400 Subject: [PATCH 151/153] Removes write version from StorableAccounts (#542) --- accounts-db/src/account_storage/meta.rs | 11 +++----- accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/append_vec.rs | 2 +- accounts-db/src/storable_accounts.rs | 35 +++++++------------------ 4 files changed, 15 insertions(+), 35 deletions(-) diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index cc01ba164b077f..559c6748ee7455 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -52,7 +52,7 @@ impl< { /// used when accounts contains hash and write version already pub fn new(accounts: &'b U) -> Self { - assert!(accounts.has_hash_and_write_version()); + assert!(accounts.has_hash()); Self { accounts, hashes_and_write_versions: None, @@ -66,7 +66,7 @@ impl< hashes: Vec, write_versions: Vec, ) -> Self { - assert!(!accounts.has_hash_and_write_version()); + assert!(!accounts.has_hash()); assert_eq!(accounts.len(), hashes.len()); assert_eq!(write_versions.len(), 
hashes.len()); Self { @@ -80,11 +80,8 @@ impl< pub fn get(&self, index: usize) -> (Option<&T>, &Pubkey, &AccountHash, StoredMetaWriteVersion) { let account = self.accounts.account_default_if_zero_lamport(index); let pubkey = self.accounts.pubkey(index); - let (hash, write_version) = if self.accounts.has_hash_and_write_version() { - ( - self.accounts.hash(index), - self.accounts.write_version(index), - ) + let (hash, write_version) = if self.accounts.has_hash() { + (self.accounts.hash(index), StoredMetaWriteVersion::default()) } else { let item = self.hashes_and_write_versions.as_ref().unwrap(); (item.0[index].borrow(), item.1[index]) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 76e3d76a29c400..ca330e03a02388 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6487,7 +6487,7 @@ impl AccountsDb { self.write_accounts_to_cache(slot, accounts, txn_iter) } StoreTo::Storage(storage) => { - if accounts.has_hash_and_write_version() { + if accounts.has_hash() { self.write_accounts_to_storage( slot, storage, diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index d0f9bde0d08645..e19fa137f7ba22 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -878,7 +878,7 @@ pub mod tests { static_assertions::assert_eq_align!(u64, StoredMeta, AccountMeta); #[test] - #[should_panic(expected = "accounts.has_hash_and_write_version()")] + #[should_panic(expected = "accounts.has_hash()")] fn test_storable_accounts_with_hashes_and_write_versions_new() { let account = AccountSharedData::default(); // for (Slot, &'a [(&'a Pubkey, &'a T)]) diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 1527d7c1a84ecc..c523467fa2db65 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -35,23 +35,16 @@ pub trait StorableAccounts<'a, T: ReadableAccount + Sync>: Sync { false } - /// true iff the impl can provide hash and write_version - /// Otherwise, hash and write_version have to be provided separately to store functions. - fn has_hash_and_write_version(&self) -> bool { + /// true iff the impl can provide hash + /// Otherwise, hash has to be provided separately to store functions. 
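// [editorial sketch, not part of this patch] Callers branch on `has_hash()`
// to decide whether hashes must be computed before storing, as the
// `StoreTo::Storage` arm in accounts_db.rs does:
//
//     if accounts.has_hash() {
//         // the impl carries hashes; read them back via accounts.hash(index)
//     } else {
//         // compute an AccountHash per account, then pass the hashes in
//         // separately when building the storable wrapper
//     }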
+ fn has_hash(&self) -> bool { false } /// return hash for account at 'index' - /// Should only be called if 'has_hash_and_write_version' = true + /// Should only be called if 'has_hash' = true fn hash(&self, _index: usize) -> &AccountHash { - // this should never be called if has_hash_and_write_version returns false - unimplemented!(); - } - - /// return write_version for account at 'index' - /// Should only be called if 'has_hash_and_write_version' = true - fn write_version(&self, _index: usize) -> u64 { - // this should never be called if has_hash_and_write_version returns false + // this should never be called if has_hash returns false unimplemented!(); } } @@ -142,15 +135,12 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for (Slot, &'a [&'a StoredA fn len(&self) -> usize { self.1.len() } - fn has_hash_and_write_version(&self) -> bool { + fn has_hash(&self) -> bool { true } fn hash(&self, index: usize) -> &AccountHash { self.account(index).hash() } - fn write_version(&self, index: usize) -> u64 { - self.account(index).write_version() - } } /// holds slices of accounts being moved FROM a common source slot to 'target_slot' @@ -237,15 +227,12 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> for StorableAccountsBySlot< fn contains_multiple_slots(&self) -> bool { self.contains_multiple_slots } - fn has_hash_and_write_version(&self) -> bool { + fn has_hash(&self) -> bool { true } fn hash(&self, index: usize) -> &AccountHash { self.account(index).hash() } - fn write_version(&self, index: usize) -> u64 { - self.account(index).write_version() - } } /// this tuple contains a single different source slot that applies to all accounts @@ -269,15 +256,12 @@ impl<'a> StorableAccounts<'a, StoredAccountMeta<'a>> fn len(&self) -> usize { self.1.len() } - fn has_hash_and_write_version(&self) -> bool { + fn has_hash(&self) -> bool { true } fn hash(&self, index: usize) -> &AccountHash { self.account(index).hash() } - fn write_version(&self, index: usize) -> u64 { - self.account(index).write_version() - } } #[cfg(test)] @@ -525,7 +509,7 @@ pub mod tests { }) .collect::>(); let storable = StorableAccountsBySlot::new(99, &slots_and_accounts[..]); - assert!(storable.has_hash_and_write_version()); + assert!(storable.has_hash()); assert_eq!(99, storable.target_slot()); assert_eq!(entries0 != entries, storable.contains_multiple_slots()); (0..entries).for_each(|index| { @@ -534,7 +518,6 @@ pub mod tests { assert_eq!(storable.pubkey(index), raw2[index].pubkey()); assert_eq!(storable.hash(index), raw2[index].hash()); assert_eq!(storable.slot(index), expected_slots[index]); - assert_eq!(storable.write_version(index), raw2[index].write_version()); }) } } From 7b204e7d01626c1cd582ac36dbbf8be66a976f2d Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 3 Apr 2024 11:01:31 -0500 Subject: [PATCH 152/153] ConsumeWorker: collect and report bank waiting times (#477) --- core/src/banking_stage/consume_worker.rs | 35 ++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 12bb4fc1e88ee1..dcc9ecc306845b 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -5,6 +5,7 @@ use { scheduler_messages::{ConsumeWork, FinishedConsumeWork}, }, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, + solana_measure::measure_us, solana_poh::leader_bank_notifier::LeaderBankNotifier, solana_runtime::bank::Bank, 
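// [editorial note, not part of this patch] `measure_us!` evaluates an
// expression and yields a `(result, elapsed_microseconds)` tuple; that shape
// is what the wait_for_bank timings below rely on. A minimal sketch, with
// `expensive_call` and `timing_metric` as hypothetical names:
//
//     let (result, elapsed_us) = measure_us!(expensive_call());
//     timing_metric.fetch_add(elapsed_us, Ordering::Relaxed);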
solana_sdk::timing::AtomicInterval, @@ -65,15 +66,33 @@ impl ConsumeWorker { } fn consume_loop(&self, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { - let Some(mut bank) = self.get_consume_bank() else { + let (maybe_consume_bank, get_bank_us) = measure_us!(self.get_consume_bank()); + let Some(mut bank) = maybe_consume_bank else { + self.metrics + .timing_metrics + .wait_for_bank_failure_us + .fetch_add(get_bank_us, Ordering::Relaxed); return self.retry_drain(work); }; + self.metrics + .timing_metrics + .wait_for_bank_success_us + .fetch_add(get_bank_us, Ordering::Relaxed); for work in try_drain_iter(work, &self.consume_receiver) { if bank.is_complete() { - if let Some(new_bank) = self.get_consume_bank() { + let (maybe_new_bank, get_bank_us) = measure_us!(self.get_consume_bank()); + if let Some(new_bank) = maybe_new_bank { + self.metrics + .timing_metrics + .wait_for_bank_success_us + .fetch_add(get_bank_us, Ordering::Relaxed); bank = new_bank; } else { + self.metrics + .timing_metrics + .wait_for_bank_failure_us + .fetch_add(get_bank_us, Ordering::Relaxed); return self.retry_drain(work); } } @@ -471,6 +490,8 @@ struct ConsumeWorkerTimingMetrics { record_us: AtomicU64, commit_us: AtomicU64, find_and_send_votes_us: AtomicU64, + wait_for_bank_success_us: AtomicU64, + wait_for_bank_failure_us: AtomicU64, } impl ConsumeWorkerTimingMetrics { @@ -510,6 +531,16 @@ impl ConsumeWorkerTimingMetrics { self.find_and_send_votes_us.swap(0, Ordering::Relaxed), i64 ), + ( + "wait_for_bank_success_us", + self.wait_for_bank_success_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "wait_for_bank_failure_us", + self.wait_for_bank_failure_us.swap(0, Ordering::Relaxed), + i64 + ), ); } } From afa65c6690d9376ff7c3fbd9e369d3b3196b79fd Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 3 Apr 2024 12:41:20 -0400 Subject: [PATCH 153/153] Removes write version from StorableAccountsWithHashesAndWriteVersions (#561) --- accounts-db/benches/append_vec.rs | 11 +- accounts-db/benches/bench_accounts_file.rs | 12 +-- accounts-db/src/account_storage/meta.rs | 40 +++---- accounts-db/src/accounts_db.rs | 117 +++++---------------- accounts-db/src/accounts_file.rs | 6 +- accounts-db/src/ancient_append_vecs.rs | 7 -- accounts-db/src/append_vec.rs | 94 ++++------------- accounts-db/src/tiered_storage.rs | 24 +---- accounts-db/src/tiered_storage/hot.rs | 26 ++--- 9 files changed, 88 insertions(+), 249 deletions(-) diff --git a/accounts-db/benches/append_vec.rs b/accounts-db/benches/append_vec.rs index 83517e7ac4338b..44914d4c19112b 100644 --- a/accounts-db/benches/append_vec.rs +++ b/accounts-db/benches/append_vec.rs @@ -4,9 +4,7 @@ extern crate test; use { rand::{thread_rng, Rng}, solana_accounts_db::{ - account_storage::meta::{ - StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredMeta, - }, + account_storage::meta::{StorableAccountsWithHashes, StoredAccountInfo, StoredMeta}, accounts_hash::AccountHash, append_vec::{ test_utils::{create_test_account, get_append_vec_path}, @@ -39,12 +37,7 @@ fn append_account( let accounts = [(&storage_meta.pubkey, account)]; let slice = &accounts[..]; let accounts = (slot_ignored, slice); - let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &accounts, - vec![&hash], - vec![storage_meta.write_version_obsolete], - ); + let storable_accounts = StorableAccountsWithHashes::new_with_hashes(&accounts, vec![&hash]); let res = vec.append_accounts(&storable_accounts, 0); res.and_then(|res| res.first().cloned()) } diff --git 
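// [editorial sketch, not part of this patch] The wait_for_bank_* counters
// added above follow the same accumulate-then-drain pattern as the existing
// worker metrics: the hot path does fetch_add, and the reporter swaps each
// counter back to zero as it emits a datapoint:
//
//     let drained_us = self.wait_for_bank_success_us.swap(0, Ordering::Relaxed);
//     // `drained_us` goes into the datapoint; the counter restarts from zero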
a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 3a05b0139f473e..bde23b2bf1f8ec 100644 --- a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -2,7 +2,7 @@ use { criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput}, solana_accounts_db::{ - account_storage::meta::StorableAccountsWithHashesAndWriteVersions, + account_storage::meta::StorableAccountsWithHashes, accounts_hash::AccountHash, append_vec::{self, AppendVec}, tiered_storage::hot::HotStorageWriter, @@ -46,12 +46,10 @@ fn bench_write_accounts_file(c: &mut Criterion) { .collect(); let accounts_refs: Vec<_> = accounts.iter().collect(); let accounts_data = (Slot::MAX, accounts_refs.as_slice()); - let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &accounts_data, - vec![AccountHash(Hash::default()); accounts_count], - vec![0; accounts_count], - ); + let storable_accounts = StorableAccountsWithHashes::new_with_hashes( + &accounts_data, + vec![AccountHash(Hash::default()); accounts_count], + ); group.bench_function(BenchmarkId::new("append_vec", accounts_count), |b| { b.iter_batched_ref( diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 559c6748ee7455..1204f94e683590 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -25,8 +25,7 @@ lazy_static! { /// This struct contains what is needed to store accounts to a storage /// 1. account & pubkey (StorableAccounts) /// 2. hash per account (Maybe in StorableAccounts, otherwise has to be passed in separately) -/// 3. write version per account (Maybe in StorableAccounts, otherwise has to be passed in separately) -pub struct StorableAccountsWithHashesAndWriteVersions< +pub struct StorableAccountsWithHashes< 'a: 'b, 'b, T: ReadableAccount + Sync + 'b, @@ -35,10 +34,10 @@ pub struct StorableAccountsWithHashesAndWriteVersions< > { /// accounts to store /// always has pubkey and account - /// may also have hash and write_version per account + /// may also have hash per account pub(crate) accounts: &'b U, - /// if accounts does not have hash and write version, this has a hash and write version per account - hashes_and_write_versions: Option<(Vec, Vec)>, + /// if accounts does not have hash, this has a hash per account + hashes: Option>, _phantom: PhantomData<&'a T>, } @@ -48,45 +47,40 @@ impl< T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: Borrow, - > StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V> + > StorableAccountsWithHashes<'a, 'b, T, U, V> { - /// used when accounts contains hash and write version already + /// used when accounts contains hash already pub fn new(accounts: &'b U) -> Self { assert!(accounts.has_hash()); Self { accounts, - hashes_and_write_versions: None, + hashes: None, _phantom: PhantomData, } } - /// used when accounts does NOT contains hash or write version - /// In this case, hashes and write_versions have to be passed in separately and zipped together. - pub fn new_with_hashes_and_write_versions( - accounts: &'b U, - hashes: Vec, - write_versions: Vec, - ) -> Self { + /// used when accounts does NOT contains hash + /// In this case, hashes have to be passed in separately. 
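// [illustrative usage, mirroring the tests later in this patch]
//
//     let accounts = [(&pubkey, &account)];
//     let accounts2 = (slot, &accounts[..]);
//     let hashes = vec![AccountHash(Hash::default())];
//     let storable = StorableAccountsWithHashes::new_with_hashes(&accounts2, hashes);
//     // each entry now yields (Option<&T>, &Pubkey, &AccountHash)
//     let (stored_account, stored_pubkey, stored_hash) = storable.get(0);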
+ pub fn new_with_hashes(accounts: &'b U, hashes: Vec) -> Self { assert!(!accounts.has_hash()); assert_eq!(accounts.len(), hashes.len()); - assert_eq!(write_versions.len(), hashes.len()); Self { accounts, - hashes_and_write_versions: Some((hashes, write_versions)), + hashes: Some(hashes), _phantom: PhantomData, } } /// get all account fields at 'index' - pub fn get(&self, index: usize) -> (Option<&T>, &Pubkey, &AccountHash, StoredMetaWriteVersion) { + pub fn get(&self, index: usize) -> (Option<&T>, &Pubkey, &AccountHash) { let account = self.accounts.account_default_if_zero_lamport(index); let pubkey = self.accounts.pubkey(index); - let (hash, write_version) = if self.accounts.has_hash() { - (self.accounts.hash(index), StoredMetaWriteVersion::default()) + let hash = if self.accounts.has_hash() { + self.accounts.hash(index) } else { - let item = self.hashes_and_write_versions.as_ref().unwrap(); - (item.0[index].borrow(), item.1[index]) + let item = self.hashes.as_ref().unwrap(); + item[index].borrow() }; - (account, pubkey, hash, write_version) + (account, pubkey, hash) } /// None if account at index has lamports == 0 diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ca330e03a02388..2049914710f6c7 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -26,7 +26,7 @@ use { crate::{ account_info::{AccountInfo, StorageLocation}, account_storage::{ - meta::{StorableAccountsWithHashesAndWriteVersions, StoredAccountMeta}, + meta::{StorableAccountsWithHashes, StoredAccountMeta}, AccountStorage, AccountStorageStatus, ShrinkInProgress, }, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, @@ -5984,7 +5984,7 @@ impl AccountsDb { &self, slot: Slot, storage: &AccountStorageEntry, - accounts_and_meta_to_store: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, + accounts_and_meta_to_store: &StorableAccountsWithHashes<'a, 'b, T, U, V>, ) -> Vec { let mut infos: Vec = Vec::with_capacity(accounts_and_meta_to_store.len()); let mut total_append_accounts_us = 0; @@ -6491,21 +6491,14 @@ impl AccountsDb { self.write_accounts_to_storage( slot, storage, - &StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new( - accounts, - ), + &StorableAccountsWithHashes::<'_, '_, _, _, &AccountHash>::new(accounts), ) } else { - let write_versions = vec![0; accounts.len()]; match hashes { Some(hashes) => self.write_accounts_to_storage( slot, storage, - &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - accounts, - hashes, - write_versions, - ), + &StorableAccountsWithHashes::new_with_hashes(accounts, hashes), ), None => { // hash any accounts where we were lazy in calculating the hash @@ -6513,11 +6506,9 @@ impl AccountsDb { let len = accounts.len(); let mut hashes = Vec::with_capacity(len); for index in 0..accounts.len() { - let (pubkey, account) = (accounts.pubkey(index), accounts.account(index)); - let hash = Self::hash_account( - account, - pubkey, - ); + let (pubkey, account) = + (accounts.pubkey(index), accounts.account(index)); + let hash = Self::hash_account(account, pubkey); hashes.push(hash); } hash_time.stop(); @@ -6526,10 +6517,10 @@ impl AccountsDb { .fetch_add(hash_time.as_us(), Ordering::Relaxed); self.write_accounts_to_storage( - slot, - storage, - &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions(accounts, hashes, write_versions), - ) + slot, + storage, + &StorableAccountsWithHashes::new_with_hashes(accounts, hashes), + ) } } } @@ -9543,7 +9534,7 
@@ pub mod tests { super::*, crate::{ account_info::StoredSize, - account_storage::meta::{AccountMeta, StoredMeta, StoredMetaWriteVersion}, + account_storage::meta::{AccountMeta, StoredMeta}, accounts_file::AccountsFileProvider, accounts_hash::MERKLE_FANOUT, accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, @@ -9689,13 +9680,7 @@ pub mod tests { .iter() .map(|_| AccountHash(Hash::default())) .collect::>(); - let write_versions = data.iter().map(|_| 0).collect::>(); - let append = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &storable, - hashes, - write_versions, - ); + let append = StorableAccountsWithHashes::new_with_hashes(&storable, hashes); // construct append vec with account to generate an index from append_vec.accounts.append_accounts(&append, 0); @@ -10174,13 +10159,8 @@ pub mod tests { let slice = &accounts[..]; let account_data = (slot, slice); let hashes = (0..account_data.len()).map(|_| &hash).collect(); - let write_versions = (0..account_data.len()).map(|_| 0).collect(); let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - hashes, - write_versions, - ); + StorableAccountsWithHashes::new_with_hashes(&account_data, hashes); copied_storage .accounts .append_accounts(&storable_accounts, 0); @@ -10220,13 +10200,8 @@ pub mod tests { let slice = &accounts[..]; let account_data = (slot, slice); let hashes = (0..account_data.len()).map(|_| &hash).collect(); - let write_versions = (0..account_data.len()).map(|_| 0).collect(); let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - hashes, - write_versions, - ); + StorableAccountsWithHashes::new_with_hashes(&account_data, hashes); copied_storage .accounts .append_accounts(&storable_accounts, 0); @@ -10631,7 +10606,7 @@ pub mod tests { let pubkey = solana_sdk::pubkey::new_rand(); let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner()); let mark_alive = false; - append_single_account_with_default_hash(&storage, &pubkey, &acc, 1, mark_alive, None); + append_single_account_with_default_hash(&storage, &pubkey, &acc, mark_alive, None); let calls = Arc::new(AtomicU64::new(0)); let temp_dir = TempDir::new().unwrap(); @@ -10686,7 +10661,6 @@ pub mod tests { storage: &AccountStorageEntry, pubkey: &Pubkey, account: &AccountSharedData, - write_version: StoredMetaWriteVersion, mark_alive: bool, add_to_index: Option<&AccountInfoAccountsIndex>, ) { @@ -10696,11 +10670,7 @@ pub mod tests { let account_data = (slot, slice); let hash = AccountHash(Hash::default()); let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - vec![&hash], - vec![write_version], - ); + StorableAccountsWithHashes::new_with_hashes(&account_data, vec![&hash]); let stored_accounts_info = storage .accounts .append_accounts(&storable_accounts, 0) @@ -10756,7 +10726,7 @@ pub mod tests { let pubkey = solana_sdk::pubkey::new_rand(); let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner()); let mark_alive = false; - append_single_account_with_default_hash(&storage, &pubkey, &acc, 1, mark_alive, None); + append_single_account_with_default_hash(&storage, &pubkey, &acc, mark_alive, None); let calls = Arc::new(AtomicU64::new(0)); @@ -10785,7 +10755,6 @@ pub mod tests { fn append_sample_data_to_storage( storage: &Arc, pubkey: &Pubkey, - write_version: StoredMetaWriteVersion, mark_alive: 
bool, account_data_size: Option, ) { @@ -10794,29 +10763,20 @@ pub mod tests { account_data_size.unwrap_or(48) as usize, AccountSharedData::default().owner(), ); - append_single_account_with_default_hash( - storage, - pubkey, - &acc, - write_version, - mark_alive, - None, - ); + append_single_account_with_default_hash(storage, pubkey, &acc, mark_alive, None); } fn sample_storage_with_entries( tf: &TempFile, - write_version: StoredMetaWriteVersion, slot: Slot, pubkey: &Pubkey, mark_alive: bool, ) -> Arc { - sample_storage_with_entries_id(tf, write_version, slot, pubkey, 0, mark_alive, None) + sample_storage_with_entries_id(tf, slot, pubkey, 0, mark_alive, None) } fn sample_storage_with_entries_id_fill_percentage( tf: &TempFile, - write_version: StoredMetaWriteVersion, slot: Slot, pubkey: &Pubkey, id: AccountsFileId, @@ -10842,13 +10802,12 @@ pub mod tests { data.accounts = av; let arc = Arc::new(data); - append_sample_data_to_storage(&arc, pubkey, write_version, mark_alive, account_data_size); + append_sample_data_to_storage(&arc, pubkey, mark_alive, account_data_size); arc } fn sample_storage_with_entries_id( tf: &TempFile, - write_version: StoredMetaWriteVersion, slot: Slot, pubkey: &Pubkey, id: AccountsFileId, @@ -10857,7 +10816,6 @@ pub mod tests { ) -> Arc { sample_storage_with_entries_id_fill_percentage( tf, - write_version, slot, pubkey, id, @@ -10875,12 +10833,10 @@ pub mod tests { let tf = crate::append_vec::test_utils::get_append_vec_path( "test_accountsdb_scan_account_storage_no_bank", ); - let write_version1 = 0; let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let mark_alive = false; - let storage = - sample_storage_with_entries(&tf, write_version1, slot_expected, &pubkey1, mark_alive); + let storage = sample_storage_with_entries(&tf, slot_expected, &pubkey1, mark_alive); let lamports = storage.accounts.account_iter().next().unwrap().lamports(); let calls = Arc::new(AtomicU64::new(0)); let mut scanner = TestScanSimple { @@ -15185,12 +15141,10 @@ pub mod tests { let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache"); let hashes = vec![AccountHash(Hash::default()); 1]; - let write_version = vec![0; 1]; storage.accounts.append_accounts( - &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &StorableAccountsWithHashes::new_with_hashes( &(slot0, &[(&shared_key, &account)][..]), hashes, - write_version, ), 0, ); @@ -15250,12 +15204,10 @@ pub mod tests { let slot0 = 0; let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache"); let hashes = vec![AccountHash(Hash::default()); 2]; - let write_version = vec![0; 2]; storage.accounts.append_accounts( - &StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( + &StorableAccountsWithHashes::new_with_hashes( &(slot0, &[(&keys[0], &account), (&keys[1], &account_big)][..]), hashes, - write_version, ), 0, ); @@ -16251,11 +16203,9 @@ pub mod tests { let tf = crate::append_vec::test_utils::get_append_vec_path( "test_accountsdb_scan_account_storage_no_bank", ); - let write_version1 = 0; let pubkey1 = solana_sdk::pubkey::new_rand(); let mark_alive = false; - let storage = - sample_storage_with_entries(&tf, write_version1, slot, &pubkey1, mark_alive); + let storage = sample_storage_with_entries(&tf, slot, &pubkey1, mark_alive); let load = AccountsDb::hash_storage_info(&mut hasher, Some(&storage), slot); let hash = hasher.finish(); @@ -16269,13 +16219,7 @@ pub mod tests { // can't assert hash here - it is a 
function of mod date assert!(load); let mut hasher = hash_map::DefaultHasher::new(); - append_sample_data_to_storage( - &storage, - &solana_sdk::pubkey::new_rand(), - write_version1, - false, - None, - ); + append_sample_data_to_storage(&storage, &solana_sdk::pubkey::new_rand(), false, None); let load = AccountsDb::hash_storage_info(&mut hasher, Some(&storage), slot); let hash3 = hasher.finish(); assert_ne!(hash2, hash3); // moddate and written size changed @@ -17298,7 +17242,6 @@ pub mod tests { }); let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap()); - let write_version1 = 0; let starting_id = db .storage .iter() @@ -17310,7 +17253,6 @@ pub mod tests { let pubkey1 = solana_sdk::pubkey::new_rand(); let storage = sample_storage_with_entries_id_fill_percentage( tf, - write_version1, starting_slot + (i as Slot), &pubkey1, id, @@ -17347,7 +17289,6 @@ pub mod tests { }); let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap()); - let write_version1 = 0; let starting_id = db .storage .iter() @@ -17359,7 +17300,6 @@ pub mod tests { let pubkey1 = solana_sdk::pubkey::new_rand(); let storage = sample_storage_with_entries_id( tf, - write_version1, starting_slot + (i as Slot), &pubkey1, id, @@ -17513,9 +17453,8 @@ pub mod tests { let tf = crate::append_vec::test_utils::get_append_vec_path( "test_should_move_to_ancient_append_vec", ); - let write_version1 = 0; let pubkey1 = solana_sdk::pubkey::new_rand(); - let storage = sample_storage_with_entries(&tf, write_version1, slot5, &pubkey1, false); + let storage = sample_storage_with_entries(&tf, slot5, &pubkey1, false); let mut current_ancient = CurrentAncientAccountsFile::default(); let should_move = db.should_move_to_ancient_accounts_file( @@ -17659,7 +17598,7 @@ pub mod tests { fn make_ancient_append_vec_full(ancient: &Arc, mark_alive: bool) { for _ in 0..100 { - append_sample_data_to_storage(ancient, &Pubkey::default(), 0, mark_alive, None); + append_sample_data_to_storage(ancient, &Pubkey::default(), mark_alive, None); } // since we're not adding to the index, this is how we specify that all these accounts are alive adjust_alive_bytes(ancient, ancient.capacity() as usize); diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index f8a1e5cce80834..8a6458a6b59e9a 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -1,9 +1,7 @@ use { crate::{ account_info::AccountInfo, - account_storage::meta::{ - StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, - }, + account_storage::meta::{StorableAccountsWithHashes, StoredAccountInfo, StoredAccountMeta}, accounts_db::AccountsFileId, accounts_hash::AccountHash, append_vec::{AppendVec, AppendVecError, IndexInfo}, @@ -229,7 +227,7 @@ impl AccountsFile { V: Borrow, >( &self, - accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, + accounts: &StorableAccountsWithHashes<'a, 'b, T, U, V>, skip: usize, ) -> Option> { match self { diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index c4df48f0447593..a230ab5be4541e 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -1174,7 +1174,6 @@ pub mod tests { storage, &pk, &account, - 0, true, Some(&db.accounts_index), ); @@ -1282,7 +1281,6 @@ pub mod tests { storage, &pk, &account, - 0, true, Some(&db.accounts_index), ); @@ -1514,12 +1512,10 @@ pub mod tests { storages.iter().for_each(|storage| { let pk = solana_sdk::pubkey::new_rand(); let alive = false; - let 
write_version = 0; append_single_account_with_default_hash( storage, &pk, &AccountSharedData::default(), - write_version, alive, Some(&db.accounts_index), ); @@ -1663,7 +1659,6 @@ pub mod tests { &storage, &pk_with_1_ref, &account_with_1_ref, - 0, true, Some(&db.accounts_index), ); @@ -1676,7 +1671,6 @@ pub mod tests { &ignored_storage, pk_with_2_refs, &account_with_2_refs.to_account_shared_data(), - 0, true, Some(&db.accounts_index), ); @@ -1843,7 +1837,6 @@ pub mod tests { &storage, &pk_with_1_ref, &account_with_1_ref, - 0, true, Some(&db.accounts_index), ); diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index e19fa137f7ba22..c2cadc5cbf1f39 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -7,8 +7,8 @@ use { crate::{ account_storage::meta::{ - AccountMeta, StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, - StoredAccountMeta, StoredMeta, StoredMetaWriteVersion, + AccountMeta, StorableAccountsWithHashes, StoredAccountInfo, StoredAccountMeta, + StoredMeta, StoredMetaWriteVersion, }, accounts_file::{AccountsFileError, MatchAccountOwnerError, Result, ALIGN_BOUNDARY_OFFSET}, accounts_hash::AccountHash, @@ -719,7 +719,7 @@ impl AppendVec { V: Borrow, >( &self, - accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, + accounts: &StorableAccountsWithHashes<'a, 'b, T, U, V>, skip: usize, ) -> Option> { let _lock = self.append_lock.lock().unwrap(); @@ -732,7 +732,7 @@ impl AppendVec { let offsets_len = len - skip + 1; let mut offsets = Vec::with_capacity(offsets_len); for i in skip..len { - let (account, pubkey, hash, write_version_obsolete) = accounts.get(i); + let (account, pubkey, hash) = accounts.get(i); let account_meta = account .map(|account| AccountMeta { lamports: account.lamports(), @@ -747,7 +747,7 @@ impl AppendVec { data_len: account .map(|account| account.data().len()) .unwrap_or_default() as u64, - write_version_obsolete, + write_version_obsolete: 0, }; let meta_ptr = &stored_meta as *const StoredMeta; let account_meta_ptr = &account_meta as *const AccountMeta; @@ -822,11 +822,7 @@ pub mod tests { let account_data = (slot_ignored, slice); let hash = AccountHash(Hash::default()); let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - vec![&hash], - vec![data.0.write_version_obsolete], - ); + StorableAccountsWithHashes::new_with_hashes(&account_data, vec![&hash]); self.append_accounts(&storable_accounts, 0) .map(|res| res[0].offset) @@ -879,85 +875,55 @@ pub mod tests { #[test] #[should_panic(expected = "accounts.has_hash()")] - fn test_storable_accounts_with_hashes_and_write_versions_new() { + fn test_storable_accounts_with_hashes_new() { let account = AccountSharedData::default(); // for (Slot, &'a [(&'a Pubkey, &'a T)]) let slot = 0 as Slot; let pubkey = Pubkey::default(); - StorableAccountsWithHashesAndWriteVersions::<'_, '_, _, _, &AccountHash>::new(&( + StorableAccountsWithHashes::<'_, '_, _, _, &AccountHash>::new(&( slot, &[(&pubkey, &account)][..], )); } - fn test_mismatch(correct_hashes: bool, correct_write_versions: bool) { + fn test_mismatch(correct_hashes: bool) { let account = AccountSharedData::default(); // for (Slot, &'a [(&'a Pubkey, &'a T)]) let slot = 0 as Slot; let pubkey = Pubkey::default(); - // mismatch between lens of accounts, hashes, write_versions + // mismatch between lens of accounts and hashes let mut hashes = Vec::default(); if correct_hashes { hashes.push(AccountHash(Hash::default())); } - let 
mut write_versions = Vec::default(); - if correct_write_versions { - write_versions.push(0); - } - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &(slot, &[(&pubkey, &account)][..]), - hashes, - write_versions, - ); - } - - #[test] - // rust 1.73+ (our as-of-writing nightly version) changed panic message. we're stuck with this - // short common substring until the monorepo is fully 1.73+ including stable. - #[should_panic(expected = "left == right")] - fn test_storable_accounts_with_hashes_and_write_versions_new2() { - test_mismatch(false, false); + StorableAccountsWithHashes::new_with_hashes(&(slot, &[(&pubkey, &account)][..]), hashes); } #[test] // rust 1.73+ (our as-of-writing nightly version) changed panic message. we're stuck with this // short common substring until the monorepo is fully 1.73+ including stable. #[should_panic(expected = "left == right")] - fn test_storable_accounts_with_hashes_and_write_versions_new3() { - test_mismatch(false, true); + fn test_storable_accounts_with_hashes_new2() { + test_mismatch(false); } #[test] - // rust 1.73+ (our as-of-writing nightly version) changed panic message. we're stuck with this - // short common substring until the monorepo is fully 1.73+ including stable. - #[should_panic(expected = "left == right")] - fn test_storable_accounts_with_hashes_and_write_versions_new4() { - test_mismatch(true, false); - } - - #[test] - fn test_storable_accounts_with_hashes_and_write_versions_empty() { + fn test_storable_accounts_with_hashes_empty() { // for (Slot, &'a [(&'a Pubkey, &'a T)]) let account = AccountSharedData::default(); let slot = 0 as Slot; let pubkeys = [Pubkey::default()]; let hashes = Vec::::default(); - let write_versions = Vec::default(); let mut accounts = vec![(&pubkeys[0], &account)]; accounts.clear(); let accounts2 = (slot, &accounts[..]); - let storable = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &accounts2, - hashes, - write_versions, - ); + let storable = StorableAccountsWithHashes::new_with_hashes(&accounts2, hashes); assert_eq!(storable.len(), 0); assert!(storable.is_empty()); } #[test] - fn test_storable_accounts_with_hashes_and_write_versions_hash_and_write_version() { + fn test_storable_accounts_with_hashes_hash() { // for (Slot, &'a [(&'a Pubkey, &'a T)]) let account = AccountSharedData::default(); let slot = 0 as Slot; @@ -966,27 +932,20 @@ pub mod tests { AccountHash(Hash::new(&[3; 32])), AccountHash(Hash::new(&[4; 32])), ]; - let write_versions = vec![42, 43]; let accounts = [(&pubkeys[0], &account), (&pubkeys[1], &account)]; let accounts2 = (slot, &accounts[..]); - let storable = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &accounts2, - hashes.clone(), - write_versions.clone(), - ); + let storable = StorableAccountsWithHashes::new_with_hashes(&accounts2, hashes.clone()); assert_eq!(storable.len(), pubkeys.len()); assert!(!storable.is_empty()); (0..2).for_each(|i| { - let (_, pubkey, hash, write_version) = storable.get(i); + let (_, pubkey, hash) = storable.get(i); assert_eq!(hash, &hashes[i]); - assert_eq!(write_version, write_versions[i]); assert_eq!(pubkey, &pubkeys[i]); }); } #[test] - fn test_storable_accounts_with_hashes_and_write_versions_default() { + fn test_storable_accounts_with_hashes_default() { // 0 lamport account, should return default account (or None in this case) let account = Account { data: vec![0], @@ -997,15 +956,9 @@ pub mod tests { let slot = 0 as Slot; let pubkey = 
Pubkey::default(); let hashes = vec![AccountHash(Hash::default())]; - let write_versions = vec![0]; let accounts = [(&pubkey, &account)]; let accounts2 = (slot, &accounts[..]); - let storable = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &accounts2, - hashes.clone(), - write_versions.clone(), - ); + let storable = StorableAccountsWithHashes::new_with_hashes(&accounts2, hashes.clone()); let get_account = storable.account(0); assert!(get_account.is_none()); @@ -1019,12 +972,7 @@ pub mod tests { // for (Slot, &'a [(&'a Pubkey, &'a T)]) let accounts = [(&pubkey, &account)]; let accounts2 = (slot, &accounts[..]); - let storable = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &accounts2, - hashes, - write_versions, - ); + let storable = StorableAccountsWithHashes::new_with_hashes(&accounts2, hashes); let get_account = storable.account(0); assert!(accounts_equal(&account, get_account.unwrap())); } diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 7b8b26fce64fa9..93c0ebd520e778 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -14,7 +14,7 @@ mod test_utils; use { crate::{ - account_storage::meta::{StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo}, + account_storage::meta::{StorableAccountsWithHashes, StoredAccountInfo}, accounts_hash::AccountHash, storable_accounts::StorableAccounts, }, @@ -119,7 +119,7 @@ impl TieredStorage { V: Borrow, >( &self, - accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, + accounts: &StorableAccountsWithHashes<'a, 'b, T, U, V>, skip: usize, format: &TieredStorageFormat, ) -> TieredStorageResult> { @@ -180,7 +180,6 @@ impl TieredStorage { mod tests { use { super::*, - crate::account_storage::meta::StoredMetaWriteVersion, file::TieredStorageMagicNumber, footer::TieredStorageFooter, hot::HOT_FORMAT, @@ -213,11 +212,7 @@ mod tests { let account_refs = Vec::<(&Pubkey, &AccountSharedData)>::new(); let account_data = (slot_ignored, account_refs.as_slice()); let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - Vec::::new(), - Vec::::new(), - ); + StorableAccountsWithHashes::new_with_hashes(&account_data, Vec::::new()); let result = tiered_storage.write_accounts(&storable_accounts, 0, &HOT_FORMAT); @@ -350,17 +345,8 @@ mod tests { let hashes: Vec<_> = std::iter::repeat_with(|| AccountHash(Hash::new_unique())) .take(account_data_sizes.len()) .collect(); - let write_versions: Vec<_> = accounts - .iter() - .map(|account| account.0.write_version_obsolete) - .collect(); - let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - hashes, - write_versions, - ); + let storable_accounts = StorableAccountsWithHashes::new_with_hashes(&account_data, hashes); let temp_dir = tempdir().unwrap(); let tiered_storage_path = temp_dir.path().join(path_suffix); @@ -373,7 +359,7 @@ mod tests { let mut expected_accounts_map = HashMap::new(); for i in 0..num_accounts { - let (account, address, _account_hash, _write_version) = storable_accounts.get(i); + let (account, address, _account_hash) = storable_accounts.get(i); expected_accounts_map.insert(address, account); } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index d74c069f6a0033..960c26f92168f7 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ 
b/accounts-db/src/tiered_storage/hot.rs @@ -15,8 +15,8 @@ use { }, mmap_utils::{get_pod, get_slice}, owners::{OwnerOffset, OwnersBlockFormat, OwnersTable, OWNER_NO_OWNER}, - StorableAccounts, StorableAccountsWithHashesAndWriteVersions, TieredStorageError, - TieredStorageFormat, TieredStorageResult, + StorableAccounts, StorableAccountsWithHashes, TieredStorageError, TieredStorageFormat, + TieredStorageResult, }, }, bytemuck::{Pod, Zeroable}, @@ -634,7 +634,7 @@ impl HotStorageWriter { V: Borrow, >( &mut self, - accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, + accounts: &StorableAccountsWithHashes<'a, 'b, T, U, V>, skip: usize, ) -> TieredStorageResult> { let mut footer = new_hot_footer(); @@ -648,7 +648,7 @@ impl HotStorageWriter { let total_input_accounts = len - skip; let mut stored_infos = Vec::with_capacity(total_input_accounts); for i in skip..len { - let (account, address, _account_hash, _write_version) = accounts.get(i); + let (account, address, _account_hash) = accounts.get(i); let index_entry = AccountIndexWriterEntry { address, offset: HotAccountOffset::new(cursor)?, @@ -1372,17 +1372,8 @@ pub mod tests { .take(account_data_sizes.len()) .collect(); - let write_versions: Vec<_> = accounts - .iter() - .map(|account| account.0.write_version_obsolete) - .collect(); - let storable_accounts = - StorableAccountsWithHashesAndWriteVersions::new_with_hashes_and_write_versions( - &account_data, - hashes.clone(), - write_versions.clone(), - ); + StorableAccountsWithHashes::new_with_hashes(&account_data, hashes.clone()); let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path().join("test_write_account_and_index_blocks"); @@ -1401,7 +1392,7 @@ pub mod tests { .unwrap() .unwrap(); - let (account, address, _account_hash, _write_version) = storable_accounts.get(i); + let (account, address, _account_hash) = storable_accounts.get(i); verify_test_account(&stored_meta, account, address); assert_eq!(i + 1, next.0 as usize); @@ -1419,8 +1410,7 @@ pub mod tests { .unwrap() .unwrap(); - let (account, address, _account_hash, _write_version) = - storable_accounts.get(stored_info.offset); + let (account, address, _account_hash) = storable_accounts.get(stored_info.offset); verify_test_account(&stored_meta, account, address); } @@ -1429,7 +1419,7 @@ pub mod tests { // first, we verify everything for (i, stored_meta) in accounts.iter().enumerate() { - let (account, address, _account_hash, _write_version) = storable_accounts.get(i); + let (account, address, _account_hash) = storable_accounts.get(i); verify_test_account(stored_meta, account, address); }
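// [editorial summary, not part of the patch series] With #542 and #561
// applied, callers no longer thread write versions through the store path.
// A minimal append, assuming an existing `append_vec: AppendVec`, reduces to:
//
//     let account_data = (slot, &[(&pubkey, &account)][..]);
//     let storable =
//         StorableAccountsWithHashes::new_with_hashes(&account_data, vec![&hash]);
//     let stored_infos = append_vec.append_accounts(&storable, 0);
//     // AppendVec itself now persists StoredMeta::write_version_obsolete as 0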