From 56aa2d4c1b179acaaffd62420c4d9d5c92818280 Mon Sep 17 00:00:00 2001 From: marc Date: Mon, 21 Jul 2025 08:52:49 +0200 Subject: [PATCH 01/68] port helper functions --- net-utils/src/sockets.rs | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/net-utils/src/sockets.rs b/net-utils/src/sockets.rs index 20017a6f38a733..5829f231cea771 100644 --- a/net-utils/src/sockets.rs +++ b/net-utils/src/sockets.rs @@ -5,6 +5,7 @@ use { std::{ io, net::{IpAddr, SocketAddr, TcpListener, UdpSocket}, + ops::Range, sync::atomic::{AtomicU16, Ordering}, }, }; @@ -15,8 +16,6 @@ const BASE_PORT: u16 = 5000; // how much to allocate per individual process. // we expect to have at most 64 concurrent tests in CI at any moment on a given host. const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64; -/// Retrieve a free 20-port slice for unit tests -/// /// When running under nextest, this will try to provide /// a unique slice of port numbers (assuming no other nextest processes /// are running on the same host) based on NEXTEST_TEST_GLOBAL_SLOT variable @@ -25,9 +24,9 @@ const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64; /// When running without nextest, this will only bump an atomic and eventually /// panic when it runs out of port numbers to assign. 
#[allow(clippy::arithmetic_side_effects)] -pub fn localhost_port_range_for_tests() -> (u16, u16) { +pub fn unique_port_range_for_tests(size: u16) -> Range { static SLICE: AtomicU16 = AtomicU16::new(0); - let offset = SLICE.fetch_add(20, Ordering::Relaxed); + let offset = SLICE.fetch_add(size, Ordering::Relaxed); let start = offset + match std::env::var("NEXTEST_TEST_GLOBAL_SLOT") { Ok(slot) => { @@ -40,8 +39,30 @@ pub fn localhost_port_range_for_tests() -> (u16, u16) { } Err(_) => BASE_PORT, }; - assert!(start < u16::MAX - 20, "ran out of port numbers!"); - (start, start + 20) + assert!(start < u16::MAX - size, "Ran out of port numbers!"); + start..start + size +} + +/// Retrieve a free 20-port slice for unit tests +/// +/// When running under nextest, this will try to provide +/// a unique slice of port numbers (assuming no other nextest processes +/// are running on the same host) based on NEXTEST_TEST_GLOBAL_SLOT variable +/// The port ranges will be reused following nextest logic. +/// +/// When running without nextest, this will only bump an atomic and eventually +/// panic when it runs out of port numbers to assign. +pub fn localhost_port_range_for_tests() -> (u16, u16) { + let pr = unique_port_range_for_tests(20); + (pr.start, pr.end) +} + +/// Bind a `UdpSocket` to a unique port. 
+pub fn bind_to_localhost_unique() -> io::Result { + bind_to( + IpAddr::V4(Ipv4Addr::LOCALHOST), + unique_port_range_for_tests(1).start, + ) } pub fn bind_gossip_port_in_range( From 8d86cec42f88d0b0dee5456d439597d5080de16b Mon Sep 17 00:00:00 2001 From: marc Date: Mon, 21 Jul 2025 09:04:19 +0200 Subject: [PATCH 02/68] move Ipv4Addr import to global scope --- net-utils/src/sockets.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net-utils/src/sockets.rs b/net-utils/src/sockets.rs index 5829f231cea771..aa07bad4e9a70d 100644 --- a/net-utils/src/sockets.rs +++ b/net-utils/src/sockets.rs @@ -1,16 +1,16 @@ +#[cfg(feature = "dev-context-only-utils")] +use tokio::net::UdpSocket as TokioUdpSocket; use { crate::PortRange, log::warn, socket2::{Domain, SockAddr, Socket, Type}, std::{ io, - net::{IpAddr, SocketAddr, TcpListener, UdpSocket}, + net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket}, ops::Range, sync::atomic::{AtomicU16, Ordering}, }, }; -#[cfg(feature = "dev-context-only-utils")] -use {std::net::Ipv4Addr, tokio::net::UdpSocket as TokioUdpSocket}; // base port for deconflicted allocations const BASE_PORT: u16 = 5000; // how much to allocate per individual process. 
From a9477ae072338f8b11734beb805a917d6bea5c2f Mon Sep 17 00:00:00 2001 From: marc Date: Wed, 23 Jul 2025 12:21:26 +0200 Subject: [PATCH 03/68] core, repair service: swap old bind_to, to the new bind_to_localhost_unique --- core/src/repair/repair_service.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 65e71e7d3d893a..754c42a6cd1cb8 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -1277,7 +1277,7 @@ mod test { }, solana_net_utils::{ bind_to_unspecified, - sockets::{bind_to, localhost_port_range_for_tests}, + sockets::bind_to_localhost_unique, }, solana_runtime::bank::Bank, solana_signer::Signer, @@ -1285,7 +1285,6 @@ mod test { solana_time_utils::timestamp, std::{ collections::HashSet, - net::{IpAddr, Ipv4Addr}, }, }; @@ -1302,10 +1301,9 @@ mod test { let pubkey = cluster_info.id(); let slot = 100; let shred_index = 50; - let port_range = localhost_port_range_for_tests(); - let reader = bind_to(IpAddr::V4(Ipv4Addr::LOCALHOST), port_range.0).expect("should bind"); + let reader = bind_to_localhost_unique().expect("should bind"); let address = reader.local_addr().unwrap(); - let sender = bind_to(IpAddr::V4(Ipv4Addr::LOCALHOST), port_range.1).expect("should bind"); + let sender = bind_to_localhost_unique().expect("should bind"); let outstanding_repair_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default())); // Send a repair request From 65aca805a9bdc66afae74aced67b57ecf5bc3d78 Mon Sep 17 00:00:00 2001 From: marc Date: Wed, 23 Jul 2025 13:20:51 +0200 Subject: [PATCH 04/68] core, repair service: swap old bind_to_unspecified, to the new bind_to_localhost_unique --- core/src/repair/repair_service.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 754c42a6cd1cb8..5bfef19cc68127 100644 --- 
a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -1275,10 +1275,7 @@ mod test { get_tmp_ledger_path_auto_delete, shred::max_ticks_per_n_shreds, }, - solana_net_utils::{ - bind_to_unspecified, - sockets::bind_to_localhost_unique, - }, + solana_net_utils::sockets::bind_to_localhost_unique, solana_runtime::bank::Bank, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, @@ -1659,7 +1656,7 @@ mod test { }; let mut duplicate_slot_repair_statuses = HashMap::new(); let dead_slot = 9; - let receive_socket = &bind_to_unspecified().unwrap(); + let receive_socket = &bind_to_localhost_unique().expect("should bind - receive socket"); let duplicate_status = DuplicateSlotRepairStatus { correct_ancestor_to_repair: (dead_slot, Hash::default()), start_ts: u64::MAX, @@ -1688,7 +1685,7 @@ mod test { &blockstore, &serve_repair, &mut RepairStats::default(), - &bind_to_unspecified().unwrap(), + &bind_to_localhost_unique().expect("should bind - repair socket"), &None, &RwLock::new(OutstandingRequests::default()), &identity_keypair, @@ -1714,7 +1711,7 @@ mod test { &blockstore, &serve_repair, &mut RepairStats::default(), - &bind_to_unspecified().unwrap(), + &bind_to_localhost_unique().expect("should bind - repair socket"), &None, &RwLock::new(OutstandingRequests::default()), &identity_keypair, @@ -1733,7 +1730,7 @@ mod test { &blockstore, &serve_repair, &mut RepairStats::default(), - &bind_to_unspecified().unwrap(), + &bind_to_localhost_unique().expect("should bind - repair socket"), &None, &RwLock::new(OutstandingRequests::default()), &identity_keypair, @@ -1748,7 +1745,7 @@ mod test { let bank_forks = BankForks::new_rw_arc(bank); let dummy_addr = Some(( Pubkey::default(), - bind_to_unspecified().unwrap().local_addr().unwrap(), + bind_to_localhost_unique().expect("should bind - dummy socket").local_addr().unwrap(), )); let cluster_info = Arc::new(new_test_cluster_info()); let ledger_path = get_tmp_ledger_path_auto_delete!(); From 
4eea2346ab63922b5106f75f18bea7fc872e4e64 Mon Sep 17 00:00:00 2001 From: marc Date: Wed, 23 Jul 2025 13:37:22 +0200 Subject: [PATCH 05/68] core, repair: migrate ancestor hashes service to new binding for tests --- core/src/repair/ancestor_hashes_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 32106a113efca7..50513acc49fd79 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -923,7 +923,7 @@ mod test { blockstore::make_many_slot_entries, get_tmp_ledger_path, get_tmp_ledger_path_auto_delete, shred::Nonce, }, - solana_net_utils::bind_to_unspecified, + solana_net_utils::sockets::bind_to_localhost_unique, solana_perf::packet::Packet, solana_runtime::bank_forks::BankForks, solana_signer::Signer, @@ -1357,7 +1357,7 @@ mod test { impl ManageAncestorHashesState { fn new(bank_forks: Arc>) -> Self { let ancestor_hashes_request_statuses = Arc::new(DashMap::new()); - let ancestor_hashes_request_socket = Arc::new(bind_to_unspecified().unwrap()); + let ancestor_hashes_request_socket = Arc::new(bind_to_localhost_unique().expect("should bind")); let epoch_schedule = bank_forks .read() .unwrap() From 64e63c07c34269b26657d8f2cb8ab10eb38c83f9 Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Fri, 18 Jul 2025 11:55:53 -0700 Subject: [PATCH 06/68] Subscription requests now take priority over notifications (#7031) --- CHANGELOG.md | 5 +++++ rpc/src/rpc_pubsub_service.rs | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38c46f29232578..cffe0b8056b29f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,11 @@ Release channels have their own copy of this changelog: ## 3.0.0 - Unreleased +### RPC + +#### Changes +* The subscription server now prioritizes processing received messages before sending out responses. 
This ensures that new subscription requests and time-sensitive messages like `PING` opcodes take priority over notifications. + ### Validator #### Breaking diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 0cb602982e707c..8187741f442ebc 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -401,6 +401,12 @@ async fn handle_connection( pin!(receive_future); loop { select! { + biased; // See [prioritization] note below. + + // [prioritization] + // This block must come FIRST in the `select!` macro. This prioritizes + // processing received messages over sending messages. This ensures the timely + // processing of new subscriptions and time-sensitive opcodes like `PING`. result = &mut receive_future => match result { Ok(_) => break, Err(soketto::connection::Error::Closed) => return Ok(()), From dad44681ebc2f5f08ee51f0713063c0e23220abe Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Fri, 18 Jul 2025 14:50:34 -0500 Subject: [PATCH 07/68] block-prioritization-fee metrics update (#6967) * clean up naming * accumulates and reports included transaction fees * only collect total prioritization fee --- runtime/src/prioritization_fee.rs | 107 +++++++++++++++--------- runtime/src/prioritization_fee_cache.rs | 46 ++++++++-- 2 files changed, 103 insertions(+), 50 deletions(-) diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index 9befdd31a61b2b..9e05c7c80f72be 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -23,14 +23,14 @@ struct PrioritizationFeeMetrics { // Count of attempted update on finalized PrioritizationFee attempted_update_on_finalized_fee_count: Saturating, - // Total prioritization fees included in this slot. + // Total transaction fees of non-vote transactions included in this slot. 
total_prioritization_fee: Saturating, - // The minimum prioritization fee of prioritized transactions in this slot. - min_prioritization_fee: Option, + // The minimum compute unit price of prioritized transactions in this slot. + min_compute_unit_price: Option, - // The maximum prioritization fee of prioritized transactions in this slot. - max_prioritization_fee: u64, + // The maximum compute unit price of prioritized transactions in this slot. + max_compute_unit_price: u64, // Accumulated time spent on tracking prioritization fee for each slot. total_update_elapsed_us: Saturating, @@ -49,8 +49,8 @@ impl PrioritizationFeeMetrics { self.attempted_update_on_finalized_fee_count += val; } - fn update_prioritization_fee(&mut self, fee: u64) { - if fee == 0 { + fn update_compute_unit_price(&mut self, cu_price: u64) { + if cu_price == 0 { self.non_prioritized_transactions_count += 1; return; } @@ -58,11 +58,11 @@ impl PrioritizationFeeMetrics { // update prioritized transaction fee metrics. self.prioritized_transactions_count += 1; - self.max_prioritization_fee = self.max_prioritization_fee.max(fee); + self.max_compute_unit_price = self.max_compute_unit_price.max(cu_price); - self.min_prioritization_fee = Some( - self.min_prioritization_fee - .map_or(fee, |min_fee| min_fee.min(fee)), + self.min_compute_unit_price = Some( + self.min_compute_unit_price + .map_or(cu_price, |min_cu_price| min_cu_price.min(cu_price)), ); } @@ -75,8 +75,8 @@ impl PrioritizationFeeMetrics { attempted_update_on_finalized_fee_count: Saturating(attempted_update_on_finalized_fee_count), total_prioritization_fee: Saturating(total_prioritization_fee), - min_prioritization_fee, - max_prioritization_fee, + min_compute_unit_price, + max_compute_unit_price, total_update_elapsed_us: Saturating(total_update_elapsed_us), } = self; datapoint_info!( @@ -113,11 +113,11 @@ impl PrioritizationFeeMetrics { i64 ), ( - "min_prioritization_fee", - min_prioritization_fee.unwrap_or(0) as i64, + 
"min_compute_unit_price", + min_compute_unit_price.unwrap_or(0) as i64, i64 ), - ("max_prioritization_fee", max_prioritization_fee as i64, i64), + ("max_compute_unit_price", max_compute_unit_price as i64, i64), ( "total_update_elapsed_us", total_update_elapsed_us as i64, @@ -144,11 +144,11 @@ pub enum PrioritizationFeeError { /// Block minimum prioritization fee stats, includes the minimum prioritization fee for a transaction in this /// block; and the minimum fee for each writable account in all transactions in this block. The only relevant /// write account minimum fees are those greater than the block minimum transaction fee, because the minimum fee needed to land -/// a transaction is determined by Max( min_transaction_fee, min_writable_account_fees(key), ...) +/// a transaction is determined by Max( min_compute_unit_price, min_writable_account_fees(key), ...) #[derive(Debug)] pub struct PrioritizationFee { // The minimum prioritization fee of transactions that landed in this block. - min_transaction_fee: u64, + min_compute_unit_price: u64, // The minimum prioritization fee of each writable account in transactions in this block. min_writable_account_fees: HashMap, @@ -164,7 +164,7 @@ pub struct PrioritizationFee { impl Default for PrioritizationFee { fn default() -> Self { PrioritizationFee { - min_transaction_fee: u64::MAX, + min_compute_unit_price: u64::MAX, min_writable_account_fees: HashMap::new(), is_finalized: false, metrics: PrioritizationFeeMetrics::default(), @@ -174,25 +174,30 @@ impl Default for PrioritizationFee { impl PrioritizationFee { /// Update self for minimum transaction fee in the block and minimum fee for each writable account. 
- pub fn update(&mut self, transaction_fee: u64, writable_accounts: Vec) { + pub fn update( + &mut self, + compute_unit_price: u64, + prioritization_fee: u64, + writable_accounts: Vec, + ) { let (_, update_us) = measure_us!({ if !self.is_finalized { - if transaction_fee < self.min_transaction_fee { - self.min_transaction_fee = transaction_fee; + if compute_unit_price < self.min_compute_unit_price { + self.min_compute_unit_price = compute_unit_price; } for write_account in writable_accounts { self.min_writable_account_fees .entry(write_account) .and_modify(|write_lock_fee| { - *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) + *write_lock_fee = std::cmp::min(*write_lock_fee, compute_unit_price) }) - .or_insert(transaction_fee); + .or_insert(compute_unit_price); } self.metrics - .accumulate_total_prioritization_fee(transaction_fee); - self.metrics.update_prioritization_fee(transaction_fee); + .accumulate_total_prioritization_fee(prioritization_fee); + self.metrics.update_compute_unit_price(compute_unit_price); } else { self.metrics .increment_attempted_update_on_finalized_fee_count(1); @@ -207,7 +212,7 @@ impl PrioritizationFee { fn prune_irrelevant_writable_accounts(&mut self) { self.metrics.total_writable_accounts_count = self.get_writable_accounts_count() as u64; self.min_writable_account_fees - .retain(|_, account_fee| account_fee > &mut self.min_transaction_fee); + .retain(|_, account_fee| account_fee > &mut self.min_compute_unit_price); self.metrics.relevant_writable_accounts_count = self.get_writable_accounts_count() as u64; } @@ -220,8 +225,8 @@ impl PrioritizationFee { Ok(()) } - pub fn get_min_transaction_fee(&self) -> Option { - (self.min_transaction_fee != u64::MAX).then_some(self.min_transaction_fee) + pub fn get_min_compute_unit_price(&self) -> Option { + (self.min_compute_unit_price != u64::MAX).then_some(self.min_compute_unit_price) } pub fn get_writable_account_fee(&self, key: &Pubkey) -> Option { @@ -250,22 +255,23 @@ mod tests { 
use {super::*, solana_pubkey::Pubkey}; #[test] - fn test_update_prioritization_fee() { + fn test_update_compute_unit_price() { solana_logger::setup(); let write_account_a = Pubkey::new_unique(); let write_account_b = Pubkey::new_unique(); let write_account_c = Pubkey::new_unique(); + let tx_fee = 10; let mut prioritization_fee = PrioritizationFee::default(); - assert!(prioritization_fee.get_min_transaction_fee().is_none()); + assert!(prioritization_fee.get_min_compute_unit_price().is_none()); // Assert for 1st transaction - // [fee, write_accounts...] --> [block, account_a, account_b, account_c] + // [cu_px, write_accounts...] --> [block, account_a, account_b, account_c] // ----------------------------------------------------------------------- // [5, a, b ] --> [5, 5, 5, nil ] { - prioritization_fee.update(5, vec![write_account_a, write_account_b]); - assert_eq!(5, prioritization_fee.get_min_transaction_fee().unwrap()); + prioritization_fee.update(5, tx_fee, vec![write_account_a, write_account_b]); + assert_eq!(5, prioritization_fee.get_min_compute_unit_price().unwrap()); assert_eq!( 5, prioritization_fee @@ -284,12 +290,12 @@ mod tests { } // Assert for second transaction: - // [fee, write_accounts...] --> [block, account_a, account_b, account_c] + // [cu_px, write_accounts...] --> [block, account_a, account_b, account_c] // ----------------------------------------------------------------------- // [9, b, c ] --> [5, 5, 5, 9 ] { - prioritization_fee.update(9, vec![write_account_b, write_account_c]); - assert_eq!(5, prioritization_fee.get_min_transaction_fee().unwrap()); + prioritization_fee.update(9, tx_fee, vec![write_account_b, write_account_c]); + assert_eq!(5, prioritization_fee.get_min_compute_unit_price().unwrap()); assert_eq!( 5, prioritization_fee @@ -311,12 +317,12 @@ mod tests { } // Assert for third transaction: - // [fee, write_accounts...] --> [block, account_a, account_b, account_c] + // [cu_px, write_accounts...] 
--> [block, account_a, account_b, account_c] // ----------------------------------------------------------------------- // [2, a, c ] --> [2, 2, 5, 2 ] { - prioritization_fee.update(2, vec![write_account_a, write_account_c]); - assert_eq!(2, prioritization_fee.get_min_transaction_fee().unwrap()); + prioritization_fee.update(2, tx_fee, vec![write_account_a, write_account_c]); + assert_eq!(2, prioritization_fee.get_min_compute_unit_price().unwrap()); assert_eq!( 2, prioritization_fee @@ -341,7 +347,7 @@ mod tests { { prioritization_fee.prune_irrelevant_writable_accounts(); assert_eq!(1, prioritization_fee.min_writable_account_fees.len()); - assert_eq!(2, prioritization_fee.get_min_transaction_fee().unwrap()); + assert_eq!(2, prioritization_fee.get_min_compute_unit_price().unwrap()); assert!(prioritization_fee .get_writable_account_fee(&write_account_a) .is_none()); @@ -357,6 +363,25 @@ mod tests { } } + #[test] + fn test_total_prioritization_fee() { + let mut prioritization_fee = PrioritizationFee::default(); + prioritization_fee.update(0, 10, vec![]); + assert_eq!(10, prioritization_fee.metrics.total_prioritization_fee.0); + + prioritization_fee.update(10, u64::MAX, vec![]); + assert_eq!( + u64::MAX, + prioritization_fee.metrics.total_prioritization_fee.0 + ); + + prioritization_fee.update(10, 100, vec![]); + assert_eq!( + u64::MAX, + prioritization_fee.metrics.total_prioritization_fee.0 + ); + } + #[test] fn test_mark_block_completed() { let mut prioritization_fee = PrioritizationFee::default(); diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index 9631ef86e4fb6a..6d63e040a5c122 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -1,5 +1,5 @@ use { - crate::{bank::Bank, prioritization_fee::*}, + crate::{bank::Bank, prioritization_fee::PrioritizationFee}, crossbeam_channel::{unbounded, Receiver, Sender, TryRecvError}, log::*, 
solana_accounts_db::account_locks::validate_account_locks, @@ -47,6 +47,9 @@ struct PrioritizationFeeCacheMetrics { // Accumulated time spent on finalizing block prioritization fees. total_block_finalize_elapsed_us: AtomicU64, + + // Accumulated time spent on calculate transaction fees. + total_calculate_prioritization_fee_elapsed_us: AtomicU64, } impl PrioritizationFeeCacheMetrics { @@ -80,6 +83,11 @@ impl PrioritizationFeeCacheMetrics { .fetch_add(val, Ordering::Relaxed); } + fn accumulate_total_calculate_prioritization_fee_elapsed_us(&self, val: u64) { + self.total_calculate_prioritization_fee_elapsed_us + .fetch_add(val, Ordering::Relaxed); + } + fn report(&self, slot: Slot) { datapoint_info!( "block_prioritization_fee_counters", @@ -117,6 +125,12 @@ impl PrioritizationFeeCacheMetrics { .swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "total_calculate_prioritization_fee_elapsed_us", + self.total_calculate_prioritization_fee_elapsed_us + .swap(0, Ordering::Relaxed) as i64, + i64 + ), ); } } @@ -126,7 +140,8 @@ enum CacheServiceUpdate { TransactionUpdate { slot: Slot, bank_id: BankId, - transaction_fee: u64, + compute_unit_price: u64, + prioritization_fee: u64, writable_accounts: Vec, }, BankFinalized { @@ -233,11 +248,21 @@ impl PrioritizationFeeCache { .map(|(_, key)| *key) .collect(); + let (prioritization_fee, calculate_prioritization_fee_us) = measure_us!({ + solana_fee_structure::FeeBudgetLimits::from(compute_budget_limits) + .prioritization_fee + }); + self.metrics + .accumulate_total_calculate_prioritization_fee_elapsed_us( + calculate_prioritization_fee_us, + ); + self.sender .send(CacheServiceUpdate::TransactionUpdate { slot: bank.slot(), bank_id: bank.bank_id(), - transaction_fee: compute_budget_limits.compute_unit_price, + compute_unit_price: compute_budget_limits.compute_unit_price, + prioritization_fee, writable_accounts, }) .unwrap_or_else(|err| { @@ -271,7 +296,8 @@ impl PrioritizationFeeCache { unfinalized: &mut UnfinalizedPrioritizationFees, 
slot: Slot, bank_id: BankId, - transaction_fee: u64, + compute_unit_price: u64, + prioritization_fee: u64, writable_accounts: Vec, metrics: &PrioritizationFeeCacheMetrics, ) { @@ -280,7 +306,7 @@ impl PrioritizationFeeCache { .or_default() .entry(bank_id) .or_default() - .update(transaction_fee, writable_accounts)); + .update(compute_unit_price, prioritization_fee, writable_accounts)); metrics.accumulate_total_entry_update_elapsed_us(entry_update_us); metrics.accumulate_successful_transaction_update_count(1); } @@ -374,13 +400,15 @@ impl PrioritizationFeeCache { CacheServiceUpdate::TransactionUpdate { slot, bank_id, - transaction_fee, + compute_unit_price, + prioritization_fee, writable_accounts, } => Self::update_cache( &mut unfinalized, slot, bank_id, - transaction_fee, + compute_unit_price, + prioritization_fee, writable_accounts, &metrics, ), @@ -414,7 +442,7 @@ impl PrioritizationFeeCache { .iter() .map(|(slot, slot_prioritization_fee)| { let mut fee = slot_prioritization_fee - .get_min_transaction_fee() + .get_min_compute_unit_price() .unwrap_or_default(); for account_key in account_keys { if let Some(account_fee) = @@ -549,7 +577,7 @@ mod tests { sync_finalize_priority_fee_for_test(&prioritization_fee_cache, slot, bank.bank_id()); let lock = prioritization_fee_cache.cache.read().unwrap(); let fee = lock.get(&slot).unwrap(); - assert_eq!(2, fee.get_min_transaction_fee().unwrap()); + assert_eq!(2, fee.get_min_compute_unit_price().unwrap()); assert!(fee.get_writable_account_fee(&write_account_a).is_none()); assert_eq!(5, fee.get_writable_account_fee(&write_account_b).unwrap()); assert!(fee.get_writable_account_fee(&write_account_c).is_none()); From 9b7714dde3f964e1a4363dbfe0681fd0e18b2d76 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Fri, 18 Jul 2025 16:52:43 -0300 Subject: [PATCH 08/68] Use `Instruction` instead of `StableInstruction` across runtime (#7040) Use Instruction instead of StableInstruction --- 
program-runtime/src/invoke_context.rs | 14 ++++++-------- program-test/src/lib.rs | 4 +--- programs/bpf_loader/src/lib.rs | 19 +++++++------------ programs/bpf_loader/src/syscalls/cpi.rs | 19 ++++++++++--------- rpc/src/rpc.rs | 3 +-- runtime/src/bank/builtin_programs.rs | 2 +- 6 files changed, 26 insertions(+), 35 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 70a8f3de580225..628db069a2d890 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -12,7 +12,7 @@ use { solana_clock::Slot, solana_epoch_schedule::EpochSchedule, solana_hash::Hash, - solana_instruction::{error::InstructionError, AccountMeta}, + solana_instruction::{error::InstructionError, AccountMeta, Instruction}, solana_log_collector::{ic_msg, LogCollector}, solana_measure::measure::Measure, solana_pubkey::Pubkey, @@ -26,7 +26,6 @@ use { solana_sdk_ids::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4, native_loader, sysvar, }, - solana_stable_layout::stable_instruction::StableInstruction, solana_svm_callback::InvokeContextCallback, solana_svm_feature_set::SVMFeatureSet, solana_timings::{ExecuteDetailsTimings, ExecuteTimings}, @@ -304,7 +303,7 @@ impl<'a> InvokeContext<'a> { /// Entrypoint for a cross-program invocation from a builtin program pub fn native_invoke( &mut self, - instruction: StableInstruction, + instruction: Instruction, signers: &[Pubkey], ) -> Result<(), InstructionError> { let (instruction_accounts, program_indices) = @@ -324,7 +323,7 @@ impl<'a> InvokeContext<'a> { #[allow(clippy::type_complexity)] pub fn prepare_instruction( &mut self, - instruction: &StableInstruction, + instruction: &Instruction, signers: &[Pubkey], ) -> Result<(Vec, Vec), InstructionError> { // Finds the index of each account in the instruction by its pubkey. @@ -333,7 +332,7 @@ impl<'a> InvokeContext<'a> { // but performed on a very small slice and requires no heap allocations. 
let instruction_context = self.transaction_context.get_current_instruction_context()?; let mut deduplicated_instruction_accounts: Vec = Vec::new(); - let mut duplicate_indicies = Vec::with_capacity(instruction.accounts.len() as usize); + let mut duplicate_indicies = Vec::with_capacity(instruction.accounts.len()); for (instruction_account_index, account_meta) in instruction.accounts.iter().enumerate() { let index_in_transaction = self .transaction_context @@ -1028,7 +1027,7 @@ mod tests { assert_eq!(result, Err(InstructionError::UnbalancedInstruction)); result?; invoke_context - .native_invoke(inner_instruction.into(), &[]) + .native_invoke(inner_instruction, &[]) .and(invoke_context.pop())?; } MockInstruction::UnbalancedPop => instruction_context @@ -1186,7 +1185,7 @@ mod tests { let inner_instruction = Instruction::new_with_bincode(callee_program_id, &instruction, metas.clone()); let result = invoke_context - .native_invoke(inner_instruction.into(), &[]) + .native_invoke(inner_instruction, &[]) .and(invoke_context.pop()); assert_eq!(result, expected_result); } @@ -1250,7 +1249,6 @@ mod tests { }, metas.clone(), ); - let inner_instruction = StableInstruction::from(inner_instruction); let (inner_instruction_accounts, program_indices) = invoke_context .prepare_instruction(&inner_instruction, &[]) .unwrap(); diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index a97eb0d517ea62..ad3ba4ca88a732 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -42,7 +42,6 @@ use { runtime_config::RuntimeConfig, }, solana_signer::Signer, - solana_stable_layout::stable_instruction::StableInstruction, solana_sysvar::Sysvar, solana_sysvar_id::SysvarId, solana_timings::ExecuteTimings, @@ -250,7 +249,6 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { account_infos: &[AccountInfo], signers_seeds: &[&[&[u8]]], ) -> ProgramResult { - let instruction = StableInstruction::from(instruction.clone()); let invoke_context = get_invoke_context(); 
let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; @@ -273,7 +271,7 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { .collect::>(); let (instruction_accounts, program_indices) = invoke_context - .prepare_instruction(&instruction, &signers) + .prepare_instruction(instruction, &signers) .unwrap(); // Copy caller's account_info modifications into invoke_context accounts diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 1651fef03fe023..0f5f989f3d5be8 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -659,7 +659,7 @@ fn process_loader_upgradeable_instruction( .iter() .map(|seeds| Pubkey::create_program_address(seeds, caller_program_id)) .collect::, solana_pubkey::PubkeyError>>()?; - invoke_context.native_invoke(instruction.into(), signers.as_slice())?; + invoke_context.native_invoke(instruction, signers.as_slice())?; // Load and verify the program bits let transaction_context = &invoke_context.transaction_context; @@ -1291,8 +1291,7 @@ fn process_loader_upgradeable_instruction( &provided_authority_address, program_len as u32, &program_address, - ) - .into(), + ), &[], )?; @@ -1304,8 +1303,7 @@ fn process_loader_upgradeable_instruction( 0, 0, program_len as u32, - ) - .into(), + ), &[], )?; @@ -1313,8 +1311,7 @@ fn process_loader_upgradeable_instruction( solana_loader_v4_interface::instruction::deploy( &program_address, &provided_authority_address, - ) - .into(), + ), &[], )?; @@ -1324,8 +1321,7 @@ fn process_loader_upgradeable_instruction( &program_address, &provided_authority_address, &program_address, - ) - .into(), + ), &[], )?; } else if migration_authority::check_id(&provided_authority_address) { @@ -1334,8 +1330,7 @@ fn process_loader_upgradeable_instruction( &program_address, &provided_authority_address, &upgrade_authority_address.unwrap(), - ) - .into(), + ), &[], )?; } @@ -1496,7 +1491,7 @@ fn 
common_extend_program( )?; invoke_context.native_invoke( - system_instruction::transfer(&payer_key, &programdata_key, required_payment).into(), + system_instruction::transfer(&payer_key, &programdata_key, required_payment), &[], )?; } diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 6eb4e46a401c91..675702e7a2d579 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -1,6 +1,7 @@ use { super::*, crate::{translate_inner, translate_slice_inner, translate_type_inner}, + solana_instruction::Instruction, solana_loader_v3_interface::instruction as bpf_loader_upgradeable, solana_measure::measure::Measure, solana_program_runtime::{ @@ -326,7 +327,7 @@ trait SyscallInvokeSigned { addr: u64, memory_mapping: &MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result; + ) -> Result; fn translate_accounts<'a>( instruction_accounts: &[InstructionAccount], account_infos_addr: u64, @@ -373,7 +374,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { addr: u64, memory_mapping: &MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result { + ) -> Result { let ix = translate_type::( memory_mapping, addr, @@ -419,9 +420,9 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { accounts.push(account_meta.clone()); } - Ok(StableInstruction { - accounts: accounts.into(), - data: data.into(), + Ok(Instruction { + accounts, + data, program_id: ix.program_id, }) } @@ -580,7 +581,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { addr: u64, memory_mapping: &MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result { + ) -> Result { let ix_c = translate_type::( memory_mapping, addr, @@ -641,9 +642,9 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { }); } - Ok(StableInstruction { - accounts: accounts.into(), - data: data.into(), + Ok(Instruction { + accounts, + data, program_id: *program_id, }) } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 
1df8307ebd9bc3..054d2cc443bca6 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -4695,8 +4695,7 @@ pub mod tests { lamports, space, &owner_pubkey, - ) - .into(), + ), &[], )?; diff --git a/runtime/src/bank/builtin_programs.rs b/runtime/src/bank/builtin_programs.rs index a0bcbd607019b5..102680c055d950 100644 --- a/runtime/src/bank/builtin_programs.rs +++ b/runtime/src/bank/builtin_programs.rs @@ -108,7 +108,7 @@ mod tests_core_bpf_migration { let instruction = Instruction::new_with_bytes(*target_program_id, &[], Vec::new()); - invoke_context.native_invoke(instruction.into(), &[]) + invoke_context.native_invoke(instruction, &[]) }); } From 281e13a3b447e93032069c810482823a17a7f145 Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Sun, 20 Jul 2025 14:42:32 +0200 Subject: [PATCH 09/68] metrics: move benchmarks to bencher 0.1.5 (#7011) * move benchmarks to bencher 0.1.5 * use std::hint::black_box where possible --- Cargo.lock | 1 + metrics/Cargo.toml | 2 ++ metrics/benches/metrics.rs | 39 +++++++++++++++++++------------------- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d84bffa971e238..7242a352238073 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9179,6 +9179,7 @@ dependencies = [ name = "solana-metrics" version = "3.0.0" dependencies = [ + "bencher", "crossbeam-channel", "env_logger 0.11.8", "gethostname", diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index d81b5f61a73444..cb6e51970f9b16 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -26,9 +26,11 @@ solana-time-utils = { workspace = true } thiserror = { workspace = true } [dev-dependencies] +bencher = { workspace = true } env_logger = { workspace = true } rand = { workspace = true } serial_test = { workspace = true } [[bench]] name = "metrics" +harness = false diff --git a/metrics/benches/metrics.rs b/metrics/benches/metrics.rs index f8038ef51200b3..e762066cdd2ecf 100644 --- a/metrics/benches/metrics.rs +++ 
b/metrics/benches/metrics.rs @@ -1,8 +1,5 @@ -#![feature(test)] - -extern crate test; - use { + bencher::{benchmark_group, benchmark_main, Bencher}, log::*, rand::distributions::{Distribution, Uniform}, solana_metrics::{ @@ -10,12 +7,10 @@ use { datapoint::DataPoint, metrics::{serialize_points, test_mocks::MockMetricsWriter, MetricsAgent}, }, - std::{sync::Arc, time::Duration}, - test::Bencher, + std::{hint::black_box, sync::Arc, time::Duration}, }; -#[bench] -fn bench_write_points(bencher: &mut Bencher) { +fn bench_write_points(b: &mut Bencher) { let points = (0..10) .map(|_| { DataPoint::new("measurement") @@ -26,19 +21,18 @@ fn bench_write_points(bencher: &mut Bencher) { }) .collect(); let host_id = "benchmark-host-id"; - bencher.iter(|| { + b.iter(|| { for _ in 0..10 { - test::black_box(serialize_points(&points, host_id)); + black_box(serialize_points(&points, host_id)); } }) } -#[bench] -fn bench_datapoint_submission(bencher: &mut Bencher) { +fn bench_datapoint_submission(b: &mut Bencher) { let writer = Arc::new(MockMetricsWriter::new()); let agent = MetricsAgent::new(writer, Duration::from_secs(10), 1000); - bencher.iter(|| { + b.iter(|| { for i in 0..1000 { agent.submit( DataPoint::new("measurement") @@ -51,12 +45,11 @@ fn bench_datapoint_submission(bencher: &mut Bencher) { }) } -#[bench] -fn bench_counter_submission(bencher: &mut Bencher) { +fn bench_counter_submission(b: &mut Bencher) { let writer = Arc::new(MockMetricsWriter::new()); let agent = MetricsAgent::new(writer, Duration::from_secs(10), 1000); - bencher.iter(|| { + b.iter(|| { for i in 0..1000 { agent.submit_counter(CounterPoint::new("counter 1"), Level::Info, i); } @@ -64,14 +57,13 @@ fn bench_counter_submission(bencher: &mut Bencher) { }) } -#[bench] -fn bench_random_submission(bencher: &mut Bencher) { +fn bench_random_submission(b: &mut Bencher) { let writer = Arc::new(MockMetricsWriter::new()); let agent = MetricsAgent::new(writer, Duration::from_secs(10), 1000); let mut rng = 
rand::thread_rng(); let die = Uniform::::from(1..7); - bencher.iter(|| { + b.iter(|| { for i in 0..1000 { let dice = die.sample(&mut rng); @@ -89,3 +81,12 @@ fn bench_random_submission(bencher: &mut Bencher) { agent.flush(); }) } + +benchmark_group!( + benches, + bench_write_points, + bench_datapoint_submission, + bench_counter_submission, + bench_random_submission +); +benchmark_main!(benches); From 831d660a66a6e7775e3c0dd31282e50cdaff008c Mon Sep 17 00:00:00 2001 From: steviez Date: Sun, 20 Jul 2025 18:11:26 -0500 Subject: [PATCH 10/68] net-utils: Resolve Rust 1.88 clippy lints and format strings (#7045) - Ran cargo clippy with Rust 1.88.0 set in rust-toolchain.toml - Ran cargo fmt with format_strings = true in rustfmt.toml --- net-utils/src/ip_echo_client.rs | 39 ++++++++++++++++----------------- net-utils/src/ip_echo_server.rs | 18 +++++++-------- net-utils/src/lib.rs | 13 ++++++----- net-utils/src/sockets.rs | 10 ++++----- 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/net-utils/src/ip_echo_client.rs b/net-utils/src/ip_echo_client.rs index 4e1afeb33ee165..c1ed42ebc5e50a 100644 --- a/net-utils/src/ip_echo_client.rs +++ b/net-utils/src/ip_echo_client.rs @@ -98,12 +98,20 @@ fn parse_response( [b'H', b'T', b'T', b'P'] => { let http_response = std::str::from_utf8(body); match http_response { - Ok(r) => bail!("Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port replying with {r}"), - Err(_) => bail!("Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port."), + Ok(r) => bail!( + "Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port \ + replying with {r}" + ), + Err(_) => bail!( + "Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port." + ), } } _ => { - bail!("Invalid gossip entrypoint. {ip_echo_server_addr} provided unexpected header bytes {response_header:?} "); + bail!( + "Invalid gossip entrypoint. 
{ip_echo_server_addr} provided unexpected header \ + bytes {response_header:?} " + ); } }; Ok(payload) @@ -163,7 +171,7 @@ pub(crate) async fn verify_all_reachable_tcp( bind_address, ) .await - .map_err(|err| warn!("ip_echo_server request failed: {}", err)); + .map_err(|err| warn!("ip_echo_server request failed: {err}")); // spawn checker to wait for reply // since we do not know if tcp_listeners are nonblocking, we have to run them in native threads. @@ -173,7 +181,7 @@ pub(crate) async fn verify_all_reachable_tcp( // Use blocking API since we have no idea if sockets given to us are nonblocking or not let thread_handle = tokio::task::spawn_blocking(move || { - debug!("Waiting for incoming connection on tcp/{}", port); + debug!("Waiting for incoming connection on tcp/{port}"); match tcp_listener.incoming().next() { Some(_) => { // ignore errors here since this can only happen if a timeout was detected. @@ -250,10 +258,7 @@ pub(crate) async fn verify_all_reachable_udp( for (bind_ip, ports_to_socks_map) in ip_to_ports { let ports: Vec = ports_to_socks_map.keys().copied().collect(); - info!( - "Checking that udp ports {:?} are reachable from bind IP {:?}", - ports, bind_ip - ); + info!("Checking that udp ports {ports:?} are reachable from bind IP {bind_ip:?}"); 'outer: for chunk_to_check in ports.chunks(MAX_PORT_COUNT_PER_MESSAGE) { let ports_to_check = chunk_to_check.to_vec(); @@ -275,7 +280,7 @@ pub(crate) async fn verify_all_reachable_udp( bind_ip, ) .await - .map_err(|err| warn!("ip_echo_server request failed: {}", err)); + .map_err(|err| warn!("ip_echo_server request failed: {err}")); let reachable_ports = Arc::new(RwLock::new(HashSet::new())); // Spawn threads for each socket to check @@ -300,10 +305,7 @@ pub(crate) async fn verify_all_reachable_udp( } let recv_result = socket.recv(&mut [0; 1]); - debug!( - "Waited for incoming datagram on udp/{}: {:?}", - port, recv_result - ); + debug!("Waited for incoming datagram on udp/{port}: {recv_result:?}"); if 
recv_result.is_ok() { reachable_ports.write().unwrap().insert(port); @@ -327,18 +329,15 @@ pub(crate) async fn verify_all_reachable_udp( .into_inner() .expect("No threads should hold the lock"); info!( - "checked udp ports: {:?}, reachable udp ports: {:?}", - ports_to_check, reachable_ports + "checked udp ports: {ports_to_check:?}, reachable udp ports: \ + {reachable_ports:?}" ); if reachable_ports.len() == ports_to_check.len() { continue 'outer; // starts checking next chunk of ports, if any } } - error!( - "Maximum retry count reached. Some ports for IP {} unreachable.", - bind_ip - ); + error!("Maximum retry count reached. Some ports for IP {bind_ip} unreachable."); return false; } } diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs index e877b30b1315d0..c662ff457229ee 100644 --- a/net-utils/src/ip_echo_server.rs +++ b/net-utils/src/ip_echo_server.rs @@ -68,7 +68,7 @@ async fn process_connection( peer_addr: SocketAddr, shred_version: Option, ) -> io::Result<()> { - info!("connection from {:?}", peer_addr); + info!("connection from {peer_addr:?}"); let mut data = vec![0u8; ip_echo_server_request_length()]; @@ -104,7 +104,7 @@ async fn process_connection( )) })?; - trace!("request: {:?}", msg); + trace!("request: {msg:?}"); // Fire a datagram at each non-zero UDP port match bind_to_unspecified() { @@ -114,21 +114,21 @@ async fn process_connection( let result = udp_socket.send_to(&[0], SocketAddr::from((peer_addr.ip(), *udp_port))); match result { - Ok(_) => debug!("Successful send_to udp/{}", udp_port), - Err(err) => info!("Failed to send_to udp/{}: {}", udp_port, err), + Ok(_) => debug!("Successful send_to udp/{udp_port}"), + Err(err) => info!("Failed to send_to udp/{udp_port}: {err}"), } } } } Err(err) => { - warn!("Failed to bind local udp socket: {}", err); + warn!("Failed to bind local udp socket: {err}"); } } // Try to connect to each non-zero TCP port for tcp_port in &msg.tcp_ports { if *tcp_port != 0 { - debug!("Connecting to 
tcp/{}", tcp_port); + debug!("Connecting to tcp/{tcp_port}"); let mut tcp_stream = timeout( IO_TIMEOUT, @@ -148,7 +148,7 @@ async fn process_connection( // conflict with the first four bytes of a valid HTTP response. let mut bytes = vec![0u8; IP_ECHO_SERVER_RESPONSE_LENGTH]; bincode::serialize_into(&mut bytes[HEADER_LENGTH..], &response).unwrap(); - trace!("response: {:?}", bytes); + trace!("response: {bytes:?}"); writer.write_all(&bytes).await } @@ -163,11 +163,11 @@ async fn run_echo_server(tcp_listener: std::net::TcpListener, shred_version: Opt Ok((socket, peer_addr)) => { runtime::Handle::current().spawn(async move { if let Err(err) = process_connection(socket, peer_addr, shred_version).await { - info!("session failed: {:?}", err); + info!("session failed: {err:?}"); } }); } - Err(err) => warn!("listener accept failed: {:?}", err), + Err(err) => warn!("listener accept failed: {err:?}"), } } } diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 5b5eb70c53eff4..1b223362a0f3e3 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -366,8 +366,8 @@ pub fn multi_bind_in_range_with_config( if !PLATFORM_SUPPORTS_SOCKET_CONFIGS && num != 1 { // See https://github.com/solana-labs/solana/issues/4607 warn!( - "multi_bind_in_range_with_config() only supports 1 socket on this platform ({} requested)", - num + "multi_bind_in_range_with_config() only supports 1 socket on this platform ({num} \ + requested)" ); num = 1; } @@ -464,7 +464,8 @@ pub fn bind_common_with_config( #[deprecated( since = "2.3.2", - note = "Please avoid this function, in favor of sockets::bind_two_in_range_with_offset_and_config" + note = "Please avoid this function, in favor of \ + sockets::bind_two_in_range_with_offset_and_config" )] #[allow(deprecated)] pub fn bind_two_in_range_with_offset( @@ -484,7 +485,8 @@ pub fn bind_two_in_range_with_offset( #[deprecated( since = "2.3.2", - note = "Please avoid this function, in favor of sockets::bind_two_in_range_with_offset_and_config" + 
note = "Please avoid this function, in favor of \ + sockets::bind_two_in_range_with_offset_and_config" )] #[allow(deprecated)] pub fn bind_two_in_range_with_offset_and_config( @@ -582,8 +584,7 @@ pub fn bind_more_with_config( if !PLATFORM_SUPPORTS_SOCKET_CONFIGS { if num > 1 { warn!( - "bind_more_with_config() only supports 1 socket on this platform ({} requested)", - num + "bind_more_with_config() only supports 1 socket on this platform ({num} requested)" ); } Ok(vec![socket]) diff --git a/net-utils/src/sockets.rs b/net-utils/src/sockets.rs index aa07bad4e9a70d..78b562574e0c7a 100644 --- a/net-utils/src/sockets.rs +++ b/net-utils/src/sockets.rs @@ -33,7 +33,8 @@ pub fn unique_port_range_for_tests(size: u16) -> Range { let slot: u16 = slot.parse().unwrap(); assert!( offset < SLICE_PER_PROCESS, - "Overrunning into the port range of another test! Consider using fewer ports per test." + "Overrunning into the port range of another test! Consider using fewer ports \ + per test." ); BASE_PORT + slot * SLICE_PER_PROCESS } @@ -238,8 +239,8 @@ pub fn multi_bind_in_range_with_config( if !PLATFORM_SUPPORTS_SOCKET_CONFIGS && num != 1 { // See https://github.com/solana-labs/solana/issues/4607 warn!( - "multi_bind_in_range_with_config() only supports 1 socket on this platform ({} requested)", - num + "multi_bind_in_range_with_config() only supports 1 socket on this platform ({num} \ + requested)" ); num = 1; } @@ -341,8 +342,7 @@ pub fn bind_more_with_config( if !PLATFORM_SUPPORTS_SOCKET_CONFIGS { if num > 1 { warn!( - "bind_more_with_config() only supports 1 socket on this platform ({} requested)", - num + "bind_more_with_config() only supports 1 socket on this platform ({num} requested)" ); } Ok(vec![socket]) From 0663334113dd8d4b5a12ef3cd4156548acb896b0 Mon Sep 17 00:00:00 2001 From: Kamil Skalski Date: Mon, 21 Jul 2025 07:09:41 +0200 Subject: [PATCH 11/68] Capture ability of BufferedReader to provide contiguous min len buffers as a trait (#6921) --- 
accounts-db/src/append_vec.rs | 102 +++----- accounts-db/src/buffered_reader.rs | 375 +++++++++++++++++++---------- 2 files changed, 287 insertions(+), 190 deletions(-) diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 742f0b42e2bea7..2ab20b0599c0b4 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -23,7 +23,7 @@ use { StoredAccountsInfo, }, accounts_hash::AccountHash, - buffered_reader::{BufferedReader, Stack}, + buffered_reader::{BufferedReader, ContiguousBufFileRead, Stack}, file_io::read_into_buffer, is_zero_lamport::IsZeroLamport, storable_accounts::StorableAccounts, @@ -1049,19 +1049,17 @@ impl AppendVec { {} } AppendVecFileBacking::File(file) => { - let self_len = self.len(); const BUFFER_SIZE: usize = PAGE_SIZE * 8; - let mut reader = BufferedReader::>::new_stack( - self_len, - file, - STORE_META_OVERHEAD, - ); + let mut reader = BufferedReader::>::new_stack(self.len(), file); + let mut min_buf_len = STORE_META_OVERHEAD; // Buffer for account data that doesn't fit within the stack allocated buffer. // This will be re-used for each account that doesn't fit within the stack allocated buffer. 
let mut data_overflow_buffer = vec![]; loop { - let offset = reader.get_offset(); - let bytes = match reader.fill_buf() { + let offset = reader.get_file_offset(); + let bytes = match reader + .fill_buf_required_or_overflow(min_buf_len, &mut data_overflow_buffer) + { Ok([]) => break, Ok(bytes) => ValidSlice::new(bytes), Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break, @@ -1087,53 +1085,26 @@ impl AppendVec { }; callback(account); reader.consume(stored_size); - } else if STORE_META_OVERHEAD + data_len <= BUFFER_SIZE { - reader.set_required_data_len(STORE_META_OVERHEAD + data_len); + // restore default required buffer size + min_buf_len = STORE_META_OVERHEAD; } else { - const MAX_CAPACITY: usize = MAX_PERMITTED_DATA_LENGTH as usize; - // 128KiB covers a reasonably large distribution of typical account sizes. - // In a recent sample, 99.98% of accounts' data lengths were less than or equal to 128KiB. - const MIN_CAPACITY: usize = 1024 * 128; - let capacity = data_overflow_buffer.capacity(); - if data_len > capacity { - let next_cap = data_len - .next_power_of_two() - .clamp(MIN_CAPACITY, MAX_CAPACITY); - data_overflow_buffer.reserve_exact(next_cap - capacity); - // SAFETY: We only write to the uninitialized portion of the buffer via `copy_from_slice` and `read_into_buffer`. - // Later, we ensure we only read from the initialized portion of the buffer. - unsafe { - data_overflow_buffer.set_len(next_cap); + // repeat loop with required buffer size holding whole account data + min_buf_len = STORE_META_OVERHEAD + data_len; + + if min_buf_len > BUFFER_SIZE { + const MAX_CAPACITY: usize = + STORE_META_OVERHEAD + MAX_PERMITTED_DATA_LENGTH as usize; + // 128KiB covers a reasonably large distribution of typical account sizes. + // In a recent sample, 99.98% of accounts' data lengths were less than or equal to 128KiB. 
+ const MIN_CAPACITY: usize = 1024 * 128; + if min_buf_len > data_overflow_buffer.capacity() { + let next_cap = min_buf_len + .next_power_of_two() + .clamp(MIN_CAPACITY, MAX_CAPACITY); + data_overflow_buffer + .reserve_exact(next_cap - data_overflow_buffer.len()); } } - - // Copy already read data to overflow buffer. - data_overflow_buffer[..leftover].copy_from_slice(&bytes.0[next..]); - - // Read remaining data into overflow buffer. - let Ok(bytes_read) = read_into_buffer( - file, - self_len, - offset + next + leftover, - &mut data_overflow_buffer[leftover..data_len], - ) else { - break; - }; - if bytes_read + leftover < data_len { - break; - } - let data = &data_overflow_buffer[..data_len]; - let stored_size = aligned_stored_size(data_len); - let account = StoredAccountMeta { - meta, - account_meta, - data, - offset, - stored_size, - hash, - }; - callback(account); - reader.consume(stored_size); } } } @@ -1252,14 +1223,12 @@ impl AppendVec { AppendVecFileBacking::File(file) => { // Heuristic observed in benchmarking that maintains a reasonable balance between syscalls and data waste const BUFFER_SIZE: usize = PAGE_SIZE * 4; - let mut reader = BufferedReader::>::new_stack( - self_len, - file, - mem::size_of::() + mem::size_of::(), - ); + let mut reader = BufferedReader::>::new_stack(self_len, file); + const REQUIRED_READ_LEN: usize = + mem::size_of::() + mem::size_of::(); loop { - let offset = reader.get_offset(); - let bytes = match reader.fill_buf() { + let offset = reader.get_file_offset(); + let bytes = match reader.fill_buf_required(REQUIRED_READ_LEN) { Ok([]) => break, Ok(bytes) => ValidSlice::new(bytes), Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break, @@ -1703,12 +1672,17 @@ pub mod tests { let mut test_accounts = Vec::with_capacity(num_accounts); let mut file_size = 0; + let special_file_interval = num_accounts / 8; for i in 0..num_accounts { let data_len = match i { - // ensure one max size account - 0 => MAX_PERMITTED_DATA_LENGTH as 
usize, - // ensure one 64KiB account - x if x == num_accounts - 1 => 1 << 16, + // Create several spread out accounts with varying sizes: + // for (x / special_file_interval) in 0..7 range + x if x % special_file_interval == 0 => { + // mult increases in 0 to 3 range twice + let mult = (x / special_file_interval) % 4; + // and data_len goes over 0..MAX_PERMITTED_DATA_LENGTH range also twice + mult * (MAX_PERMITTED_DATA_LENGTH as usize) / 3 + } // Otherwise use a reasonably small account to avoid long test times x => x % 256, }; diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs index d1a649a762d067..e038f700b782a5 100644 --- a/accounts-db/src/buffered_reader.rs +++ b/accounts-db/src/buffered_reader.rs @@ -9,7 +9,7 @@ //! `set_required_data_len(len)`, the whole account data is buffered _linearly_ in memory and available to //! be returned. use { - crate::file_io::read_more_buffer, + crate::file_io::{read_into_buffer, read_more_buffer}, std::{ fs::File, io::{self, BufRead, BufReader}, @@ -26,6 +26,7 @@ use { /// caller may be able to opt for a stack-allocated buffer rather than a heap-allocated buffer, or /// vice versa. pub(crate) trait Backing { + fn capacity(&self) -> usize; unsafe fn as_slice(&self) -> &[u8]; unsafe fn as_mut_slice(&mut self) -> &mut [u8]; } @@ -46,6 +47,10 @@ impl Stack { } impl Backing for Stack { + fn capacity(&self) -> usize { + N + } + #[inline(always)] unsafe fn as_slice(&self) -> &[u8] { slice::from_raw_parts(self.0.as_ptr() as *const u8, N) @@ -57,6 +62,48 @@ impl Backing for Stack { } } +/// An extension of the `BufRead` trait for file readers that require stronger control +/// over returned buffer size and tracking of the file offset. +/// +/// Unlike the standard `fill_buf`, which only guarantees a non-empty buffer, +/// this trait allows callers to: +/// - Enforce a minimum number of contiguous bytes to be made available. 
+/// - Fall back to an overflow buffer if the internal buffer cannot satisfy the request. +/// - Retrieve the current file offset corresponding to the start of the next buffer. +pub(crate) trait ContiguousBufFileRead<'a>: BufRead { + /// Returns the current file offset corresponding to the start of the buffer + /// that will be returned by the next call to `fill_buf_*`. + /// + /// This offset represents the position within the underlying file where data + /// will be consumed from. + fn get_file_offset(&self) -> usize; + + /// Ensures the internal buffer contains at least `required_len` contiguous bytes, + /// and returns a slice to that buffer. + /// + /// Returns `Err(io::ErrorKind::UnexpectedEof)` if the end of file is reached + /// before the required number of bytes is available. + fn fill_buf_required(&mut self, required_len: usize) -> io::Result<&[u8]>; + + /// Attempts to provide at least `required_len` contiguous bytes by using + /// the internal buffer or the provided `overflow_buffer` if needed. + /// + /// If the internal buffer alone does not satisfy the requirement, additional + /// bytes are read and appended to `overflow_buffer`, which is resized to fit the data. + /// + /// Returns a slice containing all the required data (may point to either buffer). + /// + /// Returns `Err(io::ErrorKind::UnexpectedEof)` if the end of file is reached + /// before the required number of bytes can be read. 
+ fn fill_buf_required_or_overflow<'b>( + &'b mut self, + required_len: usize, + overflow_buffer: &'b mut Vec, + ) -> io::Result<&'b [u8]> + where + 'a: 'b; +} + /// read a file a large buffer at a time and provide access to a slice in that buffer pub struct BufferedReader<'a, T> { /// when we are next asked to read from file, start at this offset @@ -67,105 +114,126 @@ pub struct BufferedReader<'a, T> { buf_valid_bytes: Range, /// offset in the file of the `buf_valid_bytes`.`start` file_last_offset: usize, - /// how many contiguous bytes caller needs - read_requirements: Option, /// how many bytes are valid in the file. The file's len may be longer. file_len_valid: usize, /// reference to file handle file: &'a File, - /// we always want at least this many contiguous bytes available or we must read more into the buffer. - default_min_read_requirement: usize, } impl<'a, T> BufferedReader<'a, T> { /// `buffer_size`: how much to try to read at a time /// `file_len_valid`: # bytes that are valid in the file, may be less than overall file len /// `default_min_read_requirement`: make sure we always have this much data available if we're asked to read - pub fn new( - backing: T, - file_len_valid: usize, - file: &'a File, - default_min_read_requirement: usize, - ) -> Self { + pub fn new(backing: T, file_len_valid: usize, file: &'a File) -> Self { Self { file_offset_of_next_read: 0, buf: backing, buf_valid_bytes: 0..0, file_last_offset: 0, - read_requirements: None, file_len_valid, file, - default_min_read_requirement, } } +} - /// specify the amount of data required to read next time `read` is called +impl<'a, T: Backing> ContiguousBufFileRead<'a> for BufferedReader<'a, T> { #[inline(always)] - pub fn set_required_data_len(&mut self, len: usize) { - self.read_requirements = Some(len); + fn get_file_offset(&self) -> usize { + if self.buf_valid_bytes.is_empty() { + self.file_offset_of_next_read + } else { + self.file_last_offset + self.buf_valid_bytes.start + } } -} 
-impl<'a, T> BufferedReader<'a, T> -where - T: Backing, -{ - /// read to make sure we have the minimum amount of data - fn read_required_bytes(&mut self) -> io::Result<()> { - let must_read = self - .read_requirements - .unwrap_or(self.default_min_read_requirement); - if self.buf_valid_bytes.len() < must_read { - // we haven't used all the bytes we read last time, so adjust the effective offset - debug_assert!(self.buf_valid_bytes.len() <= self.file_offset_of_next_read); - self.file_last_offset = self.file_offset_of_next_read - self.buf_valid_bytes.len(); - read_more_buffer( - self.file, - self.file_len_valid, - &mut self.file_offset_of_next_read, - // SAFETY: `read_more_buffer` will only _write_ to uninitialized memory and lifetime is tied to self. - unsafe { self.buf.as_mut_slice() }, - &mut self.buf_valid_bytes, - )?; - if self.buf_valid_bytes.len() < must_read { + fn fill_buf_required(&mut self, required_len: usize) -> io::Result<&[u8]> { + if self.buf_valid_bytes.len() < required_len { + self.read_more_bytes()?; + if self.buf_valid_bytes.len() < required_len { return Err(io::Error::new( io::ErrorKind::UnexpectedEof, "unable to read enough data", )); } } - // reset this once we have checked that we had this much data once - self.read_requirements = None; - Ok(()) + Ok(self.valid_slice()) } - /// Return file offset within `file` of the current consume position. - /// - /// The offset is corresponding to the start of buffer that will be returned - /// by the next `fill_buf` call. 
- #[inline(always)] - pub fn get_offset(&'a self) -> usize { - if self.buf_valid_bytes.is_empty() { - self.file_offset_of_next_read - } else { - self.file_last_offset + self.buf_valid_bytes.start + fn fill_buf_required_or_overflow<'b>( + &'b mut self, + required_len: usize, + overflow_buffer: &'b mut Vec, + ) -> io::Result<&'b [u8]> + where + 'a: 'b, + { + if required_len <= self.buf.capacity() { + return self.fill_buf_required(required_len); + } + + if required_len > overflow_buffer.capacity() { + overflow_buffer.reserve_exact(required_len - overflow_buffer.len()); + } + // SAFETY: We only write to the uninitialized portion of the buffer via `copy_from_slice` and `read_into_buffer`. + // Later, we ensure we only read from the initialized portion of the buffer. + unsafe { + overflow_buffer.set_len(required_len); + } + + // Copy already read data to overflow buffer. + let available_valid_data = self.valid_slice(); + let leftover = available_valid_data.len(); + overflow_buffer[..leftover].copy_from_slice(available_valid_data); + + // Read remaining data into overflow buffer. + let read_dst = &mut overflow_buffer[leftover..]; + let bytes_read = read_into_buffer( + self.file, + self.file_len_valid, + self.file_offset_of_next_read, + read_dst, + )?; + if bytes_read < read_dst.len() { + return Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "unable to read required amount of data", + )); } + Ok(overflow_buffer.as_slice()) + } +} + +impl BufferedReader<'_, T> +where + T: Backing, +{ + /// Defragment buffer and read more bytes to make sure we have filled available + /// space as much as possible. 
+ fn read_more_bytes(&mut self) -> io::Result<()> { + // we haven't used all the bytes we read last time, so adjust the effective offset + debug_assert!(self.buf_valid_bytes.len() <= self.file_offset_of_next_read); + self.file_last_offset = self.file_offset_of_next_read - self.buf_valid_bytes.len(); + read_more_buffer( + self.file, + self.file_len_valid, + &mut self.file_offset_of_next_read, + // SAFETY: `read_more_buffer` will only _write_ to uninitialized memory and lifetime is tied to self. + unsafe { self.buf.as_mut_slice() }, + &mut self.buf_valid_bytes, + ) + } + + fn valid_slice(&self) -> &[u8] { + // SAFETY: We only read from memory that has been initialized by `read_more_buffer` + // and lifetime is tied to self. + unsafe { &self.buf.as_slice()[self.buf_valid_bytes.clone()] } } } impl<'a, const N: usize> BufferedReader<'a, Stack> { /// create a new buffered reader with a stack-allocated buffer - pub fn new_stack( - file_len_valid: usize, - file: &'a File, - default_min_read_requirement: usize, - ) -> Self { - BufferedReader::new( - Stack::new(), - file_len_valid, - file, - default_min_read_requirement, - ) + pub fn new_stack(file_len_valid: usize, file: &'a File) -> Self { + BufferedReader::new(Stack::new(), file_len_valid, file) } } @@ -185,18 +253,11 @@ impl io::Read for BufferedReader<'_, T> { /// `BufferedReader` implements a more permissive API compared to `BufRead` /// by allowing `consume` to advance beyond the end of the buffer returned by `fill_buf`. impl BufRead for BufferedReader<'_, T> { - /// Return the biggest slice of valid data starting at the current offset. - /// - /// Note that `fill_buf` has stronger guarantee than `BufRead::fill_buf` and returns - /// at least the number of bytes requested by `default_min_read_requirement` and - /// `set_required_data_len`. If that condition cannot be met - /// `Err(io::ErrorKind::UnexpectedEof)` is returned. 
fn fill_buf(&mut self) -> io::Result<&[u8]> { - self.read_required_bytes()?; - - // SAFETY: We only read from memory that has been initialized by `read_more_buffer` - // and lifetime is tied to self. - Ok(unsafe { &self.buf.as_slice()[self.buf_valid_bytes.clone()] }) + if self.buf_valid_bytes.is_empty() { + self.read_more_bytes()?; + } + Ok(self.valid_slice()) } /// Advance the offset by `amt` to a `file` position where next `fill_buf` buffer should @@ -225,7 +286,7 @@ pub fn large_file_buf_reader( if agave_io_uring::io_uring_supported() { use crate::io_uring::sequential_file_reader::SequentialFileReader; - let io_uring_reader = SequentialFileReader::with_capacity(buf_size, path.as_ref()); + let io_uring_reader = SequentialFileReader::with_capacity(buf_size, &path); match io_uring_reader { Ok(reader) => return Ok(Box::new(reader)), Err(error) => { @@ -262,10 +323,9 @@ mod tests { // First read 16 bytes to fill buffer let file_len_valid = 32; let default_min_read = 8; - let mut reader = - BufferedReader::new(backing, file_len_valid, &sample_file, default_min_read); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(default_min_read).unwrap()); let mut expected_offset = 0; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), buffer_size); @@ -275,31 +335,34 @@ mod tests { let advance = 16; let mut required_len = 32; reader.consume(advance); - reader.set_required_data_len(required_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("should hit EOF").kind(), + reader + .fill_buf_required(required_len) + .expect_err("should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); // Continue reading should yield 
EOF. reader.consume(advance); - reader.set_required_data_len(required_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("should hit EOF").kind(), + reader + .fill_buf_required(required_len) + .expect_err("should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); // set_required_data to zero and offset should not change, and slice should be empty. required_len = 0; - reader.set_required_data_len(required_len); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(required_len).unwrap()); let expected_offset = file_len_valid; assert_eq!(offset, expected_offset); let expected_slice_len = 0; @@ -319,10 +382,9 @@ mod tests { // First read 16 bytes to fill buffer let default_min_read_size = 8; - let mut reader = - BufferedReader::new(backing, valid_len, &sample_file, default_min_read_size); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let mut reader = BufferedReader::new(backing, valid_len, &sample_file); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(default_min_read_size).unwrap()); let mut expected_offset = 0; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), buffer_size); @@ -332,12 +394,14 @@ mod tests { let mut advance = 16; let mut required_data_len = 32; reader.consume(advance); - reader.set_required_data_len(required_data_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("should hit EOF").kind(), + reader + .fill_buf_required(required_data_len) + .expect_err("should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); @@ -345,12 +409,14 @@ 
mod tests { advance = 14; required_data_len = 32; reader.consume(advance); - reader.set_required_data_len(required_data_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("should hit EOF").kind(), + reader + .fill_buf_required(required_data_len) + .expect_err("should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); @@ -358,12 +424,14 @@ mod tests { advance = 1; required_data_len = 8; reader.consume(advance); - reader.set_required_data_len(required_data_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("should hit EOF").kind(), + reader + .fill_buf_required(required_data_len) + .expect_err("should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); @@ -371,12 +439,14 @@ mod tests { advance = 3; required_data_len = 8; reader.consume(advance); - reader.set_required_data_len(required_data_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("Should hit EOF").kind(), + reader + .fill_buf_required(required_data_len) + .expect_err("Should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); } @@ -392,10 +462,9 @@ mod tests { // First read 16 bytes to fill buffer let file_len_valid = 32; let default_min_read_size = 8; - let mut reader = - BufferedReader::new(backing, file_len_valid, &sample_file, default_min_read_size); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(default_min_read_size).unwrap()); let mut expected_offset = 0; 
assert_eq!(offset, expected_offset); assert_eq!(slice.len(), buffer_size); @@ -405,9 +474,8 @@ mod tests { let mut advance = 8; let mut required_len = 8; reader.consume(advance); - reader.set_required_data_len(required_len); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(required_len).unwrap()); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), required_len); @@ -420,9 +488,8 @@ mod tests { advance = 8; required_len = 16; reader.consume(advance); - reader.set_required_data_len(required_len); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(required_len).unwrap()); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), required_len); @@ -435,12 +502,14 @@ mod tests { advance = 16; required_len = 32; reader.consume(advance); - reader.set_required_data_len(required_len); - let offset = reader.get_offset(); + let offset = reader.get_file_offset(); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!( - reader.fill_buf().expect_err("should hit EOF").kind(), + reader + .fill_buf_required(required_len) + .expect_err("should hit EOF") + .kind(), io::ErrorKind::UnexpectedEof ); } @@ -456,9 +525,9 @@ mod tests { // First read 16 bytes to fill buffer let valid_len = 32; let default_min_read = 8; - let mut reader = BufferedReader::new(backing, valid_len, &sample_file, default_min_read); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let mut reader = BufferedReader::new(backing, valid_len, &sample_file); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(default_min_read).unwrap()); let mut expected_offset = 
0; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), buffer_size); @@ -469,9 +538,8 @@ mod tests { let mut advance = 8; let mut required_data_len = 16; reader.consume(advance); - reader.set_required_data_len(required_data_len); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(required_data_len).unwrap()); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), required_data_len); @@ -484,9 +552,8 @@ mod tests { advance = 16; required_data_len = 8; reader.consume(advance); - reader.set_required_data_len(required_data_len); - let offset = reader.get_offset(); - let slice = ValidSlice::new(reader.fill_buf().unwrap()); + let offset = reader.get_file_offset(); + let slice = ValidSlice::new(reader.fill_buf_required(required_data_len).unwrap()); expected_offset += advance; assert_eq!(offset, expected_offset); assert_eq!(slice.len(), required_data_len); @@ -495,4 +562,60 @@ mod tests { &bytes[expected_offset..expected_offset + required_data_len] ); } + + #[test_case(Stack::<16>::new(), 16)] + fn test_fill_buf_required_or_overflow(backing: impl Backing, buffer_size: usize) { + // Setup a sample file with 32 bytes of data + const FILE_SIZE: usize = 32; + let mut sample_file = tempfile().unwrap(); + let bytes = rand_bytes::(); + sample_file.write_all(&bytes).unwrap(); + + let file_len_valid = 32; + let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file); + + // Case 1: required_len <= buffer_size (no overflow needed) + let mut overflow = Vec::new(); + let required_len = 8; + let slice = reader + .fill_buf_required_or_overflow(required_len, &mut overflow) + .unwrap(); + assert_eq!(&slice[..required_len], &bytes[..required_len]); + assert!(overflow.is_empty()); + + // Consume part of the buffer to simulate partial reading + reader.consume(required_len); + + // Case 2: 
required_len > buffer_size (overflow required) + let mut overflow = Vec::new(); + let required_len = buffer_size + 8; + let slice = reader + .fill_buf_required_or_overflow(required_len, &mut overflow) + .unwrap(); + + // Internal buffer is size `buffer_size`, overflow should extend with the remaining `8` bytes + assert_eq!(slice.len(), required_len); + assert_eq!(slice, &bytes[8..8 + required_len]); + assert_eq!(overflow.len(), required_len); + + // Consume everything to reach EOF + reader.consume(required_len); + + // Case 3: required_len larger than remaining data (expect UnexpectedEof) + let mut overflow = Vec::new(); + let required_len = 64; + let result = reader.fill_buf_required_or_overflow(required_len, &mut overflow); + assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof); + + // Case 4: required_len = 0 (should return empty slice) + let mut overflow = Vec::new(); + let required_len = 0; + let offset_before = reader.get_file_offset(); + let slice = reader + .fill_buf_required_or_overflow(required_len, &mut overflow) + .unwrap(); + assert_eq!(slice.len(), 0); + let offset_after = reader.get_file_offset(); + assert_eq!(offset_before, offset_after); + } } From b17ac8f3e86222f94dff8dd36f4e0c8407a98035 Mon Sep 17 00:00:00 2001 From: Jon C Date: Mon, 21 Jul 2025 06:18:12 -0400 Subject: [PATCH 12/68] runtime: Inline `solana_sha256_hasher::extend_and_hash` (#7028) * runtime: Inline `extend_and_hash` #### Problem `solana_sha256_hasher::extend_and_hash` requires dynamic allocation because of the vec creation, which means adding a `std` feature to the crate. However, the function is only used in one place, so it would be easier to remove it and simplify the hasher crate. #### Summary of changes Inline the usage of `extend_and_hash` so it can be safely removed from the hasher crate. 
* Simplify extend_and_hash impl --- runtime/src/bank.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1247646f73b70f..cad872f09d75db 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -127,7 +127,7 @@ use { runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta, }, solana_sdk_ids::{bpf_loader_upgradeable, incinerator, native_loader}, - solana_sha256_hasher::{extend_and_hash, hashv}, + solana_sha256_hasher::hashv, solana_signature::Signature, solana_slot_hashes::SlotHashes, solana_slot_history::{Check, SlotHistory}, @@ -4455,7 +4455,7 @@ impl Bank { .unwrap() .get_hash_data(slot, self.parent_slot()); if let Some(buf) = buf { - let hard_forked_hash = extend_and_hash(&hash, &buf); + let hard_forked_hash = hashv(&[hash.as_ref(), &buf]); warn!("hard fork at slot {slot} by hashing {buf:?}: {hash} => {hard_forked_hash}"); hash = hard_forked_hash; } From ef9453e6b4495dc36412a09f482955e35471d236 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Mon, 21 Jul 2025 10:40:19 -0500 Subject: [PATCH 13/68] refactor: remove unused parameter to CostUpdateService (#7048) --- core/src/cost_update_service.rs | 7 +++---- core/src/tvu.rs | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index e912037a43c55d..de4d1ad1b30eaa 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -2,7 +2,6 @@ use { crossbeam_channel::Receiver, - solana_ledger::blockstore::Blockstore, solana_runtime::bank::Bank, std::{ sync::Arc, @@ -30,11 +29,11 @@ const MAX_LOOP_COUNT: usize = 25; const LOOP_LIMITER: Duration = Duration::from_millis(10); impl CostUpdateService { - pub fn new(blockstore: Arc, cost_update_receiver: CostUpdateReceiver) -> Self { + pub fn new(cost_update_receiver: CostUpdateReceiver) -> Self { let thread_hdl = Builder::new() 
.name("solCostUpdtSvc".to_string()) .spawn(move || { - Self::service_loop(blockstore, cost_update_receiver); + Self::service_loop(cost_update_receiver); }) .unwrap(); @@ -45,7 +44,7 @@ impl CostUpdateService { self.thread_hdl.join() } - fn service_loop(_blockstore: Arc, cost_update_receiver: CostUpdateReceiver) { + fn service_loop(cost_update_receiver: CostUpdateReceiver) { for cost_update in cost_update_receiver.iter() { match cost_update { CostUpdate::FrozenBank { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 576fd3aef2c059..b7172d5883b002 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -360,7 +360,7 @@ impl Tvu { &exit, ); - let cost_update_service = CostUpdateService::new(blockstore.clone(), cost_update_receiver); + let cost_update_service = CostUpdateService::new(cost_update_receiver); let drop_bank_service = DropBankService::new(drop_bank_receiver); From eb9ef9a6f861a3a68ee2bd553a6e6dc75db79c22 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 21 Jul 2025 12:51:59 -0400 Subject: [PATCH 14/68] Removes merkle-based accounts hashing in AccountsHashVerifier (#7035) --- core/src/accounts_hash_verifier.rs | 227 ++--------------------------- runtime/src/snapshot_package.rs | 23 +-- 2 files changed, 10 insertions(+), 240 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 55363acf0f7af4..92b747b7c23b70 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -3,25 +3,16 @@ use { crate::snapshot_packager_service::PendingSnapshotPackages, crossbeam_channel::{Receiver, Sender}, - solana_accounts_db::{ - accounts_db::CalcAccountsHashKind, - accounts_hash::{ - AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, - MerkleOrLatticeAccountsHash, - }, - sorted_storages::SortedStorages, - }, - solana_clock::{Slot, DEFAULT_MS_PER_SLOT}, + solana_accounts_db::accounts_hash::MerkleOrLatticeAccountsHash, + solana_clock::DEFAULT_MS_PER_SLOT, 
solana_measure::measure_us, solana_runtime::{ serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_config::SnapshotConfig, snapshot_controller::SnapshotController, snapshot_package::{ - self, AccountsHashAlgorithm, AccountsPackage, AccountsPackageKind, SnapshotKind, - SnapshotPackage, + self, AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage, }, - snapshot_utils, }, std::{ io, @@ -185,221 +176,18 @@ impl AccountsHashVerifier { pending_snapshot_packages: &Mutex, snapshot_config: &SnapshotConfig, ) -> io::Result<()> { - let (merkle_or_lattice_accounts_hash, bank_incremental_snapshot_persistence) = - Self::calculate_and_verify_accounts_hash(&accounts_package, snapshot_config)?; - Self::purge_old_accounts_hashes(&accounts_package, snapshot_config); Self::submit_for_packaging( accounts_package, pending_snapshot_packages, - merkle_or_lattice_accounts_hash, - bank_incremental_snapshot_persistence, + MerkleOrLatticeAccountsHash::Lattice, + None, ); Ok(()) } - /// returns calculated accounts hash - fn calculate_and_verify_accounts_hash( - accounts_package: &AccountsPackage, - snapshot_config: &SnapshotConfig, - ) -> io::Result<( - MerkleOrLatticeAccountsHash, - Option, - )> { - match accounts_package.accounts_hash_algorithm { - AccountsHashAlgorithm::Merkle => { - debug!( - "calculate_and_verify_accounts_hash(): snapshots lt hash is disabled, DO \ - merkle-based accounts hash calculation", - ); - } - AccountsHashAlgorithm::Lattice => { - debug!( - "calculate_and_verify_accounts_hash(): snapshots lt hash is enabled, SKIP \ - merkle-based accounts hash calculation", - ); - return Ok((MerkleOrLatticeAccountsHash::Lattice, None)); - } - } - - let accounts_hash_calculation_kind = match accounts_package.package_kind { - AccountsPackageKind::Snapshot(snapshot_kind) => match snapshot_kind { - SnapshotKind::FullSnapshot => CalcAccountsHashKind::Full, - SnapshotKind::IncrementalSnapshot(_) => CalcAccountsHashKind::Incremental, - }, - }; - - let 
(accounts_hash_kind, bank_incremental_snapshot_persistence) = - match accounts_hash_calculation_kind { - CalcAccountsHashKind::Full => { - let (accounts_hash, _capitalization) = - Self::_calculate_full_accounts_hash(accounts_package); - (accounts_hash.into(), None) - } - CalcAccountsHashKind::Incremental => { - let AccountsPackageKind::Snapshot(SnapshotKind::IncrementalSnapshot(base_slot)) = - accounts_package.package_kind - else { - panic!("Calculating incremental accounts hash requires a base slot"); - }; - let accounts_db = &accounts_package.accounts.accounts_db; - let Some((base_accounts_hash, base_capitalization)) = - accounts_db.get_accounts_hash(base_slot) - else { - #[rustfmt::skip] - panic!( - "incremental snapshot requires accounts hash and capitalization from \ - the full snapshot it is based on\n\ - package: {accounts_package:?}\n\ - accounts hashes: {:?}\n\ - incremental accounts hashes: {:?}\n\ - full snapshot archives: {:?}\n\ - bank snapshots: {:?}", - accounts_db.get_accounts_hashes(), - accounts_db.get_incremental_accounts_hashes(), - snapshot_utils::get_full_snapshot_archives( - &snapshot_config.full_snapshot_archives_dir, - ), - snapshot_utils::get_bank_snapshots(&snapshot_config.bank_snapshots_dir), - ); - }; - let (incremental_accounts_hash, incremental_capitalization) = - Self::_calculate_incremental_accounts_hash(accounts_package, base_slot); - let bank_incremental_snapshot_persistence = - BankIncrementalSnapshotPersistence { - full_slot: base_slot, - full_hash: base_accounts_hash.into(), - full_capitalization: base_capitalization, - incremental_hash: incremental_accounts_hash.into(), - incremental_capitalization, - }; - ( - incremental_accounts_hash.into(), - Some(bank_incremental_snapshot_persistence), - ) - } - }; - - Ok(( - MerkleOrLatticeAccountsHash::Merkle(accounts_hash_kind), - bank_incremental_snapshot_persistence, - )) - } - - fn _calculate_full_accounts_hash( - accounts_package: &AccountsPackage, - ) -> (AccountsHash, 
/*capitalization*/ u64) { - let (sorted_storages, storage_sort_us) = - measure_us!(SortedStorages::new(&accounts_package.snapshot_storages)); - - let mut timings = HashStats { - storage_sort_us, - ..HashStats::default() - }; - timings.calc_storage_size_quartiles(&accounts_package.snapshot_storages); - - let epoch = accounts_package - .epoch_schedule - .get_epoch(accounts_package.slot); - let calculate_accounts_hash_config = CalcAccountsHashConfig { - use_bg_thread_pool: true, - ancestors: None, - epoch_schedule: &accounts_package.epoch_schedule, - epoch, - store_detailed_debug_info_on_failure: false, - }; - - let slot = accounts_package.slot; - let ((accounts_hash, lamports), measure_hash_us) = - measure_us!(accounts_package.accounts.accounts_db.update_accounts_hash( - &calculate_accounts_hash_config, - &sorted_storages, - slot, - timings, - )); - - if accounts_package.expected_capitalization != lamports { - // before we assert, run the hash calc again. This helps track down whether it could have been a failure in a race condition possibly with shrink. - // We could add diagnostics to the hash calc here to produce a per bin cap or something to help narrow down how many pubkeys are different. 
- let calculate_accounts_hash_config = CalcAccountsHashConfig { - // since we're going to assert, use the fg thread pool to go faster - use_bg_thread_pool: false, - // now that we've failed, store off the failing contents that produced a bad capitalization - store_detailed_debug_info_on_failure: true, - ..calculate_accounts_hash_config - }; - let second_accounts_hash = accounts_package - .accounts - .accounts_db - .calculate_accounts_hash( - &calculate_accounts_hash_config, - &sorted_storages, - HashStats::default(), - ); - panic!( - "accounts hash capitalization mismatch: expected {}, but calculated {} (then \ - recalculated {})", - accounts_package.expected_capitalization, lamports, second_accounts_hash.1, - ); - } - - datapoint_info!( - "accounts_hash_verifier", - ("calculate_hash", measure_hash_us, i64), - ); - - (accounts_hash, lamports) - } - - fn _calculate_incremental_accounts_hash( - accounts_package: &AccountsPackage, - base_slot: Slot, - ) -> (IncrementalAccountsHash, /*capitalization*/ u64) { - let incremental_storages = - accounts_package - .snapshot_storages - .iter() - .filter_map(|storage| { - let storage_slot = storage.slot(); - (storage_slot > base_slot).then_some((storage, storage_slot)) - }); - let sorted_storages = SortedStorages::new_with_slots(incremental_storages, None, None); - - let epoch = accounts_package - .epoch_schedule - .get_epoch(accounts_package.slot); - let calculate_accounts_hash_config = CalcAccountsHashConfig { - use_bg_thread_pool: true, - ancestors: None, - epoch_schedule: &accounts_package.epoch_schedule, - epoch, - store_detailed_debug_info_on_failure: false, - }; - - let (incremental_accounts_hash, measure_hash_us) = measure_us!(accounts_package - .accounts - .accounts_db - .update_incremental_accounts_hash( - &calculate_accounts_hash_config, - &sorted_storages, - accounts_package.slot, - HashStats::default(), - )); - - datapoint_info!( - "accounts_hash_verifier", - ( - "calculate_incremental_accounts_hash_us", - 
measure_hash_us, - i64 - ), - ); - - incremental_accounts_hash - } - fn purge_old_accounts_hashes( accounts_package: &AccountsPackage, snapshot_config: &SnapshotConfig, @@ -462,7 +250,10 @@ impl AccountsHashVerifier { #[cfg(test)] mod tests { - use {super::*, rand::seq::SliceRandom, solana_runtime::snapshot_package::SnapshotKind}; + use { + super::*, rand::seq::SliceRandom, solana_clock::Slot, + solana_runtime::snapshot_package::SnapshotKind, + }; fn new(package_kind: AccountsPackageKind, slot: Slot) -> AccountsPackage { AccountsPackage { diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 8f8e2e783f6a6c..bb7a251b3fb521 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -35,7 +35,6 @@ pub struct AccountsPackage { pub accounts: Arc, pub epoch_schedule: EpochSchedule, pub rent_collector: RentCollector, - pub accounts_hash_algorithm: AccountsHashAlgorithm, /// Supplemental information needed for snapshots pub snapshot_info: Option, @@ -83,21 +82,13 @@ impl AccountsPackage { } }; - let accounts_hash_algorithm = AccountsHashAlgorithm::Lattice; - Self::_new( - package_kind, - bank, - snapshot_storages, - accounts_hash_algorithm, - Some(snapshot_info), - ) + Self::_new(package_kind, bank, snapshot_storages, Some(snapshot_info)) } fn _new( package_kind: AccountsPackageKind, bank: &Bank, snapshot_storages: Vec>, - accounts_hash_algorithm: AccountsHashAlgorithm, snapshot_info: Option, ) -> Self { Self { @@ -109,7 +100,6 @@ impl AccountsPackage { accounts: bank.accounts(), epoch_schedule: bank.epoch_schedule().clone(), rent_collector: bank.rent_collector().clone(), - accounts_hash_algorithm, snapshot_info, enqueued: Instant::now(), } @@ -131,7 +121,6 @@ impl AccountsPackage { accounts: Arc::new(accounts), epoch_schedule: EpochSchedule::default(), rent_collector: RentCollector::default(), - accounts_hash_algorithm: AccountsHashAlgorithm::Merkle, snapshot_info: Some(SupplementalSnapshotInfo { 
status_cache_slot_deltas: Vec::default(), bank_fields_to_serialize: BankFieldsToSerialize::default_for_tests(), @@ -150,7 +139,6 @@ impl std::fmt::Debug for AccountsPackage { .field("kind", &self.package_kind) .field("slot", &self.slot) .field("block_height", &self.block_height) - .field("accounts_hash_algorithm", &self.accounts_hash_algorithm) .finish_non_exhaustive() } } @@ -303,12 +291,3 @@ impl SnapshotKind { matches!(self, SnapshotKind::IncrementalSnapshot(_)) } } - -/// Which algorithm should be used to calculate the accounts hash? -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum AccountsHashAlgorithm { - /// Merkle-based accounts hash algorithm - Merkle, - /// Lattice-based accounts hash algorithm - Lattice, -} From ca91a37b324640783164030cf037b8aedff3e9bf Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Mon, 21 Jul 2025 12:26:19 -0500 Subject: [PATCH 15/68] refactor - remove wrapper function (#7059) --- cost-model/src/cost_model.rs | 68 ++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index a0e8196c71f699..77c27319c7c2f1 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -41,8 +41,9 @@ impl CostModel { if transaction.is_simple_vote_transaction() { TransactionCost::SimpleVote { transaction } } else { - let (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) = - Self::get_transaction_cost(transaction, feature_set); + let (programs_execution_cost, loaded_accounts_data_size_cost) = + Self::get_estimated_execution_cost(transaction, feature_set); + let data_bytes_cost = Self::get_instructions_data_cost(transaction); Self::calculate_non_vote_transaction_cost( transaction, transaction.program_instructions_iter(), @@ -97,8 +98,9 @@ impl CostModel { if transaction.is_simple_vote_transaction() { return TransactionCost::SimpleVote { transaction }; } - let 
(programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) = - Self::get_transaction_cost(transaction, feature_set); + let (programs_execution_cost, loaded_accounts_data_size_cost) = + Self::get_estimated_execution_cost(transaction, feature_set); + let data_bytes_cost = Self::get_instructions_data_cost(transaction); Self::calculate_non_vote_transaction_cost( transaction, instructions, @@ -181,18 +183,6 @@ impl CostModel { WRITE_LOCK_UNITS.saturating_mul(num_write_locks) } - /// Return (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) - fn get_transaction_cost(meta: &impl StaticMeta, feature_set: &FeatureSet) -> (u64, u64, u16) { - let data_bytes_cost = Self::get_instructions_data_cost(meta); - let (programs_execution_cost, loaded_accounts_data_size_cost) = - Self::get_estimated_execution_cost(meta, feature_set); - ( - programs_execution_cost, - loaded_accounts_data_size_cost, - data_bytes_cost, - ) - } - /// Return (programs_execution_cost, loaded_accounts_data_size_cost) fn get_estimated_execution_cost( transaction: &impl StaticMeta, @@ -523,10 +513,10 @@ mod tests { let feature_set = FeatureSet::default(); let expected_execution_cost = u64::from(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT); - let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&simple_transaction, &feature_set); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + CostModel::get_estimated_execution_cost(&simple_transaction, &feature_set); - assert_eq!(expected_execution_cost, program_execution_cost); + assert_eq!(expected_execution_cost, programs_execution_cost); } #[test] @@ -553,10 +543,11 @@ mod tests { DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, ), ] { - let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&token_transaction, &feature_set); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + 
CostModel::get_estimated_execution_cost(&token_transaction, &feature_set); + let data_bytes_cost = CostModel::get_instructions_data_cost(&token_transaction); - assert_eq!(expected_execution_cost, program_execution_cost); + assert_eq!(expected_execution_cost, programs_execution_cost); assert_eq!(0, data_bytes_cost); } } @@ -606,10 +597,11 @@ mod tests { (FeatureSet::default(), expected_cu_limit as u64), (FeatureSet::all_enabled(), expected_cu_limit as u64), ] { - let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&token_transaction, &feature_set); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + CostModel::get_estimated_execution_cost(&token_transaction, &feature_set); + let data_bytes_cost = CostModel::get_instructions_data_cost(&token_transaction); - assert_eq!(expected_execution_cost, program_execution_cost); + assert_eq!(expected_execution_cost, programs_execution_cost); assert_eq!(1, data_bytes_cost); } } @@ -646,9 +638,9 @@ mod tests { let token_transaction = RuntimeTransaction::from_transaction_for_tests(tx); for feature_set in [FeatureSet::default(), FeatureSet::all_enabled()] { - let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&token_transaction, &feature_set); - assert_eq!(0, program_execution_cost); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + CostModel::get_estimated_execution_cost(&token_transaction, &feature_set); + assert_eq!(0, programs_execution_cost); } } @@ -670,8 +662,9 @@ mod tests { // expected cost for two system transfer instructions let feature_set = FeatureSet::default(); let expected_execution_cost = 2 * u64::from(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT); - let (programs_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&tx, &feature_set); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + 
CostModel::get_estimated_execution_cost(&tx, &feature_set); + let data_bytes_cost = CostModel::get_instructions_data_cost(&tx); assert_eq!(expected_execution_cost, programs_execution_cost); assert_eq!(6, data_bytes_cost); } @@ -709,9 +702,10 @@ mod tests { DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 2, ), ] { - let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&tx, &feature_set); - assert_eq!(expected_cost, program_execution_cost); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + CostModel::get_estimated_execution_cost(&tx, &feature_set); + let data_bytes_cost = CostModel::get_instructions_data_cost(&tx); + assert_eq!(expected_cost, programs_execution_cost); assert_eq!(0, data_bytes_cost); } } @@ -827,8 +821,8 @@ mod tests { let feature_set = FeatureSet::default(); let expected_execution_cost = u64::from(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT) + u64::from(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT); - let (programs_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&transaction, &feature_set); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + CostModel::get_estimated_execution_cost(&transaction, &feature_set); assert_eq!(expected_execution_cost, programs_execution_cost); } @@ -851,8 +845,8 @@ mod tests { let feature_set = FeatureSet::default(); let expected_execution_cost = cu_limit as u64; - let (programs_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&transaction, &feature_set); + let (programs_execution_cost, _loaded_accounts_data_size_cost) = + CostModel::get_estimated_execution_cost(&transaction, &feature_set); assert_eq!(expected_execution_cost, programs_execution_cost); } From 17a76c4946d796a8c7c4a6aad272ef971f246783 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 21 Jul 2025 14:03:10 -0400 Subject: [PATCH 16/68] Removes 
MerkleOrLatticeAccountsHash enum (#7039) --- accounts-db/src/accounts_hash.rs | 9 ------ core/src/accounts_hash_verifier.rs | 16 ++-------- runtime/src/bank.rs | 7 ++--- runtime/src/snapshot_bank_utils.rs | 13 ++------ runtime/src/snapshot_hash.rs | 20 +++++-------- runtime/src/snapshot_package.rs | 48 +++++++----------------------- 6 files changed, 24 insertions(+), 89 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 1d07af561a74a3..d842aaa3f31c04 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1246,15 +1246,6 @@ pub const ZERO_LAMPORT_ACCOUNT_LT_HASH: AccountLtHash = AccountLtHash(LtHash::id #[derive(Debug, Clone, Eq, PartialEq)] pub struct AccountsLtHash(pub LtHash); -/// Hash of accounts -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum MerkleOrLatticeAccountsHash { - /// Merkle-based hash of accounts - Merkle(AccountsHashKind), - /// Lattice-based hash of accounts - Lattice, -} - /// Hash of accounts #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum AccountsHashKind { diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 92b747b7c23b70..7a4abc9697f2a7 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -3,7 +3,6 @@ use { crate::snapshot_packager_service::PendingSnapshotPackages, crossbeam_channel::{Receiver, Sender}, - solana_accounts_db::accounts_hash::MerkleOrLatticeAccountsHash, solana_clock::DEFAULT_MS_PER_SLOT, solana_measure::measure_us, solana_runtime::{ @@ -178,12 +177,7 @@ impl AccountsHashVerifier { ) -> io::Result<()> { Self::purge_old_accounts_hashes(&accounts_package, snapshot_config); - Self::submit_for_packaging( - accounts_package, - pending_snapshot_packages, - MerkleOrLatticeAccountsHash::Lattice, - None, - ); + Self::submit_for_packaging(accounts_package, pending_snapshot_packages, None); Ok(()) } @@ -222,7 +216,6 @@ impl AccountsHashVerifier { fn submit_for_packaging( 
accounts_package: AccountsPackage, pending_snapshot_packages: &Mutex, - merkle_or_lattice_accounts_hash: MerkleOrLatticeAccountsHash, bank_incremental_snapshot_persistence: Option, ) { if !matches!( @@ -232,11 +225,8 @@ impl AccountsHashVerifier { return; } - let snapshot_package = SnapshotPackage::new( - accounts_package, - merkle_or_lattice_accounts_hash, - bank_incremental_snapshot_persistence, - ); + let snapshot_package = + SnapshotPackage::new(accounts_package, bank_incremental_snapshot_persistence); pending_snapshot_packages .lock() .unwrap() diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index cad872f09d75db..2ab65f181d4596 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -80,7 +80,7 @@ use { }, accounts_hash::{ AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats, - IncrementalAccountsHash, MerkleOrLatticeAccountsHash, + IncrementalAccountsHash, }, accounts_index::{IndexKey, ScanConfig, ScanResult}, accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -4809,10 +4809,7 @@ impl Bank { /// /// This fn is used at startup to verify the bank was rebuilt correctly. 
pub fn get_snapshot_hash(&self) -> SnapshotHash { - SnapshotHash::new( - &MerkleOrLatticeAccountsHash::Lattice, - Some(self.accounts_lt_hash.lock().unwrap().0.checksum()), - ) + SnapshotHash::new(Some(self.accounts_lt_hash.lock().unwrap().0.checksum())) } pub fn load_account_into_read_cache(&self, key: &Pubkey) { diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 47816365ff586a..a228befa9f7ee3 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -39,7 +39,6 @@ use { log::*, solana_accounts_db::{ accounts_db::{AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId}, - accounts_hash::MerkleOrLatticeAccountsHash, accounts_update_notifier_interface::AccountsUpdateNotifier, utils::remove_dir_contents, }, @@ -789,7 +788,6 @@ fn bank_to_full_snapshot_archive_with( bank.force_flush_accounts_cache(); bank.clean_accounts(); - let merkle_or_lattice_accounts_hash = MerkleOrLatticeAccountsHash::Lattice; let snapshot_storages = bank.get_snapshot_storages(None); let status_cache_slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); let accounts_package = AccountsPackage::new_for_snapshot( @@ -798,8 +796,7 @@ fn bank_to_full_snapshot_archive_with( snapshot_storages, status_cache_slot_deltas, ); - let snapshot_package = - SnapshotPackage::new(accounts_package, merkle_or_lattice_accounts_hash, None); + let snapshot_package = SnapshotPackage::new(accounts_package, None); let snapshot_config = SnapshotConfig { full_snapshot_archives_dir: full_snapshot_archives_dir.as_ref().to_path_buf(), @@ -849,8 +846,6 @@ pub fn bank_to_incremental_snapshot_archive( bank.force_flush_accounts_cache(); bank.clean_accounts(); - let (merkle_or_lattice_accounts_hash, bank_incremental_snapshot_persistence) = - (MerkleOrLatticeAccountsHash::Lattice, None); let snapshot_storages = bank.get_snapshot_storages(Some(full_snapshot_slot)); let status_cache_slot_deltas = 
bank.status_cache.read().unwrap().root_slot_deltas(); let accounts_package = AccountsPackage::new_for_snapshot( @@ -859,11 +854,7 @@ pub fn bank_to_incremental_snapshot_archive( snapshot_storages, status_cache_slot_deltas, ); - let snapshot_package = SnapshotPackage::new( - accounts_package, - merkle_or_lattice_accounts_hash, - bank_incremental_snapshot_persistence, - ); + let snapshot_package = SnapshotPackage::new(accounts_package, None); // Note: Since the snapshot_storages above are *only* the incremental storages, // this bank snapshot *cannot* be used by fastboot. diff --git a/runtime/src/snapshot_hash.rs b/runtime/src/snapshot_hash.rs index 6465d127076952..2897545f73cf60 100644 --- a/runtime/src/snapshot_hash.rs +++ b/runtime/src/snapshot_hash.rs @@ -1,7 +1,7 @@ //! Helper types and functions for handling and dealing with snapshot hashes. use { - solana_accounts_db::accounts_hash::MerkleOrLatticeAccountsHash, solana_clock::Slot, - solana_hash::Hash, solana_lattice_hash::lt_hash::Checksum as AccountsLtHashChecksum, + solana_clock::Slot, solana_hash::Hash, + solana_lattice_hash::lt_hash::Checksum as AccountsLtHashChecksum, }; /// At startup, when loading from snapshots, the starting snapshot hashes need to be passed to @@ -32,19 +32,13 @@ impl SnapshotHash { /// Make a snapshot hash from accounts hashes #[must_use] pub fn new( - merkle_or_lattice_accounts_hash: &MerkleOrLatticeAccountsHash, accounts_lt_hash_checksum: Option, // option wrapper will be removed next ) -> Self { - let accounts_hash = match merkle_or_lattice_accounts_hash { - MerkleOrLatticeAccountsHash::Merkle(accounts_hash_kind) => { - *accounts_hash_kind.as_hash() - } - MerkleOrLatticeAccountsHash::Lattice => Hash::new_from_array( - accounts_lt_hash_checksum - .expect("lattice kind must have lt hash checksum") - .0, - ), - }; + let accounts_hash = Hash::new_from_array( + accounts_lt_hash_checksum + .expect("lattice kind must have lt hash checksum") + .0, + ); Self(accounts_hash) } } diff --git 
a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index bb7a251b3fb521..72a22e9930420d 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -8,9 +8,7 @@ use { solana_accounts_db::{ accounts::Accounts, accounts_db::AccountStorageEntry, - accounts_hash::{ - AccountsDeltaHash, AccountsHash, AccountsHashKind, MerkleOrLatticeAccountsHash, - }, + accounts_hash::{AccountsDeltaHash, AccountsHash}, }, solana_clock::Slot, solana_epoch_schedule::EpochSchedule, @@ -173,7 +171,7 @@ pub struct SnapshotPackage { pub accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next pub accounts_hash: AccountsHash, pub write_version: u64, - pub bank_incremental_snapshot_persistence: Option, + pub bank_incremental_snapshot_persistence: Option, // obsolete, will be removed next /// The instant this snapshot package was sent to the queue. /// Used to track how long snapshot packages wait before handling. @@ -183,7 +181,6 @@ pub struct SnapshotPackage { impl SnapshotPackage { pub fn new( accounts_package: AccountsPackage, - merkle_or_lattice_accounts_hash: MerkleOrLatticeAccountsHash, bank_incremental_snapshot_persistence: Option, ) -> Self { let AccountsPackageKind::Snapshot(snapshot_kind) = accounts_package.package_kind; @@ -193,48 +190,23 @@ impl SnapshotPackage { ); }; - let accounts_hash = match merkle_or_lattice_accounts_hash { - MerkleOrLatticeAccountsHash::Merkle(accounts_hash_kind) => { - match accounts_hash_kind { - AccountsHashKind::Full(accounts_hash) => accounts_hash, - AccountsHashKind::Incremental(_) => { - // The accounts hash is only needed when serializing a full snapshot. - // When serializing an incremental snapshot, there will not be a full accounts hash - // at `slot`. In that case, use the default, because it doesn't actually get used. - // The incremental snapshot will use the BankIncrementalSnapshotPersistence - // field, so ensure it is Some. 
- assert!(bank_incremental_snapshot_persistence.is_some()); - AccountsHash(Hash::default()) - } - } - } - MerkleOrLatticeAccountsHash::Lattice => { - // This is the merkle-based accounts hash, which isn't used in the Lattice case, - // so any value is fine here. - AccountsHash(Hash::default()) - } - }; - Self { snapshot_kind, slot: accounts_package.slot, block_height: accounts_package.block_height, - hash: SnapshotHash::new( - &merkle_or_lattice_accounts_hash, - Some( - snapshot_info - .bank_fields_to_serialize - .accounts_lt_hash - .0 - .checksum(), - ), - ), + hash: SnapshotHash::new(Some( + snapshot_info + .bank_fields_to_serialize + .accounts_lt_hash + .0 + .checksum(), + )), snapshot_storages: accounts_package.snapshot_storages, status_cache_slot_deltas: snapshot_info.status_cache_slot_deltas, bank_fields_to_serialize: snapshot_info.bank_fields_to_serialize, accounts_delta_hash: snapshot_info.accounts_delta_hash, bank_hash_stats: snapshot_info.bank_hash_stats, - accounts_hash, + accounts_hash: AccountsHash(Hash::default()), // obsolete, will be removed next bank_incremental_snapshot_persistence, write_version: snapshot_info.write_version, enqueued: Instant::now(), From 1833b170e611010b2a8d3f047e26053398d3c106 Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Mon, 21 Jul 2025 11:09:26 -0700 Subject: [PATCH 17/68] Add `slot` to the error context of `EpochRewardsPeriodActiveErrorData` and `SlotNotEpochBoundaryErrorData` (#6962) * Add `slot` to the error context of `EpochRewardsPeriodActiveErrorData` and `SlotNotEpochBoundaryErrorData` * Add CHANGELOG entries * Directly serialize the JSON-RPC error data, rather than bouncing it through a struct * Make `EpochRewardsPeriodActiveErrorData` from the perspective of deserializing _old_ data by making `slot` an Option --- CHANGELOG.md | 4 +++ Cargo.lock | 1 + rpc-client-api/Cargo.toml | 3 ++ rpc-client-api/src/custom_error.rs | 46 +++++++++++++++++++++++++++++- 4 files changed, 53 insertions(+), 1 deletion(-) diff 
--git a/CHANGELOG.md b/CHANGELOG.md index cffe0b8056b29f..9562034a89822d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,10 @@ Release channels have their own copy of this changelog: ### RPC +#### Breaking +* Added a `slot` property to `EpochRewardsPeriodActiveErrorData` +* Added error data containing a `slot` property to `RpcCustomError::SlotNotEpochBoundary` + #### Changes * The subscription server now prioritizes processing received messages before sending out responses. This ensures that new subscription requests and time-sensitive messages like `PING` opcodes take priority over notifications. diff --git a/Cargo.lock b/Cargo.lock index 7242a352238073..4e81f130b2daa9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10056,6 +10056,7 @@ dependencies = [ "solana-signer", "solana-transaction-error", "solana-transaction-status-client-types", + "test-case", "thiserror 2.0.12", ] diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml index 0ba5f76a88aac7..b1e5692555e256 100644 --- a/rpc-client-api/Cargo.toml +++ b/rpc-client-api/Cargo.toml @@ -27,3 +27,6 @@ solana-signer = { workspace = true } solana-transaction-error = { workspace = true } solana-transaction-status-client-types = { workspace = true } thiserror = { workspace = true } + +[dev-dependencies] +test-case = { workspace = true } diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index 57381f2676fea3..cd1449ae6c8664 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -92,11 +92,13 @@ pub struct MinContextSlotNotReachedErrorData { pub context_slot: Slot, } +#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EpochRewardsPeriodActiveErrorData { pub current_block_height: u64, pub rewards_complete_block_height: u64, + pub slot: Option, } impl From for RpcCustomError { @@ -237,6 +239,7 @@ impl From for Error { data: 
Some(serde_json::json!(EpochRewardsPeriodActiveErrorData { current_block_height, rewards_complete_block_height, + slot: Some(slot), })), }, RpcCustomError::SlotNotEpochBoundary { slot } => Self { @@ -245,7 +248,9 @@ impl From for Error { "Rewards cannot be found because slot {slot} is not the epoch boundary. This \ may be due to gap in the queried node's local ledger or long-term storage" ), - data: None, + data: Some(serde_json::json!({ + "slot": slot, + })), }, RpcCustomError::LongTermStorageUnreachable => Self { code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_UNREACHABLE), @@ -255,3 +260,42 @@ impl From for Error { } } } + +#[cfg(test)] +mod tests { + use { + crate::custom_error::EpochRewardsPeriodActiveErrorData, serde_json::Value, + test_case::test_case, + }; + + #[test_case(serde_json::json!({ + "currentBlockHeight": 123, + "rewardsCompleteBlockHeight": 456 + }); "Pre-3.0 schema")] + #[test_case(serde_json::json!({ + "currentBlockHeight": 123, + "rewardsCompleteBlockHeight": 456, + "slot": 789 + }); "3.0+ schema")] + fn test_deseriailze_epoch_rewards_period_active_error_data(serialized_data: Value) { + let expected_current_block_height = serialized_data + .get("currentBlockHeight") + .map(|v| v.as_u64().unwrap()) + .unwrap(); + let expected_rewards_complete_block_height = serialized_data + .get("rewardsCompleteBlockHeight") + .map(|v| v.as_u64().unwrap()) + .unwrap(); + let expected_slot: Option = serialized_data.get("slot").map(|v| v.as_u64().unwrap()); + let actual: EpochRewardsPeriodActiveErrorData = + serde_json::from_value(serialized_data).expect("Failed to deserialize test fixture"); + assert_eq!( + actual, + EpochRewardsPeriodActiveErrorData { + current_block_height: expected_current_block_height, + rewards_complete_block_height: expected_rewards_complete_block_height, + slot: expected_slot, + } + ); + } +} From f5f68eebd79fbf3d9b6cf570d2f08ed1c1f084f8 Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Mon, 21 Jul 2025 13:16:04 
-0700 Subject: [PATCH 18/68] Don't construct or notify `RpcSubscriptions` when the RPC is off (#6516) * Don't construct or notify `RpcSubscriptions` when the RPC is off * Less dumb Rust * Keep going? Can't get it to typecheck. * Revert some places that still need `Arc`s due to threads * Cleanup tests, avoid a clone in tpu --------- Co-authored-by: Jon C --- core/src/cluster_info_vote_listener.rs | 36 ++++++------ core/src/commitment_service.rs | 23 +++++--- core/src/replay_stage.rs | 80 +++++++++++++++----------- core/src/tpu.rs | 4 +- core/src/tvu.rs | 10 ++-- core/src/validator.rs | 32 +++++------ rpc/src/rpc_pubsub_service.rs | 2 +- 7 files changed, 105 insertions(+), 82 deletions(-) diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index f8dbe38565c225..68887dd1c04398 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -193,7 +193,7 @@ impl ClusterInfoVoteListener { verified_packets_sender: BankingPacketSender, vote_tracker: Arc, bank_forks: Arc>, - subscriptions: Arc, + subscriptions: Option>, verified_vote_sender: VerifiedVoteSender, gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender, replay_votes_receiver: ReplayVoteReceiver, @@ -230,7 +230,7 @@ impl ClusterInfoVoteListener { vote_tracker, &mut bank_hash_cache, dumped_slot_subscription, - subscriptions, + subscriptions.as_deref(), gossip_verified_vote_hash_sender, verified_vote_sender, replay_votes_receiver, @@ -318,7 +318,7 @@ impl ClusterInfoVoteListener { vote_tracker: Arc, bank_hash_cache: &mut BankHashCache, dumped_slot_subscription: DumpedSlotSubscription, - subscriptions: Arc, + subscriptions: Option<&RpcSubscriptions>, gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender, verified_vote_sender: VerifiedVoteSender, replay_votes_receiver: ReplayVoteReceiver, @@ -355,7 +355,7 @@ impl ClusterInfoVoteListener { &gossip_vote_txs_receiver, &vote_tracker, &root_bank, - &subscriptions, + 
subscriptions, &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, @@ -389,7 +389,7 @@ impl ClusterInfoVoteListener { gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver, vote_tracker: &VoteTracker, root_bank: &Bank, - subscriptions: &RpcSubscriptions, + subscriptions: Option<&RpcSubscriptions>, gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, verified_vote_sender: &VerifiedVoteSender, replay_votes_receiver: &ReplayVoteReceiver, @@ -445,7 +445,7 @@ impl ClusterInfoVoteListener { vote_transaction_signature: Signature, vote_tracker: &VoteTracker, root_bank: &Bank, - subscriptions: &RpcSubscriptions, + rpc_subscriptions: Option<&RpcSubscriptions>, verified_vote_sender: &VerifiedVoteSender, gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, diff: &mut HashMap>, @@ -586,7 +586,9 @@ impl ClusterInfoVoteListener { *latest_vote_slot = max(*latest_vote_slot, last_vote_slot); if is_new_vote { - subscriptions.notify_vote(*vote_pubkey, vote, vote_transaction_signature); + if let Some(rpc_subscriptions) = rpc_subscriptions { + rpc_subscriptions.notify_vote(*vote_pubkey, vote, vote_transaction_signature); + } let _ = verified_vote_sender.send((*vote_pubkey, vote_slots)); } } @@ -597,7 +599,7 @@ impl ClusterInfoVoteListener { gossip_vote_txs: Vec, replayed_votes: Vec, root_bank: &Bank, - subscriptions: &RpcSubscriptions, + subscriptions: Option<&RpcSubscriptions>, gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, verified_vote_sender: &VerifiedVoteSender, bank_notification_sender: &Option, @@ -881,7 +883,7 @@ mod tests { &votes_receiver, &vote_tracker, &bank3, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, @@ -916,7 +918,7 @@ mod tests { &votes_receiver, &vote_tracker, &bank3, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, @@ -1010,7 +1012,7 @@ 
mod tests { &votes_txs_receiver, &vote_tracker, &bank0, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, @@ -1180,7 +1182,7 @@ mod tests { &votes_txs_receiver, &vote_tracker, &bank0, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, @@ -1293,7 +1295,7 @@ mod tests { &votes_receiver, &vote_tracker, &bank, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, @@ -1389,7 +1391,7 @@ mod tests { Signature::default(), )], &bank, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &None, @@ -1438,7 +1440,7 @@ mod tests { Signature::default(), )], &new_root_bank, - &subscriptions, + Some(&subscriptions), &gossip_verified_vote_hash_sender, &verified_vote_sender, &None, @@ -1656,7 +1658,7 @@ mod tests { signature, &vote_tracker, &bank, - &subscriptions, + Some(&subscriptions), &verified_vote_sender, &gossip_verified_vote_hash_sender, &mut diff, @@ -1689,7 +1691,7 @@ mod tests { signature, &vote_tracker, &bank, - &subscriptions, + Some(&subscriptions), &verified_vote_sender, &gossip_verified_vote_hash_sender, &mut diff, diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 54c85728310869..ec55bfaae15539 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -67,7 +67,7 @@ impl AggregateCommitmentService { pub fn new( exit: Arc, block_commitment_cache: Arc>, - subscriptions: Arc, + subscriptions: Option>, ) -> (Sender, Self) { let (sender, receiver): ( Sender, @@ -83,9 +83,12 @@ impl AggregateCommitmentService { break; } - if let Err(RecvTimeoutError::Disconnected) = - Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit) - { + if let Err(RecvTimeoutError::Disconnected) = Self::run( + &receiver, + &block_commitment_cache, + 
subscriptions.as_deref(), + &exit, + ) { break; } }) @@ -97,7 +100,7 @@ impl AggregateCommitmentService { fn run( receiver: &Receiver, block_commitment_cache: &RwLock, - subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, exit: &AtomicBool, ) -> Result<(), RecvTimeoutError> { loop { @@ -136,10 +139,12 @@ impl AggregateCommitmentService { ), ); - // Triggers rpc_subscription notifications as soon as new commitment data is available, - // sending just the commitment cache slot information that the notifications thread - // needs - subscriptions.notify_subscribers(update_commitment_slots); + if let Some(rpc_subscriptions) = rpc_subscriptions { + // Triggers rpc_subscription notifications as soon as new commitment data is + // available, sending just the commitment cache slot information that the + // notifications thread needs + rpc_subscriptions.notify_subscribers(update_commitment_slots); + } } } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 3925ace68340a9..f7fdf71662c1ca 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -280,7 +280,7 @@ pub struct ReplayStageConfig { } pub struct ReplaySenders { - pub rpc_subscriptions: Arc, + pub rpc_subscriptions: Option>, pub slot_status_notifier: Option, pub transaction_status_sender: Option, pub entry_notification_sender: Option, @@ -715,7 +715,7 @@ impl ReplayStage { &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &slot_status_notifier, &mut progress, &mut replay_timing, @@ -741,7 +741,7 @@ impl ReplayStage { &mut heaviest_subtree_fork_choice, &replay_vote_sender, &bank_notification_sender, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &slot_status_notifier, &mut duplicate_slots_tracker, &duplicate_confirmed_slots, @@ -1002,7 +1002,7 @@ impl ReplayStage { &leader_schedule_cache, &lockouts_sender, snapshot_controller.as_deref(), - &rpc_subscriptions, + rpc_subscriptions.as_deref(), 
&block_commitment_cache, &mut heaviest_subtree_fork_choice, &bank_notification_sender, @@ -1160,7 +1160,7 @@ impl ReplayStage { &bank_forks, &poh_recorder, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &slot_status_notifier, &mut progress, &retransmit_slots_sender, @@ -2079,7 +2079,7 @@ impl ReplayStage { bank_forks: &Arc>, poh_recorder: &Arc>, leader_schedule_cache: &Arc, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, progress_map: &mut ProgressMap, retransmit_slots_sender: &Sender, @@ -2257,7 +2257,7 @@ impl ReplayStage { bank: &Bank, root: Slot, err: &BlockstoreProcessorError, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, duplicate_slots_tracker: &mut DuplicateSlotsTracker, duplicate_confirmed_slots: &DuplicateConfirmedSlots, @@ -2309,11 +2309,13 @@ impl ReplayStage { .notify_slot_dead(slot, parent_slot, err.clone()); } - rpc_subscriptions.notify_slot_update(SlotUpdate::Dead { - slot, - err, - timestamp: timestamp(), - }); + if let Some(rpc_subscriptions) = rpc_subscriptions { + rpc_subscriptions.notify_slot_update(SlotUpdate::Dead { + slot, + err, + timestamp: timestamp(), + }); + } let dead_state = DeadState::new_from_state( slot, @@ -2374,7 +2376,7 @@ impl ReplayStage { leader_schedule_cache: &Arc, lockouts_sender: &Sender, snapshot_controller: Option<&SnapshotController>, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, block_commitment_cache: &Arc>, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, @@ -3051,7 +3053,7 @@ impl ReplayStage { transaction_status_sender: Option<&TransactionStatusSender>, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, duplicate_slots_tracker: &mut 
DuplicateSlotsTracker, duplicate_confirmed_slots: &DuplicateConfirmedSlots, @@ -3355,7 +3357,7 @@ impl ReplayStage { heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, replay_vote_sender: &ReplayVoteSender, bank_notification_sender: &Option, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, duplicate_slots_tracker: &mut DuplicateSlotsTracker, duplicate_confirmed_slots: &DuplicateConfirmedSlots, @@ -3980,7 +3982,7 @@ impl ReplayStage { blockstore: &Blockstore, leader_schedule_cache: &Arc, snapshot_controller: Option<&SnapshotController>, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, block_commitment_cache: &Arc>, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, @@ -4042,7 +4044,9 @@ impl ReplayStage { drop_bank_sender, )?; blockstore.slots_stats.mark_rooted(new_root); - rpc_subscriptions.notify_roots(rooted_slots); + if let Some(rpc_subscriptions) = rpc_subscriptions { + rpc_subscriptions.notify_roots(rooted_slots); + } if let Some(sender) = bank_notification_sender { sender .sender @@ -4127,7 +4131,7 @@ impl ReplayStage { blockstore: &Blockstore, bank_forks: &RwLock, leader_schedule_cache: &Arc, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, progress: &mut ProgressMap, replay_timing: &mut ReplayLoopTiming, @@ -4222,11 +4226,13 @@ impl ReplayStage { slot: u64, root_slot: u64, leader: &Pubkey, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, new_bank_options: NewBankOptions, ) -> Bank { - rpc_subscriptions.notify_slot(slot, parent.slot(), root_slot); + if let Some(rpc_subscriptions) = rpc_subscriptions { + rpc_subscriptions.notify_slot(slot, parent.slot(), root_slot); + } if let Some(slot_status_notifier) = slot_status_notifier { slot_status_notifier .read() @@ -4543,6 +4549,8 @@ pub(crate) mod tests { 
bank1.freeze(); bank_forks.write().unwrap().insert(bank1); + let rpc_subscriptions = Some(rpc_subscriptions); + // Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS, // chaining to slot 1 let (shreds, _) = make_slot_entries( @@ -4562,7 +4570,7 @@ pub(crate) mod tests { &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &mut replay_timing, @@ -4591,7 +4599,7 @@ pub(crate) mod tests { &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &mut replay_timing, @@ -5101,13 +5109,15 @@ pub(crate) mod tests { SlotStatusNotifierForTest::new(dead_slots.clone()), ))); + let rpc_subscriptions = Some(rpc_subscriptions); + if let Err(err) = &res { ReplayStage::mark_dead_slot( &blockstore, &bank1, 0, err, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &slot_status_notifier, &mut DuplicateSlotsTracker::default(), &DuplicateConfirmedSlots::new(), @@ -5164,13 +5174,13 @@ pub(crate) mod tests { let exit = Arc::new(AtomicBool::new(false)); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); - let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( + let rpc_subscriptions = Some(Arc::new(RpcSubscriptions::new_for_tests( exit.clone(), max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache.clone(), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), - )); + ))); let (lockouts_sender, _) = AggregateCommitmentService::new( exit, block_commitment_cache.clone(), @@ -6476,12 +6486,14 @@ pub(crate) mod tests { ); blockstore.insert_shreds(shreds, None, false).unwrap(); + let rpc_subscriptions = Some(rpc_subscriptions); + // 3 should now be an active bank ReplayStage::generate_new_bank_forks( &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + 
rpc_subscriptions.as_deref(), &None, &mut progress, &mut replay_timing, @@ -6511,7 +6523,7 @@ pub(crate) mod tests { &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &mut replay_timing, @@ -6542,7 +6554,7 @@ pub(crate) mod tests { &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &mut replay_timing, @@ -6572,7 +6584,7 @@ pub(crate) mod tests { &blockstore, &bank_forks, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &mut replay_timing, @@ -8632,12 +8644,14 @@ pub(crate) mod tests { // this test to use true to avoid skipping the leader slot let has_new_vote_been_rooted = true; + let rpc_subscriptions = Some(rpc_subscriptions); + assert!(!ReplayStage::maybe_start_leader( my_pubkey, bank_forks, &poh_recorder, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &retransmit_slots_sender, @@ -9285,6 +9299,8 @@ pub(crate) mod tests { // this test to use true to avoid skipping the leader slot let has_new_vote_been_rooted = true; + let rpc_subscriptions = Some(rpc_subscriptions); + // We should not attempt to start leader for the dummy_slot assert_matches!( poh_recorder.read().unwrap().reached_leader_slot(&my_pubkey), @@ -9295,7 +9311,7 @@ pub(crate) mod tests { &bank_forks, &poh_recorder, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &retransmit_slots_sender, @@ -9321,7 +9337,7 @@ pub(crate) mod tests { &bank_forks, &poh_recorder, &leader_schedule_cache, - &rpc_subscriptions, + rpc_subscriptions.as_deref(), &None, &mut progress, &retransmit_slots_sender, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index aa3130a4453c87..7411fb73c7cf1f 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -126,7 +126,7 @@ impl Tpu { entry_receiver: Receiver, retransmit_slots_receiver: 
Receiver, sockets: TpuSockets, - subscriptions: &Arc, + subscriptions: Option>, transaction_status_sender: Option, entry_notification_sender: Option, blockstore: Arc, @@ -315,7 +315,7 @@ impl Tpu { gossip_vote_sender, vote_tracker, bank_forks.clone(), - subscriptions.clone(), + subscriptions, verified_vote_sender, gossip_verified_vote_hash_sender, replay_vote_receiver, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index b7172d5883b002..0575a1d8c001a1 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -134,7 +134,7 @@ impl Tvu { sockets: TvuSockets, blockstore: Arc, ledger_signal_receiver: Receiver, - rpc_subscriptions: &Arc, + rpc_subscriptions: Option>, poh_recorder: &Arc>, tower: Tower, tower_storage: Arc, @@ -224,7 +224,7 @@ impl Tvu { turbine_quic_endpoint_sender, retransmit_receiver, max_slots.clone(), - Some(rpc_subscriptions.clone()), + rpc_subscriptions.clone(), slot_status_notifier.clone(), tvu_config.xdp_sender, ); @@ -295,7 +295,7 @@ impl Tvu { let (voting_sender, voting_receiver) = unbounded(); let replay_senders = ReplaySenders { - rpc_subscriptions: rpc_subscriptions.clone(), + rpc_subscriptions, slot_status_notifier, transaction_status_sender, entry_notification_sender, @@ -557,13 +557,13 @@ pub mod tests { }, blockstore, ledger_signal_receiver, - &Arc::new(RpcSubscriptions::new_for_tests( + Some(Arc::new(RpcSubscriptions::new_for_tests( exit.clone(), max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache.clone(), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), - )), + ))), &poh_recorder, Tower::default(), Arc::new(FileTowerStorage::default()), diff --git a/core/src/validator.rs b/core/src/validator.rs index 2e54c1193b7848..2f978877d0133c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1063,17 +1063,6 @@ impl Validator { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); - let rpc_subscriptions = 
Arc::new(RpcSubscriptions::new_with_config( - exit.clone(), - max_complete_transaction_status_slot.clone(), - blockstore.clone(), - bank_forks.clone(), - block_commitment_cache.clone(), - optimistically_confirmed_bank.clone(), - &config.pubsub_config, - None, - )); - let max_slots = Arc::new(MaxSlots::default()); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); @@ -1152,6 +1141,7 @@ impl Validator { Arc::new(AtomicBool::new(config.rpc_config.disable_health_check)); let ( json_rpc_service, + rpc_subscriptions, pubsub_service, completed_data_sets_sender, completed_data_sets_service, @@ -1208,13 +1198,22 @@ impl Validator { send_transaction_service_config: config.send_transaction_service_config.clone(), max_slots: max_slots.clone(), leader_schedule_cache: leader_schedule_cache.clone(), - max_complete_transaction_status_slot, + max_complete_transaction_status_slot: max_complete_transaction_status_slot.clone(), prioritization_fee_cache: prioritization_fee_cache.clone(), client_option, }; let json_rpc_service = JsonRpcService::new_with_config(rpc_svc_config).map_err(ValidatorError::Other)?; - + let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config( + exit.clone(), + max_complete_transaction_status_slot, + blockstore.clone(), + bank_forks.clone(), + block_commitment_cache.clone(), + optimistically_confirmed_bank.clone(), + &config.pubsub_config, + None, + )); let pubsub_service = if !config.rpc_config.full_api { None } else { @@ -1283,6 +1282,7 @@ impl Validator { }); ( Some(json_rpc_service), + Some(rpc_subscriptions), pubsub_service, completed_data_sets_sender, completed_data_sets_service, @@ -1291,7 +1291,7 @@ impl Validator { bank_notification_sender_config, ) } else { - (None, None, None, None, None, None, None) + (None, None, None, None, None, None, None, None) }; if config.halt_at_slot.is_some() { @@ -1522,7 +1522,7 @@ impl Validator { }, blockstore.clone(), ledger_signal_receiver, - &rpc_subscriptions, + rpc_subscriptions.clone(), 
&poh_recorder, tower, config.tower_storage.clone(), @@ -1627,7 +1627,7 @@ impl Validator { vote_forwarding_client: node.sockets.tpu_vote_forwarding_client, vortexor_receivers: node.sockets.vortexor_receivers, }, - &rpc_subscriptions, + rpc_subscriptions.clone(), transaction_status_sender, entry_notification_sender, blockstore.clone(), diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 8187741f442ebc..ffac5b1b6503da 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -83,7 +83,7 @@ pub struct PubSubService { impl PubSubService { pub fn new( pubsub_config: PubSubConfig, - subscriptions: &Arc, + subscriptions: &RpcSubscriptions, pubsub_addr: SocketAddr, ) -> (Trigger, Self) { let subscription_control = subscriptions.control().clone(); From a4c8818c874c8975bd3642d7f56564749aa49a30 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Mon, 21 Jul 2025 14:36:26 -0700 Subject: [PATCH 19/68] Support closing connections when QUIC connection drop (#6857) * Support closing connections when QUIC connection drop * Added a unit test on connection close * removed the superfluous connection.closed().await since it is already locally closed --- quic-client/src/lib.rs | 18 +++++++- quic-client/src/nonblocking/quic_client.rs | 20 ++++++++ quic-client/src/quic_client.rs | 6 +++ quic-client/tests/quic_client.rs | 53 +++++++++++++++++++++- 4 files changed, 94 insertions(+), 3 deletions(-) diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs index d5e748b8398660..930a0cc078fbc9 100644 --- a/quic-client/src/lib.rs +++ b/quic-client/src/lib.rs @@ -12,8 +12,11 @@ use { QuicClient, QuicClientConnection as NonblockingQuicClientConnection, QuicLazyInitializedEndpoint, }, - quic_client::QuicClientConnection as BlockingQuicClientConnection, + quic_client::{ + close_quic_connection, QuicClientConnection as BlockingQuicClientConnection, + }, }, + log::debug, quic_client::get_runtime,
quinn::{Endpoint, EndpointConfig, TokioRuntime}, solana_connection_cache::{ @@ -72,6 +75,19 @@ impl ConnectionPool for QuicPool { } } +impl Drop for QuicPool { + fn drop(&mut self) { + debug!( + "Dropping QuicPool with {} connections", + self.connections.len() + ); + for connection in self.connections.drain(..) { + // Explicitly drop each connection to ensure resources are released + close_quic_connection(connection.0.clone()); + } + } +} + pub struct QuicConfig { // Arc to prevent having to copy the struct client_certificate: RwLock>, diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 7275b98398c82f..0dfc82f6a0b127 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -229,6 +229,26 @@ pub struct QuicClient { stats: Arc, } +const CONNECTION_CLOSE_CODE_APPLICATION_CLOSE: u32 = 0u32; +const CONNECTION_CLOSE_REASON_APPLICATION_CLOSE: &[u8] = b"dropped"; + +impl QuicClient { + /// Explicitly close the connection. Must be called manually if cleanup is needed. 
+ pub async fn close(&self) { + let mut conn_guard = self.connection.lock().await; + if let Some(conn) = conn_guard.take() { + debug!( + "Closing connection to {} connection_id: {:?}", + self.addr, conn.connection + ); + conn.connection.close( + CONNECTION_CLOSE_CODE_APPLICATION_CLOSE.into(), + CONNECTION_CLOSE_REASON_APPLICATION_CLOSE, + ); + } + } +} + impl QuicClient { pub fn new(endpoint: Arc, addr: SocketAddr) -> Self { Self { diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs index e51370326ad4a3..4b1b10462dbcd5 100644 --- a/quic-client/src/quic_client.rs +++ b/quic-client/src/quic_client.rs @@ -180,3 +180,9 @@ impl ClientConnection for QuicClientConnection { Ok(()) } } + +pub(crate) fn close_quic_connection(connection: Arc) { + // Close the connection and release resources + trace!("Closing QUIC connection to {}", connection.server_addr()); + RUNTIME.block_on(connection.close()); +} diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index d6c8e23eb5cc2a..23234fd7631a6d 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -3,12 +3,14 @@ mod tests { use { crossbeam_channel::{unbounded, Receiver}, log::*, - solana_connection_cache::connection_cache_stats::ConnectionCacheStats, + solana_connection_cache::{ + client_connection::ClientStats, connection_cache_stats::ConnectionCacheStats, + }, solana_keypair::Keypair, solana_net_utils::sockets::{bind_to, localhost_port_range_for_tests}, solana_packet::PACKET_DATA_SIZE, solana_perf::packet::PacketBatch, - solana_quic_client::nonblocking::quic_client::QuicLazyInitializedEndpoint, + solana_quic_client::nonblocking::quic_client::{QuicClient, QuicLazyInitializedEndpoint}, solana_streamer::{ quic::{QuicServerParams, SpawnServerResult}, streamer::StakedNodes, @@ -310,4 +312,51 @@ mod tests { response_recv_thread.join().unwrap(); info!("Response receiver exited!"); } + + #[tokio::test] + async fn test_connection_close() { + 
solana_logger::setup(); + let (sender, receiver) = unbounded(); + let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let (s, exit, keypair) = server_args(); + let solana_streamer::nonblocking::quic::SpawnNonBlockingServerResult { + endpoints: _, + stats: _, + thread: t, + max_concurrent_connections: _, + } = solana_streamer::nonblocking::quic::spawn_server( + "quic_streamer_test", + s.try_clone().unwrap(), + &keypair, + sender, + exit.clone(), + staked_nodes, + QuicServerParams::default_for_tests(), + ) + .unwrap(); + + let addr = s.local_addr().unwrap().ip(); + let port = s.local_addr().unwrap().port(); + let tpu_addr = SocketAddr::new(addr, port); + let connection_cache_stats = Arc::new(ConnectionCacheStats::default()); + let client = QuicClient::new(Arc::new(QuicLazyInitializedEndpoint::default()), tpu_addr); + + // Send several full size packets and verify the server receives them. + let num_bytes = PACKET_DATA_SIZE; + let num_expected_packets: usize = 3; + let packets = vec![vec![0u8; PACKET_DATA_SIZE]; num_expected_packets]; + let client_stats = ClientStats::default(); + for packet in packets { + let _ = client + .send_buffer(&packet, &client_stats, connection_cache_stats.clone()) + .await; + } + + nonblocking_check_packets(receiver, num_bytes, num_expected_packets).await; + exit.store(true, Ordering::Relaxed); + + t.await.unwrap(); + // We close the connection after the server is down; this should not block + client.close().await; + } } From a19ca7be809427077acd26a4f337b2ee4e9515b4 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 21 Jul 2025 18:29:23 -0400 Subject: [PATCH 20/68] Removes BankIncrementalSnapshotPersistence from SnapshotPackage (#7061) --- core/src/accounts_hash_verifier.rs | 7 ++----- runtime/src/snapshot_bank_utils.rs | 4 ++-- runtime/src/snapshot_package.rs | 9 +-------- runtime/src/snapshot_utils.rs | 10 +++------- 4 files changed, 8 insertions(+), 22 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs
b/core/src/accounts_hash_verifier.rs index 7a4abc9697f2a7..a6db35e7e8dc1d 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -6,7 +6,6 @@ use { solana_clock::DEFAULT_MS_PER_SLOT, solana_measure::measure_us, solana_runtime::{ - serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_config::SnapshotConfig, snapshot_controller::SnapshotController, snapshot_package::{ @@ -177,7 +176,7 @@ impl AccountsHashVerifier { ) -> io::Result<()> { Self::purge_old_accounts_hashes(&accounts_package, snapshot_config); - Self::submit_for_packaging(accounts_package, pending_snapshot_packages, None); + Self::submit_for_packaging(accounts_package, pending_snapshot_packages); Ok(()) } @@ -216,7 +215,6 @@ impl AccountsHashVerifier { fn submit_for_packaging( accounts_package: AccountsPackage, pending_snapshot_packages: &Mutex, - bank_incremental_snapshot_persistence: Option, ) { if !matches!( accounts_package.package_kind, @@ -225,8 +223,7 @@ impl AccountsHashVerifier { return; } - let snapshot_package = - SnapshotPackage::new(accounts_package, bank_incremental_snapshot_persistence); + let snapshot_package = SnapshotPackage::new(accounts_package); pending_snapshot_packages .lock() .unwrap() diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index a228befa9f7ee3..8234fce41f132e 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -796,7 +796,7 @@ fn bank_to_full_snapshot_archive_with( snapshot_storages, status_cache_slot_deltas, ); - let snapshot_package = SnapshotPackage::new(accounts_package, None); + let snapshot_package = SnapshotPackage::new(accounts_package); let snapshot_config = SnapshotConfig { full_snapshot_archives_dir: full_snapshot_archives_dir.as_ref().to_path_buf(), @@ -854,7 +854,7 @@ pub fn bank_to_incremental_snapshot_archive( snapshot_storages, status_cache_slot_deltas, ); - let snapshot_package = SnapshotPackage::new(accounts_package, None); + let 
snapshot_package = SnapshotPackage::new(accounts_package); // Note: Since the snapshot_storages above are *only* the incremental storages, // this bank snapshot *cannot* be used by fastboot. diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 72a22e9930420d..8e48316b4c4afd 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -1,7 +1,6 @@ use { crate::{ bank::{Bank, BankFieldsToSerialize, BankHashStats, BankSlotDelta}, - serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, }, log::*, @@ -171,7 +170,6 @@ pub struct SnapshotPackage { pub accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next pub accounts_hash: AccountsHash, pub write_version: u64, - pub bank_incremental_snapshot_persistence: Option, // obsolete, will be removed next /// The instant this snapshot package was sent to the queue. /// Used to track how long snapshot packages wait before handling. @@ -179,10 +177,7 @@ pub struct SnapshotPackage { } impl SnapshotPackage { - pub fn new( - accounts_package: AccountsPackage, - bank_incremental_snapshot_persistence: Option, - ) -> Self { + pub fn new(accounts_package: AccountsPackage) -> Self { let AccountsPackageKind::Snapshot(snapshot_kind) = accounts_package.package_kind; let Some(snapshot_info) = accounts_package.snapshot_info else { panic!( @@ -207,7 +202,6 @@ impl SnapshotPackage { accounts_delta_hash: snapshot_info.accounts_delta_hash, bank_hash_stats: snapshot_info.bank_hash_stats, accounts_hash: AccountsHash(Hash::default()), // obsolete, will be removed next - bank_incremental_snapshot_persistence, write_version: snapshot_info.write_version, enqueued: Instant::now(), } @@ -230,7 +224,6 @@ impl SnapshotPackage { accounts_delta_hash: AccountsDeltaHash(Hash::default()), bank_hash_stats: BankHashStats::default(), accounts_hash: AccountsHash(Hash::default()), - bank_incremental_snapshot_persistence: None, write_version: u64::default(), enqueued: 
Instant::now(), } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 8f274191e9bfd9..5cb10e68387f5e 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2,9 +2,8 @@ use { crate::{ bank::{BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankSlotDelta}, serde_snapshot::{ - self, AccountsDbFields, BankIncrementalSnapshotPersistence, ExtraFieldsToSerialize, - SerializableAccountStorageEntry, SnapshotAccountsDbFields, SnapshotBankFields, - SnapshotStreams, + self, AccountsDbFields, ExtraFieldsToSerialize, SerializableAccountStorageEntry, + SnapshotAccountsDbFields, SnapshotBankFields, SnapshotStreams, }, snapshot_archive_info::{ FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfo, @@ -831,7 +830,6 @@ pub fn serialize_and_archive_snapshot_package( bank_hash_stats, accounts_delta_hash, accounts_hash, - bank_incremental_snapshot_persistence, write_version, enqueued: _, } = snapshot_package; @@ -845,7 +843,6 @@ pub fn serialize_and_archive_snapshot_package( bank_hash_stats, accounts_delta_hash, accounts_hash, - bank_incremental_snapshot_persistence.as_ref(), write_version, should_flush_and_hard_link_storages, )?; @@ -907,7 +904,6 @@ fn serialize_snapshot( bank_hash_stats: BankHashStats, accounts_delta_hash: AccountsDeltaHash, accounts_hash: AccountsHash, - bank_incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, write_version: u64, should_flush_and_hard_link_storages: bool, ) -> Result { @@ -960,7 +956,7 @@ fn serialize_snapshot( let versioned_epoch_stakes = mem::take(&mut bank_fields.versioned_epoch_stakes); let extra_fields = ExtraFieldsToSerialize { lamports_per_signature: bank_fields.fee_rate_governor.lamports_per_signature, - incremental_snapshot_persistence: bank_incremental_snapshot_persistence, + incremental_snapshot_persistence: None, obsolete_epoch_accounts_hash: None, versioned_epoch_stakes, accounts_lt_hash: 
Some(bank_fields.accounts_lt_hash.clone().into()), From 947bc870feaa007b09a081cf478d9b75b9cd0553 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 21 Jul 2025 19:07:26 -0400 Subject: [PATCH 21/68] Removes AccountsDb::verify_accounts_hash_and_lamports() (#7065) --- accounts-db/benches/accounts.rs | 37 +------- accounts-db/src/accounts.rs | 28 +------ accounts-db/src/accounts_db.rs | 113 ------------------------- accounts-db/src/accounts_db/tests.rs | 121 ++------------------------- runtime/src/serde_snapshot/tests.rs | 40 ++++----- 5 files changed, 25 insertions(+), 314 deletions(-) diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index 52b0d8e3ddc060..332460f9c9c3de 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -13,15 +13,13 @@ use { accounts::{AccountAddressFilter, Accounts}, accounts_db::{ test_utils::create_test_accounts, AccountFromStorage, AccountsDb, - VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, }, accounts_index::ScanConfig, ancestors::Ancestors, }, - solana_clock::Epoch, solana_hash::Hash, solana_pubkey::Pubkey, - solana_sysvar::epoch_schedule::EpochSchedule, std::{ collections::{HashMap, HashSet}, path::PathBuf, @@ -44,39 +42,6 @@ fn new_accounts_db(account_paths: Vec) -> AccountsDb { ) } -#[bench] -fn bench_accounts_hash_bank_hash(bencher: &mut Bencher) { - let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - let num_accounts = 60_000; - let slot = 0; - create_test_accounts(&accounts, &mut pubkeys, num_accounts, slot); - let ancestors = Ancestors::from(vec![0]); - let (_, total_lamports) = accounts - .accounts_db - .update_accounts_hash_for_tests(0, &ancestors, false, false); - accounts.add_root(slot); - accounts.accounts_db.flush_accounts_cache(true, Some(slot)); - bencher.iter(|| { - 
assert!(accounts - .accounts_db - .verify_accounts_hash_and_lamports_for_tests( - 0, - total_lamports, - VerifyAccountsHashAndLamportsConfig { - ancestors: &ancestors, - epoch_schedule: &EpochSchedule::default(), - epoch: Epoch::default(), - ignore_mismatch: false, - store_detailed_debug_info: false, - use_bg_thread_pool: false, - } - ) - .is_ok()) - }); -} - #[bench] fn bench_update_accounts_hash(bencher: &mut Bencher) { solana_logger::setup(); diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index c7003eafb09898..0b9022e4028873 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -3,8 +3,8 @@ use { account_locks::{validate_account_locks, AccountLocks}, account_storage::stored_account_info::StoredAccountInfo, accounts_db::{ - AccountStorageEntry, AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, - ScanAccountStorageData, ScanStorageResult, VerifyAccountsHashAndLamportsConfig, + AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData, + ScanStorageResult, }, accounts_index::{IndexKey, ScanConfig, ScanError, ScanOrder, ScanResult}, ancestors::Ancestors, @@ -306,30 +306,6 @@ impl Accounts { .collect()) } - /// Only called from startup or test code. 
- #[must_use] - pub fn verify_accounts_hash_and_lamports( - &self, - snapshot_storages_and_slots: (&[Arc], &[Slot]), - slot: Slot, - total_lamports: u64, - base: Option<(Slot, /*capitalization*/ u64)>, - config: VerifyAccountsHashAndLamportsConfig, - ) -> bool { - if let Err(err) = self.accounts_db.verify_accounts_hash_and_lamports( - snapshot_storages_and_slots, - slot, - total_lamports, - base, - config, - ) { - warn!("verify_accounts_hash failed: {err:?}, slot: {slot}"); - false - } else { - true - } - } - fn load_while_filtering bool>( collector: &mut Vec, some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>, diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1703cfa8685eb8..9cdbb40464e738 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6702,99 +6702,6 @@ impl AccountsDb { result } - /// Verify accounts hash at startup (or tests) - /// - /// Calculate accounts hash(es) and compare them to the values set at startup. - /// If `base` is `None`, only calculates the full accounts hash for `[0, slot]`. - /// If `base` is `Some`, calculate the full accounts hash for `[0, base slot]` - /// and then calculate the incremental accounts hash for `(base slot, slot]`. 
- pub fn verify_accounts_hash_and_lamports( - &self, - snapshot_storages_and_slots: (&[Arc], &[Slot]), - slot: Slot, - total_lamports: u64, - base: Option<(Slot, /*capitalization*/ u64)>, - config: VerifyAccountsHashAndLamportsConfig, - ) -> Result<(), AccountsHashVerificationError> { - let calc_config = CalcAccountsHashConfig { - use_bg_thread_pool: config.use_bg_thread_pool, - ancestors: Some(config.ancestors), - epoch_schedule: config.epoch_schedule, - epoch: config.epoch, - store_detailed_debug_info_on_failure: config.store_detailed_debug_info, - }; - let hash_mismatch_is_error = !config.ignore_mismatch; - - if let Some((base_slot, base_capitalization)) = base { - self.verify_accounts_hash_and_lamports( - snapshot_storages_and_slots, - base_slot, - base_capitalization, - None, - config, - )?; - - let storages_and_slots = snapshot_storages_and_slots - .0 - .iter() - .zip(snapshot_storages_and_slots.1.iter()) - .filter(|storage_and_slot| *storage_and_slot.1 > base_slot) - .map(|(storage, slot)| (storage, *slot)); - let sorted_storages = SortedStorages::new_with_slots(storages_and_slots, None, None); - let calculated_incremental_accounts_hash = self.calculate_incremental_accounts_hash( - &calc_config, - &sorted_storages, - HashStats::default(), - ); - let found_incremental_accounts_hash = self - .get_incremental_accounts_hash(slot) - .ok_or(AccountsHashVerificationError::MissingAccountsHash)?; - if calculated_incremental_accounts_hash != found_incremental_accounts_hash { - warn!( - "mismatched incremental accounts hash for slot {slot}: \ - {calculated_incremental_accounts_hash:?} (calculated) != \ - {found_incremental_accounts_hash:?} (expected)" - ); - if hash_mismatch_is_error { - return Err(AccountsHashVerificationError::MismatchedAccountsHash); - } - } - } else { - let storages_and_slots = snapshot_storages_and_slots - .0 - .iter() - .zip(snapshot_storages_and_slots.1.iter()) - .filter(|storage_and_slot| *storage_and_slot.1 <= slot) - .map(|(storage, slot)| 
(storage, *slot)); - let sorted_storages = SortedStorages::new_with_slots(storages_and_slots, None, None); - let (calculated_accounts_hash, calculated_lamports) = - self.calculate_accounts_hash(&calc_config, &sorted_storages, HashStats::default()); - if calculated_lamports != total_lamports { - warn!( - "Mismatched total lamports: {total_lamports} calculated: {calculated_lamports}" - ); - return Err(AccountsHashVerificationError::MismatchedTotalLamports( - calculated_lamports, - total_lamports, - )); - } - let (found_accounts_hash, _) = self - .get_accounts_hash(slot) - .ok_or(AccountsHashVerificationError::MissingAccountsHash)?; - if calculated_accounts_hash != found_accounts_hash { - warn!( - "Mismatched accounts hash for slot {slot}: {calculated_accounts_hash:?} \ - (calculated) != {found_accounts_hash:?} (expected)" - ); - if hash_mismatch_is_error { - return Err(AccountsHashVerificationError::MismatchedAccountsHash); - } - } - } - - Ok(()) - } - /// Returns all of the accounts' pubkeys for a given slot pub fn get_pubkeys_for_slot(&self, slot: Slot) -> Vec { let scan_result = self.scan_cache_storage_fallback( @@ -8726,26 +8633,6 @@ impl AccountsDb { } } - pub fn verify_accounts_hash_and_lamports_for_tests( - &self, - slot: Slot, - total_lamports: u64, - config: VerifyAccountsHashAndLamportsConfig, - ) -> Result<(), AccountsHashVerificationError> { - let snapshot_storages = self.get_storages(..); - let snapshot_storages_and_slots = ( - snapshot_storages.0.as_slice(), - snapshot_storages.1.as_slice(), - ); - self.verify_accounts_hash_and_lamports( - snapshot_storages_and_slots, - slot, - total_lamports, - None, - config, - ) - } - pub fn uncleaned_pubkeys(&self) -> &DashMap, BuildNoHashHasher> { &self.uncleaned_pubkeys } diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 7d7bfbcdc506b4..03f8f952352668 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -13,14 +13,12 @@ use { 
}, storable_accounts::AccountForStorage, }, - assert_matches::assert_matches, itertools::Itertools, rand::{prelude::SliceRandom, thread_rng, Rng}, solana_account::{ accounts_equal, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, DUMMY_INHERITABLE_ACCOUNT_FIELDS, }, - solana_hash::HASH_BYTES, solana_pubkey::PUBKEY_BYTES, std::{ hash::DefaultHasher, @@ -2140,52 +2138,6 @@ impl CalcAccountsHashConfig<'_> { } } -#[test] -fn test_verify_accounts_hash() { - solana_logger::setup(); - let db = AccountsDb::new_single_for_tests(); - - let key = solana_pubkey::new_rand(); - let some_data_len = 0; - let some_slot: Slot = 0; - let account = AccountSharedData::new(1, some_data_len, &key); - let ancestors = vec![(some_slot, 0)].into_iter().collect(); - let epoch_schedule = EpochSchedule::default(); - let epoch = Epoch::default(); - - db.store_for_tests(some_slot, &[(&key, &account)]); - db.add_root_and_flush_write_cache(some_slot); - let (_, capitalization) = db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); - - let config = - VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); - - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()), - Ok(_) - ); - - db.accounts_hashes.lock().unwrap().remove(&some_slot); - - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()), - Err(AccountsHashVerificationError::MissingAccountsHash) - ); - - db.set_accounts_hash( - some_slot, - ( - AccountsHash(Hash::new_from_array([0xca; HASH_BYTES])), - capitalization, - ), - ); - - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config), - Err(AccountsHashVerificationError::MismatchedAccountsHash) - ); -} - #[test] fn test_verify_bank_capitalization() { for pass in 0..2 { @@ -2197,19 +2149,15 @@ fn test_verify_bank_capitalization() { let some_slot: Slot = 0; let account = AccountSharedData::new(1, 
some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); - let epoch_schedule = EpochSchedule::default(); - let epoch = Epoch::default(); - let config = - VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); db.store_for_tests(some_slot, &[(&key, &account)]); if pass == 0 { db.add_root_and_flush_write_cache(some_slot); db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()), - Ok(_) + assert_eq!( + db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot), + 1 ); continue; } @@ -2225,69 +2173,12 @@ fn test_verify_bank_capitalization() { db.add_root_and_flush_write_cache(some_slot); db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 2, config.clone()), - Ok(_) - ); - - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 10, config), - Err(AccountsHashVerificationError::MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10 + assert_eq!( + db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot), + 2 ); } } - -#[test] -fn test_verify_accounts_hash_no_account() { - solana_logger::setup(); - let db = AccountsDb::new_single_for_tests(); - - let some_slot: Slot = 0; - let ancestors = vec![(some_slot, 0)].into_iter().collect(); - - db.add_root(some_slot); - db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); - - let epoch_schedule = EpochSchedule::default(); - let epoch = Epoch::default(); - let config = - VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); - - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 0, config), - Ok(_) - ); -} - -#[test] -fn test_verify_accounts_hash_bad_account_hash() { - solana_logger::setup(); - let db = 
AccountsDb::new_single_for_tests(); - - let key = Pubkey::default(); - let some_data_len = 0; - let some_slot: Slot = 0; - let account = AccountSharedData::new(1, some_data_len, &key); - let ancestors = vec![(some_slot, 0)].into_iter().collect(); - - let accounts = &[(&key, &account)][..]; - db.update_accounts_hash_for_tests(some_slot, &ancestors, false, false); - - // provide bogus account hashes - db.store_cached((some_slot, accounts)); - db.add_root_and_flush_write_cache(some_slot); - - let epoch_schedule = EpochSchedule::default(); - let epoch = Epoch::default(); - let config = - VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); - - assert_matches!( - db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config), - Err(AccountsHashVerificationError::MismatchedAccountsHash) - ); -} - #[test] fn test_storage_finder() { solana_logger::setup(); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index ce50e9f540179b..a9620e5113a956 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -19,13 +19,13 @@ mod serde_snapshot_tests { accounts::Accounts, accounts_db::{ get_temp_accounts_paths, test_utils::create_test_accounts, AccountStorageEntry, - AccountsDb, AtomicAccountsFileId, VerifyAccountsHashAndLamportsConfig, + AccountsDb, AtomicAccountsFileId, }, accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, accounts_hash::AccountsHash, ancestors::Ancestors, }, - solana_clock::{Epoch, Slot}, + solana_clock::Slot, solana_epoch_schedule::EpochSchedule, solana_hash::Hash, solana_nohash_hasher::BuildNoHashHasher, @@ -520,15 +520,10 @@ mod serde_snapshot_tests { accounts.assert_load_account(current_slot, purged_pubkey2, 0); accounts.assert_load_account(current_slot, dummy_pubkey, dummy_lamport); - let ancestors = Ancestors::default(); - let epoch_schedule = EpochSchedule::default(); - let epoch = Epoch::default(); - let config = - 
VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); - - accounts - .verify_accounts_hash_and_lamports_for_tests(4, 1222, config) - .unwrap(); + let calculated_capitalization = + accounts.calculate_capitalization_at_startup_from_index(&Ancestors::default(), 4); + let expected_capitalization = 1_222; + assert_eq!(calculated_capitalization, expected_capitalization); } #[test_case(StorageAccess::Mmap)] @@ -812,25 +807,22 @@ mod serde_snapshot_tests { ); let no_ancestors = Ancestors::default(); - let epoch_schedule = EpochSchedule::default(); - let epoch = Epoch::default(); - let config = VerifyAccountsHashAndLamportsConfig::new_for_test( - &no_ancestors, - &epoch_schedule, - epoch, - ); accounts.update_accounts_hash_for_tests(current_slot, &no_ancestors, false, false); - accounts - .verify_accounts_hash_and_lamports_for_tests(current_slot, 22300, config.clone()) - .unwrap(); + let calculated_capitalization = accounts + .calculate_capitalization_at_startup_from_index(&no_ancestors, current_slot); + let expected_capitalization = 22_300; + assert_eq!(calculated_capitalization, expected_capitalization); + + let accounts_lt_hash_pre = accounts + .calculate_accounts_lt_hash_at_startup_from_index(&no_ancestors, current_slot); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); - accounts - .verify_accounts_hash_and_lamports_for_tests(current_slot, 22300, config) - .unwrap(); + let accounts_lt_hash_post = accounts + .calculate_accounts_lt_hash_at_startup_from_index(&no_ancestors, current_slot); + assert_eq!(accounts_lt_hash_pre, accounts_lt_hash_post); // repeating should be no-op accounts.shrink_all_slots(*startup, &epoch_schedule, None); From b1331da1c00b32a9cbb61735c5a26fa5651ac46c Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 21 Jul 2025 19:54:15 -0500 Subject: [PATCH 22/68] perf: Resolve Rust 1.88 clippy lints and format strings (#7044) - Run `cargo clippy --fix --tests` with Rust 1.88.0 
set in `rust-toolchain.toml` - Run `cargo fmt` with `format_strings = true` set in `rustfmt.toml` --- perf/src/deduper.rs | 2 +- perf/src/lib.rs | 8 +++++--- perf/src/packet.rs | 2 +- perf/src/perf_libs.rs | 18 +++++++++--------- perf/src/recycler.rs | 2 +- perf/src/sigverify.rs | 22 +++++++++++----------- perf/src/thread.rs | 5 ++--- 7 files changed, 30 insertions(+), 29 deletions(-) diff --git a/perf/src/deduper.rs b/perf/src/deduper.rs index a4853c3efdfc37..da4d5dfb9c5d66 100644 --- a/perf/src/deduper.rs +++ b/perf/src/deduper.rs @@ -171,7 +171,7 @@ mod tests { let mut batches = to_packet_batches(&(0..1000).map(|_| test_tx()).collect::>(), 128); discard += dedup_packets_and_count_discards(&filter, &mut batches) as usize; - trace!("{} {}", i, discard); + trace!("{i} {discard}"); if filter.popcount.load(Ordering::Relaxed) > capacity { break; } diff --git a/perf/src/lib.rs b/perf/src/lib.rs index f9b0b12772b680..864d541c8746f1 100644 --- a/perf/src/lib.rs +++ b/perf/src/lib.rs @@ -68,8 +68,9 @@ pub fn report_target_features() { info!("AVX detected"); } else { error!( - "Incompatible CPU detected: missing AVX support. Please build from source on the target" - ); + "Incompatible CPU detected: missing AVX support. Please build from source on \ + the target" + ); std::process::abort(); } } @@ -83,7 +84,8 @@ pub fn report_target_features() { info!("AVX2 detected"); } else { error!( - "Incompatible CPU detected: missing AVX2 support. Please build from source on the target" + "Incompatible CPU detected: missing AVX2 support. Please build from source on \ + the target" ); std::process::abort(); } diff --git a/perf/src/packet.rs b/perf/src/packet.rs index 68277d46ec5a63..b7d9d31be13317 100644 --- a/perf/src/packet.rs +++ b/perf/src/packet.rs @@ -685,7 +685,7 @@ impl PinnedPacketBatch { // TODO: This should never happen. Instead the caller should // break the payload into smaller messages, and here any errors // should be propagated. - error!("Couldn't write to packet {:?}. 
Data skipped.", e); + error!("Couldn't write to packet {e:?}. Data skipped."); packet.meta_mut().set_discard(true); } } else { diff --git a/perf/src/perf_libs.rs b/perf/src/perf_libs.rs index feedc6bc03b875..a9d336bfa255dd 100644 --- a/perf/src/perf_libs.rs +++ b/perf/src/perf_libs.rs @@ -84,10 +84,10 @@ pub struct Api<'a> { static API: OnceLock> = OnceLock::new(); fn init(name: &OsStr) { - info!("Loading {:?}", name); + info!("Loading {name:?}"); API.get_or_init(|| { unsafe { Container::load(name) }.unwrap_or_else(|err| { - error!("Unable to load {:?}: {}", name, err); + error!("Unable to load {name:?}: {err}"); std::process::exit(1); }) }); @@ -97,10 +97,10 @@ pub fn locate_perf_libs() -> Option { let exe = env::current_exe().expect("Unable to get executable path"); let perf_libs = exe.parent().unwrap().join("perf-libs"); if perf_libs.is_dir() { - info!("perf-libs found at {:?}", perf_libs); + info!("perf-libs found at {perf_libs:?}"); return Some(perf_libs); } - warn!("{:?} does not exist", perf_libs); + warn!("{perf_libs:?} does not exist"); None } @@ -108,10 +108,10 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option { if let Ok(cuda_home) = env::var("CUDA_HOME") { let path = PathBuf::from(cuda_home); if path.is_dir() { - info!("Using CUDA_HOME: {:?}", path); + info!("Using CUDA_HOME: {path:?}"); return Some(path); } - warn!("Ignoring CUDA_HOME, not a path: {:?}", path); + warn!("Ignoring CUDA_HOME, not a path: {path:?}"); } // Search /usr/local for a `cuda-` directory that matches a perf-libs subdirectory @@ -130,7 +130,7 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option { continue; } - info!("CUDA installation found at {:?}", cuda_home); + info!("CUDA installation found at {cuda_home:?}"); return Some(cuda_home); } None @@ -141,7 +141,7 @@ pub fn append_to_ld_library_path(mut ld_library_path: String) { ld_library_path.push(':'); ld_library_path.push_str(&env_value); } - info!("setting ld_library_path to: {:?}", ld_library_path); + info!("setting 
ld_library_path to: {ld_library_path:?}"); env::set_var("LD_LIBRARY_PATH", ld_library_path); } @@ -154,7 +154,7 @@ pub fn init_cuda() { // to ensure the correct CUDA version is used append_to_ld_library_path(cuda_lib64_dir.to_str().unwrap_or("").to_string()) } else { - warn!("CUDA lib64 directory does not exist: {:?}", cuda_lib64_dir); + warn!("CUDA lib64 directory does not exist: {cuda_lib64_dir:?}"); } let libcuda_crypt = perf_libs_path diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 0a31df16bf2a49..0b1c2209860d15 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -47,7 +47,7 @@ pub struct RecyclerX { impl Default for RecyclerX { fn default() -> RecyclerX { let id = thread_rng().gen_range(0..1000); - trace!("new recycler..{}", id); + trace!("new recycler..{id}"); RecyclerX { gc: Mutex::default(), stats: RecyclerStats::default(), diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 261f8ea7dd9933..80a7f7e80dc862 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -510,7 +510,7 @@ pub fn shrink_batches(batches: Vec) -> Vec { } pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, packet_count: usize) { - debug!("CPU ECDSA for {}", packet_count); + debug!("CPU ECDSA for {packet_count}"); PAR_THREAD_POOL.install(|| { batches.par_iter_mut().flatten().for_each(|mut packet| { if !packet.meta().discard() && !verify_packet(&mut packet, reject_non_vote) { @@ -522,7 +522,7 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) { let packet_count = count_packets_in_batches(batches); - debug!("disabled ECDSA for {}", packet_count); + debug!("disabled ECDSA for {packet_count}"); PAR_THREAD_POOL.install(|| { batches.par_iter_mut().flatten().for_each(|mut packet| { packet.meta_mut().set_discard(false); @@ -613,7 +613,7 @@ pub fn ed25519_verify( let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, 
sig_lens) = generate_offsets(batches, recycler, reject_non_vote); - debug!("CUDA ECDSA for {}", valid_packet_count); + debug!("CUDA ECDSA for {valid_packet_count}"); debug!("allocating out.."); let mut out = recycler_out.allocate("out_buffer"); out.set_pinnable(); @@ -642,7 +642,7 @@ pub fn ed25519_verify( num_packets = num_packets.saturating_add(batch.len()); } out.resize(signature_offsets.len(), 0); - trace!("Starting verify num packets: {}", num_packets); + trace!("Starting verify num packets: {num_packets}"); trace!("elem len: {}", elems.len() as u32); trace!("packet sizeof: {}", size_of::() as u32); trace!("len offset: {}", PACKET_DATA_SIZE as u32); @@ -662,7 +662,7 @@ pub fn ed25519_verify( USE_NON_DEFAULT_STREAM, ); if res != 0 { - trace!("RETURN!!!: {}", res); + trace!("RETURN!!!: {res}"); } } trace!("done verify"); @@ -879,7 +879,7 @@ mod tests { let mut tx = Transaction::new_unsigned(message); info!("message: {:?}", tx.message_data()); - info!("tx: {:?}", tx); + info!("tx: {tx:?}"); let sig = keypair1.try_sign_message(&tx.message_data()).unwrap(); tx.signatures = vec![sig; NUM_SIG]; @@ -1734,7 +1734,7 @@ mod tests { let test_cases = set_discards.iter().zip(&expect_valids).enumerate(); for (i, (set_discard, (expect_batch_count, expect_valid_packets))) in test_cases { - debug!("test_shrink case: {}", i); + debug!("test_shrink case: {i}"); let mut batches = to_packet_batches( &(0..PACKET_COUNT).map(|_| test_tx()).collect::>(), PACKETS_PER_BATCH, @@ -1747,18 +1747,18 @@ mod tests { .for_each(|(j, mut p)| p.meta_mut().set_discard(set_discard(i, j))) }); assert_eq!(count_valid_packets(&batches), *expect_valid_packets); - debug!("show valid packets for case {}", i); + debug!("show valid packets for case {i}"); batches.iter_mut().enumerate().for_each(|(i, b)| { b.iter_mut().enumerate().for_each(|(j, p)| { if !p.meta().discard() { - trace!("{} {}", i, j) + trace!("{i} {j}") } }) }); - debug!("done show valid packets for case {}", i); + debug!("done show valid 
packets for case {i}"); let batches = shrink_batches(batches); let shrunken_batch_count = batches.len(); - debug!("shrunk batch test {} count: {}", i, shrunken_batch_count); + debug!("shrunk batch test {i} count: {shrunken_batch_count}"); assert_eq!(shrunken_batch_count, *expect_batch_count); assert_eq!(count_valid_packets(&batches), *expect_valid_packets); } diff --git a/perf/src/thread.rs b/perf/src/thread.rs index 36cfde10826b14..7a101390ee5024 100644 --- a/perf/src/thread.rs +++ b/perf/src/thread.rs @@ -81,9 +81,8 @@ where Ok(()) } else { Err(String::from( - "niceness adjustment supported only on Linux; negative adjustment \ - (priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \ - for details)", + "niceness adjustment supported only on Linux; negative adjustment (priority increase) \ + requires root or CAP_SYS_NICE (see `man 7 capabilities` for details)", )) } } From 00c445afcb0f9ea4fb7dd07ad326b6c20f4a75e8 Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:59:19 +0800 Subject: [PATCH 23/68] fix: typos (#7013) --- .../sigma_proofs/grouped_ciphertext_validity/handles_3.rs | 4 ++-- .../sigma_proofs/ciphertext_ciphertext_equality_proof.rs | 2 +- .../grouped_ciphertext_validity_proof/handles_3.rs | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs index b3de729687fce3..1c50e7204649bc 100644 --- a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs +++ b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs @@ -224,10 +224,10 @@ impl GroupedCiphertext3HandlesValidityProof { &Y_1, // Y_1 P_second, // P_second D_second, // D_second - &Y_2, // Y_1 + &Y_2, // Y_2 P_third, // P_third D_third, // D_third - &Y_3, // Y_2 + &Y_3, // Y_3 ], ); diff --git 
a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs index bcb9bec860e517..03f8f53af6beab 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs @@ -197,7 +197,7 @@ impl CiphertextCiphertextEqualityProof { &(&ww * &self.z_r), // ww * z_r &(&ww_negated * &c), // -ww * c &ww_negated, // -ww - &(&www * &self.z_r), // z_r + &(&www * &self.z_r), // www * z_r &(&www_negated * &c), // -www * c &www_negated, ], diff --git a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs index 593316486dc083..3acb159b2fccb4 100644 --- a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs +++ b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs @@ -217,15 +217,15 @@ impl GroupedCiphertext3HandlesValidityProof { &G, // G C, // C &Y_0, // Y_0 - P_source, // P_destination - D_source, // D_destination + P_source, // P_source + D_source, // D_source &Y_1, // Y_1 P_destination, // P_destination D_destination, // D_destination - &Y_2, // Y_1 + &Y_2, // Y_2 P_auditor, // P_auditor D_auditor, // D_auditor - &Y_3, // Y_2 + &Y_3, // Y_3 ], ); From e192d14aef889fa6129af35b4ddee8cc4af22c42 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Tue, 22 Jul 2025 10:39:05 +0300 Subject: [PATCH 24/68] GCE: fix gce.sh startup (#7043) replace random deb for earlyoom with apt-get install update to use 24.04 ubuntu image remove docker requirement for any host not running 22.04 (as long as you have compatible libc you should be ok) fix the SSH startup script to correctly reload sshd --- net/net.sh | 3 +-- net/scripts/gce-provider.sh | 3 +-- net/scripts/install-earlyoom.sh | 3 +-- net/scripts/network-config.sh | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) 
diff --git a/net/net.sh b/net/net.sh index 235d485555c5ea..b8f743bf6d566d 100755 --- a/net/net.sh +++ b/net/net.sh @@ -189,9 +189,8 @@ annotateBlockexplorerUrl() { } build() { - supported=("22.04") declare MAYBE_DOCKER= - if [[ $(uname) != Linux || ! " ${supported[*]} " =~ $(lsb_release -sr) ]]; then + if [[ $(uname) != Linux ]]; then # shellcheck source=ci/docker/env.sh source "$SOLANA_ROOT"/ci/docker/env.sh MAYBE_DOCKER="ci/docker-run.sh ${CI_DOCKER_IMAGE:?}" diff --git a/net/scripts/gce-provider.sh b/net/scripts/gce-provider.sh index 376febdb981d5b..46e2ad3c2510ed 100755 --- a/net/scripts/gce-provider.sh +++ b/net/scripts/gce-provider.sh @@ -170,7 +170,6 @@ cloud_CreateInstances() { declare optionalBootDiskType="${10:-pd-ssd}" declare optionalAdditionalDiskSize="${11}" declare optionalPreemptible="${12}" - #declare sshPrivateKey="${13}" # unused if $enableGpu; then # Custom Ubuntu 20.04 LTS image with CUDA 10.2 installed @@ -185,7 +184,7 @@ cloud_CreateInstances() { echo "Error: Not supported" >&2 exit 1 else - imageName="ubuntu-2204-jammy-v20241119 --image-project ubuntu-os-cloud" + imageName="ubuntu-2404-noble-amd64-v20250709 --image-project ubuntu-os-cloud" fi declare -a nodes diff --git a/net/scripts/install-earlyoom.sh b/net/scripts/install-earlyoom.sh index 5605bc9cb58e28..bf5946672f0dd4 100755 --- a/net/scripts/install-earlyoom.sh +++ b/net/scripts/install-earlyoom.sh @@ -15,8 +15,7 @@ echo kernel.sysrq=1 >> /etc/sysctl.conf if command -v earlyoom; then systemctl status earlyoom else - wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.2-*_amd64.deb' -e robots=off -nd - apt install --quiet --yes ./earlyoom_1.2-*_amd64.deb + apt-get install --quiet --yes earlyoom cat > earlyoom < Date: Tue, 22 Jul 2025 03:05:57 -0500 Subject: [PATCH 25/68] gossip: Rename threadpool from solRunGossip to solGossipRun (#7072) --- gossip/src/cluster_info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index a3bca722f7bb6b..e9af4d259fb06c 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1431,7 +1431,7 @@ impl ClusterInfo { ) -> JoinHandle<()> { let thread_pool = ThreadPoolBuilder::new() .num_threads(std::cmp::min(get_thread_count(), 8)) - .thread_name(|i| format!("solRunGossip{i:02}")) + .thread_name(|i| format!("solGossipRun{i:02}")) .build() .unwrap(); let mut epoch_specs = bank_forks.map(EpochSpecs::from); From eba94cad016a777b0ce107ea81e25a5d2cd52e65 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 22 Jul 2025 19:46:39 +0800 Subject: [PATCH 26/68] agave-validator: add args tests for run (part 3) (#6918) * impl FromClapArgMatches for blockstore_options * use blockstore_options directly * return clap errors directly * add an invalid case for rocksdb ledger compression * move tests to correct places * use test_case to improve verify_args_struct_by_command_run_with_wal_recovery_mode * verify_args_struct_by_command_run_with_wal_recovery_mode_invalid * use test_case to improve verify_args_struct_by_command_run_with_rocksdb_ledger_compression --- Cargo.lock | 1 + ledger/src/blockstore_options.rs | 8 +- validator/Cargo.toml | 1 + validator/src/cli/thread_args.rs | 18 +- validator/src/commands/run/args.rs | 41 +++- .../commands/run/args/blockstore_options.rs | 205 ++++++++++++++++++ validator/src/commands/run/execute.rs | 42 +--- 7 files changed, 253 insertions(+), 63 deletions(-) create mode 100644 validator/src/commands/run/args/blockstore_options.rs diff --git a/Cargo.lock b/Cargo.lock index 4e81f130b2daa9..0b21f4b8d2eac5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,6 +485,7 @@ dependencies = [ "spl-token-2022", "symlink", "tempfile", + "test-case", "thiserror 2.0.12", "tikv-jemallocator", "tokio", diff --git a/ledger/src/blockstore_options.rs b/ledger/src/blockstore_options.rs index 15a9ff1041ed40..349c5658ecebc4 100644 --- a/ledger/src/blockstore_options.rs +++ 
b/ledger/src/blockstore_options.rs @@ -7,7 +7,7 @@ use { /// The subdirectory under ledger directory where the Blockstore lives pub const BLOCKSTORE_DIRECTORY_ROCKS_LEVEL: &str = "rocksdb"; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct BlockstoreOptions { // The access type of blockstore. Default: Primary pub access_type: AccessType, @@ -59,7 +59,7 @@ pub enum AccessType { Secondary, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum BlockstoreRecoveryMode { TolerateCorruptedTailRecords, AbsoluteConsistency, @@ -99,7 +99,7 @@ impl From for DBRecoveryMode { /// Options for LedgerColumn. /// Each field might also be used as a tag that supports group-by operation when /// reporting metrics. -#[derive(Default, Debug, Clone)] +#[derive(Default, Debug, Clone, PartialEq)] pub struct LedgerColumnOptions { // Determine the way to compress column families which are eligible for // compression. @@ -122,7 +122,7 @@ impl LedgerColumnOptions { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum BlockstoreCompressionType { None, Snappy, diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 9ee547609ba7db..56f91bbebf9e16 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -106,3 +106,4 @@ solana-time-utils = { workspace = true } spl-generic-token = { workspace = true } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } tempfile = { workspace = true } +test-case = { workspace = true } diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs index bc2e03381efddb..6d0bbd23fd6fd0 100644 --- a/validator/src/cli/thread_args.rs +++ b/validator/src/cli/thread_args.rs @@ -102,8 +102,6 @@ pub struct NumThreadConfig { pub rayon_global_threads: NonZeroUsize, pub replay_forks_threads: NonZeroUsize, pub replay_transactions_threads: NonZeroUsize, - pub rocksdb_compaction_threads: NonZeroUsize, - pub rocksdb_flush_threads: NonZeroUsize, pub 
tpu_transaction_forward_receive_threads: NonZeroUsize, pub tpu_transaction_receive_threads: NonZeroUsize, pub tpu_vote_transaction_receive_threads: NonZeroUsize, @@ -146,16 +144,6 @@ pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { ReplayTransactionsThreadsArg::NAME, NonZeroUsize ), - rocksdb_compaction_threads: value_t_or_exit!( - matches, - RocksdbCompactionThreadsArg::NAME, - NonZeroUsize - ), - rocksdb_flush_threads: value_t_or_exit!( - matches, - RocksdbFlushThreadsArg::NAME, - NonZeroUsize - ), tpu_transaction_forward_receive_threads: value_t_or_exit!( matches, TpuTransactionForwardReceiveThreadArgs::NAME, @@ -186,7 +174,7 @@ pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { } /// Configuration for CLAP arguments that control the number of threads for various functions -trait ThreadArg { +pub trait ThreadArg { /// The argument's name const NAME: &'static str; /// The argument's long name @@ -314,7 +302,7 @@ impl ThreadArg for ReplayTransactionsThreadsArg { } } -struct RocksdbCompactionThreadsArg; +pub struct RocksdbCompactionThreadsArg; impl ThreadArg for RocksdbCompactionThreadsArg { const NAME: &'static str = "rocksdb_compaction_threads"; const LONG_NAME: &'static str = "rocksdb-compaction-threads"; @@ -325,7 +313,7 @@ impl ThreadArg for RocksdbCompactionThreadsArg { } } -struct RocksdbFlushThreadsArg; +pub struct RocksdbFlushThreadsArg; impl ThreadArg for RocksdbFlushThreadsArg { const NAME: &'static str = "rocksdb_flush_threads"; const LONG_NAME: &'static str = "rocksdb-flush-threads"; diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index da5e5d63979547..75f89bb968c6bd 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -21,7 +21,7 @@ use { validator::{BlockProductionMethod, BlockVerificationMethod, TransactionStructure}, }, solana_keypair::Keypair, - solana_ledger::use_snapshot_archives_at_startup, + 
solana_ledger::{blockstore_options::BlockstoreOptions, use_snapshot_archives_at_startup}, solana_pubkey::Pubkey, solana_runtime::snapshot_utils::{SnapshotVersion, SUPPORTED_ARCHIVE_COMPRESSION}, solana_send_transaction_service::send_transaction_service::{ @@ -35,6 +35,7 @@ use { const EXCLUDE_KEY: &str = "account-index-exclude-key"; const INCLUDE_KEY: &str = "account-index-include-key"; +pub mod blockstore_options; pub mod rpc_bootstrap_config; #[derive(Debug, PartialEq)] @@ -44,6 +45,7 @@ pub struct RunArgs { pub entrypoints: Vec, pub known_validators: Option>, pub rpc_bootstrap_config: RpcBootstrapConfig, + pub blockstore_options: BlockstoreOptions, } impl FromClapArgMatches for RunArgs { @@ -87,6 +89,7 @@ impl FromClapArgMatches for RunArgs { entrypoints, known_validators, rpc_bootstrap_config: RpcBootstrapConfig::from_clap_arg_match(matches)?, + blockstore_options: BlockstoreOptions::from_clap_arg_match(matches)?, }) } } @@ -1738,6 +1741,7 @@ fn validators_set( mod tests { use { super::*, + crate::cli::thread_args::thread_args, std::net::{IpAddr, Ipv4Addr}, }; @@ -1754,6 +1758,7 @@ mod tests { entrypoints, known_validators, rpc_bootstrap_config: RpcBootstrapConfig::default(), + blockstore_options: BlockstoreOptions::default(), } } } @@ -1766,6 +1771,7 @@ mod tests { entrypoints: self.entrypoints.clone(), known_validators: self.known_validators.clone(), rpc_bootstrap_config: self.rpc_bootstrap_config.clone(), + blockstore_options: self.blockstore_options.clone(), } } } @@ -1775,8 +1781,11 @@ mod tests { args: Vec<&str>, expected_args: RunArgs, ) { + let app = add_args(App::new("run_command"), default_args) + .args(&thread_args(&default_args.thread_args)); + crate::commands::tests::verify_args_struct_by_command::( - add_args(App::new("run_command"), default_args), + app, [&["run_command"], &args[..]].concat(), expected_args, ); @@ -1817,7 +1826,7 @@ mod tests { } } - fn verify_args_struct_by_command_run_with_identity_setup( + pub fn 
verify_args_struct_by_command_run_with_identity_setup( default_run_args: RunArgs, args: Vec<&str>, expected_args: RunArgs, @@ -1834,6 +1843,32 @@ mod tests { verify_args_struct_by_command(&default_args, args, expected_args); } + pub fn verify_args_struct_by_command_run_is_error_with_identity_setup( + default_run_args: RunArgs, + args: Vec<&str>, + ) { + let default_args = DefaultArgs::default(); + + // generate a keypair + let tmp_dir = tempfile::tempdir().unwrap(); + let file = tmp_dir.path().join("id.json"); + let keypair = default_run_args.identity_keypair.insecure_clone(); + solana_keypair::write_keypair_file(&keypair, &file).unwrap(); + + let app = add_args(App::new("run_command"), &default_args) + .args(&thread_args(&default_args.thread_args)); + + crate::commands::tests::verify_args_struct_by_command_is_error::( + app, + [ + &["run_command"], + &["--identity", file.to_str().unwrap()][..], + &args[..], + ] + .concat(), + ); + } + #[test] fn verify_args_struct_by_command_run_with_log() { let default_run_args = RunArgs::default(); diff --git a/validator/src/commands/run/args/blockstore_options.rs b/validator/src/commands/run/args/blockstore_options.rs new file mode 100644 index 00000000000000..189598b8808da1 --- /dev/null +++ b/validator/src/commands/run/args/blockstore_options.rs @@ -0,0 +1,205 @@ +use { + crate::{ + cli::thread_args::{RocksdbCompactionThreadsArg, RocksdbFlushThreadsArg, ThreadArg}, + commands::{FromClapArgMatches, Result}, + }, + clap::{value_t, ArgMatches}, + solana_ledger::blockstore_options::{ + AccessType, BlockstoreCompressionType, BlockstoreOptions, BlockstoreRecoveryMode, + LedgerColumnOptions, + }, + std::num::NonZeroUsize, +}; + +impl FromClapArgMatches for BlockstoreOptions { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + let recovery_mode = matches + .value_of("wal_recovery_mode") + .map(BlockstoreRecoveryMode::from); + + let column_options = LedgerColumnOptions { + compression_type: match 
matches.value_of("rocksdb_ledger_compression") { + None => BlockstoreCompressionType::default(), + Some(ledger_compression_string) => match ledger_compression_string { + "none" => BlockstoreCompressionType::None, + "snappy" => BlockstoreCompressionType::Snappy, + "lz4" => BlockstoreCompressionType::Lz4, + "zlib" => BlockstoreCompressionType::Zlib, + _ => { + return Err(crate::commands::Error::Dynamic( + Box::::from(format!( + "Unsupported ledger_compression: {ledger_compression_string}" + )), + )); + } + }, + }, + rocks_perf_sample_interval: value_t!(matches, "rocksdb_perf_sample_interval", usize)?, + }; + + let rocksdb_compaction_threads = + value_t!(matches, RocksdbCompactionThreadsArg::NAME, NonZeroUsize)?; + + let rocksdb_flush_threads = value_t!(matches, RocksdbFlushThreadsArg::NAME, NonZeroUsize)?; + + Ok(BlockstoreOptions { + recovery_mode, + column_options, + // The validator needs to open many files, check that the process has + // permission to do so in order to fail quickly and give a direct error + enforce_ulimit_nofile: true, + // The validator needs primary (read/write) + access_type: AccessType::Primary, + num_rocksdb_compaction_threads: rocksdb_compaction_threads, + num_rocksdb_flush_threads: rocksdb_flush_threads, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::commands::run::args::{ + tests::{ + verify_args_struct_by_command_run_is_error_with_identity_setup, + verify_args_struct_by_command_run_with_identity_setup, + }, + RunArgs, + }, + test_case::test_case, + }; + + #[test_case( + "tolerate_corrupted_tail_records", + BlockstoreRecoveryMode::TolerateCorruptedTailRecords + )] + #[test_case("absolute_consistency", BlockstoreRecoveryMode::AbsoluteConsistency)] + #[test_case("point_in_time", BlockstoreRecoveryMode::PointInTime)] + #[test_case( + "skip_any_corrupted_record", + BlockstoreRecoveryMode::SkipAnyCorruptedRecord + )] + fn verify_args_struct_by_command_run_with_wal_recovery_mode_valid( + arg_value: &str, + expected_mode: 
BlockstoreRecoveryMode, + ) { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + blockstore_options: BlockstoreOptions { + recovery_mode: Some(expected_mode), + ..default_run_args.blockstore_options.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--wal-recovery-mode", arg_value], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_wal_recovery_mode_invalid() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + verify_args_struct_by_command_run_is_error_with_identity_setup( + default_run_args, + vec!["--wal-recovery-mode", "invalid"], + ); + } + + #[test_case("none", BlockstoreCompressionType::None)] + #[test_case("snappy", BlockstoreCompressionType::Snappy)] + #[test_case("lz4", BlockstoreCompressionType::Lz4)] + #[test_case("zlib", BlockstoreCompressionType::Zlib)] + fn verify_args_struct_by_command_run_with_rocksdb_ledger_compression( + arg_value: &str, + expected_compression: BlockstoreCompressionType, + ) { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + blockstore_options: BlockstoreOptions { + column_options: LedgerColumnOptions { + compression_type: expected_compression, + ..default_run_args.blockstore_options.column_options.clone() + }, + ..default_run_args.blockstore_options.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rocksdb-ledger-compression", arg_value], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_rocksdb_ledger_compression_invalid() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + verify_args_struct_by_command_run_is_error_with_identity_setup( + default_run_args, + vec!["--rocksdb-ledger-compression", "invalid"], + ); + } + + #[test] + fn 
verify_args_struct_by_command_run_with_rocksdb_perf_sample_interval() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + blockstore_options: BlockstoreOptions { + column_options: LedgerColumnOptions { + rocks_perf_sample_interval: 100, + ..default_run_args.blockstore_options.column_options.clone() + }, + ..default_run_args.blockstore_options.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rocksdb-perf-sample-interval", "100"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_rocksdb_compaction_threads() { + // long arg + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + blockstore_options: BlockstoreOptions { + num_rocksdb_compaction_threads: NonZeroUsize::new(1).unwrap(), + ..default_run_args.blockstore_options.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rocksdb-compaction-threads", "1"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rocksdb_flush_threads() { + // long arg + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + blockstore_options: BlockstoreOptions { + num_rocksdb_flush_threads: NonZeroUsize::new(1).unwrap(), + ..default_run_args.blockstore_options.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rocksdb-flush-threads", "1"], + expected_args, + ); + } + } +} diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 5a73d6d50a285f..07c69234a021fd 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -45,10 +45,6 @@ use { solana_keypair::Keypair, solana_ledger::{ 
blockstore_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS}, - blockstore_options::{ - AccessType, BlockstoreCompressionType, BlockstoreOptions, BlockstoreRecoveryMode, - LedgerColumnOptions, - }, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, solana_logger::redirect_stderr_to_file, @@ -111,8 +107,6 @@ pub fn execute( rayon_global_threads, replay_forks_threads, replay_transactions_threads, - rocksdb_compaction_threads, - rocksdb_flush_threads, tpu_transaction_forward_receive_threads, tpu_transaction_receive_threads, tpu_vote_transaction_receive_threads, @@ -183,10 +177,6 @@ pub fn execute( ) })?; - let recovery_mode = matches - .value_of("wal_recovery_mode") - .map(BlockstoreRecoveryMode::from); - let max_ledger_shreds = if matches.is_present("limit_ledger_size") { let limit_ledger_size = match matches.value_of("limit_ledger_size") { Some(_) => value_t_or_exit!(matches, "limit_ledger_size", u64), @@ -203,36 +193,6 @@ pub fn execute( None }; - let column_options = LedgerColumnOptions { - compression_type: match matches.value_of("rocksdb_ledger_compression") { - None => BlockstoreCompressionType::default(), - Some(ledger_compression_string) => match ledger_compression_string { - "none" => BlockstoreCompressionType::None, - "snappy" => BlockstoreCompressionType::Snappy, - "lz4" => BlockstoreCompressionType::Lz4, - "zlib" => BlockstoreCompressionType::Zlib, - _ => panic!("Unsupported ledger_compression: {ledger_compression_string}"), - }, - }, - rocks_perf_sample_interval: value_t_or_exit!( - matches, - "rocksdb_perf_sample_interval", - usize - ), - }; - - let blockstore_options = BlockstoreOptions { - recovery_mode, - column_options, - // The validator needs to open many files, check that the process has - // permission to do so in order to fail quickly and give a direct error - enforce_ulimit_nofile: true, - // The validator needs primary (read/write) - access_type: AccessType::Primary, - 
num_rocksdb_compaction_threads: rocksdb_compaction_threads, - num_rocksdb_flush_threads: rocksdb_flush_threads, - }; - let accounts_hash_cache_path = matches .value_of("accounts_hash_cache_path") .map(Into::into) @@ -651,7 +611,7 @@ pub fn execute( repair_whitelist, gossip_validators, max_ledger_shreds, - blockstore_options, + blockstore_options: run_args.blockstore_options, run_verification: !matches.is_present("skip_startup_ledger_verification"), debug_keys, contact_debug_interval, From 70d2a9e7c8a2244d4602557c2296ac864768601d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 19:46:58 +0800 Subject: [PATCH 27/68] build(deps): bump serde_json from 1.0.140 to 1.0.141 (#7076) * build(deps): bump serde_json from 1.0.140 to 1.0.141 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.140 to 1.0.141. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.140...v1.0.141) --- updated-dependencies: - dependency-name: serde_json dependency-version: 1.0.141 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b21f4b8d2eac5..c9db0fbf0d2527 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6329,9 +6329,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 7ec6f54b22c72b..5b4b680086141f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -359,7 +359,7 @@ serde = "1.0.219" # must match the serde_derive version, see https://github.com/ serde-big-array = "0.5.1" serde_bytes = "0.11.17" serde_derive = "1.0.219" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.140" +serde_json = "1.0.141" serde_with = { version = "3.14.0", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7f0c79074b31bf..929fc22877c2b9 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5118,9 +5118,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "itoa", "memchr", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 
953b4206315c26..5ba8ccb8e56c65 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -4952,9 +4952,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "itoa", "memchr", From 2a5ecfb9ad76a9a83572fe73d118d93ad9d70066 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 22 Jul 2025 21:12:51 +0900 Subject: [PATCH 28/68] Finally introduce sane unified scheduler shutdown (#5866) * Finally introduce sane unified scheduler shutdown * Avoid lock contentions on poh_recorder * Remove confusing redundant comment * Provide explicit msg to unreachable!()s * Minor edits * Improve ci stability with faster joining --- core/src/banking_stage/decision_maker.rs | 28 +++- core/src/banking_stage/unified_scheduler.rs | 4 +- runtime/src/bank_forks.rs | 12 ++ runtime/src/installed_scheduler_pool.rs | 2 + unified-scheduler-pool/src/lib.rs | 165 ++++++++++++++++---- 5 files changed, 177 insertions(+), 34 deletions(-) diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index 19e0a674a848cf..aa973a7a231c94 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -7,7 +7,7 @@ use { solana_pubkey::Pubkey, solana_unified_scheduler_pool::{BankingStageMonitor, BankingStageStatus}, std::{ - sync::{Arc, RwLock}, + sync::{atomic::{AtomicBool, Ordering::Relaxed}, Arc, RwLock}, time::{Duration, Instant}, }, }; @@ -136,10 +136,30 @@ impl DecisionMaker { } } -impl BankingStageMonitor for DecisionMaker { +#[derive(Debug)] +pub(crate) struct DecisionMakerWrapper { + is_exited: Arc, + decision_maker: DecisionMaker, +} + +impl DecisionMakerWrapper { + pub(crate) fn new(decision_maker: DecisionMaker) -> Self { + // Clone-off 
before hand to avoid lock contentions. + let is_exited = decision_maker.poh_recorder.read().unwrap().is_exited.clone(); + + Self { + is_exited, + decision_maker, + } + } +} + +impl BankingStageMonitor for DecisionMakerWrapper { fn status(&mut self) -> BankingStageStatus { - if matches!( - self.make_consume_or_forward_decision(), + if self.is_exited.load(Relaxed) { + BankingStageStatus::Exited + } else if matches!( + self.decision_maker.make_consume_or_forward_decision(), BufferedPacketsDecision::Forward, ) { BankingStageStatus::Inactive diff --git a/core/src/banking_stage/unified_scheduler.rs b/core/src/banking_stage/unified_scheduler.rs index 602bc0c061096e..283af6117b6c83 100644 --- a/core/src/banking_stage/unified_scheduler.rs +++ b/core/src/banking_stage/unified_scheduler.rs @@ -30,7 +30,7 @@ use qualifier_attr::qualifiers; use { super::{ - decision_maker::{BufferedPacketsDecision, DecisionMaker}, + decision_maker::{BufferedPacketsDecision, DecisionMaker, DecisionMakerWrapper}, packet_deserializer::PacketDeserializer, LikeClusterInfo, }, @@ -56,7 +56,7 @@ pub(crate) fn ensure_banking_stage_setup( let mut root_bank_cache = RootBankCache::new(bank_forks.clone()); let unified_receiver = channels.unified_receiver().clone(); let mut decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); - let banking_stage_monitor = Box::new(decision_maker.clone()); + let banking_stage_monitor = Box::new(DecisionMakerWrapper::new(decision_maker.clone())); let banking_packet_handler = Box::new( move |helper: &BankingStageHelper, batches: BankingPacketBatch| { diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 413ef2472214bc..d0397b03a36351 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -649,6 +649,18 @@ impl ForkGraph for BankForks { } } +impl Drop for BankForks { + fn drop(&mut self) { + info!("BankForks::drop(): started..."); + self.banks.clear(); + + if let Some(scheduler_pool) = self.scheduler_pool.take() { + 
scheduler_pool.uninstalled_from_bank_forks(); + } + info!("BankForks::drop(): ...finished"); + } +} + #[cfg(test)] mod tests { use { diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 3726f1ac500981..9977a6baed2fc1 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -67,6 +67,8 @@ pub trait InstalledSchedulerPool: Send + Sync + Debug { /// timing of scheduler returning to reduce latency of the normal block-verification code-path, /// relying on eventual stale listener clean-up by `solScCleaner`. fn register_timeout_listener(&self, timeout_listener: TimeoutListener); + + fn uninstalled_from_bank_forks(self: Arc); } #[derive(Debug)] diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index eae37119739c00..3e2f1bad77e938 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -17,7 +17,9 @@ use qualifier_attr::qualifiers; use { agave_banking_stage_ingress_types::{BankingPacketBatch, BankingPacketReceiver}, assert_matches::assert_matches, - crossbeam_channel::{self, never, select_biased, Receiver, RecvError, SendError, Sender}, + crossbeam_channel::{ + self, never, select_biased, Receiver, RecvError, RecvTimeoutError, SendError, Sender, + }, dashmap::DashMap, derive_where::derive_where, dyn_clone::{clone_trait_object, DynClone}, @@ -130,6 +132,8 @@ pub struct SchedulerPool, TH: TaskHandler> { weak_self: Weak, next_scheduler_id: AtomicSchedulerId, max_usage_queue_count: usize, + scheduler_pool_sender: Sender>, + cleaner_thread: JoinHandle<()>, _phantom: PhantomData, } @@ -231,6 +235,20 @@ impl HandlerContext { fn banking_stage_helper(&self) -> &BankingStageHelper { self.banking_stage_helper.as_ref().unwrap() } + + fn clone_for_scheduler_thread(&self) -> Self { + let mut context = self.clone(); + if self.banking_stage_helper.is_some() { + context.disable_banking_packet_handler(); + } + context + } + + fn 
disable_banking_packet_handler(&mut self) { + self.banking_packet_receiver = never(); + self.banking_packet_handler = + Box::new(|_, _| unreachable!("paired with never() receiver, this cannot be called")); + } } #[derive(Debug, Clone)] @@ -433,32 +451,22 @@ where max_usage_queue_count: usize, timeout_duration: Duration, ) -> Arc { - let scheduler_pool = Arc::new_cyclic(|weak_self| Self { - scheduler_inners: Mutex::default(), - block_production_scheduler_inner: Mutex::default(), - trashed_scheduler_inners: Mutex::default(), - timeout_listeners: Mutex::default(), - common_handler_context: CommonHandlerContext { - log_messages_bytes_limit, - transaction_status_sender, - replay_vote_sender, - prioritization_fee_cache, - }, - block_verification_handler_count, - banking_stage_handler_context: Mutex::default(), - weak_self: weak_self.clone(), - next_scheduler_id: AtomicSchedulerId::default(), - max_usage_queue_count, - _phantom: PhantomData, - }); + let (scheduler_pool_sender, scheduler_pool_receiver) = crossbeam_channel::bounded(1); - let cleaner_main_loop = { - let weak_scheduler_pool = Arc::downgrade(&scheduler_pool); + let mut exiting = false; + let cleaner_main_loop = move || { + info!("cleaner_main_loop: started..."); - move || loop { - sleep(pool_cleaner_interval); + let weak_scheduler_pool: Weak = scheduler_pool_receiver.recv().unwrap(); + loop { + match scheduler_pool_receiver.recv_timeout(pool_cleaner_interval) { + Ok(_) => unreachable!(), + Err(RecvTimeoutError::Disconnected | RecvTimeoutError::Timeout) => (), + } let Some(scheduler_pool) = weak_scheduler_pool.upgrade() else { + // this is the only safe termination point of cleaner_main_loop while all other + // `break`s being due to poisoned locks. 
break; }; @@ -490,6 +498,10 @@ where }; let banking_stage_status = scheduler_pool.banking_stage_status(); + if !exiting && matches!(banking_stage_status, Some(BankingStageStatus::Exited)) { + exiting = true; + scheduler_pool.unregister_banking_stage(); + } if matches!(banking_stage_status, Some(BankingStageStatus::Inactive)) { let Ok(mut inner) = scheduler_pool.block_production_scheduler_inner.lock() @@ -595,14 +607,39 @@ where triggered_timeout_listener_count, )); } + info!("cleaner_main_loop: ...finished"); }; - // No need to join; the spawned main loop will gracefully exit. - thread::Builder::new() + let cleaner_thread = thread::Builder::new() .name("solScCleaner".to_owned()) .spawn_tracked(cleaner_main_loop) .unwrap(); + let scheduler_pool = Arc::new_cyclic(|weak_self| Self { + scheduler_inners: Mutex::default(), + block_production_scheduler_inner: Mutex::default(), + trashed_scheduler_inners: Mutex::default(), + timeout_listeners: Mutex::default(), + common_handler_context: CommonHandlerContext { + log_messages_bytes_limit, + transaction_status_sender, + replay_vote_sender, + prioritization_fee_cache, + }, + block_verification_handler_count, + banking_stage_handler_context: Mutex::default(), + weak_self: weak_self.clone(), + next_scheduler_id: AtomicSchedulerId::default(), + max_usage_queue_count, + scheduler_pool_sender: scheduler_pool_sender.clone(), + cleaner_thread, + _phantom: PhantomData, + }); + + scheduler_pool_sender + .send(Arc::downgrade(&scheduler_pool)) + .unwrap(); + scheduler_pool } @@ -751,6 +788,19 @@ where ); } + fn unregister_banking_stage(&self) { + let handler_context = &mut self.banking_stage_handler_context.lock().unwrap(); + let handler_context = handler_context.as_mut().unwrap(); + // Replace with dummy ones to unblock validator shutdown. + // Note that replacing banking_stage_handler_context with None altogether will create a + // very short window of race condition due to untimely spawning of block production + // scheduler. 
+ handler_context.banking_packet_receiver = never(); + handler_context.banking_packet_handler = + Box::new(|_, _| unreachable!("paired with never() receiver, this cannot be called")); + handler_context.banking_stage_monitor = Box::new(ExitedBankingMonitor); + } + fn banking_stage_status(&self) -> Option { self.banking_stage_handler_context .lock() @@ -782,7 +832,9 @@ where self.block_verification_handler_count, // Return various type-specific no-op values. never(), - Box::new(|_, _| {}), + Box::new(|_, _| { + unreachable!("paired with never() receiver, this cannot be called") + }), None, None, ) @@ -884,6 +936,48 @@ where .unwrap() .push((timeout_listener, Instant::now())); } + + fn uninstalled_from_bank_forks(self: Arc) { + info!("SchedulerPool::uninstalled_from_bank_forks(): started..."); + + // Forcibly return back all taken schedulers back to this scheduler pool. + for (listener, _registered_at) in mem::take(&mut *self.timeout_listeners.lock().unwrap()) { + listener.trigger(self.clone()); + } + + // Then, drop all schedulers in the pool. + mem::take(&mut *self.scheduler_inners.lock().unwrap()); + mem::take(&mut *self.block_production_scheduler_inner.lock().unwrap()); + mem::take(&mut *self.trashed_scheduler_inners.lock().unwrap()); + + // At this point, all circular references of this pool has been cut. And there should be + // only 1 strong rerefence unless the cleaner thread is active right now. + + // So, wait a bit to unwrap the pool out of the sinful Arc finally here. Note that we can't resort to the + // Drop impl, because of the need to take the ownership of the join handle of the cleaner + // thread... + let mut this = self; + let mut this: Self = loop { + match Arc::try_unwrap(this) { + Ok(pool) => { + break pool; + } + Err(that) => { + // It seems solScCleaner is active... 
retry later + this = that; + sleep(Duration::from_millis(100)); + // Yes, indefinite loop, but the situation isn't so different from the + // following join(), which indefinitely waits as well. + continue; + } + } + }; + // Accelerate cleaner thread joining by disconnection + this.scheduler_pool_sender = crossbeam_channel::bounded(1).0; + this.cleaner_thread.join().unwrap(); + + info!("SchedulerPool::uninstalled_from_bank_forks(): ...finished"); + } } pub trait TaskHandler: Send + Sync + Debug + Sized + 'static { @@ -1828,7 +1922,7 @@ impl, TH: TaskHandler> ThreadManager { // 5. the handler thread reply back to the scheduler thread as an executed task. // 6. the scheduler thread post-processes the executed task. let scheduler_main_loop = { - let handler_context = handler_context.clone(); + let handler_context = handler_context.clone_for_scheduler_thread(); let session_result_sender = self.session_result_sender.clone(); // Taking new_task_receiver here is important to ensure there's a single receiver. In // this way, the replay stage will get .send() failures reliably, after this scheduler @@ -2167,7 +2261,12 @@ impl, TH: TaskHandler> ThreadManager { let Ok(banking_packet) = banking_packet else { info!("disconnected banking_packet_receiver"); - break; + // Don't break here; handler threads are expected to outlive its + // associated scheduler thread always. So, disable banking packet + // handler then continue to be cleaned up properly later, much like + // block verification handler thread. 
+ handler_context.disable_banking_packet_handler(); + continue; }; banking_packet_handler(banking_stage_helper, banking_packet); continue; @@ -2461,12 +2560,22 @@ impl SpawnableScheduler for PooledScheduler { pub enum BankingStageStatus { Active, Inactive, + Exited, } pub trait BankingStageMonitor: Send + Debug { fn status(&mut self) -> BankingStageStatus; } +#[derive(Debug)] +struct ExitedBankingMonitor; + +impl BankingStageMonitor for ExitedBankingMonitor { + fn status(&mut self) -> BankingStageStatus { + BankingStageStatus::Exited + } +} + impl InstalledScheduler for PooledScheduler { fn id(&self) -> SchedulerId { self.inner.id() From d856fc5006eca4b052c0f28922dc06861fc59859 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 20:22:53 +0800 Subject: [PATCH 29/68] build(deps): bump bytemuck_derive from 1.9.3 to 1.10.0 (#7075) * build(deps): bump bytemuck_derive from 1.9.3 to 1.10.0 Bumps [bytemuck_derive](https://github.com/Lokathor/bytemuck) from 1.9.3 to 1.10.0. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/commits/bytemuck_derive-v1.10.0) --- updated-dependencies: - dependency-name: bytemuck_derive dependency-version: 1.10.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9db0fbf0d2527..3f1af9e474667b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1495,9 +1495,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 5b4b680086141f..bd90400f393ccf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -220,7 +220,7 @@ bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" bytemuck = "1.23.1" -bytemuck_derive = "1.9.3" +bytemuck_derive = "1.10.0" bytes = "1.10" bzip2 = "0.4.4" caps = "0.5.5" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 929fc22877c2b9..79af5f89c46979 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -993,9 +993,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" dependencies = [ "proc-macro2", "quote", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 5ba8ccb8e56c65..0f6668d05f920e 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -898,9 +898,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.9.3" +version = 
"1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" dependencies = [ "proc-macro2", "quote", From a1da71eca01d93c5d110bd2729946161dc92707a Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 22 Jul 2025 09:10:13 -0500 Subject: [PATCH 30/68] update_index inline for snapshot minimization (#7067) --- accounts-db/src/accounts_db.rs | 20 ++++++++++++++------ accounts-db/src/ancient_append_vecs.rs | 9 ++++++--- runtime/src/snapshot_minimizer.rs | 8 ++++++-- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9cdbb40464e738..07dde4371a31b6 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3701,8 +3701,11 @@ impl AccountsDb { // mutating rooted slots; There should be no writers to them. let accounts = [(slot, &shrink_collect.alive_accounts.alive_accounts()[..])]; let storable_accounts = StorableAccountsBySlot::new(slot, &accounts, self); - stats_sub.store_accounts_timing = - self.store_accounts_frozen(storable_accounts, shrink_in_progress.new_storage()); + stats_sub.store_accounts_timing = self.store_accounts_frozen( + storable_accounts, + shrink_in_progress.new_storage(), + UpdateIndexThreadSelection::PoolWithThreshold, + ); rewrite_elapsed.stop(); stats_sub.rewrite_elapsed_us = Saturating(rewrite_elapsed.as_us()); @@ -5766,8 +5769,12 @@ impl AccountsDb { flush_stats.num_bytes_flushed.0, "flush_slot_cache", ); - let (store_accounts_timing_inner, store_accounts_total_inner_us) = - measure_us!(self.store_accounts_frozen((slot, &accounts[..]), &flushed_store,)); + let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self + .store_accounts_frozen( + (slot, &accounts[..]), + &flushed_store, + UpdateIndexThreadSelection::PoolWithThreshold, + )); 
flush_stats.store_accounts_timing = store_accounts_timing_inner; flush_stats.store_accounts_total_us = Saturating(store_accounts_total_inner_us); @@ -7526,6 +7533,7 @@ impl AccountsDb { &self, accounts: impl StorableAccounts<'a>, storage: &Arc, + update_index_thread_selection: UpdateIndexThreadSelection, ) -> StoreAccountsTiming { let slot = accounts.target_slot(); let mut store_accounts_time = Measure::start("store_accounts"); @@ -7562,7 +7570,7 @@ impl AccountsDb { infos, &accounts, UpsertReclaim::IgnoreReclaims, - UpdateIndexThreadSelection::PoolWithThreshold, + update_index_thread_selection, &self.thread_pool_clean, ); @@ -8429,7 +8437,7 @@ impl CalcAccountsHashKind { } } -pub(crate) enum UpdateIndexThreadSelection { +pub enum UpdateIndexThreadSelection { /// Use current thread only Inline, /// Use a thread-pool if the number of updates exceeds a threshold diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 36dfde9fc661c3..4e77f7b5a1aa62 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -11,6 +11,7 @@ use { stats::{ShrinkAncientStats, ShrinkStatsSub}, AccountFromStorage, AccountStorageEntry, AccountsDb, AliveAccounts, GetUniqueAccountsResult, ShrinkCollect, ShrinkCollectAliveSeparatedByRefs, + UpdateIndexThreadSelection, }, active_stats::ActiveStatItem, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, @@ -544,9 +545,11 @@ impl AccountsDb { let target_slot = accounts_to_write.target_slot(); let (shrink_in_progress, create_and_insert_store_elapsed_us) = measure_us!(self.get_store_for_shrink(target_slot, bytes)); - let (store_accounts_timing, rewrite_elapsed_us) = measure_us!( - self.store_accounts_frozen(accounts_to_write, shrink_in_progress.new_storage(),) - ); + let (store_accounts_timing, rewrite_elapsed_us) = measure_us!(self.store_accounts_frozen( + accounts_to_write, + shrink_in_progress.new_storage(), + 
UpdateIndexThreadSelection::PoolWithThreshold + )); write_ancient_accounts.metrics.accumulate(&ShrinkStatsSub { store_accounts_timing, diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index ae06393ce6eb50..2ec9704d94a2d1 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -14,6 +14,7 @@ use { solana_accounts_db::{ accounts_db::{ stats::PurgeStats, AccountStorageEntry, AccountsDb, GetUniqueAccountsResult, + UpdateIndexThreadSelection, }, storable_accounts::StorableAccountsBySlot, }, @@ -327,8 +328,11 @@ impl<'a> SnapshotMinimizer<'a> { let storable_accounts = StorableAccountsBySlot::new(slot, &accounts, self.accounts_db()); - self.accounts_db() - .store_accounts_frozen(storable_accounts, new_storage); + self.accounts_db().store_accounts_frozen( + storable_accounts, + new_storage, + UpdateIndexThreadSelection::Inline, + ); new_storage.flush().unwrap(); } From 231fbc47f8916fb075fb1b79fbda50d7ddd11c9b Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Tue, 22 Jul 2025 09:55:52 -0500 Subject: [PATCH 31/68] Reports total transaction fee collected in frozen bank to cost_tracker (#7046) * reports total transaction fee collected in frozen bank to cost_tracker * - report both total transaction fee and priority fee to metrics; - bank provides read-only access to collector_fee_details * - clone CollectorFeeDetails instead of exposing read lock; --- core/src/cost_update_service.rs | 14 +++++++++++++- cost-model/src/cost_tracker.rs | 24 ++++++++++++++++-------- runtime/src/bank.rs | 13 +++++++++++-- runtime/src/bank/fee_distribution.rs | 2 +- 4 files changed, 41 insertions(+), 12 deletions(-) diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index de4d1ad1b30eaa..5662a2c3673a99 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -51,6 +51,13 @@ impl CostUpdateService { bank, is_leader_block, } => { + let 
(total_transaction_fee, total_priority_fee) = { + let collector_fee_details = bank.get_collector_fee_details(); + ( + collector_fee_details.total_transaction_fee(), + collector_fee_details.total_priority_fee(), + ) + }; for loop_count in 1..=MAX_LOOP_COUNT { { // Release the lock so that the thread that will @@ -67,7 +74,12 @@ impl CostUpdateService { "inflight transaction count is {in_flight_transaction_count} \ for slot {slot} after {loop_count} iteration(s)" ); - cost_tracker.report_stats(slot, is_leader_block); + cost_tracker.report_stats( + slot, + is_leader_block, + total_transaction_fee, + total_priority_fee, + ); break; } } diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 210f30738dbc4b..c26062754e5c32 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -216,7 +216,13 @@ impl CostTracker { self.transaction_count.0 } - pub fn report_stats(&self, bank_slot: solana_clock::Slot, is_leader: bool) { + pub fn report_stats( + &self, + bank_slot: solana_clock::Slot, + is_leader: bool, + total_transaction_fee: u64, + total_priority_fee: u64, + ) { // skip reporting if block is empty if self.transaction_count.0 == 0 { return; @@ -227,13 +233,13 @@ impl CostTracker { datapoint_info!( "cost_tracker_stats", "is_leader" => is_leader.to_string(), - ("bank_slot", bank_slot as i64, i64), - ("block_cost", self.block_cost as i64, i64), - ("vote_cost", self.vote_cost as i64, i64), - ("transaction_count", self.transaction_count.0 as i64, i64), - ("number_of_accounts", self.number_of_accounts() as i64, i64), + ("bank_slot", bank_slot, i64), + ("block_cost", self.block_cost, i64), + ("vote_cost", self.vote_cost, i64), + ("transaction_count", self.transaction_count.0, i64), + ("number_of_accounts", self.number_of_accounts(), i64), ("costliest_account", costliest_account.to_string(), String), - ("costliest_account_cost", costliest_account_cost as i64, i64), + ("costliest_account_cost", costliest_account_cost, i64), ( 
"allocated_accounts_data_size", self.allocated_accounts_data_size.0, @@ -263,7 +269,9 @@ impl CostTracker { "secp256r1_instruction_signature_count", self.secp256r1_instruction_signature_count.0, i64 - ) + ), + ("total_transaction_fee", total_transaction_fee, i64), + ("total_priority_fee", total_priority_fee, i64), ); } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2ab65f181d4596..1abbd1dfd5ed9b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -247,7 +247,7 @@ impl AddAssign for SquashTiming { } } -#[derive(Debug, Default, PartialEq)] +#[derive(Clone, Debug, Default, PartialEq)] pub struct CollectorFeeDetails { transaction_fee: u64, priority_fee: u64, @@ -263,9 +263,13 @@ impl CollectorFeeDetails { .saturating_add(fee_details.prioritization_fee()); } - pub(crate) fn total(&self) -> u64 { + pub fn total_transaction_fee(&self) -> u64 { self.transaction_fee.saturating_add(self.priority_fee) } + + pub fn total_priority_fee(&self) -> u64 { + self.priority_fee + } } impl From for CollectorFeeDetails { @@ -5686,6 +5690,11 @@ impl Bank { pub fn set_accounts_lt_hash_for_snapshot_minimizer(&self, accounts_lt_hash: AccountsLtHash) { *self.accounts_lt_hash.lock().unwrap() = accounts_lt_hash; } + + /// Return total transaction fee collected + pub fn get_collector_fee_details(&self) -> CollectorFeeDetails { + self.collector_fee_details.read().unwrap().clone() + } } impl InvokeContextCallback for Bank { diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 39274665be0816..37b4d2d0535e0d 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -49,7 +49,7 @@ impl Bank { // form of transaction fees as well. 
pub(super) fn distribute_transaction_fee_details(&self) { let fee_details = self.collector_fee_details.read().unwrap(); - if fee_details.total() == 0 { + if fee_details.total_transaction_fee() == 0 { // nothing to distribute, exit early return; } From a25ac4a282aba38b7a2be1fb407a373b45586c03 Mon Sep 17 00:00:00 2001 From: crStiv Date: Tue, 22 Jul 2025 18:38:46 +0300 Subject: [PATCH 32/68] Fixes set_geyser_plugin_notifer() typo (#7063) --- accounts-db/src/accounts_db/geyser_plugin_utils.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index 0fec62a8a74df4..61f076fb7359bb 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -144,7 +144,7 @@ pub mod tests { }; impl AccountsDb { - pub fn set_geyser_plugin_notifer(&mut self, notifier: Option) { + pub fn set_geyser_plugin_notifier(&mut self, notifier: Option) { self.accounts_update_notifier = notifier; } } @@ -218,7 +218,7 @@ pub mod tests { // Do the notification let notifier = GeyserTestPlugin::default(); let notifier = Arc::new(notifier); - accounts.set_geyser_plugin_notifer(Some(notifier.clone())); + accounts.set_geyser_plugin_notifier(Some(notifier.clone())); accounts.notify_account_restore_from_snapshot(); // Ensure key1 was notified twice in different slots @@ -253,7 +253,7 @@ pub mod tests { let notifier = GeyserTestPlugin::default(); let notifier = Arc::new(notifier); - accounts.set_geyser_plugin_notifer(Some(notifier.clone())); + accounts.set_geyser_plugin_notifier(Some(notifier.clone())); // Account with key1 is updated twice in two different slots -- should only get notified twice. 
// Account with key2 is updated slot0, should get notified once From 1ca4f1a187ca1c742e92abd9193252fd7ea235f6 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 22 Jul 2025 11:43:03 -0400 Subject: [PATCH 33/68] Removes VerifyAccountsHashAndLamportsConfig and AccountsHashVerificationError (#7069) --- accounts-db/src/accounts_db.rs | 43 ---------------------------------- 1 file changed, 43 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 07dde4371a31b6..1a0431a2cbc10d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -173,23 +173,6 @@ pub(crate) struct ShrinkCollectAliveSeparatedByRefs<'a> { pub(crate) many_refs_old_alive: AliveAccounts<'a>, } -/// Configuration Parameters for running accounts hash and total lamports verification -#[derive(Debug, Clone)] -pub struct VerifyAccountsHashAndLamportsConfig<'a> { - /// bank ancestors - pub ancestors: &'a Ancestors, - /// epoch_schedule - pub epoch_schedule: &'a EpochSchedule, - /// epoch - pub epoch: Epoch, - /// true to ignore mismatches - pub ignore_mismatch: bool, - /// true to dump debug log if mismatch happens - pub store_detailed_debug_info: bool, - /// true to use dedicated background thread pool for verification - pub use_bg_thread_pool: bool, -} - pub(crate) trait ShrinkCollectRefs<'a>: Sync + Send { fn with_capacity(capacity: usize, slot: Slot) -> Self; fn collect(&mut self, other: Self); @@ -975,13 +958,6 @@ impl ReadableAccount for LoadedAccount<'_> { } } -#[derive(Debug)] -pub enum AccountsHashVerificationError { - MissingAccountsHash, - MismatchedAccountsHash, - MismatchedTotalLamports(u64, u64), -} - #[derive(Default)] struct CleanKeyTimings { collect_delta_keys_us: u64, @@ -8646,25 +8622,6 @@ impl AccountsDb { } } -// These functions/fields are only usable from a dev context (i.e. 
tests and benches) -#[cfg(feature = "dev-context-only-utils")] -impl<'a> VerifyAccountsHashAndLamportsConfig<'a> { - pub fn new_for_test( - ancestors: &'a Ancestors, - epoch_schedule: &'a EpochSchedule, - epoch: Epoch, - ) -> Self { - Self { - ancestors, - epoch_schedule, - epoch, - ignore_mismatch: false, - store_detailed_debug_info: false, - use_bg_thread_pool: false, - } - } -} - /// A set of utility functions used for testing and benchmarking #[cfg(feature = "dev-context-only-utils")] pub mod test_utils { From 9d26fd6ce488e707175604b64452bf0d9e1a6080 Mon Sep 17 00:00:00 2001 From: Rory Harris Date: Tue, 22 Jul 2025 09:25:17 -0700 Subject: [PATCH 34/68] Switch Test to use AccountsDB Config For Tests (#7068) --- runtime/src/snapshot_bank_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 8234fce41f132e..17690397a3ef97 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1907,7 +1907,7 @@ mod tests { let bank_test_config = BankTestConfig { accounts_db_config: AccountsDbConfig { storage_access, - ..AccountsDbConfig::default() + ..ACCOUNTS_DB_CONFIG_FOR_TESTING }, }; From 640dde533ca44b96049f0948f36531dc4103bbeb Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 22 Jul 2025 12:41:40 -0500 Subject: [PATCH 35/68] Remove duplicate arguments from rpc_bootstrap() (#7073) The snapshot archive paths can be read from the SnapshotConfig contained in the ValidatorConfig that is also passed to the function --- validator/src/bootstrap.rs | 24 ++++++++---------------- validator/src/commands/run/execute.rs | 16 ++++++++++------ 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 8b14d99c87d4e4..de842ccd961dfb 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -368,8 +368,6 @@ pub fn attempt_download_genesis_and_snapshot( use_progress_bar: bool, 
gossip: &mut Option<(Arc, Arc, GossipService)>, rpc_client: &RpcClient, - full_snapshot_archives_dir: &Path, - incremental_snapshot_archives_dir: &Path, maximum_local_snapshot_age: Slot, start_progress: &Arc>, minimal_snapshot_download_speed: f32, @@ -402,8 +400,6 @@ pub fn attempt_download_genesis_and_snapshot( info!("RPC node root slot: {rpc_client_slot}"); download_snapshots( - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, validator_config, bootstrap_config, use_progress_bar, @@ -559,8 +555,6 @@ pub fn rpc_bootstrap( node: &Node, identity_keypair: &Arc, ledger_path: &Path, - full_snapshot_archives_dir: &Path, - incremental_snapshot_archives_dir: &Path, vote_account: &Pubkey, authorized_voter_keypairs: Arc>>>, cluster_entrypoints: &[ContactInfo], @@ -643,8 +637,6 @@ pub fn rpc_bootstrap( use_progress_bar, &mut gossip, &rpc_client, - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, maximum_local_snapshot_age, start_progress, minimal_snapshot_download_speed, @@ -1106,8 +1098,6 @@ fn retain_peer_snapshot_hashes_with_highest_incremental_snapshot_slot( /// Check to see if we can use our local snapshots, otherwise download newer ones. 
#[allow(clippy::too_many_arguments)] fn download_snapshots( - full_snapshot_archives_dir: &Path, - incremental_snapshot_archives_dir: &Path, validator_config: &ValidatorConfig, bootstrap_config: &RpcBootstrapConfig, use_progress_bar: bool, @@ -1126,6 +1116,10 @@ fn download_snapshots( full: full_snapshot_hash, incr: incremental_snapshot_hash, } = snapshot_hash.unwrap(); + let full_snapshot_archives_dir = &validator_config.snapshot_config.full_snapshot_archives_dir; + let incremental_snapshot_archives_dir = &validator_config + .snapshot_config + .incremental_snapshot_archives_dir; // If the local snapshots are new enough, then use 'em; no need to download new snapshots if should_use_local_snapshot( @@ -1153,8 +1147,6 @@ fn download_snapshots( ); } else { download_snapshot( - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, validator_config, bootstrap_config, use_progress_bar, @@ -1186,8 +1178,6 @@ fn download_snapshots( ); } else { download_snapshot( - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, validator_config, bootstrap_config, use_progress_bar, @@ -1209,8 +1199,6 @@ fn download_snapshots( /// Download a snapshot #[allow(clippy::too_many_arguments)] fn download_snapshot( - full_snapshot_archives_dir: &Path, - incremental_snapshot_archives_dir: &Path, validator_config: &ValidatorConfig, bootstrap_config: &RpcBootstrapConfig, use_progress_bar: bool, @@ -1228,6 +1216,10 @@ fn download_snapshot( let maximum_incremental_snapshot_archives_to_retain = validator_config .snapshot_config .maximum_incremental_snapshot_archives_to_retain; + let full_snapshot_archives_dir = &validator_config.snapshot_config.full_snapshot_archives_dir; + let incremental_snapshot_archives_dir = &validator_config + .snapshot_config + .incremental_snapshot_archives_dir; *start_progress.write().unwrap() = ValidatorStartProgress::DownloadingSnapshot { slot: desired_snapshot_hash.0, diff --git a/validator/src/commands/run/execute.rs 
b/validator/src/commands/run/execute.rs index 07c69234a021fd..2824441262d8ca 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -881,8 +881,8 @@ pub fn execute( full_snapshot_archive_interval, incremental_snapshot_archive_interval, bank_snapshots_dir, - full_snapshot_archives_dir: full_snapshot_archives_dir.clone(), - incremental_snapshot_archives_dir: incremental_snapshot_archives_dir.clone(), + full_snapshot_archives_dir, + incremental_snapshot_archives_dir, archive_format, snapshot_version, maximum_full_snapshot_archives_to_retain, @@ -1164,8 +1164,14 @@ pub fn execute( solana_metrics::set_host_id(identity_keypair.pubkey().to_string()); solana_metrics::set_panic_hook("validator", Some(String::from(solana_version))); solana_entry::entry::init_poh(); - snapshot_utils::remove_tmp_snapshot_archives(&full_snapshot_archives_dir); - snapshot_utils::remove_tmp_snapshot_archives(&incremental_snapshot_archives_dir); + snapshot_utils::remove_tmp_snapshot_archives( + &validator_config.snapshot_config.full_snapshot_archives_dir, + ); + snapshot_utils::remove_tmp_snapshot_archives( + &validator_config + .snapshot_config + .incremental_snapshot_archives_dir, + ); let should_check_duplicate_instance = true; if !cluster_entrypoints.is_empty() { @@ -1173,8 +1179,6 @@ pub fn execute( &node, &identity_keypair, &ledger_path, - &full_snapshot_archives_dir, - &incremental_snapshot_archives_dir, &vote_account, authorized_voter_keypairs.clone(), &cluster_entrypoints, From c57c245c5eeac018afca0d49c49cd9bd74b30bdd Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 22 Jul 2025 14:55:57 -0400 Subject: [PATCH 36/68] Removes accounts-bench (#7084) --- Cargo.lock | 17 ---- Cargo.toml | 1 - accounts-bench/Cargo.toml | 28 ------ accounts-bench/src/main.rs | 158 ------------------------------- ci/bench/part2.sh | 1 - ci/test-bench.sh | 1 - scripts/dcou-tainted-packages.sh | 1 - 7 files changed, 207 deletions(-) delete mode 100644 
accounts-bench/Cargo.toml delete mode 100644 accounts-bench/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 3f1af9e474667b..0183c77d2f81e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6763,23 +6763,6 @@ dependencies = [ "solana-pubkey", ] -[[package]] -name = "solana-accounts-bench" -version = "3.0.0" -dependencies = [ - "clap 2.33.3", - "log", - "rayon", - "solana-accounts-db", - "solana-clock", - "solana-epoch-schedule", - "solana-logger", - "solana-measure", - "solana-pubkey", - "solana-rent-collector", - "solana-version", -] - [[package]] name = "solana-accounts-cluster-bench" version = "3.0.0" diff --git a/Cargo.toml b/Cargo.toml index bd90400f393ccf..c121495a54cfbd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,6 @@ members = [ "account-decoder", "account-decoder-client-types", - "accounts-bench", "accounts-cluster-bench", "accounts-db", "accounts-db/accounts-hash-cache-tool", diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml deleted file mode 100644 index 1cfe16a4052ba5..00000000000000 --- a/accounts-bench/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "solana-accounts-bench" -publish = false -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[features] -dev-context-only-utils = [] - -[dependencies] -clap = { workspace = true } -log = { workspace = true } -rayon = { workspace = true } -solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } -solana-clock = { workspace = true } -solana-epoch-schedule = { workspace = true } -solana-logger = { workspace = true } -solana-measure = { workspace = true } -solana-pubkey = { workspace = true } -solana-rent-collector = { workspace = true } -solana-version = { workspace = true } diff --git a/accounts-bench/src/main.rs 
b/accounts-bench/src/main.rs deleted file mode 100644 index 0b2052065963b6..00000000000000 --- a/accounts-bench/src/main.rs +++ /dev/null @@ -1,158 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] - -#[macro_use] -extern crate log; -use { - clap::{crate_description, crate_name, value_t, App, Arg}, - rayon::prelude::*, - solana_accounts_db::{ - accounts::Accounts, - accounts_db::{ - test_utils::{create_test_accounts, update_accounts_bench}, - AccountsDb, CalcAccountsHashDataSource, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, - }, - ancestors::Ancestors, - }, - solana_epoch_schedule::EpochSchedule, - solana_measure::measure::Measure, - solana_pubkey::Pubkey, - std::{env, fs, path::PathBuf, sync::Arc}, -}; - -fn main() { - solana_logger::setup(); - - let matches = App::new(crate_name!()) - .about(crate_description!()) - .version(solana_version::version!()) - .arg( - Arg::with_name("num_slots") - .long("num_slots") - .takes_value(true) - .value_name("SLOTS") - .help("Number of slots to store to."), - ) - .arg( - Arg::with_name("num_accounts") - .long("num_accounts") - .takes_value(true) - .value_name("NUM_ACCOUNTS") - .help("Total number of accounts"), - ) - .arg( - Arg::with_name("iterations") - .long("iterations") - .takes_value(true) - .value_name("ITERATIONS") - .help("Number of bench iterations"), - ) - .arg( - Arg::with_name("clean") - .long("clean") - .takes_value(false) - .help("Run clean"), - ) - .get_matches(); - - let num_slots = value_t!(matches, "num_slots", usize).unwrap_or(4); - let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000); - let iterations = value_t!(matches, "iterations", usize).unwrap_or(20); - let clean = matches.is_present("clean"); - println!("clean: {clean:?}"); - - let path = PathBuf::from(env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_owned())) - .join("accounts-bench"); - println!("cleaning file system: {path:?}"); - if fs::remove_dir_all(path.clone()).is_err() { - println!("Warning: Couldn't remove {path:?}"); - } 
- let accounts_db = AccountsDb::new_with_config( - vec![path], - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), - None, - Arc::default(), - ); - let accounts = Accounts::new(Arc::new(accounts_db)); - println!("Creating {num_accounts} accounts"); - let mut create_time = Measure::start("create accounts"); - let pubkeys: Vec<_> = (0..num_slots) - .into_par_iter() - .map(|slot| { - let mut pubkeys: Vec = vec![]; - create_test_accounts( - &accounts, - &mut pubkeys, - num_accounts / num_slots, - slot as u64, - ); - pubkeys - }) - .collect(); - let pubkeys: Vec<_> = pubkeys.into_iter().flatten().collect(); - create_time.stop(); - println!( - "created {} accounts in {} slots {}", - (num_accounts / num_slots) * num_slots, - num_slots, - create_time - ); - let mut ancestors = Vec::with_capacity(num_slots); - ancestors.push(0); - for i in 1..num_slots { - ancestors.push(i as u64); - accounts.add_root(i as u64); - } - let ancestors = Ancestors::from(ancestors); - let mut elapsed = vec![0; iterations]; - let mut elapsed_store = vec![0; iterations]; - for x in 0..iterations { - if clean { - let mut time = Measure::start("clean"); - accounts.accounts_db.clean_accounts_for_tests(); - time.stop(); - println!("{time}"); - for slot in 0..num_slots { - update_accounts_bench(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64); - accounts.add_root((x * num_slots + slot) as u64); - } - } else { - let mut pubkeys: Vec = vec![]; - let mut time = Measure::start("hash"); - let results = accounts - .accounts_db - .update_accounts_hash_for_tests(0, &ancestors, false, false); - time.stop(); - let mut time_store = Measure::start("hash using store"); - let results_store = accounts.accounts_db.update_accounts_hash_with_verify_from( - CalcAccountsHashDataSource::Storages, - false, - solana_clock::Slot::default(), - &ancestors, - None, - &EpochSchedule::default(), - true, - ); - time_store.stop(); - if results != results_store { - error!("results different: \n{:?}\n{:?}", results, results_store); - 
} - println!( - "hash,{},{},{},{}%", - results.0 .0, - time, - time_store, - (time_store.as_us() as f64 / time.as_us() as f64 * 100.0f64) as u32 - ); - create_test_accounts(&accounts, &mut pubkeys, 1, 0); - elapsed[x] = time.as_us(); - elapsed_store[x] = time_store.as_us(); - } - } - - for x in elapsed { - info!("update_accounts_hash(us),{}", x); - } - for x in elapsed_store { - info!("calculate_accounts_hash_from_storages(us),{}", x); - } -} diff --git a/ci/bench/part2.sh b/ci/bench/part2.sh index cd6dffae551f45..34e617807de40c 100755 --- a/ci/bench/part2.sh +++ b/ci/bench/part2.sh @@ -24,7 +24,6 @@ _ cargo +"$rust_nightly" bench --manifest-path runtime/Cargo.toml ${V:+--verbose # Run banking/accounts bench. Doesn't require nightly, but use since it is already built. _ cargo +"$rust_nightly" run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE" -_ cargo +"$rust_nightly" run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE" # Run zk-elgamal-proof benches. _ cargo +"$rust_nightly" bench --manifest-path programs/zk-elgamal-proof/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE" diff --git a/ci/test-bench.sh b/ci/test-bench.sh index 8e441be34a8759..c39d787323029b 100755 --- a/ci/test-bench.sh +++ b/ci/test-bench.sh @@ -63,7 +63,6 @@ _ $cargoNightly bench --manifest-path programs/sbf/Cargo.toml ${V:+--verbose} -- # Run banking/accounts bench. Doesn't require nightly, but use since it is already built. 
_ $cargoNightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE" -_ $cargoNightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE" # `solana-upload-perf` disabled as it can take over 30 minutes to complete for some # reason diff --git a/scripts/dcou-tainted-packages.sh b/scripts/dcou-tainted-packages.sh index 72e578bf2d1bfe..a043840de0dd86 100644 --- a/scripts/dcou-tainted-packages.sh +++ b/scripts/dcou-tainted-packages.sh @@ -2,7 +2,6 @@ # shellcheck disable=SC2034 # This file is intended to be `source`d declare dcou_tainted_packages=( - solana-accounts-bench solana-banking-bench agave-ledger-tool solana-bench-tps From 8b4a870be86cc46c31205099e21489d18871d169 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 22 Jul 2025 14:24:43 -0500 Subject: [PATCH 37/68] rpc: Resolve Rust 1.88 clippy lints and format strings (#7047) - Add rustfmt::skip to several already well-crafted strings - Run `cargo clippy --fix --tests` with Rust 1.88.0 set in `rust-toolchain.toml` - Run `cargo fmt` with `format_strings = true` set in `rustfmt.toml` --- client/src/transaction_executor.rs | 7 +- .../src/geyser_plugin_manager.rs | 9 +- .../src/geyser_plugin_service.rs | 5 +- .../src/nonblocking/pubsub_client.rs | 2 +- pubsub-client/src/pubsub_client.rs | 8 +- rpc-client/src/http_sender.rs | 8 +- rpc-client/src/nonblocking/rpc_client.rs | 50 +++---- rpc-test/tests/rpc.rs | 4 +- .../optimistically_confirmed_bank_tracker.rs | 16 +- rpc/src/rpc.rs | 140 ++++++++---------- rpc/src/rpc_health.rs | 6 +- rpc/src/rpc_pubsub_service.rs | 10 +- rpc/src/rpc_service.rs | 15 +- rpc/src/rpc_subscriptions.rs | 18 +-- .../src/send_transaction_service.rs | 12 +- thin-client/src/thin_client.rs | 7 +- tpu-client-next/src/workers_cache.rs | 13 +- .../connection_workers_scheduler_test.rs | 22 ++- tpu-client/src/nonblocking/tpu_client.rs | 16 +- 
transaction-status-client-types/src/lib.rs | 4 + transaction-status/src/lib.rs | 54 +++---- 21 files changed, 191 insertions(+), 235 deletions(-) diff --git a/client/src/transaction_executor.rs b/client/src/transaction_executor.rs index 23354818777788..56d5faa8b515d5 100644 --- a/client/src/transaction_executor.rs +++ b/client/src/transaction_executor.rs @@ -76,7 +76,7 @@ impl TransactionExecutor { return Some((sig, timestamp(), id)); } Err(e) => { - info!("error: {:#?}", e); + info!("error: {e:#?}"); } } None @@ -136,7 +136,7 @@ impl TransactionExecutor { let mut retain = true; let sent_ts = sigs_w[i].1; if let Some(e) = &statuses[j] { - debug!("error: {:?}", e); + debug!("error: {e:?}"); if e.status.is_ok() { success += 1; } else { @@ -169,8 +169,7 @@ impl TransactionExecutor { ); if last_log.elapsed().as_millis() > 5000 { info!( - "success: {} error: {} timed_out: {}", - success, error_count, timed_out, + "success: {success} error: {error_count} timed_out: {timed_out}", ); last_log = Instant::now(); } diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 1e4fb7dbba0aef..2f55978ca5999d 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -238,7 +238,8 @@ impl GeyserPluginManager { return Err(jsonrpc_core::Error { code: ErrorCode::InvalidRequest, message: format!( - "There already exists a plugin named {} loaded, while reloading {name}. Did not load requested plugin", + "There already exists a plugin named {} loaded, while reloading {name}. 
Did \ + not load requested plugin", new_plugin.name() ), data: None, @@ -357,7 +358,8 @@ pub(crate) fn load_plugin_from_config( Ok(file) => file, Err(err) => { return Err(GeyserPluginManagerError::CannotOpenConfigFile(format!( - "Failed to open the plugin config file {geyser_plugin_config_file:?}, error: {err:?}" + "Failed to open the plugin config file {geyser_plugin_config_file:?}, error: \ + {err:?}" ))); } }; @@ -373,7 +375,8 @@ pub(crate) fn load_plugin_from_config( Ok(value) => value, Err(err) => { return Err(GeyserPluginManagerError::InvalidConfigFileFormat(format!( - "The config file {geyser_plugin_config_file:?} is not in a valid Json5 format, error: {err:?}" + "The config file {geyser_plugin_config_file:?} is not in a valid Json5 format, \ + error: {err:?}" ))); } }; diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index b0691af2196c7a..b866470a7e0717 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -78,10 +78,7 @@ impl GeyserPluginService { Arc, )>, ) -> Result { - info!( - "Starting GeyserPluginService from config files: {:?}", - geyser_plugin_config_files - ); + info!("Starting GeyserPluginService from config files: {geyser_plugin_config_files:?}"); let mut plugin_manager = GeyserPluginManager::new(); for geyser_plugin_config_file in geyser_plugin_config_files { diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index f23b781846e504..fd26574ccae3b7 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -627,7 +627,7 @@ impl PubsubClient { } } } else { - error!("Unknown request id: {}", id); + error!("Unknown request id: {id}"); break; } continue; diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index 0c7d789a022748..73c92305d57e3a 100644 --- 
a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -324,8 +324,8 @@ fn connect_with_retry( connection_retries -= 1; debug!( - "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}", - response, connection_retries, duration + "Too many requests: server responded with {response:?}, {connection_retries} \ + retries left, pausing for {duration:?}" ); sleep(duration); @@ -785,7 +785,7 @@ impl PubsubClient { let handler = move |message| match sender.send(message) { Ok(_) => (), Err(err) => { - info!("receive error: {:?}", err); + info!("receive error: {err:?}"); } }; Self::cleanup_with_handler(exit, socket, handler); @@ -810,7 +810,7 @@ impl PubsubClient { // Nothing useful, means we received a ping message } Err(err) => { - info!("receive error: {:?}", err); + info!("receive error: {err:?}"); break; } } diff --git a/rpc-client/src/http_sender.rs b/rpc-client/src/http_sender.rs index 4924ce5cea13fe..ee96431a5d7e68 100644 --- a/rpc-client/src/http_sender.rs +++ b/rpc-client/src/http_sender.rs @@ -174,9 +174,9 @@ impl RpcSender for HttpSender { too_many_requests_retries -= 1; debug!( - "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}", - response, too_many_requests_retries, duration - ); + "Too many requests: server responded with {response:?}, \ + {too_many_requests_retries} retries left, pausing for {duration:?}" + ); sleep(duration).await; stats_updater.add_rate_limited_time(duration); @@ -194,7 +194,7 @@ impl RpcSender for HttpSender { match serde_json::from_value::(json["error"]["data"].clone()) { Ok(data) => RpcResponseErrorData::SendTransactionPreflightFailure(data), Err(err) => { - debug!("Failed to deserialize RpcSimulateTransactionResult: {:?}", err); + debug!("Failed to deserialize RpcSimulateTransactionResult: {err:?}"); RpcResponseErrorData::Empty } } diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index 
8da0562052e1ab..161aff42e3bfaf 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -719,9 +719,8 @@ impl RpcClient { } Err(RpcError::ForUser( - "unable to confirm transaction. \ - This can happen in situations such as transaction expiration \ - and insufficient fee-payer funds" + "unable to confirm transaction. This can happen in situations such as transaction \ + expiration and insufficient fee-payer funds" .to_string(), ) .into()) @@ -989,7 +988,7 @@ impl RpcClient { data, }) = err.kind() { - debug!("{} {}", code, message); + debug!("{code} {message}"); if let RpcResponseErrorData::SendTransactionPreflightFailure( RpcSimulateTransactionResult { logs: Some(logs), .. @@ -1204,9 +1203,8 @@ impl RpcClient { } } else { return Err(RpcError::ForUser( - "unable to confirm transaction. \ - This can happen in situations such as transaction expiration \ - and insufficient fee-payer funds" + "unable to confirm transaction. This can happen in situations such as transaction \ + expiration and insufficient fee-payer funds" .to_string(), ) .into()); @@ -1237,11 +1235,12 @@ impl RpcClient { .await .unwrap_or(confirmations); if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 { - return Err( - RpcError::ForUser("transaction not finalized. \ - This can happen when a transaction lands in an abandoned fork. \ - Please retry.".to_string()).into(), - ); + return Err(RpcError::ForUser( + "transaction not finalized. This can happen when a transaction lands in an \ + abandoned fork. Please retry." 
+ .to_string(), + ) + .into()); } } } @@ -2316,8 +2315,7 @@ impl RpcClient { } info!( - "Waiting for stake to drop below {} current: {:.1}", - max_stake_percent, current_percent + "Waiting for stake to drop below {max_stake_percent} current: {current_percent:.1}" ); sleep(Duration::from_secs(5)).await; } @@ -2945,7 +2943,7 @@ impl RpcClient { } let result = serde_json::from_value(result_json) .map_err(|err| ClientError::new_with_request(err.into(), request))?; - trace!("Response block timestamp {:?} {:?}", slot, result); + trace!("Response block timestamp {slot:?} {result:?}"); Ok(result) }) .map_err(|err| err.into_with_request(request))? @@ -3604,7 +3602,7 @@ impl RpcClient { context, value: rpc_account, } = serde_json::from_value::>>(result_json)?; - trace!("Response account {:?} {:?}", pubkey, rpc_account); + trace!("Response account {pubkey:?} {rpc_account:?}"); let account = rpc_account.and_then(|rpc_account| rpc_account.decode()); Ok(Response { @@ -3891,11 +3889,7 @@ impl RpcClient { let minimum_balance: u64 = serde_json::from_value(minimum_balance_json) .map_err(|err| ClientError::new_with_request(err.into(), request))?; - trace!( - "Response minimum balance {:?} {:?}", - data_len, - minimum_balance - ); + trace!("Response minimum balance {data_len:?} {minimum_balance:?}"); Ok(minimum_balance) } @@ -4227,7 +4221,7 @@ impl RpcClient { context, value: rpc_account, } = serde_json::from_value::>>(result_json)?; - trace!("Response account {:?} {:?}", pubkey, rpc_account); + trace!("Response account {pubkey:?} {rpc_account:?}"); let response = { if let Some(rpc_account) = rpc_account { if let UiAccountData::Json(account_data) = rpc_account.data { @@ -4450,8 +4444,7 @@ impl RpcClient { }) .map_err(|_| { RpcError::ForUser( - "airdrop request failed. \ - This can happen when the rate limit is reached." + "airdrop request failed. This can happen when the rate limit is reached." 
.to_string(), ) .into() @@ -4514,10 +4507,7 @@ impl RpcClient { return balance_result; } trace!( - "wait_for_balance_with_commitment [{}] {:?} {:?}", - run, - balance_result, - expected_balance + "wait_for_balance_with_commitment [{run}] {balance_result:?} {expected_balance:?}" ); if let (Some(expected_balance), Ok(balance_result)) = (expected_balance, balance_result) { @@ -4591,7 +4581,7 @@ impl RpcClient { } } Err(err) => { - debug!("check_confirmations request failed: {:?}", err); + debug!("check_confirmations request failed: {err:?}"); } }; if now.elapsed().as_secs() > 20 { @@ -4707,7 +4697,7 @@ impl RpcClient { return Ok(new_blockhash); } } - debug!("Got same blockhash ({:?}), will retry...", blockhash); + debug!("Got same blockhash ({blockhash:?}), will retry..."); // Retry ~twice during a slot sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2)).await; diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 5c02280ce71fea..9cab007a90680b 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -83,7 +83,7 @@ fn test_rpc_send_tx() { .parse() .unwrap(); - info!("blockhash: {:?}", blockhash); + info!("blockhash: {blockhash:?}"); let tx = system_transaction::transfer( &alice, &bob_pubkey, @@ -442,7 +442,7 @@ fn test_rpc_subscriptions() { sleep(Duration::from_millis(100)); } if mint_balance != expected_mint_balance { - error!("mint-check timeout. mint_balance {:?}", mint_balance); + error!("mint-check timeout. 
mint_balance {mint_balance:?}"); } // Wait for all signature subscriptions diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 568ddc31b49d2a..4430f6f1782698 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -165,10 +165,7 @@ impl OptimisticallyConfirmedBankTracker { match sender.send(notification.clone()) { Ok(_) => {} Err(err) => { - info!( - "Failed to send notification {:?}, error: {:?}", - notification, err - ); + info!("Failed to send notification {notification:?}, error: {err:?}"); } } } @@ -250,10 +247,7 @@ impl OptimisticallyConfirmedBankTracker { let root = roots[i]; if root > *newest_root_slot { let parent = roots[i - 1]; - debug!( - "Doing SlotNotification::Root for root {}, parent: {}", - root, parent - ); + debug!("Doing SlotNotification::Root for root {root}, parent: {parent}"); Self::notify_slot_status( slot_notification_subscribers, SlotNotification::Root((root, parent)), @@ -276,7 +270,7 @@ impl OptimisticallyConfirmedBankTracker { slot_notification_subscribers: &Option>>>, prioritization_fee_cache: &PrioritizationFeeCache, ) { - debug!("received bank notification: {:?}", notification); + debug!("received bank notification: {notification:?}"); match notification { BankNotification::OptimisticallyConfirmed(slot) => { let bank = bank_forks.read().unwrap().get(slot); @@ -344,8 +338,8 @@ impl OptimisticallyConfirmedBankTracker { if pending_optimistically_confirmed_banks.remove(&bank.slot()) { debug!( - "Calling notify_gossip_subscribers to send deferred notification {:?}", - frozen_slot + "Calling notify_gossip_subscribers to send deferred notification \ + {frozen_slot:?}" ); Self::notify_or_defer_confirmed_banks( diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 054d2cc443bca6..f04e5a2f1d2c64 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -344,7 +344,7 @@ impl JsonRpcRequestProcessor { #[allow(deprecated)] fn 
bank(&self, commitment: Option) -> Arc { - debug!("RPC commitment_config: {:?}", commitment); + debug!("RPC commitment_config: {commitment:?}"); let commitment = commitment.unwrap_or_default(); if commitment.is_confirmed() { @@ -366,10 +366,10 @@ impl JsonRpcRequestProcessor { match commitment.commitment { CommitmentLevel::Processed => { - debug!("RPC using the heaviest slot: {:?}", slot); + debug!("RPC using the heaviest slot: {slot:?}"); } CommitmentLevel::Finalized => { - debug!("RPC using block: {:?}", slot); + debug!("RPC using block: {slot:?}"); } CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated }; @@ -1016,7 +1016,7 @@ impl JsonRpcRequestProcessor { None => Err(Error::invalid_request()), }, Err(err) => { - warn!("slot_meta_iterator failed: {:?}", err); + warn!("slot_meta_iterator failed: {err:?}"); Err(Error::invalid_request()) } } @@ -1897,7 +1897,7 @@ impl JsonRpcRequestProcessor { bigtable_before = None; } Err(err) => { - warn!("Failed to query Bigtable: {:?}", err); + warn!("Failed to query Bigtable: {err:?}"); return Err(RpcCustomError::LongTermStorageUnreachable.into()); } Ok(_) => {} @@ -1929,7 +1929,7 @@ impl JsonRpcRequestProcessor { } Err(StorageError::SignatureNotFound) => {} Err(err) => { - warn!("Failed to query Bigtable: {:?}", err); + warn!("Failed to query Bigtable: {err:?}"); return Err(RpcCustomError::LongTermStorageUnreachable.into()); } } @@ -2538,7 +2538,10 @@ fn encode_account( .unwrap_or(account.data().len()) > MAX_BASE58_BYTES { - let message = format!("Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please use Base64 encoding."); + let message = format!( + "Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please \ + use Base64 encoding." 
+ ); Err(error::Error { code: error::ErrorCode::InvalidRequest, message, @@ -2591,8 +2594,7 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> { if let Some(incorrect_owner_len) = incorrect_owner_len { info!( - "Incorrect num bytes ({:?}) provided for spl_token_owner_filter", - incorrect_owner_len + "Incorrect num bytes ({incorrect_owner_len:?}) provided for spl_token_owner_filter" ); } owner_key @@ -2642,8 +2644,7 @@ fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> { if let Some(incorrect_mint_len) = incorrect_mint_len { info!( - "Incorrect num bytes ({:?}) provided for spl_token_mint_filter", - incorrect_mint_len + "Incorrect num bytes ({incorrect_mint_len:?}) provided for spl_token_mint_filter" ); } mint @@ -2703,7 +2704,7 @@ fn _send_transaction( ); meta.transaction_sender .send(transaction_info) - .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err)); + .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {err}")); Ok(signature.to_string()) } @@ -2792,7 +2793,7 @@ pub mod rpc_minimal { pubkey_str: String, config: Option, ) -> Result> { - debug!("get_balance rpc request received: {:?}", pubkey_str); + debug!("get_balance rpc request received: {pubkey_str:?}"); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_balance(&pubkey, config.unwrap_or_default()) } @@ -2927,7 +2928,7 @@ pub mod rpc_minimal { let slot = slot.unwrap_or_else(|| bank.slot()); let epoch = bank.epoch_schedule().get_epoch(slot); - debug!("get_leader_schedule rpc request received: {:?}", slot); + debug!("get_leader_schedule rpc request received: {slot:?}"); Ok(meta .leader_schedule_cache @@ -3008,10 +3009,7 @@ pub mod rpc_bank { data_len: usize, commitment: Option, ) -> Result { - debug!( - "get_minimum_balance_for_rent_exemption rpc request received: {:?}", - data_len - ); + debug!("get_minimum_balance_for_rent_exemption rpc request received: {data_len:?}"); if data_len as u64 > 
solana_system_interface::MAX_PERMITTED_DATA_LENGTH { return Err(Error::invalid_request()); } @@ -3052,10 +3050,7 @@ pub mod rpc_bank { start_slot: Slot, limit: u64, ) -> Result> { - debug!( - "get_slot_leaders rpc request received (start: {} limit: {})", - start_slot, limit - ); + debug!("get_slot_leaders rpc request received (start: {start_slot} limit: {limit})"); let limit = limit as usize; if limit > MAX_GET_SLOT_LEADERS { @@ -3223,7 +3218,7 @@ pub mod rpc_accounts { pubkey_str: String, config: Option, ) -> BoxFuture>>> { - debug!("get_account_info rpc request received: {:?}", pubkey_str); + debug!("get_account_info rpc request received: {pubkey_str:?}"); async move { let pubkey = verify_pubkey(&pubkey_str)?; meta.get_account_info(pubkey, config).await @@ -3275,10 +3270,7 @@ pub mod rpc_accounts { pubkey_str: String, commitment: Option, ) -> Result> { - debug!( - "get_token_account_balance rpc request received: {:?}", - pubkey_str - ); + debug!("get_token_account_balance rpc request received: {pubkey_str:?}"); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_token_account_balance(&pubkey, commitment) } @@ -3289,7 +3281,7 @@ pub mod rpc_accounts { mint_str: String, commitment: Option, ) -> Result> { - debug!("get_token_supply rpc request received: {:?}", mint_str); + debug!("get_token_supply rpc request received: {mint_str:?}"); let mint = verify_pubkey(&mint_str)?; meta.get_token_supply(&mint, commitment) } @@ -3368,10 +3360,7 @@ pub mod rpc_accounts_scan { program_id_str: String, config: Option, ) -> BoxFuture>>> { - debug!( - "get_program_accounts rpc request received: {:?}", - program_id_str - ); + debug!("get_program_accounts rpc request received: {program_id_str:?}"); async move { let program_id = verify_pubkey(&program_id_str)?; let (config, filters, with_context, sort_results) = if let Some(config) = config { @@ -3415,10 +3404,7 @@ pub mod rpc_accounts_scan { mint_str: String, commitment: Option, ) -> BoxFuture>>> { - debug!( - 
"get_token_largest_accounts rpc request received: {:?}", - mint_str - ); + debug!("get_token_largest_accounts rpc request received: {mint_str:?}"); async move { let mint = verify_pubkey(&mint_str)?; meta.get_token_largest_accounts(mint, commitment).await @@ -3433,10 +3419,7 @@ pub mod rpc_accounts_scan { token_account_filter: RpcTokenAccountsFilter, config: Option, ) -> BoxFuture>>> { - debug!( - "get_token_accounts_by_owner rpc request received: {:?}", - owner_str - ); + debug!("get_token_accounts_by_owner rpc request received: {owner_str:?}"); async move { let owner = verify_pubkey(&owner_str)?; let token_account_filter = verify_token_account_filter(token_account_filter)?; @@ -3453,10 +3436,7 @@ pub mod rpc_accounts_scan { token_account_filter: RpcTokenAccountsFilter, config: Option, ) -> BoxFuture>>> { - debug!( - "get_token_accounts_by_delegate rpc request received: {:?}", - delegate_str - ); + debug!("get_token_accounts_by_delegate rpc request received: {delegate_str:?}"); async move { let delegate = verify_pubkey(&delegate_str)?; let token_account_filter = verify_token_account_filter(token_account_filter)?; @@ -3653,7 +3633,7 @@ pub mod rpc_full { .blockstore .get_recent_perf_samples(limit) .map_err(|err| { - warn!("get_recent_performance_samples failed: {:?}", err); + warn!("get_recent_performance_samples failed: {err:?}"); Error::invalid_request() })? 
.into_iter() @@ -3794,13 +3774,13 @@ pub mod rpc_full { let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash).map_err( |err| { - info!("request_airdrop_transaction failed: {:?}", err); + info!("request_airdrop_transaction failed: {err:?}"); Error::internal_error() }, )?; let wire_transaction = serialize(&transaction).map_err(|err| { - info!("request_airdrop: serialize error: {:?}", err); + info!("request_airdrop: serialize error: {err:?}"); Error::internal_error() })?; @@ -4096,7 +4076,7 @@ pub mod rpc_full { slot: Slot, config: Option>, ) -> BoxFuture>> { - debug!("get_block rpc request received: {:?}", slot); + debug!("get_block rpc request received: {slot:?}"); Box::pin(async move { meta.get_block(slot, config).await }) } @@ -4109,10 +4089,7 @@ pub mod rpc_full { ) -> BoxFuture>> { let (end_slot, maybe_config) = wrapper.map(|wrapper| wrapper.unzip()).unwrap_or_default(); - debug!( - "get_blocks rpc request received: {}-{:?}", - start_slot, end_slot - ); + debug!("get_blocks rpc request received: {start_slot}-{end_slot:?}"); Box::pin(async move { meta.get_blocks(start_slot, end_slot, config.or(maybe_config)) .await @@ -4126,10 +4103,7 @@ pub mod rpc_full { limit: usize, config: Option, ) -> BoxFuture>> { - debug!( - "get_blocks_with_limit rpc request received: {}-{}", - start_slot, limit, - ); + debug!("get_blocks_with_limit rpc request received: {start_slot}-{limit}",); Box::pin(async move { meta.get_blocks_with_limit(start_slot, limit, config).await }) } @@ -4147,7 +4121,7 @@ pub mod rpc_full { signature_str: String, config: Option>, ) -> BoxFuture>> { - debug!("get_transaction rpc request received: {:?}", signature_str); + debug!("get_transaction rpc request received: {signature_str:?}"); let signature = verify_signature(&signature_str); if let Err(err) = signature { return Box::pin(future::err(err)); @@ -4648,7 +4622,8 @@ pub mod tests { if let Some(account) = bank.get_account(key) { assert!( *account.owner() != 
bpf_loader_upgradeable::id(), - "LoaderV3 is not supported; to add it, parse the program account and add its programdata size.", + "LoaderV3 is not supported; to add it, parse the program account and add its \ + programdata size.", ); loaded_accounts_data_size += (account.data().len() + TRANSACTION_ACCOUNT_BASE_SIZE) as u32; @@ -6467,11 +6442,10 @@ pub mod tests { "id":1, "method":"simulateTransaction", "params":[ - "{}", + "{tx_serialized_encoded}", {{ "encoding": "base64" }} ] }}"#, - tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ @@ -6511,11 +6485,10 @@ pub mod tests { "id":1, "method":"simulateTransaction", "params":[ - "{}", + "{tx_serialized_encoded}", {{ "innerInstructions": false, "encoding": "base64" }} ] }}"#, - tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ @@ -6555,11 +6528,10 @@ pub mod tests { "id":1, "method":"simulateTransaction", "params":[ - "{}", + "{tx_serialized_encoded}", {{ "innerInstructions": true, "encoding": "base64" }} ] }}"#, - tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ @@ -7235,9 +7207,9 @@ pub mod tests { let expected = ( JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION, String::from( - "Transaction version (0) is not supported by the requesting client. \ - Please try the request again with the following configuration parameter: \ - \"maxSupportedTransactionVersion\": 0", + "Transaction version (0) is not supported by the requesting client. 
Please try \ + the request again with the following configuration parameter: \ + \"maxSupportedTransactionVersion\": 0", ), ); assert_eq!(response, expected); @@ -7264,7 +7236,8 @@ pub mod tests { { assert_eq!( version, None, - "requests which don't set max_supported_transaction_version shouldn't receive a version" + "requests which don't set max_supported_transaction_version shouldn't receive a \ + version" ); if let EncodedTransaction::Json(transaction) = transaction { if transaction.signatures[0] == confirmed_block_signatures[0].to_string() { @@ -7308,7 +7281,8 @@ pub mod tests { { assert_eq!( version, None, - "requests which don't set max_supported_transaction_version shouldn't receive a version" + "requests which don't set max_supported_transaction_version shouldn't receive a \ + version" ); if let EncodedTransaction::LegacyBinary(transaction) = transaction { let decoded_transaction: Transaction = @@ -8938,9 +8912,10 @@ pub mod tests { decode_and_deserialize::(tx58, TransactionBinaryEncoding::Base58) .unwrap_err(), Error::invalid_params(format!( - "base58 encoded solana_transaction::Transaction too large: {tx58_len} bytes (max: encoded/raw {MAX_BASE58_SIZE}/{PACKET_DATA_SIZE})", - ) - )); + "base58 encoded solana_transaction::Transaction too large: {tx58_len} bytes (max: \ + encoded/raw {MAX_BASE58_SIZE}/{PACKET_DATA_SIZE})", + )) + ); let tx64 = BASE64_STANDARD.encode(&tx_ser); let tx64_len = tx64.len(); @@ -8948,9 +8923,10 @@ pub mod tests { decode_and_deserialize::(tx64, TransactionBinaryEncoding::Base64) .unwrap_err(), Error::invalid_params(format!( - "base64 encoded solana_transaction::Transaction too large: {tx64_len} bytes (max: encoded/raw {MAX_BASE64_SIZE}/{PACKET_DATA_SIZE})", - ) - )); + "base64 encoded solana_transaction::Transaction too large: {tx64_len} bytes (max: \ + encoded/raw {MAX_BASE64_SIZE}/{PACKET_DATA_SIZE})", + )) + ); let too_big = PACKET_DATA_SIZE + 1; let tx_ser = vec![0x00u8; too_big]; @@ -8959,7 +8935,8 @@ pub mod tests { 
decode_and_deserialize::(tx58, TransactionBinaryEncoding::Base58) .unwrap_err(), Error::invalid_params(format!( - "decoded solana_transaction::Transaction too large: {too_big} bytes (max: {PACKET_DATA_SIZE} bytes)" + "decoded solana_transaction::Transaction too large: {too_big} bytes (max: \ + {PACKET_DATA_SIZE} bytes)" )) ); @@ -8968,7 +8945,8 @@ pub mod tests { decode_and_deserialize::(tx64, TransactionBinaryEncoding::Base64) .unwrap_err(), Error::invalid_params(format!( - "decoded solana_transaction::Transaction too large: {too_big} bytes (max: {PACKET_DATA_SIZE} bytes)" + "decoded solana_transaction::Transaction too large: {too_big} bytes (max: \ + {PACKET_DATA_SIZE} bytes)" )) ); @@ -8978,8 +8956,8 @@ pub mod tests { decode_and_deserialize::(tx64.clone(), TransactionBinaryEncoding::Base64) .unwrap_err(), Error::invalid_params( - "failed to deserialize solana_transaction::Transaction: invalid value: \ - continue signal on byte-three, expected a terminal signal on or before byte-three" + "failed to deserialize solana_transaction::Transaction: invalid value: continue \ + signal on byte-three, expected a terminal signal on or before byte-three" .to_string() ) ); @@ -8996,8 +8974,8 @@ pub mod tests { decode_and_deserialize::(tx58.clone(), TransactionBinaryEncoding::Base58) .unwrap_err(), Error::invalid_params( - "failed to deserialize solana_transaction::Transaction: invalid value: \ - continue signal on byte-three, expected a terminal signal on or before byte-three" + "failed to deserialize solana_transaction::Transaction: invalid value: continue \ + signal on byte-three, expected a terminal signal on or before byte-three" .to_string() ) ); diff --git a/rpc/src/rpc_health.rs b/rpc/src/rpc_health.rs index 38ec51a6173597..56684d8edbd802 100644 --- a/rpc/src/rpc_health.rs +++ b/rpc/src/rpc_health.rs @@ -104,9 +104,9 @@ impl RpcHealth { let num_slots = cluster_latest_optimistically_confirmed_slot .saturating_sub(my_latest_optimistically_confirmed_slot); warn!( - 
"health check: behind by {num_slots} \ - slots: me={my_latest_optimistically_confirmed_slot}, \ - latest cluster={cluster_latest_optimistically_confirmed_slot}", + "health check: behind by {num_slots} slots: \ + me={my_latest_optimistically_confirmed_slot}, latest \ + cluster={cluster_latest_optimistically_confirmed_slot}", ); RpcHealthStatus::Behind { num_slots } } diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index ffac5b1b6503da..5532558a0683fc 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -87,7 +87,7 @@ impl PubSubService { pubsub_addr: SocketAddr, ) -> (Trigger, Self) { let subscription_control = subscriptions.control().clone(); - info!("rpc_pubsub bound to {:?}", pubsub_addr); + info!("rpc_pubsub bound to {pubsub_addr:?}"); let (trigger, tripwire) = Tripwire::new(); let thread_hdl = Builder::new() @@ -454,7 +454,7 @@ async fn listen( select! { result = listener.accept() => match result { Ok((socket, addr)) => { - debug!("new client ({:?})", addr); + debug!("new client ({addr:?})"); let subscription_control = subscription_control.clone(); let config = config.clone(); let tripwire = tripwire.clone(); @@ -464,13 +464,13 @@ async fn listen( socket, subscription_control, config, tripwire ); match handle.await { - Ok(()) => debug!("connection closed ({:?})", addr), - Err(err) => warn!("connection handler error ({:?}): {}", addr, err), + Ok(()) => debug!("connection closed ({addr:?})"), + Err(err) => warn!("connection handler error ({addr:?}): {err}"), } drop(counter_token); // Force moving token into the task. 
}); } - Err(e) => error!("couldn't accept connection: {:?}", e), + Err(e) => error!("couldn't accept connection: {e:?}"), }, _ = &mut tripwire => return Ok(()), } diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 36dce48b3b2f29..62e4742ee7cfca 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -272,7 +272,7 @@ impl RpcRequestMiddleware { .map(|m| m.len()) .unwrap_or(0) .to_string(); - info!("get {} -> {:?} ({} bytes)", path, filename, file_length); + info!("get {path} -> {filename:?} ({file_length} bytes)"); if cfg!(not(test)) { assert!( @@ -335,7 +335,7 @@ impl RpcRequestMiddleware { RpcHealthStatus::Behind { .. } => "behind", RpcHealthStatus::Unknown => "unknown", }; - info!("health check: {}", response); + info!("health check: {response}"); response } } @@ -716,8 +716,8 @@ impl JsonRpcService { prioritization_fee_cache: Arc, runtime: Arc, ) -> Result { - info!("rpc bound to {:?}", rpc_addr); - info!("rpc configuration: {:?}", config); + info!("rpc bound to {rpc_addr:?}"); + info!("rpc configuration: {config:?}"); let rpc_niceness_adj = config.rpc_niceness_adj; let health = Arc::new(RpcHealth::new( @@ -778,7 +778,7 @@ impl JsonRpcService { ) }) .unwrap_or_else(|err| { - error!("Failed to initialize BigTable ledger storage: {:?}", err); + error!("Failed to initialize BigTable ledger storage: {err:?}"); (None, None) }) } else { @@ -867,9 +867,8 @@ impl JsonRpcService { if let Err(e) = server { warn!( - "JSON RPC service unavailable error: {:?}. \n\ - Also, check that port {} is not already in use by another application", - e, + "JSON RPC service unavailable error: {e:?}. 
Also, check that port {} is \ + not already in use by another application", rpc_addr.port() ); close_handle_sender.send(Err(e.to_string())).unwrap(); diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 545651210af490..89b9b5b5817470 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -525,7 +525,7 @@ pub struct RpcSubscriptions { impl Drop for RpcSubscriptions { fn drop(&mut self) { self.shutdown().unwrap_or_else(|err| { - warn!("RPC Notification - shutdown error: {:?}", err); + warn!("RPC Notification - shutdown error: {err:?}"); }); } } @@ -747,10 +747,7 @@ impl RpcSubscriptions { match notification_sender.send(notification_entry.into()) { Ok(()) => (), Err(SendError(notification)) => { - warn!( - "Dropped RPC Notification - receiver disconnected : {:?}", - notification - ); + warn!("Dropped RPC Notification - receiver disconnected : {notification:?}"); } } } @@ -797,7 +794,7 @@ impl RpcSubscriptions { .node_progress_watchers() .get(&SubscriptionParams::Slot) { - debug!("slot notify: {:?}", slot_info); + debug!("slot notify: {slot_info:?}"); inc_new_counter_info!("rpc-subscription-notify-slot", 1); notifier.notify(slot_info, sub, false); } @@ -826,7 +823,7 @@ impl RpcSubscriptions { timestamp: vote_info.timestamp(), signature: signature.to_string(), }; - debug!("vote notify: {:?}", vote_info); + debug!("vote notify: {vote_info:?}"); inc_new_counter_info!("rpc-subscription-notify-vote", 1); notifier.notify(&rpc_vote, sub, false); } @@ -836,7 +833,7 @@ impl RpcSubscriptions { .node_progress_watchers() .get(&SubscriptionParams::Root) { - debug!("root notify: {:?}", root); + debug!("root notify: {root:?}"); inc_new_counter_info!("rpc-subscription-notify-root", 1); notifier.notify(root, sub, false); } @@ -1015,7 +1012,7 @@ impl RpcSubscriptions { let block_update_result = blockstore .get_complete_block(s, false) .map_err(|e| { - error!("get_complete_block error: {}", e); + error!("get_complete_block error: {e}"); 
RpcBlockUpdateError::BlockStoreError }) .and_then(|block| filter_block_result_txs(block, s, params)); @@ -1132,7 +1129,8 @@ impl RpcSubscriptions { let total_ms = total_time.as_ms(); if total_notified > 0 || total_ms > 10 { debug!( - "notified({}): accounts: {} / {} logs: {} / {} programs: {} / {} signatures: {} / {}", + "notified({}): accounts: {} / {} logs: {} / {} programs: {} / {} signatures: {} / \ + {}", source, num_accounts_found.load(Ordering::Relaxed), num_accounts_notified.load(Ordering::Relaxed), diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index dd2a3a54eb64cb..6a3fe363d4ea14 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -396,7 +396,7 @@ impl SendTransactionService { ) .is_some() { - info!("Transaction is rooted: {}", signature); + info!("Transaction is rooted: {signature}"); result.rooted += 1; stats.rooted_transactions.fetch_add(1, Ordering::Relaxed); return false; @@ -416,14 +416,14 @@ impl SendTransactionService { let verify_nonce_account = nonce_account::verify_nonce_account(&nonce_account, &durable_nonce); if verify_nonce_account.is_none() && signature_status.is_none() && expired { - info!("Dropping expired durable-nonce transaction: {}", signature); + info!("Dropping expired durable-nonce transaction: {signature}"); result.expired += 1; stats.expired_transactions.fetch_add(1, Ordering::Relaxed); return false; } } if transaction_info.last_valid_block_height < root_bank.block_height() { - info!("Dropping expired transaction: {}", signature); + info!("Dropping expired transaction: {signature}"); result.expired += 1; stats.expired_transactions.fetch_add(1, Ordering::Relaxed); return false; @@ -434,7 +434,7 @@ impl SendTransactionService { if let Some(max_retries) = max_retries { if transaction_info.retries >= max_retries { - info!("Dropping transaction due to max retries: {}", 
signature); + info!("Dropping transaction due to max retries: {signature}"); result.max_retries_elapsed += 1; stats .transactions_exceeding_max_retries @@ -456,7 +456,7 @@ impl SendTransactionService { // Transaction sent before is unknown to the working bank, it might have been // dropped or landed in another fork. Re-send it. - info!("Retrying transaction: {}", signature); + info!("Retrying transaction: {signature}"); result.retried += 1; transaction_info.retries += 1; } @@ -483,7 +483,7 @@ impl SendTransactionService { } Some((_slot, status)) => { if !status { - info!("Dropping failed transaction: {}", signature); + info!("Dropping failed transaction: {signature}"); result.failed += 1; stats.failed_transactions.fetch_add(1, Ordering::Relaxed); false diff --git a/thin-client/src/thin_client.rs b/thin-client/src/thin_client.rs index 0e0f615f029eb3..af9f8a7c8d979e 100644 --- a/thin-client/src/thin_client.rs +++ b/thin-client/src/thin_client.rs @@ -88,12 +88,7 @@ impl ClientOptimizer { if index == (self.num_clients - 1) || time_ms == u64::MAX { let times = self.times.read().unwrap(); let (min_time, min_index) = min_index(×); - trace!( - "done experimenting min: {} time: {} times: {:?}", - min_index, - min_time, - times - ); + trace!("done experimenting min: {min_index} time: {min_time} times: {times:?}"); // Only 1 thread should grab the num_clients-1 index, so this should be ok. self.cur_index.store(min_index, Ordering::Relaxed); diff --git a/tpu-client-next/src/workers_cache.rs b/tpu-client-next/src/workers_cache.rs index 7d005e832f346b..dd4c9f03b34396 100644 --- a/tpu-client-next/src/workers_cache.rs +++ b/tpu-client-next/src/workers_cache.rs @@ -187,8 +187,8 @@ impl WorkersCache { } let current_worker = workers.get(peer).expect( - "Failed to fetch worker for peer {peer}.\n\ - Peer existence must be checked before this call using `contains` method.", + "Failed to fetch worker for peer {peer}. 
Peer existence must be checked before this \ + call using `contains` method.", ); let send_res = current_worker.try_send_transactions(txs_batch); @@ -214,7 +214,8 @@ impl WorkersCache { /// is removed from the cache. #[allow( dead_code, - reason = "This method will be used in the upcoming changes to implement optional backpressure on the sender." + reason = "This method will be used in the upcoming changes to implement optional \ + backpressure on the sender." )] pub async fn send_transactions_to_address( &mut self, @@ -227,8 +228,8 @@ impl WorkersCache { let body = async move { let current_worker = workers.get(peer).expect( - "Failed to fetch worker for peer {peer}.\n\ - Peer existence must be checked before this call using `contains` method.", + "Failed to fetch worker for peer {peer}. Peer existence must be checked before \ + this call using `contains` method.", ); let send_res = current_worker.send_transactions(txs_batch).await; if let Err(WorkersCacheError::ReceiverDropped) = send_res { @@ -280,7 +281,7 @@ impl WorkersCache { } while let Some(res) = tasks.join_next().await { if let Err(err) = res { - debug!("A shutdown task failed: {}", err); + debug!("A shutdown task failed: {err}"); } } } diff --git a/tpu-client-next/tests/connection_workers_scheduler_test.rs b/tpu-client-next/tests/connection_workers_scheduler_test.rs index 4b2866ba604d1a..2cc808ab88ef85 100644 --- a/tpu-client-next/tests/connection_workers_scheduler_test.rs +++ b/tpu-client-next/tests/connection_workers_scheduler_test.rs @@ -222,10 +222,8 @@ async fn test_basic_transactions_sending() { let elapsed = now.elapsed(); assert!( elapsed < TEST_MAX_TIME, - "Failed to send {} transaction in {:?}. Only sent {}", - expected_num_txs, - elapsed, - actual_num_packets, + "Failed to send {expected_num_txs} transaction in {elapsed:?}. 
Only sent \ + {actual_num_packets}", ); } @@ -314,8 +312,8 @@ async fn test_connection_denied_until_allowed() { let actual_num_packets = count_received_packets_for(receiver, tx_size, TEST_MAX_TIME).await; assert!( actual_num_packets < expected_num_txs, - "Expected to receive {expected_num_txs} packets in {TEST_MAX_TIME:?}\n\ - Got packets: {actual_num_packets}" + "Expected to receive {expected_num_txs} packets in {TEST_MAX_TIME:?} Got packets: \ + {actual_num_packets}" ); // Wait for the exchange to finish. @@ -641,9 +639,9 @@ async fn test_rate_limiting_establish_connection() { count_received_packets_for(receiver, tx_size, Duration::from_secs(70)).await; assert!( actual_num_packets > 0, - "As we wait longer than 1 minute, at least one transaction should be delivered. \ - After 1 minute the server is expected to accept our connection.\n\ - Actual packets delivered: {actual_num_packets}" + "As we wait longer than 1 minute, at least one transaction should be delivered. After 1 \ + minute the server is expected to accept our connection. Actual packets delivered: \ + {actual_num_packets}" ); // Stop the sender. @@ -655,15 +653,13 @@ async fn test_rate_limiting_establish_connection() { assert!( stats.connection_error_timed_out > 0, "As the quinn timeout is below 1 minute, a few connections will fail to connect during \ - the 1 minute delay.\n\ - Actual connection_error_timed_out: {}", + the 1 minute delay. 
Actual connection_error_timed_out: {}", stats.connection_error_timed_out ); assert!( stats.successfully_sent > 0, "As we run the test for longer than 1 minute, we expect a connection to be established, \ - and a number of transactions to be delivered.\n\ - Actual successfully_sent: {}", + and a number of transactions to be delivered.\nActual successfully_sent: {}", stats.successfully_sent ); diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index 30f1c9fa525070..1530c463990c78 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -162,7 +162,7 @@ impl LeaderTpuCache { leader_sockets.push(*tpu_socket); } else { // The leader is probably delinquent - trace!("TPU not available for leader {}", leader); + trace!("TPU not available for leader {leader}"); } } else { // Overran the local leader schedule cache @@ -227,7 +227,7 @@ impl LeaderTpuCache { cluster_refreshed = true; } Err(err) => { - warn!("Failed to fetch cluster tpu sockets: {}", err); + warn!("Failed to fetch cluster tpu sockets: {err}"); has_error = true; } } @@ -247,8 +247,8 @@ impl LeaderTpuCache { } Err(err) => { warn!( - "Failed to fetch slot leaders (current estimated slot: {}): {}", - estimated_current_slot, err + "Failed to fetch slot leaders (current estimated slot: \ + {estimated_current_slot}): {err}" ); has_error = true; } @@ -778,8 +778,8 @@ impl LeaderTpuService { .await .map_err(|_| { TpuSenderError::Custom(format!( - "Failed to get slot leaders connecting to: {}, timeout: {:?}. Invalid slot range", - websocket_url, tpu_leader_service_creation_timeout + "Failed to get slot leaders connecting to: {websocket_url}, timeout: \ + {tpu_leader_service_creation_timeout:?}. 
Invalid slot range" )) })??; @@ -800,8 +800,8 @@ impl LeaderTpuService { .await .map_err(|_| { TpuSenderError::Custom(format!( - "Failed find any cluster node info for upcoming leaders, timeout: {:?}.", - tpu_leader_service_creation_timeout + "Failed find any cluster node info for upcoming leaders, timeout: \ + {tpu_leader_service_creation_timeout:?}." )) })??; let leader_tpu_cache = Arc::new(RwLock::new(LeaderTpuCache::new( diff --git a/transaction-status-client-types/src/lib.rs b/transaction-status-client-types/src/lib.rs index 2daca8be5289df..b003bcbfef2e23 100644 --- a/transaction-status-client-types/src/lib.rs +++ b/transaction-status-client-types/src/lib.rs @@ -861,6 +861,7 @@ mod test { assert_eq!(reserialized_value, expected_json_output_value); } + #[rustfmt::skip] let json_input = "{\ \"err\":null,\ \"status\":{\"Ok\":null},\ @@ -868,6 +869,7 @@ mod test { \"preBalances\":[1,2,3],\ \"postBalances\":[4,5,6]\ }"; + #[rustfmt::skip] let expected_json_output = "{\ \"err\":null,\ \"status\":{\"Ok\":null},\ @@ -882,6 +884,7 @@ mod test { }"; test_serde::(json_input, expected_json_output); + #[rustfmt::skip] let json_input = "{\ \"accountIndex\":5,\ \"mint\":\"DXM2yVSouSg1twmQgHLKoSReqXhtUroehWxrTgPmmfWi\",\ @@ -892,6 +895,7 @@ mod test { \"uiAmountString\": \"1\"\ }\ }"; + #[rustfmt::skip] let expected_json_output = "{\ \"accountIndex\":5,\ \"mint\":\"DXM2yVSouSg1twmQgHLKoSReqXhtUroehWxrTgPmmfWi\",\ diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 9776a8a9a915a3..44dbf85fd43cdb 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -903,23 +903,24 @@ mod test { compute_units_consumed: None, cost_units: None, }; + #[rustfmt::skip] let expected_json_output_value: serde_json::Value = serde_json::from_str( "{\ - \"err\":null,\ - \"status\":{\"Ok\":null},\ - \"fee\":1234,\ - \"preBalances\":[1,2,3],\ - \"postBalances\":[4,5,6],\ - \"innerInstructions\":null,\ - \"logMessages\":null,\ - 
\"preTokenBalances\":null,\ - \"postTokenBalances\":null,\ - \"rewards\":null,\ - \"loadedAddresses\":{\ - \"readonly\": [],\ - \"writable\": []\ - }\ - }", + \"err\":null,\ + \"status\":{\"Ok\":null},\ + \"fee\":1234,\ + \"preBalances\":[1,2,3],\ + \"postBalances\":[4,5,6],\ + \"innerInstructions\":null,\ + \"logMessages\":null,\ + \"preTokenBalances\":null,\ + \"postTokenBalances\":null,\ + \"rewards\":null,\ + \"loadedAddresses\":{\ + \"readonly\": [],\ + \"writable\": []\ + }\ + }", ) .unwrap(); let ui_meta_from: UiTransactionStatusMeta = meta.clone().into(); @@ -928,19 +929,20 @@ mod test { expected_json_output_value ); + #[rustfmt::skip] let expected_json_output_value: serde_json::Value = serde_json::from_str( "{\ - \"err\":null,\ - \"status\":{\"Ok\":null},\ - \"fee\":1234,\ - \"preBalances\":[1,2,3],\ - \"postBalances\":[4,5,6],\ - \"innerInstructions\":null,\ - \"logMessages\":null,\ - \"preTokenBalances\":null,\ - \"postTokenBalances\":null,\ - \"rewards\":null\ - }", + \"err\":null,\ + \"status\":{\"Ok\":null},\ + \"fee\":1234,\ + \"preBalances\":[1,2,3],\ + \"postBalances\":[4,5,6],\ + \"innerInstructions\":null,\ + \"logMessages\":null,\ + \"preTokenBalances\":null,\ + \"postTokenBalances\":null,\ + \"rewards\":null\ + }", ) .unwrap(); let ui_meta_parse_with_rewards = parse_ui_transaction_status_meta(meta.clone(), &[], true); From 8cbd45570caea1d0fc1ff6097336b2d7fcd57528 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Tue, 22 Jul 2025 16:45:35 -0300 Subject: [PATCH 38/68] Refactor `fn prepare_instruction` (#7066) * Refactor prepare instruction * Add a debug assert * Use [u8; 256] * Add another debug assert --- program-runtime/src/invoke_context.rs | 84 ++++++++++++++++----------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 628db069a2d890..c65d275b6746dc 100644 --- a/program-runtime/src/invoke_context.rs +++ 
b/program-runtime/src/invoke_context.rs @@ -326,13 +326,15 @@ impl<'a> InvokeContext<'a> { instruction: &Instruction, signers: &[Pubkey], ) -> Result<(Vec, Vec), InstructionError> { - // Finds the index of each account in the instruction by its pubkey. - // Then normalizes / unifies the privileges of duplicate accounts. - // Note: This is an O(n^2) algorithm, - // but performed on a very small slice and requires no heap allocations. + // We reference accounts by an u8 index, so we have a total of 256 accounts. + // This algorithm allocates the array on the stack for speed. + // On AArch64 in release mode, this function only consumes 640 bytes of stack. + let mut transaction_callee_map: [u8; 256] = [u8::MAX; 256]; + let mut instruction_accounts: Vec = + Vec::with_capacity(instruction.accounts.len()); let instruction_context = self.transaction_context.get_current_instruction_context()?; - let mut deduplicated_instruction_accounts: Vec = Vec::new(); - let mut duplicate_indicies = Vec::with_capacity(instruction.accounts.len()); + debug_assert!(instruction.accounts.len() <= u8::MAX as usize); + for (instruction_account_index, account_meta) in instruction.accounts.iter().enumerate() { let index_in_transaction = self .transaction_context @@ -345,21 +347,25 @@ impl<'a> InvokeContext<'a> { ); InstructionError::MissingAccount })?; - if let Some(duplicate_index) = - deduplicated_instruction_accounts - .iter() - .position(|instruction_account| { - instruction_account.index_in_transaction == index_in_transaction - }) - { - duplicate_indicies.push(duplicate_index); - let instruction_account = deduplicated_instruction_accounts - .get_mut(duplicate_index) - .ok_or(InstructionError::NotEnoughAccountKeys)?; - instruction_account - .set_is_signer(instruction_account.is_signer() || account_meta.is_signer); - instruction_account - .set_is_writable(instruction_account.is_writable() || account_meta.is_writable); + + debug_assert!((index_in_transaction as usize) < 
transaction_callee_map.len()); + let index_in_callee = transaction_callee_map + .get_mut(index_in_transaction as usize) + .unwrap(); + + if (*index_in_callee as usize) < instruction_accounts.len() { + let cloned_account = { + let instruction_account = instruction_accounts + .get_mut(*index_in_callee as usize) + .ok_or(InstructionError::NotEnoughAccountKeys)?; + instruction_account + .set_is_signer(instruction_account.is_signer() || account_meta.is_signer); + instruction_account.set_is_writable( + instruction_account.is_writable() || account_meta.is_writable, + ); + instruction_account.clone() + }; + instruction_accounts.push(cloned_account); } else { let index_in_caller = instruction_context .find_index_of_instruction_account( @@ -374,8 +380,8 @@ impl<'a> InvokeContext<'a> { ); InstructionError::MissingAccount })?; - duplicate_indicies.push(deduplicated_instruction_accounts.len()); - deduplicated_instruction_accounts.push(InstructionAccount::new( + *index_in_callee = instruction_accounts.len() as u8; + instruction_accounts.push(InstructionAccount::new( index_in_transaction, index_in_caller, instruction_account_index as IndexOfAccount, @@ -384,7 +390,28 @@ impl<'a> InvokeContext<'a> { )); } } - for instruction_account in deduplicated_instruction_accounts.iter() { + + for current_index in 0..instruction_accounts.len() { + let instruction_account = instruction_accounts.get(current_index).unwrap(); + + if current_index != instruction_account.index_in_callee as usize { + let (is_signer, is_writable) = { + let reference_account = instruction_accounts + .get(instruction_account.index_in_callee as usize) + .ok_or(InstructionError::NotEnoughAccountKeys)?; + ( + reference_account.is_signer(), + reference_account.is_writable(), + ) + }; + + let current_account = instruction_accounts.get_mut(current_index).unwrap(); + current_account.set_is_signer(current_account.is_signer() || is_signer); + current_account.set_is_writable(current_account.is_writable() || is_writable); + // 
This account is repeated, so there is no need to check for permissions + continue; + } + let borrowed_account = instruction_context.try_borrow_instruction_account( self.transaction_context, instruction_account.index_in_caller, @@ -413,15 +440,6 @@ impl<'a> InvokeContext<'a> { return Err(InstructionError::PrivilegeEscalation); } } - let instruction_accounts = duplicate_indicies - .into_iter() - .map(|duplicate_index| { - deduplicated_instruction_accounts - .get(duplicate_index) - .cloned() - .ok_or(InstructionError::NotEnoughAccountKeys) - }) - .collect::, InstructionError>>()?; // Find and validate executables / program accounts let callee_program_id = instruction.program_id; From fb98ae880cdc9098347d1f7203ddfb1f5dc92908 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 22 Jul 2025 15:53:09 -0400 Subject: [PATCH 39/68] BankFieldsToDeserialize must have AccountsLtHash (#7062) --- runtime/src/bank.rs | 11 +++-------- runtime/src/serde_snapshot.rs | 18 ++++++++++++------ runtime/src/serde_snapshot/tests.rs | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 1abbd1dfd5ed9b..79149d0f3c1e11 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -407,7 +407,7 @@ impl TransactionLogCollector { /// Since it is difficult to insert fields to serialize/deserialize against existing code already deployed, /// new fields can be optionally serialized and optionally deserialized. At some point, the serialization and /// deserialization will use a new mechanism or otherwise be in sync more clearly. 
-#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] #[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))] pub struct BankFieldsToDeserialize { pub(crate) blockhash_queue: BlockhashQueue, @@ -440,8 +440,7 @@ pub struct BankFieldsToDeserialize { pub(crate) is_delta: bool, pub(crate) accounts_data_len: u64, pub(crate) incremental_snapshot_persistence: Option, - // When removing the accounts lt hash featurization code, also remove this Option wrapper - pub(crate) accounts_lt_hash: Option, + pub(crate) accounts_lt_hash: AccountsLtHash, pub(crate) bank_hash_stats: BankHashStats, } @@ -1808,11 +1807,7 @@ impl Bank { fee_structure: FeeStructure::default(), #[cfg(feature = "dev-context-only-utils")] hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), - accounts_lt_hash: Mutex::new( - fields - .accounts_lt_hash - .expect("accounts_lt_hash must exist in snapshot"), - ), + accounts_lt_hash: Mutex::new(fields.accounts_lt_hash), cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), block_id: RwLock::new(None), diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 5dee143636642e..2ddc70897660cc 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -19,7 +19,7 @@ use { AtomicAccountsFileId, DuplicatesLtHash, IndexGenerationInfo, }, accounts_file::{AccountsFile, StorageAccess}, - accounts_hash::{AccountsDeltaHash, AccountsHash}, + accounts_hash::{AccountsDeltaHash, AccountsHash, AccountsLtHash}, accounts_update_notifier_interface::AccountsUpdateNotifier, ancestors::AncestorsForSerialization, blockhash_queue::BlockhashQueue, @@ -32,6 +32,7 @@ use { solana_hard_forks::HardForks, solana_hash::Hash, solana_inflation::Inflation, + solana_lattice_hash::lt_hash::LtHash, solana_measure::measure::Measure, solana_pubkey::Pubkey, solana_rent_collector::RentCollector, @@ -164,6 +165,9 @@ struct DeserializableVersionedBank { impl From for 
BankFieldsToDeserialize { fn from(dvb: DeserializableVersionedBank) -> Self { + // This serves as a canary for the LtHash. + // If it is not replaced during deserialization, it indicates a bug. + const LT_HASH_CANARY: LtHash = LtHash([0xCAFE; LtHash::NUM_ELEMENTS]); BankFieldsToDeserialize { blockhash_queue: dvb.blockhash_queue, ancestors: dvb.ancestors, @@ -195,8 +199,8 @@ impl From for BankFieldsToDeserialize { is_delta: dvb.is_delta, incremental_snapshot_persistence: None, versioned_epoch_stakes: HashMap::default(), // populated from ExtraFieldsToDeserialize - accounts_lt_hash: None, // populated from ExtraFieldsToDeserialize - bank_hash_stats: BankHashStats::default(), // populated from AccountsDbFields + accounts_lt_hash: AccountsLtHash(LT_HASH_CANARY), // populated from ExtraFieldsToDeserialize + bank_hash_stats: BankHashStats::default(), // populated from AccountsDbFields } } } @@ -472,7 +476,9 @@ where .clone_with_lamports_per_signature(lamports_per_signature); bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence; bank_fields.versioned_epoch_stakes = versioned_epoch_stakes; - bank_fields.accounts_lt_hash = accounts_lt_hash.map(Into::into); + bank_fields.accounts_lt_hash = accounts_lt_hash + .expect("snapshot must have accounts_lt_hash") + .into(); Ok((bank_fields, accounts_db_fields)) } @@ -875,7 +881,7 @@ where exit, capitalizations, bank_fields.incremental_snapshot_persistence.as_ref(), - bank_fields.accounts_lt_hash.is_some(), + true, )?; bank_fields.bank_hash_stats = reconstructed_accounts_db_info.bank_hash_stats; @@ -1039,7 +1045,7 @@ fn reconstruct_accountsdb_from_fields( exit: Arc, capitalizations: (u64, Option), incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, - has_accounts_lt_hash: bool, + has_accounts_lt_hash: bool, // always true, will be removed next ) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error> where E: SerializableStorage + std::marker::Sync, diff --git 
a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index a9620e5113a956..7ddd5c0a54ef16 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -77,7 +77,7 @@ mod serde_snapshot_tests { Arc::default(), (u64::default(), None), None, - false, + true, ) .map(|(accounts_db, _)| accounts_db) } From 04ef62c65494c8c9c9aac58409948ffedf153868 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 22 Jul 2025 16:36:05 -0400 Subject: [PATCH 40/68] Removes accounts hash calculation fns on Bank (#7083) --- runtime/src/bank.rs | 91 +++------------------------------------ runtime/src/bank/tests.rs | 25 +++-------- 2 files changed, 14 insertions(+), 102 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 79149d0f3c1e11..dfe1f066c59aef 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -75,18 +75,13 @@ use { account_locks::validate_account_locks, accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot}, accounts_db::{ - AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, - DuplicatesLtHash, PubkeyHashAccount, - }, - accounts_hash::{ - AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats, - IncrementalAccountsHash, + AccountStorageEntry, AccountsDb, AccountsDbConfig, DuplicatesLtHash, PubkeyHashAccount, }, + accounts_hash::{AccountsHash, AccountsLtHash, IncrementalAccountsHash}, accounts_index::{IndexKey, ScanConfig, ScanResult}, accounts_update_notifier_interface::AccountsUpdateNotifier, ancestors::{Ancestors, AncestorsForSerialization}, blockhash_queue::BlockhashQueue, - sorted_storages::SortedStorages, storable_accounts::StorableAccounts, }, solana_bpf_loader_program::syscalls::{ @@ -4818,81 +4813,6 @@ impl Bank { .load_account_into_read_cache(&self.ancestors, key); } - pub fn update_accounts_hash( - &self, - data_source: CalcAccountsHashDataSource, - is_startup: bool, - ) -> AccountsHash { - let (accounts_hash, total_lamports) = self - .rc - 
.accounts - .accounts_db - .update_accounts_hash_with_verify_from( - data_source, - false, // debug_verify - self.slot(), - &self.ancestors, - Some(self.capitalization()), - self.epoch_schedule(), - is_startup, - ); - if total_lamports != self.capitalization() { - datapoint_info!( - "capitalization_mismatch", - ("slot", self.slot(), i64), - ("calculated_lamports", total_lamports, i64), - ("capitalization", self.capitalization(), i64), - ); - - // cap mismatch detected. It has been logged to metrics above. - // Run both versions of the calculation to attempt to get more info. - let debug_verify = true; - self.rc - .accounts - .accounts_db - .update_accounts_hash_with_verify_from( - data_source, - debug_verify, - self.slot(), - &self.ancestors, - Some(self.capitalization()), - self.epoch_schedule(), - is_startup, - ); - - panic!( - "capitalization_mismatch. slot: {}, calculated_lamports: {}, capitalization: {}", - self.slot(), - total_lamports, - self.capitalization() - ); - } - accounts_hash - } - - /// Calculate the incremental accounts hash from `base_slot` to `self` - pub fn update_incremental_accounts_hash(&self, base_slot: Slot) -> IncrementalAccountsHash { - let config = CalcAccountsHashConfig { - use_bg_thread_pool: true, - ancestors: None, // does not matter, will not be used - epoch_schedule: &self.epoch_schedule, - epoch: self.epoch, - store_detailed_debug_info_on_failure: false, - }; - let storages = self.get_snapshot_storages(Some(base_slot)); - let sorted_storages = SortedStorages::new(&storages); - self.rc - .accounts - .accounts_db - .update_incremental_accounts_hash( - &config, - &sorted_storages, - self.slot(), - HashStats::default(), - ) - .0 - } - /// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash /// calculation and could shield other real accounts. 
pub fn verify_snapshot_bank( @@ -5978,8 +5898,11 @@ impl Bank { self.transaction_processor.get_sysvar_cache_for_tests() } - pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { - self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false) + pub fn calculate_accounts_lt_hash_for_tests(&self) -> AccountsLtHash { + self.rc + .accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, self.slot) } pub fn new_program_cache_for_tx_batch_for_slot(&self, slot: Slot) -> ProgramCacheForTxBatch { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 814ca766051af2..935e33a9e2c70c 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -974,9 +974,9 @@ fn test_purge_empty_accounts() { bank.freeze(); bank.squash(); bank.force_flush_accounts_cache(); - let hash = bank.update_accounts_hash_for_tests(); + let hash = bank.calculate_accounts_lt_hash_for_tests(); bank.clean_accounts_for_tests(); - assert_eq!(bank.update_accounts_hash_for_tests(), hash); + assert_eq!(bank.calculate_accounts_lt_hash_for_tests(), hash); let bank0 = new_from_parent_with_fork_next_slot(bank.clone(), bank_forks.as_ref()); let blockhash = bank.last_blockhash(); @@ -997,9 +997,9 @@ fn test_purge_empty_accounts() { assert_eq!(bank1.get_account(&keypair.pubkey()), None); info!("bank0 purge"); - let hash = bank0.update_accounts_hash_for_tests(); + let hash = bank0.calculate_accounts_lt_hash_for_tests(); bank0.clean_accounts_for_tests(); - assert_eq!(bank0.update_accounts_hash_for_tests(), hash); + assert_eq!(bank0.calculate_accounts_lt_hash_for_tests(), hash); assert_eq!( bank0.get_account(&keypair.pubkey()).unwrap().lamports(), @@ -1034,7 +1034,6 @@ fn test_purge_empty_accounts() { bank1.freeze(); bank1.squash(); add_root_and_flush_write_cache(&bank1); - bank1.update_accounts_hash_for_tests(); assert!(bank1.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); // keypair should have 0 tokens on 
both forks @@ -2179,7 +2178,6 @@ fn test_bank_hash_internal_state() { bank2.transfer(amount, &mint_keypair, &pubkey2).unwrap(); bank2.squash(); bank2.force_flush_accounts_cache(); - bank2.update_accounts_hash(CalcAccountsHashDataSource::Storages, false); assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); } @@ -2214,7 +2212,6 @@ fn test_bank_hash_internal_state_verify() { // we later modify bank 2, so this flush is destructive to the test bank2.freeze(); add_root_and_flush_write_cache(&bank2); - bank2.update_accounts_hash_for_tests(); assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); } let bank3 = new_bank_from_parent_with_bank_forks( @@ -2235,7 +2232,6 @@ fn test_bank_hash_internal_state_verify() { // Doing so throws an assert. So, we can't flush 3 until 2 is flushed. bank3.freeze(); add_root_and_flush_write_cache(&bank3); - bank3.update_accounts_hash_for_tests(); assert!(bank3.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); continue; } @@ -2245,7 +2241,6 @@ fn test_bank_hash_internal_state_verify() { bank2.freeze(); // <-- keep freeze() *outside* `if pass == 2 {}` if pass == 2 { add_root_and_flush_write_cache(&bank2); - bank2.update_accounts_hash_for_tests(); assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); // Verifying the accounts lt hash is only intended to be called at startup, and @@ -2261,7 +2256,6 @@ fn test_bank_hash_internal_state_verify() { bank3.freeze(); add_root_and_flush_write_cache(&bank3); - bank3.update_accounts_hash_for_tests(); assert!(bank3.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); } } @@ -2287,7 +2281,6 @@ fn test_verify_snapshot_bank() { .unwrap(); bank.freeze(); add_root_and_flush_write_cache(&bank); - bank.update_accounts_hash_for_tests(); assert!(bank.verify_snapshot_bank(false, false, bank.slot(), None)); // tamper the bank after freeze! 
@@ -3596,13 +3589,11 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { ); // Re-adding builtin programs should be no-op - bank.update_accounts_hash_for_tests(); - let old_hash = bank.get_accounts_hash().unwrap(); + let old_hash = bank.calculate_accounts_lt_hash_for_tests(); bank.add_mockup_builtin(vote_id, MockBuiltin::vm); bank.add_mockup_builtin(stake_id, MockBuiltin::vm); add_root_and_flush_write_cache(&bank); - bank.update_accounts_hash_for_tests(); - let new_hash = bank.get_accounts_hash().unwrap(); + let new_hash = bank.calculate_accounts_lt_hash_for_tests(); assert_eq!(old_hash, new_hash); { let stakes = bank.stakes_cache.stakes(); @@ -10793,6 +10784,7 @@ fn test_feature_activation_loaded_programs_epoch_transition() { assert!(bank.process_transaction(&transaction).is_ok()); } +// this test is obsolete and will be removed next #[test] fn test_bank_verify_accounts_hash_with_base() { let GenesisConfigInfo { @@ -10845,8 +10837,6 @@ fn test_bank_verify_accounts_hash_with_base() { // update the base accounts hash bank.squash(); bank.force_flush_accounts_cache(); - bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false); - let base_slot = bank.slot(); // make more banks, do more transactions, ensure there's more zero-lamport accounts for _ in 0..2 { @@ -10863,7 +10853,6 @@ fn test_bank_verify_accounts_hash_with_base() { // update the incremental accounts hash bank.squash(); bank.force_flush_accounts_cache(); - bank.update_incremental_accounts_hash(base_slot); // ensure the accounts hash verifies assert!(bank.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); From 43eccfa5b4ca50a955f6b91f707a2cb7c0e40043 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 22 Jul 2025 15:52:23 -0500 Subject: [PATCH 41/68] validator: Refactor account paths parsing in ValidatorConfig (#7085) These fields are currently parsed and set in a ValidatorConfig after the ValidatorConfig has been created. 
Parsing the values prior and passing them into ValidatorConfig creation allows us to move towards removing the use of ..ValidatorConfig::default() --- validator/src/commands/run/execute.rs | 62 +++++++++++++-------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 2824441262d8ca..f95a7312ff6d44 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -526,6 +526,35 @@ pub fn execute( ) }); + let account_paths: Vec = + if let Ok(account_paths) = values_t!(matches, "account_paths", String) { + account_paths + .join(",") + .split(',') + .map(PathBuf::from) + .collect() + } else { + vec![ledger_path.join("accounts")] + }; + let account_paths = create_and_canonicalize_directories(account_paths) + .map_err(|err| format!("unable to access account path: {err}"))?; + + // From now on, use run/ paths in the same way as the previous account_paths. + let (account_run_paths, account_snapshot_paths) = + create_all_accounts_run_and_snapshot_dirs(&account_paths) + .map_err(|err| format!("unable to create account directories: {err}"))?; + + // These snapshot paths are only used for initial clean up, add in shrink paths if they exist. 
+ let account_snapshot_paths = + if let Some(account_shrink_snapshot_paths) = account_shrink_snapshot_paths { + account_snapshot_paths + .into_iter() + .chain(account_shrink_snapshot_paths) + .collect() + } else { + account_snapshot_paths + }; + let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -648,6 +677,8 @@ pub fn execute( poh_hashes_per_batch: value_of(matches, "poh_hashes_per_batch") .unwrap_or(poh_service::DEFAULT_HASHES_PER_BATCH), process_ledger_before_services: matches.is_present("process_ledger_before_services"), + account_paths: account_run_paths, + account_snapshot_paths, accounts_db_config, accounts_db_skip_shrink: true, accounts_db_force_initial_clean: matches.is_present("no_skip_initial_accounts_db_clean"), @@ -707,37 +738,6 @@ pub fn execute( solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap()) .expect("invalid dynamic_port_range"); - let account_paths: Vec = - if let Ok(account_paths) = values_t!(matches, "account_paths", String) { - account_paths - .join(",") - .split(',') - .map(PathBuf::from) - .collect() - } else { - vec![ledger_path.join("accounts")] - }; - let account_paths = create_and_canonicalize_directories(account_paths) - .map_err(|err| format!("unable to access account path: {err}"))?; - - let (account_run_paths, account_snapshot_paths) = - create_all_accounts_run_and_snapshot_dirs(&account_paths) - .map_err(|err| format!("unable to create account directories: {err}"))?; - - // From now on, use run/ paths in the same way as the previous account_paths. - validator_config.account_paths = account_run_paths; - - // These snapshot paths are only used for initial clean up, add in shrink paths if they exist. 
- validator_config.account_snapshot_paths = - if let Some(account_shrink_snapshot_paths) = account_shrink_snapshot_paths { - account_snapshot_paths - .into_iter() - .chain(account_shrink_snapshot_paths) - .collect() - } else { - account_snapshot_paths - }; - let maximum_local_snapshot_age = value_t_or_exit!(matches, "maximum_local_snapshot_age", u64); let maximum_full_snapshot_archives_to_retain = value_t_or_exit!(matches, "maximum_full_snapshots_to_retain", NonZeroUsize); From d7f82653aeaf3ffbff73b092507488f076cab009 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 22 Jul 2025 16:08:30 -0500 Subject: [PATCH 42/68] thread-manager: Resolve Rust 1.88 clippy lints and format strings (#7088) - Run `cargo clippy --fix --tests` with Rust 1.88.0 set in `rust-toolchain.toml` - Run `cargo fmt` with `format_strings = true` set in `rustfmt.toml` --- thread-manager/src/lib.rs | 2 +- thread-manager/src/native_thread_runtime.rs | 5 ++++- thread-manager/src/tokio_runtime.rs | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/thread-manager/src/lib.rs b/thread-manager/src/lib.rs index e81e1e3fb9e486..1b0fd7213864ff 100644 --- a/thread-manager/src/lib.rs +++ b/thread-manager/src/lib.rs @@ -220,7 +220,7 @@ mod tests { .read_to_string(&mut buf) .unwrap(); let cfg: ThreadManagerConfig = toml::from_str(&buf).unwrap(); - println!("{:?}", cfg); + println!("{cfg:?}"); } } // Nobody runs Agave on windows, and on Mac we can not set mask affinity without patching external crate diff --git a/thread-manager/src/native_thread_runtime.rs b/thread-manager/src/native_thread_runtime.rs index 9ee6d58490fec6..5882a6cca5498e 100644 --- a/thread-manager/src/native_thread_runtime.rs +++ b/thread-manager/src/native_thread_runtime.rs @@ -95,7 +95,10 @@ impl JoinHandle { impl Drop for JoinHandle { fn drop(&mut self) { if self.std_handle.is_some() { - warn!("Attempting to drop a Join Handle of a running thread will leak thread IDs, please join your threads!"); + warn!( + "Attempting to 
drop a Join Handle of a running thread will leak thread IDs, \ + please join your threads!" + ); self.join_inner().expect("Child thread panicked"); } } diff --git a/thread-manager/src/tokio_runtime.rs b/thread-manager/src/tokio_runtime.rs index fb842b9f6a860b..2b8bfaab232d82 100644 --- a/thread-manager/src/tokio_runtime.rs +++ b/thread-manager/src/tokio_runtime.rs @@ -100,7 +100,7 @@ impl TokioRuntime { .event_interval(cfg.event_interval) .thread_name_fn(move || { let id = atomic_id.fetch_add(1, Ordering::Relaxed); - format!("{}-{}", base_name, id) + format!("{base_name}-{id}") }) .on_thread_park({ let counters = counters.clone(); From 6188a3d014610c59c5cf1aa7ec8e76e87e0dc5a4 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Wed, 23 Jul 2025 07:06:25 +0900 Subject: [PATCH 43/68] Downgrade to curve25519-dalek v4.1.3 (#7079) --- Cargo.lock | 28 ++++++++++++++-------------- Cargo.toml | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0183c77d2f81e5..251f094f025f7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2200,9 +2200,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.2.0" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b7c5dbd637569a2cca66e8d66b8c446a1e7bf064ea321d265d7b3dfe7c97e" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if 1.0.1", "cpufeatures", @@ -2771,9 +2771,9 @@ checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" [[package]] name = "fiat-crypto" -version = "0.3.0" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filedescriptor" @@ -8096,7 +8096,7 @@ checksum = "def3cfe5279edb64fc39111cff6dcf77b01fbfba2c02c13ced41e6a48baf4cbe" dependencies = 
[ "bytemuck", "bytemuck_derive", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "solana-define-syscall", "subtle", "thiserror 2.0.12", @@ -8108,7 +8108,7 @@ version = "3.0.0" dependencies = [ "bytemuck", "bytemuck_derive", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "solana-define-syscall", "subtle", "thiserror 2.0.12", @@ -9310,7 +9310,7 @@ dependencies = [ "bv", "bytes", "caps", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "dlopen2", "fnv", "libc", @@ -9709,7 +9709,7 @@ dependencies = [ "borsh 1.5.7", "bytemuck", "bytemuck_derive", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "five8", "five8_const", "getrandom 0.2.15", @@ -11863,7 +11863,7 @@ dependencies = [ "agave-feature-set", "bytemuck", "criterion", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "num-derive", "num-traits", "solana-instruction", @@ -11922,7 +11922,7 @@ dependencies = [ "bincode", "bytemuck", "bytemuck_derive", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "js-sys", "lazy_static", @@ -11957,7 +11957,7 @@ dependencies = [ "bincode", "bytemuck", "bytemuck_derive", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "js-sys", "merlin", @@ -11991,7 +11991,7 @@ dependencies = [ "agave-feature-set", "bytemuck", "criterion", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "num-derive", "num-traits", "solana-instruction", @@ -12010,7 +12010,7 @@ dependencies = [ "bincode", "bytemuck", "bytemuck_derive", - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "merlin", "num-derive", @@ -12354,7 +12354,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae5b124840d4aed474cef101d946a798b806b46a509ee4df91021e1ab1cef3ef" dependencies = [ - "curve25519-dalek 4.2.0", + "curve25519-dalek 4.1.3", "solana-zk-sdk 2.2.15", "thiserror 2.0.12", ] diff --git a/Cargo.toml b/Cargo.toml index c121495a54cfbd..c5b5d87773248a 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -241,7 +241,7 @@ criterion-stats = "0.3.0" crossbeam-channel = "0.5.15" csv = "1.3.1" ctrlc = "3.4.7" -curve25519-dalek = { version = "4.2.0", features = ["digest", "rand_core"] } +curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } derive-where = "1.5.0" From eb4c2c8e94fb56506e18e29d3712b9ad51d5c98b Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Wed, 23 Jul 2025 00:24:53 +0200 Subject: [PATCH 44/68] perf: move benchmarks to bencher 0.1.5 (#7041) move benchmarks to bencher 0.1.5 --- Cargo.lock | 1 + perf/Cargo.toml | 19 +++++++++++ perf/benches/dedup.rs | 56 ++++++++++++++---------------- perf/benches/discard.rs | 14 ++++---- perf/benches/recycler.rs | 14 ++++---- perf/benches/reset.rs | 28 +++++++-------- perf/benches/shrink.rs | 35 +++++++++---------- perf/benches/sigverify.rs | 72 ++++++++++++++++++--------------------- 8 files changed, 123 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 251f094f025f7b..3f4811ce40da1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9306,6 +9306,7 @@ version = "3.0.0" dependencies = [ "ahash 0.8.11", "assert_matches", + "bencher", "bincode", "bv", "bytes", diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 6103c3c43d3c40..1f8e42a8f58a14 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -77,6 +77,7 @@ nix = { workspace = true, features = ["user"] } [dev-dependencies] assert_matches = { workspace = true } +bencher = { workspace = true } rand_chacha = { workspace = true } solana-logger = { workspace = true } solana-perf = { path = ".", features = ["dev-context-only-utils"] } @@ -85,11 +86,29 @@ test-case = { workspace = true } [target.'cfg(not(any(target_env = "msvc", target_os = "freebsd")))'.dev-dependencies] jemallocator = { workspace = true } +[[bench]] +name = "dedup" +harness = false + +[[bench]] +name = 
"recycler" +harness = false + +[[bench]] +name = "reset" +harness = false + +[[bench]] +name = "shrink" +harness = false + [[bench]] name = "sigverify" +harness = false [[bench]] name = "discard" +harness = false [lints.rust.unexpected_cfgs] level = "warn" diff --git a/perf/benches/dedup.rs b/perf/benches/dedup.rs index adef2b3dfed67a..4af359f2bce8e6 100644 --- a/perf/benches/dedup.rs +++ b/perf/benches/dedup.rs @@ -1,16 +1,13 @@ #![allow(clippy::arithmetic_side_effects)] -#![feature(test)] - -extern crate test; use { + bencher::{benchmark_group, benchmark_main, Bencher}, rand::prelude::*, solana_perf::{ deduper::{self, Deduper}, packet::{to_packet_batches, PacketBatch}, }, std::time::Duration, - test::Bencher, }; #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] @@ -26,11 +23,11 @@ fn test_packet_with_size(size: usize, rng: &mut ThreadRng) -> Vec { .collect() } -fn do_bench_dedup_packets(bencher: &mut Bencher, mut batches: Vec) { +fn do_bench_dedup_packets(b: &mut Bencher, mut batches: Vec) { // verify packets let mut rng = rand::thread_rng(); let mut deduper = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979); - bencher.iter(|| { + b.iter(|| { let _ans = deduper::dedup_packets_and_count_discards(&deduper, &mut batches); deduper.maybe_reset( &mut rng, @@ -44,9 +41,7 @@ fn do_bench_dedup_packets(bencher: &mut Bencher, mut batches: Vec) }); } -#[bench] -#[ignore] -fn bench_dedup_same_small_packets(bencher: &mut Bencher) { +fn bench_dedup_same_small_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let small_packet = test_packet_with_size(128, &mut rng); @@ -55,12 +50,10 @@ fn bench_dedup_same_small_packets(bencher: &mut Bencher) { 128, ); - do_bench_dedup_packets(bencher, batches); + do_bench_dedup_packets(b, batches); } -#[bench] -#[ignore] -fn bench_dedup_same_big_packets(bencher: &mut Bencher) { +fn bench_dedup_same_big_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let big_packet = test_packet_with_size(1024, &mut 
rng); @@ -69,12 +62,10 @@ fn bench_dedup_same_big_packets(bencher: &mut Bencher) { 128, ); - do_bench_dedup_packets(bencher, batches); + do_bench_dedup_packets(b, batches); } -#[bench] -#[ignore] -fn bench_dedup_diff_small_packets(bencher: &mut Bencher) { +fn bench_dedup_diff_small_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let batches = to_packet_batches( @@ -84,12 +75,10 @@ fn bench_dedup_diff_small_packets(bencher: &mut Bencher) { 128, ); - do_bench_dedup_packets(bencher, batches); + do_bench_dedup_packets(b, batches); } -#[bench] -#[ignore] -fn bench_dedup_diff_big_packets(bencher: &mut Bencher) { +fn bench_dedup_diff_big_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let batches = to_packet_batches( @@ -99,12 +88,10 @@ fn bench_dedup_diff_big_packets(bencher: &mut Bencher) { 128, ); - do_bench_dedup_packets(bencher, batches); + do_bench_dedup_packets(b, batches); } -#[bench] -#[ignore] -fn bench_dedup_baseline(bencher: &mut Bencher) { +fn bench_dedup_baseline(b: &mut Bencher) { let mut rng = rand::thread_rng(); let batches = to_packet_batches( @@ -114,15 +101,13 @@ fn bench_dedup_baseline(bencher: &mut Bencher) { 128, ); - do_bench_dedup_packets(bencher, batches); + do_bench_dedup_packets(b, batches); } -#[bench] -#[ignore] -fn bench_dedup_reset(bencher: &mut Bencher) { +fn bench_dedup_reset(b: &mut Bencher) { let mut rng = rand::thread_rng(); let mut deduper = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979); - bencher.iter(|| { + b.iter(|| { deduper.maybe_reset( &mut rng, 0.001, // false_positive_rate @@ -130,3 +115,14 @@ fn bench_dedup_reset(bencher: &mut Bencher) { ); }); } + +benchmark_group!( + benches, + bench_dedup_reset, + bench_dedup_baseline, + bench_dedup_diff_big_packets, + bench_dedup_diff_small_packets, + bench_dedup_same_big_packets, + bench_dedup_same_small_packets +); +benchmark_main!(benches); diff --git a/perf/benches/discard.rs b/perf/benches/discard.rs index c79484257e49fa..837907e257c84b 100644 
--- a/perf/benches/discard.rs +++ b/perf/benches/discard.rs @@ -1,10 +1,6 @@ -#![feature(test)] - -extern crate test; - use { + bencher::{benchmark_group, benchmark_main, Bencher}, solana_perf::{discard::discard_batches_randomly, packet::to_packet_batches, test_tx::test_tx}, - test::Bencher, }; #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] @@ -13,8 +9,7 @@ static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc; const NUM: usize = 1000; -#[bench] -fn bench_discard(bencher: &mut Bencher) { +fn bench_discard(b: &mut Bencher) { solana_logger::setup(); let tx = test_tx(); let num_packets = NUM; @@ -25,9 +20,12 @@ fn bench_discard(bencher: &mut Bencher) { 10, ); - bencher.iter(|| { + b.iter(|| { let mut discarded = batches.clone(); discard_batches_randomly(&mut discarded, 100, NUM); assert_eq!(discarded.len(), 10); }) } + +benchmark_group!(benches, bench_discard); +benchmark_main!(benches); diff --git a/perf/benches/recycler.rs b/perf/benches/recycler.rs index 0533e4a11eb3a2..1f996f1c3d9398 100644 --- a/perf/benches/recycler.rs +++ b/perf/benches/recycler.rs @@ -1,14 +1,9 @@ -#![feature(test)] - -extern crate test; - use { + bencher::{benchmark_group, benchmark_main, Bencher}, solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, - test::Bencher, }; -#[bench] -fn bench_recycler(bencher: &mut Bencher) { +fn bench_recycler(b: &mut Bencher) { solana_logger::setup(); let recycler: PacketBatchRecycler = Recycler::default(); @@ -17,7 +12,10 @@ fn bench_recycler(bencher: &mut Bencher) { let _packet = recycler.allocate(""); } - bencher.iter(move || { + b.iter(move || { let _packet = recycler.allocate(""); }); } + +benchmark_group!(benches, bench_recycler); +benchmark_main!(benches); diff --git a/perf/benches/reset.rs b/perf/benches/reset.rs index 18401dcd664a6a..72382d0c5f23ff 100644 --- a/perf/benches/reset.rs +++ b/perf/benches/reset.rs @@ -1,10 +1,9 @@ -#![feature(test)] - -extern crate test; - use { - std::sync::atomic::{AtomicU64, Ordering}, 
- test::Bencher, + bencher::{benchmark_group, benchmark_main, Bencher}, + std::{ + hint::black_box, + sync::atomic::{AtomicU64, Ordering}, + }, }; #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] @@ -16,15 +15,14 @@ const N: usize = 1_000_000; // test bench_reset1 ... bench: 436,240 ns/iter (+/- 176,714) // test bench_reset2 ... bench: 274,007 ns/iter (+/- 129,552) -#[bench] -fn bench_reset1(bencher: &mut Bencher) { +fn bench_reset1(b: &mut Bencher) { solana_logger::setup(); let mut v = Vec::with_capacity(N); v.resize_with(N, AtomicU64::default); - bencher.iter(|| { - test::black_box({ + b.iter(|| { + black_box({ for i in &v { i.store(0, Ordering::Relaxed); } @@ -33,18 +31,20 @@ fn bench_reset1(bencher: &mut Bencher) { }); } -#[bench] -fn bench_reset2(bencher: &mut Bencher) { +fn bench_reset2(b: &mut Bencher) { solana_logger::setup(); let mut v = Vec::with_capacity(N); v.resize_with(N, AtomicU64::default); - bencher.iter(|| { - test::black_box({ + b.iter(|| { + black_box({ v.clear(); v.resize_with(N, AtomicU64::default); 0 }); }); } + +benchmark_group!(benches, bench_reset2, bench_reset1); +benchmark_main!(benches); diff --git a/perf/benches/shrink.rs b/perf/benches/shrink.rs index 461e651466d2d2..2072e649d01371 100644 --- a/perf/benches/shrink.rs +++ b/perf/benches/shrink.rs @@ -1,16 +1,13 @@ #![allow(clippy::arithmetic_side_effects)] -#![feature(test)] - -extern crate test; use { + bencher::{benchmark_group, benchmark_main, Bencher}, rand::prelude::*, solana_perf::{ packet::{to_packet_batches, PacketBatch, PACKETS_PER_BATCH}, sigverify, }, std::iter, - test::Bencher, }; #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] @@ -26,7 +23,7 @@ fn test_packet_with_size(size: usize, rng: &mut ThreadRng) -> Vec { .collect() } -fn do_bench_shrink_packets(bencher: &mut Bencher, mut batches: Vec) { +fn do_bench_shrink_packets(b: &mut Bencher, mut batches: Vec) { let mut batches = iter::repeat_with(|| { batches.iter_mut().for_each(|b| { b.iter_mut() @@ 
-40,16 +37,14 @@ fn do_bench_shrink_packets(bencher: &mut Bencher, mut batches: Vec) .collect::>() .into_iter() .cycle(); - bencher.iter(|| { + b.iter(|| { let batches = batches.next().unwrap(); // verify packets sigverify::shrink_batches(batches); }); } -#[bench] -#[ignore] -fn bench_shrink_diff_small_packets(bencher: &mut Bencher) { +fn bench_shrink_diff_small_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let batches = to_packet_batches( @@ -59,12 +54,10 @@ fn bench_shrink_diff_small_packets(bencher: &mut Bencher) { PACKETS_PER_BATCH, ); - do_bench_shrink_packets(bencher, batches); + do_bench_shrink_packets(b, batches); } -#[bench] -#[ignore] -fn bench_shrink_diff_big_packets(bencher: &mut Bencher) { +fn bench_shrink_diff_big_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let batches = to_packet_batches( @@ -74,12 +67,10 @@ fn bench_shrink_diff_big_packets(bencher: &mut Bencher) { PACKETS_PER_BATCH, ); - do_bench_shrink_packets(bencher, batches); + do_bench_shrink_packets(b, batches); } -#[bench] -#[ignore] -fn bench_shrink_count_packets(bencher: &mut Bencher) { +fn bench_shrink_count_packets(b: &mut Bencher) { let mut rng = rand::thread_rng(); let mut batches = to_packet_batches( @@ -93,7 +84,15 @@ fn bench_shrink_count_packets(bencher: &mut Bencher) { .for_each(|mut p| p.meta_mut().set_discard(thread_rng().gen())) }); - bencher.iter(|| { + b.iter(|| { let _ = sigverify::count_valid_packets(&batches); }); } + +benchmark_group!( + benches, + bench_shrink_count_packets, + bench_shrink_diff_big_packets, + bench_shrink_diff_small_packets +); +benchmark_main!(benches); diff --git a/perf/benches/sigverify.rs b/perf/benches/sigverify.rs index 8a914e6f82a06c..fb9491f3460bb3 100644 --- a/perf/benches/sigverify.rs +++ b/perf/benches/sigverify.rs @@ -1,8 +1,7 @@ -#![feature(test)] - -extern crate test; +#![allow(clippy::arithmetic_side_effects)] use { + bencher::{benchmark_group, benchmark_main, Bencher}, log::*, rand::{thread_rng, Rng}, 
solana_perf::{ @@ -11,7 +10,6 @@ use { sigverify, test_tx::{test_multisig_tx, test_tx}, }, - test::Bencher, }; #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] @@ -21,8 +19,7 @@ static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc; const NUM: usize = 256; const LARGE_BATCH_PACKET_COUNT: usize = 128; -#[bench] -fn bench_sigverify_simple(bencher: &mut Bencher) { +fn bench_sigverify_simple(b: &mut Bencher) { let tx = test_tx(); let num_packets = NUM; @@ -35,7 +32,7 @@ fn bench_sigverify_simple(bencher: &mut Bencher) { let recycler = Recycler::default(); let recycler_out = Recycler::default(); // verify packets - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } @@ -56,82 +53,68 @@ fn gen_batches( } } -#[bench] -#[ignore] -fn bench_sigverify_low_packets_small_batch(bencher: &mut Bencher) { +fn bench_sigverify_low_packets_small_batch(b: &mut Bencher) { let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE - 1; let mut batches = gen_batches(false, 1, num_packets); let recycler = Recycler::default(); let recycler_out = Recycler::default(); - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -#[ignore] -fn bench_sigverify_low_packets_large_batch(bencher: &mut Bencher) { +fn bench_sigverify_low_packets_large_batch(b: &mut Bencher) { let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE - 1; let mut batches = gen_batches(false, LARGE_BATCH_PACKET_COUNT, num_packets); let recycler = Recycler::default(); let recycler_out = Recycler::default(); - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -#[ignore] -fn bench_sigverify_medium_packets_small_batch(bencher: &mut Bencher) { +fn bench_sigverify_medium_packets_small_batch(b: &mut Bencher) { let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 8; let mut batches = 
gen_batches(false, 1, num_packets); let recycler = Recycler::default(); let recycler_out = Recycler::default(); - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -#[ignore] -fn bench_sigverify_medium_packets_large_batch(bencher: &mut Bencher) { +fn bench_sigverify_medium_packets_large_batch(b: &mut Bencher) { let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 8; let mut batches = gen_batches(false, LARGE_BATCH_PACKET_COUNT, num_packets); let recycler = Recycler::default(); let recycler_out = Recycler::default(); - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -#[ignore] -fn bench_sigverify_high_packets_small_batch(bencher: &mut Bencher) { +fn bench_sigverify_high_packets_small_batch(b: &mut Bencher) { let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 32; let mut batches = gen_batches(false, 1, num_packets); let recycler = Recycler::default(); let recycler_out = Recycler::default(); - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -#[ignore] -fn bench_sigverify_high_packets_large_batch(bencher: &mut Bencher) { +fn bench_sigverify_high_packets_large_batch(b: &mut Bencher) { let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 32; let mut batches = gen_batches(false, LARGE_BATCH_PACKET_COUNT, num_packets); let recycler = Recycler::default(); let recycler_out = Recycler::default(); // verify packets - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -#[ignore] -fn bench_sigverify_uneven(bencher: &mut Bencher) { +fn bench_sigverify_uneven(b: &mut Bencher) { solana_logger::setup(); let simple_tx = test_tx(); let multi_tx = test_multisig_tx(); @@ -171,13 +154,12 @@ fn bench_sigverify_uneven(bencher: &mut Bencher) { let 
recycler = Recycler::default(); let recycler_out = Recycler::default(); // verify packets - bencher.iter(|| { + b.iter(|| { sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets); }) } -#[bench] -fn bench_get_offsets(bencher: &mut Bencher) { +fn bench_get_offsets(b: &mut Bencher) { let tx = test_tx(); // generate packet vector @@ -185,7 +167,21 @@ fn bench_get_offsets(bencher: &mut Bencher) { let recycler = Recycler::default(); // verify packets - bencher.iter(|| { + b.iter(|| { let _ans = sigverify::generate_offsets(&mut batches, &recycler, false); }) } + +benchmark_group!( + benches, + bench_get_offsets, + bench_sigverify_uneven, + bench_sigverify_high_packets_large_batch, + bench_sigverify_high_packets_small_batch, + bench_sigverify_medium_packets_large_batch, + bench_sigverify_medium_packets_small_batch, + bench_sigverify_low_packets_large_batch, + bench_sigverify_low_packets_small_batch, + bench_sigverify_simple +); +benchmark_main!(benches); From cdc96edc6a46c7743a5c5e2d198f46ba61d84b24 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 22 Jul 2025 20:47:50 -0500 Subject: [PATCH 45/68] rayon-threadlimit: Deprecate get_max_thread_count() (#7071) This function provided a hack to change threadpool sizes to avoid observed performance issues. The users of this function have since been updated to have CLI args wired up to control the threadpool sizes. 
So, the hack is no longer necessary and can be deprecated --- Cargo.lock | 7 +++++-- core/Cargo.toml | 1 + core/src/validator.rs | 4 ++-- entry/Cargo.toml | 6 +++++- entry/src/entry.rs | 4 ++-- ledger/src/blockstore_processor.rs | 5 ++--- poh-bench/Cargo.toml | 2 +- poh-bench/src/main.rs | 5 +---- poh/Cargo.toml | 1 + programs/sbf/Cargo.lock | 4 +++- rayon-threadlimit/Cargo.toml | 1 + rayon-threadlimit/src/lib.rs | 17 +++++++++++++---- svm/examples/Cargo.lock | 4 +++- validator/src/cli/thread_args.rs | 8 ++++---- 14 files changed, 44 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f4811ce40da1c..9c387e8e731d56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7910,6 +7910,7 @@ dependencies = [ "log", "lru", "min-max-heap", + "num_cpus", "num_enum", "prio-graph", "qualifier_attr", @@ -8235,9 +8236,11 @@ dependencies = [ "crossbeam-channel", "dlopen2", "log", + "num_cpus", "rand 0.8.5", "rayon", "serde", + "solana-entry", "solana-hash", "solana-keypair", "solana-logger", @@ -8248,7 +8251,6 @@ dependencies = [ "solana-packet", "solana-perf", "solana-pubkey", - "solana-rayon-threadlimit", "solana-runtime-transaction", "solana-sha256-hasher", "solana-signature", @@ -9387,12 +9389,12 @@ version = "3.0.0" dependencies = [ "clap 3.2.23", "log", + "num_cpus", "rayon", "solana-entry", "solana-logger", "solana-measure", "solana-perf", - "solana-rayon-threadlimit", "solana-sha256-hasher", "solana-version", ] @@ -9801,6 +9803,7 @@ dependencies = [ name = "solana-rayon-threadlimit" version = "3.0.0" dependencies = [ + "log", "num_cpus", ] diff --git a/core/Cargo.toml b/core/Cargo.toml index 3697d6fe9c707a..177a42271cf94e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -66,6 +66,7 @@ itertools = { workspace = true } log = { workspace = true } lru = { workspace = true } min-max-heap = { workspace = true } +num_cpus = { workspace = true } num_enum = { workspace = true } prio-graph = { workspace = true } qualifier_attr = { workspace = true } diff --git 
a/core/src/validator.rs b/core/src/validator.rs index 2f978877d0133c..8eb790168dcc8a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -83,7 +83,7 @@ use { transaction_recorder::TransactionRecorder, }, solana_pubkey::Pubkey, - solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, + solana_rayon_threadlimit::get_thread_count, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ @@ -379,7 +379,7 @@ impl Default for ValidatorConfig { impl ValidatorConfig { pub fn default_for_test() -> Self { let max_thread_count = - NonZeroUsize::new(get_max_thread_count()).expect("thread count is non-zero"); + NonZeroUsize::new(num_cpus::get()).expect("thread count is non-zero"); Self { accounts_db_config: Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), diff --git a/entry/Cargo.toml b/entry/Cargo.toml index bf72e34fdf8ab8..d516424608dacf 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -16,11 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] crate-type = ["lib"] name = "solana_entry" +[features] +dev-context-only-utils = [] + [dependencies] bincode = { workspace = true } crossbeam-channel = { workspace = true } dlopen2 = { workspace = true } log = { workspace = true } +num_cpus = { workspace = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } @@ -30,7 +34,6 @@ solana-merkle-tree = { workspace = true } solana-metrics = { workspace = true } solana-packet = { workspace = true } solana-perf = { workspace = true } -solana-rayon-threadlimit = { workspace = true } solana-runtime-transaction = { workspace = true } solana-sha256-hasher = { workspace = true } solana-transaction = { workspace = true } @@ -39,6 +42,7 @@ solana-transaction-error = { workspace = true } [dev-dependencies] agave-reserved-account-keys = { workspace = true } assert_matches = { workspace = true } +solana-entry = { path = ".", features = ["dev-context-only-utils"] } solana-keypair = { workspace = true } solana-logger = { 
workspace = true } solana-message = { workspace = true } diff --git a/entry/src/entry.rs b/entry/src/entry.rs index c5249d7e3d4ca2..11cd27ccf30532 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -22,7 +22,6 @@ use { recycler::Recycler, sigverify, }, - solana_rayon_threadlimit::get_max_thread_count, solana_runtime_transaction::transaction_with_meta::TransactionWithMeta, solana_transaction::{ versioned::VersionedTransaction, Transaction, TransactionVerificationMode, @@ -959,9 +958,10 @@ pub fn thread_pool_for_tests() -> ThreadPool { .expect("new rayon threadpool") } +#[cfg(feature = "dev-context-only-utils")] pub fn thread_pool_for_benches() -> ThreadPool { rayon::ThreadPoolBuilder::new() - .num_threads(get_max_thread_count()) + .num_threads(num_cpus::get()) .thread_name(|i| format!("solEntryBnch{i:02}")) .build() .expect("new rayon threadpool") diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 05c0df82a30d6f..7119f6eed2fa7c 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -28,7 +28,6 @@ use { solana_measure::{measure::Measure, measure_us}, solana_metrics::datapoint_error, solana_pubkey::Pubkey, - solana_rayon_threadlimit::get_max_thread_count, solana_runtime::{ bank::{Bank, PreCommitResult, TransactionBalancesSet}, bank_forks::{BankForks, SetRootError}, @@ -918,7 +917,7 @@ pub(crate) fn process_blockstore_for_bank_0( let bank_forks = BankForks::new_rw_arc(bank0); info!("Processing ledger for slot 0..."); - let replay_tx_thread_pool = create_thread_pool(get_max_thread_count()); + let replay_tx_thread_pool = create_thread_pool(num_cpus::get()); process_bank_0( &bank_forks .read() @@ -997,7 +996,7 @@ pub fn process_blockstore_from_root( .meta(start_slot) .unwrap_or_else(|_| panic!("Failed to get meta for slot {start_slot}")) { - let replay_tx_thread_pool = create_thread_pool(get_max_thread_count()); + let replay_tx_thread_pool = create_thread_pool(num_cpus::get()); 
load_frozen_forks( bank_forks, &start_slot_meta, diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml index e95e7a22a02cb4..f3d62bd93cb850 100644 --- a/poh-bench/Cargo.toml +++ b/poh-bench/Cargo.toml @@ -15,11 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "3.1.5", features = ["cargo"] } log = { workspace = true } +num_cpus = { workspace = true } rayon = { workspace = true } solana-entry = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-perf = { workspace = true } -solana-rayon-threadlimit = { workspace = true } solana-sha256-hasher = { workspace = true } solana-version = { workspace = true } diff --git a/poh-bench/src/main.rs b/poh-bench/src/main.rs index 6ac1ebaa4ed9f1..2d454904c6ba1f 100644 --- a/poh-bench/src/main.rs +++ b/poh-bench/src/main.rs @@ -7,7 +7,6 @@ use { clap::{crate_description, crate_name, Arg, Command}, solana_measure::measure::Measure, solana_perf::perf_libs, - solana_rayon_threadlimit::get_max_thread_count, solana_sha256_hasher::hash, }; @@ -74,9 +73,7 @@ fn main() { let start_hash = hash(&[1, 2, 3, 4]); let ticks = create_ticks(max_num_entries, hashes_per_tick, start_hash); let mut num_entries = start_num_entries as usize; - let num_threads = matches - .value_of_t("num_threads") - .unwrap_or(get_max_thread_count()); + let num_threads = matches.value_of_t("num_threads").unwrap_or(num_cpus::get()); let thread_pool = rayon::ThreadPoolBuilder::new() .num_threads(num_threads) .thread_name(|i| format!("solPohBench{i:02}")) diff --git a/poh/Cargo.toml b/poh/Cargo.toml index bea3d38dc76548..7af6cda5705323 100644 --- a/poh/Cargo.toml +++ b/poh/Cargo.toml @@ -42,6 +42,7 @@ assert_matches = { workspace = true } bincode = { workspace = true } criterion = { workspace = true } rand = { workspace = true } +solana-entry = { workspace = true, features = ["dev-context-only-utils"] } solana-keypair = { workspace = true } solana-logger = { workspace = true } solana-perf 
= { workspace = true, features = ["dev-context-only-utils"] } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 79af5f89c46979..7fb87bdc36ef7b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6089,6 +6089,7 @@ dependencies = [ "log", "lru", "min-max-heap", + "num_cpus", "num_enum", "prio-graph", "qualifier_attr", @@ -6319,6 +6320,7 @@ dependencies = [ "crossbeam-channel", "dlopen2", "log", + "num_cpus", "rand 0.8.5", "rayon", "serde", @@ -6328,7 +6330,6 @@ dependencies = [ "solana-metrics", "solana-packet", "solana-perf", - "solana-rayon-threadlimit", "solana-runtime-transaction", "solana-sha256-hasher", "solana-transaction", @@ -7538,6 +7539,7 @@ dependencies = [ name = "solana-rayon-threadlimit" version = "3.0.0" dependencies = [ + "log", "num_cpus", ] diff --git a/rayon-threadlimit/Cargo.toml b/rayon-threadlimit/Cargo.toml index b2f433492e5679..44b36615950d5e 100644 --- a/rayon-threadlimit/Cargo.toml +++ b/rayon-threadlimit/Cargo.toml @@ -14,4 +14,5 @@ edition = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [dependencies] +log = { workspace = true } num_cpus = { workspace = true } diff --git a/rayon-threadlimit/src/lib.rs b/rayon-threadlimit/src/lib.rs index 912cb622aaa4b4..e12d3584b81d23 100644 --- a/rayon-threadlimit/src/lib.rs +++ b/rayon-threadlimit/src/lib.rs @@ -1,4 +1,4 @@ -use std::env; +use {log::warn, std::env}; //TODO remove this hack when rayon fixes itself // reduce the number of threads each pool is allowed to half the cpu core count, to avoid rayon @@ -6,7 +6,13 @@ use std::env; static MAX_RAYON_THREADS: std::sync::LazyLock = std::sync::LazyLock::new(|| { env::var("SOLANA_RAYON_THREADS") .ok() - .and_then(|num_threads| num_threads.parse().ok()) + .and_then(|num_threads| { + warn!( + "Use of SOLANA_RAYON_THREADS has been deprecated and will be removed soon. 
Use \ + the individual agave-validator CLI flags to configure threadpool sizes" + ); + num_threads.parse().ok() + }) .unwrap_or_else(|| num_cpus::get() / 2) .max(1) }); @@ -15,8 +21,11 @@ pub fn get_thread_count() -> usize { *MAX_RAYON_THREADS } -// Only used in legacy code. -// Use get_thread_count instead in all new code. +#[deprecated( + since = "3.0.0", + note = "The solana-rayon-threadlimit crate will be removed, use num_cpus::get() or something \ + similar instead" +)] pub fn get_max_thread_count() -> usize { get_thread_count().saturating_mul(2) } diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 0f6668d05f920e..597aff37baf6c0 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -5922,6 +5922,7 @@ dependencies = [ "log", "lru", "min-max-heap", + "num_cpus", "num_enum", "prio-graph", "qualifier_attr", @@ -6141,6 +6142,7 @@ dependencies = [ "crossbeam-channel", "dlopen2", "log", + "num_cpus", "rand 0.8.5", "rayon", "serde", @@ -6150,7 +6152,6 @@ dependencies = [ "solana-metrics", "solana-packet", "solana-perf", - "solana-rayon-threadlimit", "solana-runtime-transaction", "solana-sha256-hasher", "solana-transaction", @@ -7336,6 +7337,7 @@ dependencies = [ name = "solana-rayon-threadlimit" version = "3.0.0" dependencies = [ + "log", "num_cpus", ] diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs index 6d0bbd23fd6fd0..b1fb47a9d69227 100644 --- a/validator/src/cli/thread_args.rs +++ b/validator/src/cli/thread_args.rs @@ -4,7 +4,7 @@ use { clap::{value_t_or_exit, Arg, ArgMatches}, solana_accounts_db::{accounts_db, accounts_index}, solana_clap_utils::{hidden_unless_forced, input_validators::is_within_range}, - solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, + solana_rayon_threadlimit::get_thread_count, std::{num::NonZeroUsize, ops::RangeInclusive}, }; @@ -197,7 +197,7 @@ pub trait ThreadArg { /// The maximum allowed number of threads (inclusive) fn max() -> usize { // By default, no 
thread pool should scale over the number of the machine's threads - get_max_thread_count() + num_cpus::get() } /// The range of allowed number of threads (inclusive on both ends) fn range() -> RangeInclusive { @@ -270,7 +270,7 @@ impl ThreadArg for RayonGlobalThreadsArg { const HELP: &'static str = "Number of threads to use for the global rayon thread pool"; fn default() -> usize { - get_max_thread_count() + num_cpus::get() } } @@ -298,7 +298,7 @@ impl ThreadArg for ReplayTransactionsThreadsArg { const HELP: &'static str = "Number of threads to use for transaction replay"; fn default() -> usize { - get_max_thread_count() + num_cpus::get() } } From 788d21d9733a16550b947943629db0e3ebf1b59b Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 23 Jul 2025 10:22:31 +0800 Subject: [PATCH 46/68] agave-validator: move tests (#7078) * move verify_args_struct_by_command_run_with_no_genesis_fetch() * move verify_args_struct_by_command_run_with_no_snapshot_fetch * move verify_args_struct_by_command_run_with_check_vote_account * move verify_args_struct_by_command_run_with_only_known_rpc * move verify_args_struct_by_command_run_with_incremental_snapshot_fetch * wording --- validator/src/commands/run/args.rs | 143 ---------------- .../commands/run/args/rpc_bootstrap_config.rs | 158 ++++++++++++++++++ 2 files changed, 158 insertions(+), 143 deletions(-) diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index 75f89bb968c6bd..dc512e3e9c8397 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -1915,46 +1915,6 @@ mod tests { } } - #[test] - fn verify_args_struct_by_command_run_with_no_genesis_fetch() { - // long arg - { - let default_run_args = RunArgs::default(); - let expected_args = RunArgs { - rpc_bootstrap_config: RpcBootstrapConfig { - no_genesis_fetch: true, - ..RpcBootstrapConfig::default() - }, - ..default_run_args.clone() - }; - verify_args_struct_by_command_run_with_identity_setup( - 
default_run_args.clone(), - vec!["--no-genesis-fetch"], - expected_args, - ); - } - } - - #[test] - fn verify_args_struct_by_command_run_with_no_snapshot_fetch() { - // long arg - { - let default_run_args = RunArgs::default(); - let expected_args = RunArgs { - rpc_bootstrap_config: RpcBootstrapConfig { - no_snapshot_fetch: true, - ..RpcBootstrapConfig::default() - }, - ..default_run_args.clone() - }; - verify_args_struct_by_command_run_with_identity_setup( - default_run_args.clone(), - vec!["--no-snapshot-fetch"], - expected_args, - ); - } - } - #[test] fn verify_args_struct_by_command_run_with_entrypoints() { // short arg + single entrypoint @@ -2044,36 +2004,6 @@ mod tests { } } - #[test] - fn verify_args_struct_by_command_run_with_check_vote_account() { - // long arg - { - let default_run_args = RunArgs::default(); - let expected_args = RunArgs { - entrypoints: vec![SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - 8000, - )], - rpc_bootstrap_config: RpcBootstrapConfig { - check_vote_account: Some("https://api.mainnet-beta.solana.com".to_string()), - ..RpcBootstrapConfig::default() - }, - ..default_run_args.clone() - }; - verify_args_struct_by_command_run_with_identity_setup( - default_run_args, - vec![ - // entrypoint is required for check-vote-account - "--entrypoint", - "127.0.0.1:8000", - "--check-vote-account", - "https://api.mainnet-beta.solana.com", - ], - expected_args, - ); - } - } - #[test] fn verify_args_struct_by_command_run_with_known_validators() { // long arg + single known validator @@ -2194,59 +2124,6 @@ mod tests { } } - #[test] - fn verify_args_struct_by_command_run_with_only_known_rpc() { - // long arg - { - let default_run_args = RunArgs::default(); - let known_validators_pubkey = Pubkey::new_unique(); - let known_validators = Some(HashSet::from([known_validators_pubkey])); - let expected_args = RunArgs { - known_validators, - rpc_bootstrap_config: RpcBootstrapConfig { - only_known_rpc: true, - ..RpcBootstrapConfig::default() - 
}, - ..default_run_args.clone() - }; - verify_args_struct_by_command_run_with_identity_setup( - default_run_args, - vec![ - // --known-validator is required - "--known-validator", - &known_validators_pubkey.to_string(), - "--only-known-rpc", - ], - expected_args, - ); - } - - // alias - { - let default_run_args = RunArgs::default(); - let known_validators_pubkey = Pubkey::new_unique(); - let known_validators = Some(HashSet::from([known_validators_pubkey])); - let expected_args = RunArgs { - known_validators, - rpc_bootstrap_config: RpcBootstrapConfig { - only_known_rpc: true, - ..RpcBootstrapConfig::default() - }, - ..default_run_args.clone() - }; - verify_args_struct_by_command_run_with_identity_setup( - default_run_args, - vec![ - // --known-validator is required - "--known-validator", - &known_validators_pubkey.to_string(), - "--no-untrusted-rpc", - ], - expected_args, - ); - } - } - #[test] fn verify_args_struct_by_command_run_with_max_genesis_archive_unpacked_size() { // long arg @@ -2270,24 +2147,4 @@ mod tests { ); } } - - #[test] - fn verify_args_struct_by_command_run_with_incremental_snapshot_fetch() { - // long arg - { - let default_run_args = RunArgs::default(); - let expected_args = RunArgs { - rpc_bootstrap_config: RpcBootstrapConfig { - incremental_snapshot_fetch: false, - ..RpcBootstrapConfig::default() - }, - ..default_run_args.clone() - }; - verify_args_struct_by_command_run_with_identity_setup( - default_run_args, - vec!["--no-incremental-snapshots"], - expected_args, - ); - } - } } diff --git a/validator/src/commands/run/args/rpc_bootstrap_config.rs b/validator/src/commands/run/args/rpc_bootstrap_config.rs index 4800cfa3053e5f..9c2187896cdb07 100644 --- a/validator/src/commands/run/args/rpc_bootstrap_config.rs +++ b/validator/src/commands/run/args/rpc_bootstrap_config.rs @@ -51,3 +51,161 @@ impl FromClapArgMatches for RpcBootstrapConfig { }) } } + +#[cfg(test)] +mod tests { + use { + super::*, + crate::commands::run::args::{ + 
tests::verify_args_struct_by_command_run_with_identity_setup, RunArgs, + }, + solana_pubkey::Pubkey, + std::{ + collections::HashSet, + net::{IpAddr, Ipv4Addr, SocketAddr}, + }, + }; + + #[test] + fn verify_args_struct_by_command_run_with_no_genesis_fetch() { + // long arg + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + rpc_bootstrap_config: RpcBootstrapConfig { + no_genesis_fetch: true, + ..RpcBootstrapConfig::default() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args.clone(), + vec!["--no-genesis-fetch"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_no_snapshot_fetch() { + // long arg + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + rpc_bootstrap_config: RpcBootstrapConfig { + no_snapshot_fetch: true, + ..RpcBootstrapConfig::default() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args.clone(), + vec!["--no-snapshot-fetch"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_check_vote_account() { + // long arg + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + entrypoints: vec![SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + 8000, + )], + rpc_bootstrap_config: RpcBootstrapConfig { + check_vote_account: Some("https://api.mainnet-beta.solana.com".to_string()), + ..RpcBootstrapConfig::default() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + // required by --check-vote-account + "--entrypoint", + "127.0.0.1:8000", + "--check-vote-account", + "https://api.mainnet-beta.solana.com", + ], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_only_known_rpc() { + // long arg + { + let default_run_args = RunArgs::default(); + let 
known_validators_pubkey = Pubkey::new_unique(); + let known_validators = Some(HashSet::from([known_validators_pubkey])); + let expected_args = RunArgs { + known_validators, + rpc_bootstrap_config: RpcBootstrapConfig { + only_known_rpc: true, + ..RpcBootstrapConfig::default() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + // required by --only-known-rpc + "--known-validator", + &known_validators_pubkey.to_string(), + "--only-known-rpc", + ], + expected_args, + ); + } + + // alias + { + let default_run_args = RunArgs::default(); + let known_validators_pubkey = Pubkey::new_unique(); + let known_validators = Some(HashSet::from([known_validators_pubkey])); + let expected_args = RunArgs { + known_validators, + rpc_bootstrap_config: RpcBootstrapConfig { + only_known_rpc: true, + ..RpcBootstrapConfig::default() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + // required by --no-untrusted-rpc + "--known-validator", + &known_validators_pubkey.to_string(), + "--no-untrusted-rpc", + ], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_incremental_snapshot_fetch() { + // long arg + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + rpc_bootstrap_config: RpcBootstrapConfig { + incremental_snapshot_fetch: false, + ..RpcBootstrapConfig::default() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--no-incremental-snapshots"], + expected_args, + ); + } + } +} From ad6df9156b27b2190d60ea579b2ae1ffa7dfb696 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 22 Jul 2025 22:22:49 -0500 Subject: [PATCH 47/68] validator: Move SnapshotConfig parsing to separate function (#7086) SnapshotConfig has a non-trivial amount of arguments, operations and checks that occur. 
Shift all of this logic into a side function to help organize and de-clutter --- validator/src/commands/run/execute.rs | 377 ++++++++++++++------------ 1 file changed, 197 insertions(+), 180 deletions(-) diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index f95a7312ff6d44..e87556d41cbfea 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -555,6 +555,13 @@ pub fn execute( account_snapshot_paths }; + let snapshot_config = new_snapshot_config( + matches, + &ledger_path, + &account_paths, + run_args.rpc_bootstrap_config.incremental_snapshot_fetch, + )?; + let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -682,6 +689,7 @@ pub fn execute( accounts_db_config, accounts_db_skip_shrink: true, accounts_db_force_initial_clean: matches.is_present("no_skip_initial_accounts_db_clean"), + snapshot_config, tpu_coalesce, no_wait_for_vote_to_start_leader: matches.is_present("no_wait_for_vote_to_start_leader"), runtime_config: RuntimeConfig { @@ -739,191 +747,11 @@ pub fn execute( .expect("invalid dynamic_port_range"); let maximum_local_snapshot_age = value_t_or_exit!(matches, "maximum_local_snapshot_age", u64); - let maximum_full_snapshot_archives_to_retain = - value_t_or_exit!(matches, "maximum_full_snapshots_to_retain", NonZeroUsize); - let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( - matches, - "maximum_incremental_snapshots_to_retain", - NonZeroUsize - ); - let snapshot_packager_niceness_adj = - value_t_or_exit!(matches, "snapshot_packager_niceness_adj", i8); let minimal_snapshot_download_speed = value_t_or_exit!(matches, "minimal_snapshot_download_speed", f32); let maximum_snapshot_download_abort = value_t_or_exit!(matches, "maximum_snapshot_download_abort", u64); - let snapshots_dir = if let Some(snapshots) = matches.value_of("snapshots") { - Path::new(snapshots) - } else { - &ledger_path - }; - let 
snapshots_dir = create_and_canonicalize_directory(snapshots_dir).map_err(|err| { - format!( - "failed to create snapshots directory '{}': {err}", - snapshots_dir.display(), - ) - })?; - - if account_paths - .iter() - .any(|account_path| account_path == &snapshots_dir) - { - Err( - "the --accounts and --snapshots paths must be unique since they \ - both create 'snapshots' subdirectories, otherwise there may be collisions" - .to_string(), - )?; - } - - let bank_snapshots_dir = snapshots_dir.join("snapshots"); - fs::create_dir_all(&bank_snapshots_dir).map_err(|err| { - format!( - "failed to create bank snapshots directory '{}': {err}", - bank_snapshots_dir.display(), - ) - })?; - - let full_snapshot_archives_dir = - if let Some(full_snapshot_archive_path) = matches.value_of("full_snapshot_archive_path") { - PathBuf::from(full_snapshot_archive_path) - } else { - snapshots_dir.clone() - }; - fs::create_dir_all(&full_snapshot_archives_dir).map_err(|err| { - format!( - "failed to create full snapshot archives directory '{}': {err}", - full_snapshot_archives_dir.display(), - ) - })?; - - let incremental_snapshot_archives_dir = if let Some(incremental_snapshot_archive_path) = - matches.value_of("incremental_snapshot_archive_path") - { - PathBuf::from(incremental_snapshot_archive_path) - } else { - snapshots_dir.clone() - }; - fs::create_dir_all(&incremental_snapshot_archives_dir).map_err(|err| { - format!( - "failed to create incremental snapshot archives directory '{}': {err}", - incremental_snapshot_archives_dir.display(), - ) - })?; - - let archive_format = { - let archive_format_str = value_t_or_exit!(matches, "snapshot_archive_format", String); - let mut archive_format = ArchiveFormat::from_cli_arg(&archive_format_str) - .unwrap_or_else(|| panic!("Archive format not recognized: {archive_format_str}")); - if let ArchiveFormat::TarZstd { config } = &mut archive_format { - config.compression_level = - value_t_or_exit!(matches, "snapshot_zstd_compression_level", i32); - } 
- archive_format - }; - - let snapshot_version = matches - .value_of("snapshot_version") - .map(|value| { - value - .parse::() - .map_err(|err| format!("unable to parse snapshot version: {err}")) - }) - .transpose()? - .unwrap_or(SnapshotVersion::default()); - - let (full_snapshot_archive_interval, incremental_snapshot_archive_interval) = - if matches.is_present("no_snapshots") { - // snapshots are disabled - (SnapshotInterval::Disabled, SnapshotInterval::Disabled) - } else { - match ( - run_args.rpc_bootstrap_config.incremental_snapshot_fetch, - value_t_or_exit!(matches, "snapshot_interval_slots", NonZeroU64), - ) { - (true, incremental_snapshot_interval_slots) => { - // incremental snapshots are enabled - // use --snapshot-interval-slots for the incremental snapshot interval - let full_snapshot_interval_slots = - value_t_or_exit!(matches, "full_snapshot_interval_slots", NonZeroU64); - ( - SnapshotInterval::Slots(full_snapshot_interval_slots), - SnapshotInterval::Slots(incremental_snapshot_interval_slots), - ) - } - (false, full_snapshot_interval_slots) => { - // incremental snapshots are *disabled* - // use --snapshot-interval-slots for the *full* snapshot interval - // also warn if --full-snapshot-interval-slots was specified - if matches.occurrences_of("full_snapshot_interval_slots") > 0 { - warn!( - "Incremental snapshots are disabled, yet \ - --full-snapshot-interval-slots was specified! \ - Note that --full-snapshot-interval-slots is *ignored* \ - when incremental snapshots are disabled. 
\ - Use --snapshot-interval-slots instead.", - ); - } - ( - SnapshotInterval::Slots(full_snapshot_interval_slots), - SnapshotInterval::Disabled, - ) - } - } - }; - - validator_config.snapshot_config = SnapshotConfig { - usage: if full_snapshot_archive_interval == SnapshotInterval::Disabled { - SnapshotUsage::LoadOnly - } else { - SnapshotUsage::LoadAndGenerate - }, - full_snapshot_archive_interval, - incremental_snapshot_archive_interval, - bank_snapshots_dir, - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, - archive_format, - snapshot_version, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, - packager_thread_niceness_adj: snapshot_packager_niceness_adj, - }; - - info!( - "Snapshot configuration: full snapshot interval: {}, incremental snapshot interval: {}", - match full_snapshot_archive_interval { - SnapshotInterval::Disabled => "disabled".to_string(), - SnapshotInterval::Slots(interval) => format!("{interval} slots"), - }, - match incremental_snapshot_archive_interval { - SnapshotInterval::Disabled => "disabled".to_string(), - SnapshotInterval::Slots(interval) => format!("{interval} slots"), - }, - ); - - // It is unlikely that a full snapshot interval greater than an epoch is a good idea. - // Minimally we should warn the user in case this was a mistake. - if let SnapshotInterval::Slots(full_snapshot_interval_slots) = full_snapshot_archive_interval { - let full_snapshot_interval_slots = full_snapshot_interval_slots.get(); - if full_snapshot_interval_slots > DEFAULT_SLOTS_PER_EPOCH { - warn!( - "The full snapshot interval is excessively large: {}! This will negatively \ - impact the background cleanup tasks in accounts-db. Consider a smaller value.", - full_snapshot_interval_slots, - ); - } - } - - if !is_snapshot_config_valid(&validator_config.snapshot_config) { - Err( - "invalid snapshot configuration provided: snapshot intervals are incompatible. 
\ - \n\t- full snapshot interval MUST be larger than incremental snapshot interval \ - (if enabled)" - .to_string(), - )?; - } - configure_banking_trace_dir_byte_limit(&mut validator_config, matches); validator_config.block_verification_method = value_t_or_exit!( matches, @@ -1353,6 +1181,195 @@ fn configure_banking_trace_dir_byte_limit( }; } +fn new_snapshot_config( + matches: &ArgMatches, + ledger_path: &Path, + account_paths: &[PathBuf], + incremental_snapshot_fetch: bool, +) -> Result> { + let (full_snapshot_archive_interval, incremental_snapshot_archive_interval) = + if matches.is_present("no_snapshots") { + // snapshots are disabled + (SnapshotInterval::Disabled, SnapshotInterval::Disabled) + } else { + match ( + incremental_snapshot_fetch, + value_t_or_exit!(matches, "snapshot_interval_slots", NonZeroU64), + ) { + (true, incremental_snapshot_interval_slots) => { + // incremental snapshots are enabled + // use --snapshot-interval-slots for the incremental snapshot interval + let full_snapshot_interval_slots = + value_t_or_exit!(matches, "full_snapshot_interval_slots", NonZeroU64); + ( + SnapshotInterval::Slots(full_snapshot_interval_slots), + SnapshotInterval::Slots(incremental_snapshot_interval_slots), + ) + } + (false, full_snapshot_interval_slots) => { + // incremental snapshots are *disabled* + // use --snapshot-interval-slots for the *full* snapshot interval + // also warn if --full-snapshot-interval-slots was specified + if matches.occurrences_of("full_snapshot_interval_slots") > 0 { + warn!( + "Incremental snapshots are disabled, yet \ + --full-snapshot-interval-slots was specified! \ + Note that --full-snapshot-interval-slots is *ignored* \ + when incremental snapshots are disabled. 
\ + Use --snapshot-interval-slots instead.", + ); + } + ( + SnapshotInterval::Slots(full_snapshot_interval_slots), + SnapshotInterval::Disabled, + ) + } + } + }; + + info!( + "Snapshot configuration: full snapshot interval: {}, incremental snapshot interval: {}", + match full_snapshot_archive_interval { + SnapshotInterval::Disabled => "disabled".to_string(), + SnapshotInterval::Slots(interval) => format!("{interval} slots"), + }, + match incremental_snapshot_archive_interval { + SnapshotInterval::Disabled => "disabled".to_string(), + SnapshotInterval::Slots(interval) => format!("{interval} slots"), + }, + ); + // It is unlikely that a full snapshot interval greater than an epoch is a good idea. + // Minimally we should warn the user in case this was a mistake. + if let SnapshotInterval::Slots(full_snapshot_interval_slots) = full_snapshot_archive_interval { + let full_snapshot_interval_slots = full_snapshot_interval_slots.get(); + if full_snapshot_interval_slots > DEFAULT_SLOTS_PER_EPOCH { + warn!( + "The full snapshot interval is excessively large: {}! This will negatively \ + impact the background cleanup tasks in accounts-db. 
Consider a smaller value.", + full_snapshot_interval_slots, + ); + } + } + + let snapshots_dir = if let Some(snapshots) = matches.value_of("snapshots") { + Path::new(snapshots) + } else { + ledger_path + }; + let snapshots_dir = create_and_canonicalize_directory(snapshots_dir).map_err(|err| { + format!( + "failed to create snapshots directory '{}': {err}", + snapshots_dir.display(), + ) + })?; + if account_paths + .iter() + .any(|account_path| account_path == &snapshots_dir) + { + Err( + "the --accounts and --snapshots paths must be unique since they \ + both create 'snapshots' subdirectories, otherwise there may be collisions" + .to_string(), + )?; + } + + let bank_snapshots_dir = snapshots_dir.join("snapshots"); + fs::create_dir_all(&bank_snapshots_dir).map_err(|err| { + format!( + "failed to create bank snapshots directory '{}': {err}", + bank_snapshots_dir.display(), + ) + })?; + + let full_snapshot_archives_dir = + if let Some(full_snapshot_archive_path) = matches.value_of("full_snapshot_archive_path") { + PathBuf::from(full_snapshot_archive_path) + } else { + snapshots_dir.clone() + }; + fs::create_dir_all(&full_snapshot_archives_dir).map_err(|err| { + format!( + "failed to create full snapshot archives directory '{}': {err}", + full_snapshot_archives_dir.display(), + ) + })?; + + let incremental_snapshot_archives_dir = if let Some(incremental_snapshot_archive_path) = + matches.value_of("incremental_snapshot_archive_path") + { + PathBuf::from(incremental_snapshot_archive_path) + } else { + snapshots_dir.clone() + }; + fs::create_dir_all(&incremental_snapshot_archives_dir).map_err(|err| { + format!( + "failed to create incremental snapshot archives directory '{}': {err}", + incremental_snapshot_archives_dir.display(), + ) + })?; + + let archive_format = { + let archive_format_str = value_t_or_exit!(matches, "snapshot_archive_format", String); + let mut archive_format = ArchiveFormat::from_cli_arg(&archive_format_str) + .unwrap_or_else(|| panic!("Archive format 
not recognized: {archive_format_str}")); + if let ArchiveFormat::TarZstd { config } = &mut archive_format { + config.compression_level = + value_t_or_exit!(matches, "snapshot_zstd_compression_level", i32); + } + archive_format + }; + + let snapshot_version = matches + .value_of("snapshot_version") + .map(|value| { + value + .parse::() + .map_err(|err| format!("unable to parse snapshot version: {err}")) + }) + .transpose()? + .unwrap_or(SnapshotVersion::default()); + + let maximum_full_snapshot_archives_to_retain = + value_t_or_exit!(matches, "maximum_full_snapshots_to_retain", NonZeroUsize); + let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( + matches, + "maximum_incremental_snapshots_to_retain", + NonZeroUsize + ); + + let snapshot_packager_niceness_adj = + value_t_or_exit!(matches, "snapshot_packager_niceness_adj", i8); + + let snapshot_config = SnapshotConfig { + usage: if full_snapshot_archive_interval == SnapshotInterval::Disabled { + SnapshotUsage::LoadOnly + } else { + SnapshotUsage::LoadAndGenerate + }, + full_snapshot_archive_interval, + incremental_snapshot_archive_interval, + bank_snapshots_dir, + full_snapshot_archives_dir, + incremental_snapshot_archives_dir, + archive_format, + snapshot_version, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, + packager_thread_niceness_adj: snapshot_packager_niceness_adj, + }; + + if !is_snapshot_config_valid(&snapshot_config) { + Err( + "invalid snapshot configuration provided: snapshot intervals are incompatible. 
\ + \n\t- full snapshot interval MUST be larger than incremental snapshot interval \ + (if enabled)" + .to_string(), + )?; + } + + Ok(snapshot_config) +} + fn process_account_indexes(matches: &ArgMatches) -> AccountSecondaryIndexes { let account_indexes: HashSet = matches .values_of("account_indexes") From db5c8ee084b02340ecbb1e0c812e952508300a7f Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 23 Jul 2025 03:28:13 -0500 Subject: [PATCH 48/68] ledger: Remove deprecated legacy shred functions (#7094) --- ledger/src/shred.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 9e12bb19ec7b3a..f3f88d07d9a723 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -412,11 +412,6 @@ impl Shred { dispatch!(pub fn payload(&self) -> &Payload); dispatch!(pub fn sanitize(&self) -> Result<(), Error>); - #[deprecated(since = "2.3.0")] - pub fn set_index(&mut self, _index: u32) {} - #[deprecated(since = "2.3.0")] - pub fn set_slot(&mut self, _slot: Slot) {} - #[cfg(any(test, feature = "dev-context-only-utils"))] pub fn copy_to_packet(&self, packet: &mut Packet) { let payload = self.payload(); From d5a5b92e86dd23dd5d7050549089e3befdbb5695 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Jul 2025 21:29:54 +0800 Subject: [PATCH 49/68] build(deps): bump io-uring from 0.7.8 to 0.7.9 (#7102) * build(deps): bump io-uring from 0.7.8 to 0.7.9 Bumps [io-uring](https://github.com/tokio-rs/io-uring) from 0.7.8 to 0.7.9. - [Commits](https://github.com/tokio-rs/io-uring/commits) --- updated-dependencies: - dependency-name: io-uring dependency-version: 0.7.9 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c387e8e731d56..bb211e9cc1dd61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3882,9 +3882,9 @@ dependencies = [ [[package]] name = "io-uring" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" dependencies = [ "bitflags 2.9.1", "cfg-if 1.0.1", diff --git a/Cargo.toml b/Cargo.toml index c5b5d87773248a..e293bd5d28a478 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -282,7 +282,7 @@ hyper-proxy = "0.9.1" im = "15.1.0" indexmap = "2.10.0" indicatif = "0.18.0" -io-uring = "0.7.8" +io-uring = "0.7.9" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.6.0", features = [ "unprefixed_malloc_on_supported_platforms", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7fb87bdc36ef7b..c08b474bd58879 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2940,9 +2940,9 @@ dependencies = [ [[package]] name = "io-uring" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" dependencies = [ "bitflags 2.9.1", "cfg-if 1.0.0", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 597aff37baf6c0..a5bfdb088bdcd2 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -2800,9 +2800,9 @@ dependencies = [ [[package]] 
name = "io-uring" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" dependencies = [ "bitflags 2.9.1", "cfg-if 1.0.0", From 85cd1d9b6cdad403996fa6f03940b7844fa18717 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 23 Jul 2025 11:20:31 -0400 Subject: [PATCH 50/68] Uses Rust's div_ceil() (#7096) --- runtime/src/bank/partitioned_epoch_rewards/mod.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index e3b9f87973c682..6231e081702a1d 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -235,10 +235,9 @@ impl Bank { 1 } else { const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10; - let num_chunks = solana_accounts_db::accounts_hash::AccountsHasher::div_ceil( - total_stake_accounts, - self.partitioned_rewards_stake_account_stores_per_block() as usize, - ) as u64; + let num_chunks = total_stake_accounts + .div_ceil(self.partitioned_rewards_stake_account_stores_per_block() as usize) + as u64; // Limit the reward credit interval to 10% of the total number of slots in a epoch num_chunks.clamp( From 526cf8d99609048a08d382feb0d9dfcb79a16b65 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 23 Jul 2025 12:01:13 -0400 Subject: [PATCH 51/68] Removes update_accounts_hash() family of fns (#7091) --- accounts-db/benches/accounts.rs | 16 -- accounts-db/src/accounts_db.rs | 266 +-------------------------- accounts-db/src/accounts_db/tests.rs | 153 +-------------- runtime/src/serde_snapshot.rs | 2 +- runtime/src/serde_snapshot/tests.rs | 42 +---- 5 files changed, 14 insertions(+), 465 deletions(-) diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index 
332460f9c9c3de..70776106b6c581 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -42,22 +42,6 @@ fn new_accounts_db(account_paths: Vec) -> AccountsDb { ) } -#[bench] -fn bench_update_accounts_hash(bencher: &mut Bencher) { - solana_logger::setup(); - let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - create_test_accounts(&accounts, &mut pubkeys, 50_000, 0); - accounts.accounts_db.add_root_and_flush_write_cache(0); - let ancestors = Ancestors::from(vec![0]); - bencher.iter(|| { - accounts - .accounts_db - .update_accounts_hash_for_tests(0, &ancestors, false, false); - }); -} - #[bench] fn bench_accounts_delta_hash(bencher: &mut Bencher) { solana_logger::setup(); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1a0431a2cbc10d..38c7c3741c43e7 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5930,100 +5930,6 @@ impl AccountsDb { AccountsHasher::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::()) } - pub fn calculate_accounts_hash_from_index( - &self, - max_slot: Slot, - config: &CalcAccountsHashConfig<'_>, - ) -> (AccountsHash, u64) { - let mut collect = Measure::start("collect"); - let keys: Vec<_> = self - .accounts_index - .account_maps - .iter() - .flat_map(|map| { - let mut keys = map.keys(); - keys.sort_unstable(); // hashmap is not ordered, but bins are relative to each other - keys - }) - .collect(); - collect.stop(); - - // Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size. - // We'll also accumulate the lamports within each chunk and fewer chunks results in less contention to accumulate the sum. 
- let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4); - let total_lamports = Mutex::::new(0); - - let get_account_hashes = || { - keys.par_chunks(chunks) - .map(|pubkeys| { - let mut sum = 0u128; - let account_hashes: Vec = pubkeys - .iter() - .filter_map(|pubkey| { - let index_entry = self.accounts_index.get_cloned(pubkey)?; - self.accounts_index - .get_account_info_with_and_then( - &index_entry, - config.ancestors, - Some(max_slot), - |(slot, account_info)| { - if account_info.is_zero_lamport() { - return None; - } - self.get_account_accessor( - slot, - pubkey, - &account_info.storage_location(), - ) - .get_loaded_account(|loaded_account| { - let mut loaded_hash = loaded_account.loaded_hash(); - let balance = loaded_account.lamports(); - let hash_is_missing = - loaded_hash == AccountHash(Hash::default()); - if hash_is_missing { - let computed_hash = Self::hash_account( - &loaded_account, - loaded_account.pubkey(), - ); - loaded_hash = computed_hash; - } - sum += balance as u128; - loaded_hash.0 - }) - }, - ) - .flatten() - }) - .collect(); - let mut total = total_lamports.lock().unwrap(); - *total = AccountsHasher::checked_cast_for_capitalization(*total as u128 + sum); - account_hashes - }) - .collect() - }; - - let mut scan = Measure::start("scan"); - let account_hashes: Vec> = self.thread_pool_clean.install(get_account_hashes); - scan.stop(); - - let total_lamports = *total_lamports.lock().unwrap(); - - let mut hash_time = Measure::start("hash"); - let (accumulated_hash, hash_total) = AccountsHasher::calculate_hash(account_hashes); - hash_time.stop(); - - datapoint_info!( - "calculate_accounts_hash_from_index", - ("accounts_scan", scan.as_us(), i64), - ("hash", hash_time.as_us(), i64), - ("hash_total", hash_total, i64), - ("collect", collect.as_us(), i64), - ); - - let accounts_hash = AccountsHash(accumulated_hash); - (accounts_hash, total_lamports) - } - /// Calculates the accounts lt hash /// /// Only intended to be called at startup (or by tests). 
@@ -6172,26 +6078,6 @@ impl AccountsDb { .expect("capitalization cannot overflow") } - /// This is only valid to call from tests. - /// run the accounts hash calculation and store the results - pub fn update_accounts_hash_for_tests( - &self, - slot: Slot, - ancestors: &Ancestors, - debug_verify: bool, - is_startup: bool, - ) -> (AccountsHash, u64) { - self.update_accounts_hash_with_verify_from( - CalcAccountsHashDataSource::IndexForTests, - debug_verify, - slot, - ancestors, - None, - &EpochSchedule::default(), - is_startup, - ) - } - fn update_old_slot_stats(&self, stats: &HashStats, storage: Option<&Arc>) { if let Some(storage) = storage { stats.roots_older_than_epoch.fetch_add(1, Ordering::Relaxed); @@ -6259,155 +6145,6 @@ impl AccountsDb { true } - pub fn calculate_accounts_hash_from( - &self, - data_source: CalcAccountsHashDataSource, - slot: Slot, - config: &CalcAccountsHashConfig<'_>, - ) -> (AccountsHash, u64) { - match data_source { - CalcAccountsHashDataSource::Storages => { - if self.accounts_cache.contains_any_slots(slot) { - // this indicates a race condition - inc_new_counter_info!("accounts_hash_items_in_write_cache", 1); - } - - let mut collect_time = Measure::start("collect"); - let (combined_maps, slots) = self.get_storages(..=slot); - collect_time.stop(); - - let mut sort_time = Measure::start("sort_storages"); - let min_root = self.accounts_index.min_alive_root(); - let storages = SortedStorages::new_with_slots( - combined_maps.iter().zip(slots), - min_root, - Some(slot), - ); - sort_time.stop(); - - let mut timings = HashStats { - collect_snapshots_us: collect_time.as_us(), - storage_sort_us: sort_time.as_us(), - ..HashStats::default() - }; - timings.calc_storage_size_quartiles(&combined_maps); - - self.calculate_accounts_hash(config, &storages, timings) - } - CalcAccountsHashDataSource::IndexForTests => { - self.calculate_accounts_hash_from_index(slot, config) - } - } - } - - fn calculate_accounts_hash_with_verify_from( - &self, - data_source: 
CalcAccountsHashDataSource, - debug_verify: bool, - slot: Slot, - config: CalcAccountsHashConfig<'_>, - expected_capitalization: Option, - ) -> (AccountsHash, u64) { - let (accounts_hash, total_lamports) = - self.calculate_accounts_hash_from(data_source, slot, &config); - if debug_verify { - // calculate the other way (store or non-store) and verify results match. - let data_source_other = match data_source { - CalcAccountsHashDataSource::IndexForTests => CalcAccountsHashDataSource::Storages, - CalcAccountsHashDataSource::Storages => CalcAccountsHashDataSource::IndexForTests, - }; - let (accounts_hash_other, total_lamports_other) = - self.calculate_accounts_hash_from(data_source_other, slot, &config); - - let success = accounts_hash == accounts_hash_other - && total_lamports == total_lamports_other - && total_lamports == expected_capitalization.unwrap_or(total_lamports); - assert!( - success, - "calculate_accounts_hash_with_verify mismatch. hashes: {}, {}; lamports: {}, {}; \ - expected lamports: {:?}, data source: {:?}, slot: {}", - accounts_hash.0, - accounts_hash_other.0, - total_lamports, - total_lamports_other, - expected_capitalization, - data_source, - slot - ); - } - (accounts_hash, total_lamports) - } - - /// run the accounts hash calculation and store the results - #[allow(clippy::too_many_arguments)] - pub fn update_accounts_hash_with_verify_from( - &self, - data_source: CalcAccountsHashDataSource, - debug_verify: bool, - slot: Slot, - ancestors: &Ancestors, - expected_capitalization: Option, - epoch_schedule: &EpochSchedule, - is_startup: bool, - ) -> (AccountsHash, u64) { - let epoch = epoch_schedule.get_epoch(slot); - let (accounts_hash, total_lamports) = self.calculate_accounts_hash_with_verify_from( - data_source, - debug_verify, - slot, - CalcAccountsHashConfig { - use_bg_thread_pool: !is_startup, - ancestors: Some(ancestors), - epoch_schedule, - epoch, - store_detailed_debug_info_on_failure: false, - }, - expected_capitalization, - ); - 
self.set_accounts_hash(slot, (accounts_hash, total_lamports)); - (accounts_hash, total_lamports) - } - - /// Calculate the full accounts hash for `storages` and save the results at `slot` - pub fn update_accounts_hash( - &self, - config: &CalcAccountsHashConfig<'_>, - storages: &SortedStorages<'_>, - slot: Slot, - stats: HashStats, - ) -> (AccountsHash, /*capitalization*/ u64) { - let accounts_hash = self.calculate_accounts_hash(config, storages, stats); - let old_accounts_hash = self.set_accounts_hash(slot, accounts_hash); - if let Some(old_accounts_hash) = old_accounts_hash { - warn!( - "Accounts hash was already set for slot {slot}! old: {old_accounts_hash:?}, new: \ - {accounts_hash:?}" - ); - } - accounts_hash - } - - /// Calculate the incremental accounts hash for `storages` and save the results at `slot` - pub fn update_incremental_accounts_hash( - &self, - config: &CalcAccountsHashConfig<'_>, - storages: &SortedStorages<'_>, - slot: Slot, - stats: HashStats, - ) -> (IncrementalAccountsHash, /*capitalization*/ u64) { - let incremental_accounts_hash = - self.calculate_incremental_accounts_hash(config, storages, stats); - let old_incremental_accounts_hash = - self.set_incremental_accounts_hash(slot, incremental_accounts_hash); - if let Some(old_incremental_accounts_hash) = old_incremental_accounts_hash { - warn!( - "Incremental accounts hash was already set for slot {slot}! old: \ - {old_incremental_accounts_hash:?}, new: {incremental_accounts_hash:?}" - ); - } - incremental_accounts_hash - } - /// Set the accounts hash for `slot` /// /// returns the previous accounts hash for `slot` @@ -6541,6 +6278,7 @@ impl AccountsDb { /// /// This is intended to be used by startup verification, and also AccountsHashVerifier. /// Uses account storage files as the data source for the calculation. 
+ // obsolete, will be removed next pub fn calculate_accounts_hash( &self, config: &CalcAccountsHashConfig<'_>, @@ -6567,6 +6305,7 @@ impl AccountsDb { /// included in the incremental snapshot. This ensures reconstructing the AccountsDb is /// still correct when using this incremental accounts hash. /// - `storages` must be the same as the ones going into the incremental snapshot. + // obsolete, will be removed next pub fn calculate_incremental_accounts_hash( &self, config: &CalcAccountsHashConfig<'_>, @@ -6587,6 +6326,7 @@ impl AccountsDb { /// The shared code for calculating accounts hash from storages. /// Used for both full accounts hash and incremental accounts hash calculation. + // obsolete, will be removed next fn calculate_accounts_hash_from_storages( &self, config: &CalcAccountsHashConfig<'_>, diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 03f8f952352668..bca593f2887858 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -1866,12 +1866,12 @@ fn test_accounts_db_purge1() { let ancestors = linear_ancestors(current_slot); info!("ancestors: {ancestors:?}"); - let hash = accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true); + let hash = accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot); accounts.clean_accounts_for_tests(); assert_eq!( - accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true), + accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot), hash ); @@ -2153,7 +2153,6 @@ fn test_verify_bank_capitalization() { db.store_for_tests(some_slot, &[(&key, &account)]); if pass == 0 { db.add_root_and_flush_write_cache(some_slot); - db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); assert_eq!( db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot), @@ -2171,7 +2170,6 @@ fn test_verify_bank_capitalization() { )], ); 
db.add_root_and_flush_write_cache(some_slot); - db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); assert_eq!( db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot), @@ -7061,153 +7059,6 @@ fn test_handle_dropped_roots_for_ancient_assert() { db.handle_dropped_roots_for_ancient(dropped_roots.into_iter()); } -define_accounts_db_test!(test_calculate_incremental_accounts_hash, |accounts_db| { - let owner = Pubkey::new_unique(); - let mut accounts: Vec<_> = (0..10) - .map(|_| (Pubkey::new_unique(), AccountSharedData::new(0, 0, &owner))) - .collect(); - - // store some accounts into slot 0 - let slot = 0; - { - accounts[0].1.set_lamports(0); - accounts[1].1.set_lamports(1); - accounts[2].1.set_lamports(10); - accounts[3].1.set_lamports(100); - //accounts[4].1.set_lamports(1_000); <-- will be added next slot - - let accounts = vec![ - (&accounts[0].0, &accounts[0].1), - (&accounts[1].0, &accounts[1].1), - (&accounts[2].0, &accounts[2].1), - (&accounts[3].0, &accounts[3].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // store some accounts into slot 1 - let slot = slot + 1; - { - //accounts[0].1.set_lamports(0); <-- unchanged - accounts[1].1.set_lamports(0); /* <-- drain account */ - //accounts[2].1.set_lamports(10); <-- unchanged - //accounts[3].1.set_lamports(100); <-- unchanged - accounts[4].1.set_lamports(1_000); /* <-- add account */ - - let accounts = vec![ - (&accounts[1].0, &accounts[1].1), - (&accounts[4].0, &accounts[4].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // calculate the full accounts hash - let full_accounts_hash = { - accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default()); - let (storages, _) = accounts_db.get_storages(..=slot); - let storages = SortedStorages::new(&storages); - accounts_db.calculate_accounts_hash( - 
&CalcAccountsHashConfig::default(), - &storages, - HashStats::default(), - ) - }; - assert_eq!(full_accounts_hash.1, 1_110); - let full_accounts_hash_slot = slot; - - // Calculate the expected full accounts hash here and ensure it matches. - // Ensure the zero-lamport accounts are NOT included in the full accounts hash. - let full_account_hashes = [(2, 0), (3, 0), (4, 1)].into_iter().map(|(index, _slot)| { - let (pubkey, account) = &accounts[index]; - AccountsDb::hash_account(account, pubkey).0 - }); - let expected_accounts_hash = AccountsHash(compute_merkle_root(full_account_hashes)); - assert_eq!(full_accounts_hash.0, expected_accounts_hash); - - // store accounts into slot 2 - let slot = slot + 1; - { - //accounts[0].1.set_lamports(0); <-- unchanged - //accounts[1].1.set_lamports(0); <-- unchanged - accounts[2].1.set_lamports(0); /* <-- drain account */ - //accounts[3].1.set_lamports(100); <-- unchanged - //accounts[4].1.set_lamports(1_000); <-- unchanged - accounts[5].1.set_lamports(10_000); /* <-- add account */ - accounts[6].1.set_lamports(100_000); /* <-- add account */ - //accounts[7].1.set_lamports(1_000_000); <-- will be added next slot - - let accounts = vec![ - (&accounts[2].0, &accounts[2].1), - (&accounts[5].0, &accounts[5].1), - (&accounts[6].0, &accounts[6].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // store accounts into slot 3 - let slot = slot + 1; - { - //accounts[0].1.set_lamports(0); <-- unchanged - //accounts[1].1.set_lamports(0); <-- unchanged - //accounts[2].1.set_lamports(0); <-- unchanged - accounts[3].1.set_lamports(0); /* <-- drain account */ - //accounts[4].1.set_lamports(1_000); <-- unchanged - accounts[5].1.set_lamports(0); /* <-- drain account */ - //accounts[6].1.set_lamports(100_000); <-- unchanged - accounts[7].1.set_lamports(1_000_000); /* <-- add account */ - - let accounts = vec![ - (&accounts[3].0, &accounts[3].1), - (&accounts[5].0, 
&accounts[5].1), - (&accounts[7].0, &accounts[7].1), - ]; - accounts_db.store_cached((slot, accounts.as_slice())); - accounts_db.add_root_and_flush_write_cache(slot); - } - - // calculate the incremental accounts hash - let incremental_accounts_hash = { - accounts_db.set_latest_full_snapshot_slot(full_accounts_hash_slot); - accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default()); - let (storages, _) = accounts_db.get_storages(full_accounts_hash_slot + 1..=slot); - let storages = SortedStorages::new(&storages); - accounts_db.calculate_incremental_accounts_hash( - &CalcAccountsHashConfig::default(), - &storages, - HashStats::default(), - ) - }; - assert_eq!(incremental_accounts_hash.1, 1_100_000); - - // Ensure the zero-lamport accounts are included in the IAH. - // Accounts 2, 3, and 5 are all zero-lamports. - let incremental_account_hashes = - [(2, 2), (3, 3), (5, 3), (6, 2), (7, 3)] - .into_iter() - .map(|(index, _slot)| { - let (pubkey, account) = &accounts[index]; - if account.is_zero_lamport() { - // For incremental accounts hash, the hash of a zero lamport account is the hash of its pubkey. - // Ensure this implementation detail remains in sync with AccountsHasher::de_dup_in_parallel(). 
- let hash = blake3::hash(bytemuck::bytes_of(pubkey)); - Hash::new_from_array(hash.into()) - } else { - AccountsDb::hash_account(account, pubkey).0 - } - }); - let expected_accounts_hash = - IncrementalAccountsHash(compute_merkle_root(incremental_account_hashes)); - assert_eq!(incremental_accounts_hash.0, expected_accounts_hash); -}); - -fn compute_merkle_root(hashes: impl IntoIterator) -> Hash { - let hashes = hashes.into_iter().collect(); - AccountsHasher::compute_merkle_root_recurse(hashes, MERKLE_FANOUT) -} - /// Test that `clean` reclaims old accounts when cleaning old storages /// /// When `clean` constructs candidates from old storages, pubkeys in these storages may have other diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 2ddc70897660cc..60f7e9c5ab7dab 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -784,7 +784,7 @@ struct SerializableAccountsDb<'a> { account_storage_entries: &'a [Vec>], bank_hash_stats: BankHashStats, accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next - accounts_hash: AccountsHash, + accounts_hash: AccountsHash, // obsolete, will be removed next write_version: u64, } diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 7ddd5c0a54ef16..e00dbcfd14f004 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -104,7 +104,7 @@ mod serde_snapshot_tests { { let bank_hash_stats = BankHashStats::default(); let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); - let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; + let accounts_hash = AccountsHash(Hash::default()); // obsolete, any value works let write_version = accounts_db.write_version.load(Ordering::Acquire); serialize_into( stream, @@ -219,10 +219,9 @@ mod serde_snapshot_tests { check_accounts_local(&accounts, &pubkeys, 100); accounts.accounts_db.add_root_and_flush_write_cache(slot); let 
accounts_delta_hash = accounts.accounts_db.calculate_accounts_delta_hash(slot); - let accounts_hash = AccountsHash(Hash::new_unique()); - accounts + let accounts_hash = accounts .accounts_db - .set_accounts_hash(slot, (accounts_hash, u64::default())); + .calculate_accounts_lt_hash_at_startup_from_index(&Ancestors::default(), slot); let mut writer = Cursor::new(vec![]); accountsdb_to_stream( @@ -257,7 +256,9 @@ mod serde_snapshot_tests { check_accounts_local(&daccounts, &pubkeys, 100); let daccounts_delta_hash = daccounts.accounts_db.calculate_accounts_delta_hash(slot); assert_eq!(accounts_delta_hash, daccounts_delta_hash); - let daccounts_hash = daccounts.accounts_db.get_accounts_hash(slot).unwrap().0; + let daccounts_hash = accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup_from_index(&Ancestors::default(), slot); assert_eq!(accounts_hash, daccounts_hash); } @@ -282,7 +283,6 @@ mod serde_snapshot_tests { db.add_root_and_flush_write_cache(new_root); db.calculate_accounts_delta_hash(new_root); - db.update_accounts_hash_for_tests(new_root, &linear_ancestors(new_root), false, false); // Simulate reconstruction from snapshot let db = reconstruct_accounts_db_via_serialization(&db, new_root, storage_access); @@ -364,7 +364,6 @@ mod serde_snapshot_tests { accounts.check_storage(2, 31, 31); let ancestors = linear_ancestors(latest_slot); - accounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false); accounts.clean_accounts_for_tests(); // The first 20 accounts of slot 0 have been updated in slot 2, as well as @@ -392,10 +391,6 @@ mod serde_snapshot_tests { daccounts.get_accounts_delta_hash(latest_slot).unwrap(), accounts.get_accounts_delta_hash(latest_slot).unwrap(), ); - assert_eq!( - daccounts.get_accounts_hash(latest_slot).unwrap().0, - accounts.get_accounts_hash(latest_slot).unwrap().0, - ); daccounts.print_count_and_status("daccounts"); @@ -407,8 +402,8 @@ mod serde_snapshot_tests { daccounts.check_storage(2, 31, 31); assert_eq!( - 
daccounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false,), - accounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false,) + daccounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, latest_slot), + accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, latest_slot), ); } } @@ -451,12 +446,6 @@ mod serde_snapshot_tests { accounts.print_accounts_stats("accounts_post_purge"); accounts.calculate_accounts_delta_hash(current_slot); - accounts.update_accounts_hash_for_tests( - current_slot, - &linear_ancestors(current_slot), - false, - false, - ); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); @@ -509,7 +498,6 @@ mod serde_snapshot_tests { accounts.print_accounts_stats("pre_f"); accounts.calculate_accounts_delta_hash(current_slot); - accounts.update_accounts_hash_for_tests(4, &Ancestors::default(), false, false); let accounts = f(accounts, current_slot); @@ -602,12 +590,6 @@ mod serde_snapshot_tests { accounts.print_count_and_status("before reconstruct"); accounts.calculate_accounts_delta_hash(current_slot); - accounts.update_accounts_hash_for_tests( - current_slot, - &linear_ancestors(current_slot), - false, - false, - ); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); accounts.print_count_and_status("before purge zero"); @@ -720,12 +702,6 @@ mod serde_snapshot_tests { // So, prevent that from happening by introducing refcount ((current_slot - 1)..=current_slot).for_each(|slot| accounts.flush_root_write_cache(slot)); accounts.clean_accounts_for_tests(); - accounts.update_accounts_hash_for_tests( - current_slot, - &linear_ancestors(current_slot), - false, - false, - ); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); accounts.clean_accounts_for_tests(); @@ -809,8 +785,6 @@ mod serde_snapshot_tests { let no_ancestors = Ancestors::default(); let 
epoch_schedule = EpochSchedule::default(); - accounts.update_accounts_hash_for_tests(current_slot, &no_ancestors, false, false); - let calculated_capitalization = accounts .calculate_capitalization_at_startup_from_index(&no_ancestors, current_slot); let expected_capitalization = 22_300; From 3ca1e747f054bef1c8235610bfadfc5685744af5 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Wed, 23 Jul 2025 09:18:52 -0700 Subject: [PATCH 52/68] svm: test for nonce inspection (#7090) --- svm/src/transaction_processor.rs | 4 - svm/tests/integration_test.rs | 147 +++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+), 4 deletions(-) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index d9266d3d5f1cf0..8d26e35bb5e49c 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -624,10 +624,6 @@ impl TransactionBatchProcessor { // We must validate the account in case it was reopened, either as a normal system account, // or a fake nonce account. We must also check the signer in case the authority was changed. // - // We do not need to inspect the nonce account here, because by definition it is either the - // first account, inspected in `validate_transaction_fee_payer()`, or the second through nth - // account, inspected in `load_transaction()`. - // // Note these checks are *not* obviated by fee-only transactions. 
let nonce_is_valid = account_loader .load_transaction_account(nonce_info.address(), true) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index f6f0271886dea1..9dd7e60f3e6987 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -2439,6 +2439,153 @@ impl InspectedAccounts { } } +#[test_case(false, false; "separate_nonce::old")] +#[test_case(false, true; "separate_nonce::simd186")] +#[test_case(true, false; "fee_paying_nonce::old")] +#[test_case(true, true; "fee_paying_nonce::simd186")] +fn svm_inspect_nonce_load_failure( + fee_paying_nonce: bool, + formalize_loaded_transaction_data_size: bool, +) { + let mut test_entry = SvmTestEntry::default(); + let mut expected_inspected_accounts = InspectedAccounts::default(); + + if !formalize_loaded_transaction_data_size { + test_entry + .disabled_features + .push(feature_set::formalize_loaded_transaction_data_size::id()); + } + + let fee_payer_keypair = Keypair::new(); + let dummy_keypair = Keypair::new(); + let separate_nonce_keypair = Keypair::new(); + + let fee_payer = fee_payer_keypair.pubkey(); + let dummy = dummy_keypair.pubkey(); + let nonce_pubkey = if fee_paying_nonce { + fee_payer + } else { + separate_nonce_keypair.pubkey() + }; + + let initial_durable = DurableNonce::from_blockhash(&Hash::new_unique()); + let initial_nonce_data = + nonce::state::Data::new(fee_payer, initial_durable, LAMPORTS_PER_SIGNATURE); + let mut initial_nonce_account = AccountSharedData::new_data( + LAMPORTS_PER_SOL, + &nonce::versions::Versions::new(nonce::state::State::Initialized( + initial_nonce_data.clone(), + )), + &system_program::id(), + ) + .unwrap(); + initial_nonce_account.set_rent_epoch(u64::MAX); + let initial_nonce_account = initial_nonce_account; + let initial_nonce_info = NonceInfo::new(nonce_pubkey, initial_nonce_account.clone()); + + let advanced_durable = DurableNonce::from_blockhash(&LAST_BLOCKHASH); + let mut advanced_nonce_info = initial_nonce_info.clone(); + 
advanced_nonce_info + .try_advance_nonce(advanced_durable, LAMPORTS_PER_SIGNATURE) + .unwrap(); + + let compute_instruction = ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(1); + let advance_instruction = system_instruction::advance_nonce_account(&nonce_pubkey, &fee_payer); + let fee_only_noop_instruction = Instruction::new_with_bytes( + Pubkey::new_unique(), + &[], + vec![AccountMeta { + pubkey: dummy, + is_writable: true, + is_signer: true, + }], + ); + + test_entry.add_initial_account(nonce_pubkey, &initial_nonce_account); + + let mut separate_fee_payer_account = AccountSharedData::default(); + separate_fee_payer_account.set_lamports(LAMPORTS_PER_SOL); + let separate_fee_payer_account = separate_fee_payer_account; + + let dummy_account = + AccountSharedData::create(1, vec![0; 2], system_program::id(), false, u64::MAX); + test_entry.add_initial_account(dummy, &dummy_account); + + // we always inspect the nonce at least once + expected_inspected_accounts.inspect(nonce_pubkey, Inspect::LiveWrite(&initial_nonce_account)); + + // if we have a fee-paying nonce, we happen to inspect it again + // this is an unimportant implementation detail and also means these cases are trivial + // the true test is a separate nonce, to ensure we inspect it in pre-checks + if fee_paying_nonce { + expected_inspected_accounts + .inspect(nonce_pubkey, Inspect::LiveWrite(&initial_nonce_account)); + } else { + test_entry.add_initial_account(fee_payer, &separate_fee_payer_account); + expected_inspected_accounts + .inspect(fee_payer, Inspect::LiveWrite(&separate_fee_payer_account)); + } + + // with simd186, transaction loading aborts when we hit the fee-payer because of TRANSACTION_ACCOUNT_BASE_SIZE + // without simd186, transaction loading aborts on the dummy account, so it also happens to be inspected + // the difference is immaterial to the test as long as it happens before the nonce is loaded for the transaction + if !fee_paying_nonce && 
!formalize_loaded_transaction_data_size { + expected_inspected_accounts.inspect(dummy, Inspect::LiveWrite(&dummy_account)); + } + + // by signing with the dummy account we ensure it precedes a separate nonce + let transaction = Transaction::new_signed_with_payer( + &[ + compute_instruction, + advance_instruction, + fee_only_noop_instruction, + ], + Some(&fee_payer), + &[&fee_payer_keypair, &dummy_keypair], + *initial_durable.as_hash(), + ); + if !fee_paying_nonce { + let sanitized = SanitizedTransaction::from_transaction_for_tests(transaction.clone()); + let dummy_index = sanitized + .account_keys() + .iter() + .position(|key| *key == dummy) + .unwrap(); + let nonce_index = sanitized + .account_keys() + .iter() + .position(|key| *key == nonce_pubkey) + .unwrap(); + assert!(dummy_index < nonce_index); + } + + test_entry.push_nonce_transaction_with_status( + transaction, + initial_nonce_info.clone(), + ExecutionStatus::ProcessedFailed, + ); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE * 2); + test_entry + .final_accounts + .get_mut(&nonce_pubkey) + .unwrap() + .data_as_mut_slice() + .copy_from_slice(advanced_nonce_info.account().data()); + + let env = SvmTestEnvironment::create(test_entry.clone()); + env.execute(); + + let actual_inspected_accounts = env.mock_bank.inspected_accounts.read().unwrap().clone(); + for (expected_pubkey, expected_account) in &expected_inspected_accounts.0 { + let actual_account = actual_inspected_accounts.get(expected_pubkey).unwrap(); + assert_eq!( + expected_account, actual_account, + "pubkey: {expected_pubkey}", + ); + } +} + #[test] fn svm_inspect_account() { let mut initial_test_entry = SvmTestEntry::default(); From be57238072d86cb93603f21a4b9554c657b88cd5 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 23 Jul 2025 11:21:22 -0500 Subject: [PATCH 53/68] validator: Remove use of ValidatorConfig::default() (#7100) A default function is certainly useful for tests to avoid tons of copy/paste. 
However, we should avoid it in production code to reduce the chance of accidental misconfiguration or forgetting to update somewhere --- validator/src/commands/run/execute.rs | 80 +++++++++++++++------------ 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index e87556d41cbfea..83ed9934dad17e 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -17,6 +17,7 @@ use { AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, AccountsIndexConfig, IndexLimitMb, ScanFilter, }, + hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, utils::{ create_all_accounts_run_and_snapshot_dirs, create_and_canonicalize_directories, create_and_canonicalize_directory, @@ -29,6 +30,7 @@ use { solana_core::{ banking_trace::DISABLED_BAKING_TRACE_DIR, consensus::tower_storage, + repair::repair_handler::RepairHandlerType, snapshot_packager_service::SnapshotPackagerService, system_monitor_service::SystemMonitorService, validator::{ @@ -38,7 +40,7 @@ use { }, }, solana_gossip::{ - cluster_info::{BindIpAddrs, Node, NodeConfig}, + cluster_info::{BindIpAddrs, Node, NodeConfig, DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS}, contact_info::ContactInfo, }, solana_hash::Hash, @@ -67,7 +69,11 @@ use { socket::SocketAddrSpace, }, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, - solana_turbine::xdp::{set_cpu_affinity, XdpConfig}, + solana_turbine::{ + broadcast_stage::BroadcastStageType, + xdp::{set_cpu_affinity, XdpConfig}, + }, + solana_validator_exit::Exit, std::{ collections::HashSet, fs::{self, File}, @@ -566,9 +572,11 @@ pub fn execute( require_tower: matches.is_present("require_tower"), tower_storage, halt_at_slot: value_t!(matches, "dev_halt_at_slot", Slot).ok(), + max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, expected_genesis_hash: matches .value_of("expected_genesis_hash") .map(|s| Hash::from_str(s).unwrap()), + 
fixed_leader_schedule: None, expected_bank_hash: matches .value_of("expected_bank_hash") .map(|s| Hash::from_str(s).unwrap()), @@ -645,12 +653,16 @@ pub fn execute( known_validators: run_args.known_validators, repair_validators, repair_whitelist, + repair_handler_type: RepairHandlerType::default(), gossip_validators, max_ledger_shreds, blockstore_options: run_args.blockstore_options, run_verification: !matches.is_present("skip_startup_ledger_verification"), debug_keys, + warp_slot: None, + generator_config: None, contact_debug_interval, + contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS, send_transaction_service_config: send_transaction_service::Config { retry_rate_ms: rpc_send_retry_rate_ms, leader_forward_count, @@ -692,6 +704,7 @@ pub fn execute( snapshot_config, tpu_coalesce, no_wait_for_vote_to_start_leader: matches.is_present("no_wait_for_vote_to_start_leader"), + wait_to_vote_slot: None, runtime_config: RuntimeConfig { log_messages_bytes_limit: value_of(matches, "log_messages_bytes_limit"), ..RuntimeConfig::default() @@ -711,9 +724,35 @@ pub fn execute( .is_present("delay_leader_block_for_pending_fork"), wen_restart_proto_path: value_t!(matches, "wen_restart", PathBuf).ok(), wen_restart_coordinator: value_t!(matches, "wen_restart_coordinator", Pubkey).ok(), + turbine_disabled: Arc::::default(), retransmit_xdp, + broadcast_stage_type: BroadcastStageType::Standard, use_tpu_client_next: !matches.is_present("use_connection_cache"), - ..ValidatorConfig::default() + block_verification_method: value_t_or_exit!( + matches, + "block_verification_method", + BlockVerificationMethod + ), + unified_scheduler_handler_threads: value_t!( + matches, + "unified_scheduler_handler_threads", + usize + ) + .ok(), + block_production_method: value_t_or_exit!( + matches, + "block_production_method", + BlockProductionMethod + ), + transaction_struct: value_t_or_exit!(matches, "transaction_struct", TransactionStructure), + enable_block_production_forwarding: 
staked_nodes_overrides_path.is_some(), + banking_trace_dir_byte_limit: parse_banking_trace_dir_byte_limit(matches), + validator_exit: Arc::new(RwLock::new(Exit::default())), + validator_exit_backpressure: [( + SnapshotPackagerService::NAME.to_string(), + Arc::new(AtomicBool::new(false)), + )] + .into(), }; let reserved = validator_config @@ -752,12 +791,6 @@ pub fn execute( let maximum_snapshot_download_abort = value_t_or_exit!(matches, "maximum_snapshot_download_abort", u64); - configure_banking_trace_dir_byte_limit(&mut validator_config, matches); - validator_config.block_verification_method = value_t_or_exit!( - matches, - "block_verification_method", - BlockVerificationMethod - ); match validator_config.block_verification_method { BlockVerificationMethod::BlockstoreProcessor => { warn!( @@ -769,19 +802,6 @@ pub fn execute( } BlockVerificationMethod::UnifiedScheduler => {} } - validator_config.block_production_method = value_t_or_exit!( - matches, // comment to align formatting... - "block_production_method", - BlockProductionMethod - ); - validator_config.transaction_struct = value_t_or_exit!( - matches, // comment to align formatting... 
- "transaction_struct", - TransactionStructure - ); - validator_config.enable_block_production_forwarding = staked_nodes_overrides_path.is_some(); - validator_config.unified_scheduler_handler_threads = - value_t!(matches, "unified_scheduler_handler_threads", usize).ok(); let public_rpc_addr = matches .value_of("public_rpc_addr") @@ -801,13 +821,6 @@ pub fn execute( } } - let validator_exit_backpressure = [( - SnapshotPackagerService::NAME.to_string(), - Arc::new(AtomicBool::new(false)), - )] - .into(); - validator_config.validator_exit_backpressure = validator_exit_backpressure; - let mut ledger_lock = ledger_lockfile(&ledger_path); let _ledger_write_guard = lock_ledger(&ledger_path, &mut ledger_lock); @@ -1165,11 +1178,8 @@ fn get_cluster_shred_version(entrypoints: &[SocketAddr], bind_address: IpAddr) - None } -fn configure_banking_trace_dir_byte_limit( - validator_config: &mut ValidatorConfig, - matches: &ArgMatches, -) { - validator_config.banking_trace_dir_byte_limit = if matches.is_present("disable_banking_trace") { +fn parse_banking_trace_dir_byte_limit(matches: &ArgMatches) -> u64 { + if matches.is_present("disable_banking_trace") { // disable with an explicit flag; This effectively becomes `opt-out` by resetting to // DISABLED_BAKING_TRACE_DIR, while allowing us to specify a default sensible limit in clap // configuration for cli help. 
@@ -1178,7 +1188,7 @@ fn configure_banking_trace_dir_byte_limit( // a default value in clap configuration (BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT) or // explicit user-supplied override value value_t_or_exit!(matches, "banking_trace_dir_byte_limit", u64) - }; + } } fn new_snapshot_config( From 53985bead1b36a041c360e7810dc624f5399afc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 23 Jul 2025 19:11:46 +0200 Subject: [PATCH 54/68] Revert - The disabling of `enable_stack_frame_gaps` in `bpf_account_data_direct_mapping` (#7056) * Reverts the disabling of enable_stack_frame_gaps in bpf_account_data_direct_mapping. * Adjusts test_stack_heap_zeroed. --- programs/bpf_loader/src/syscalls/mod.rs | 2 +- programs/sbf/rust/invoke/src/lib.rs | 4 ++-- svm/tests/mock_bank.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 32b01375cc0f99..3ea76b5daa7ed7 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -357,7 +357,7 @@ pub fn create_program_runtime_environment_v1<'a>( max_call_depth: compute_budget.max_call_depth, stack_frame_size: compute_budget.stack_frame_size, enable_address_translation: true, - enable_stack_frame_gaps: !feature_set.bpf_account_data_direct_mapping, + enable_stack_frame_gaps: true, instruction_meter_checkpoint_distance: 10000, enable_instruction_meter: true, enable_instruction_tracing: debugging_features, diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index b56f965f36fd4c..51a708ecf7a9aa 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ b/programs/sbf/rust/invoke/src/lib.rs @@ -1348,7 +1348,7 @@ fn process_instruction<'a>( let stack = unsafe { slice::from_raw_parts_mut( MM_STACK_START as *mut u8, - MAX_CALL_DEPTH * STACK_FRAME_SIZE, + MAX_CALL_DEPTH * STACK_FRAME_SIZE * 2, ) }; @@ -1361,7 +1361,7 @@ fn process_instruction<'a>( // 
When we don't have dynamic stack frames, the stack grows from lower addresses // to higher addresses, so we compare accordingly. for i in 10..MAX_CALL_DEPTH { - let stack = &mut stack[i * STACK_FRAME_SIZE..][..STACK_FRAME_SIZE]; + let stack = &mut stack[i * STACK_FRAME_SIZE * 2..][..STACK_FRAME_SIZE]; assert!(stack == &ZEROS[..STACK_FRAME_SIZE], "stack not zeroed"); stack.fill(42); } diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 0e15b7003315ff..25998d41a46be8 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -353,7 +353,7 @@ pub fn create_custom_loader<'a>() -> BuiltinProgram> { max_call_depth: compute_budget.max_call_depth, stack_frame_size: compute_budget.stack_frame_size, enable_address_translation: true, - enable_stack_frame_gaps: false, + enable_stack_frame_gaps: true, instruction_meter_checkpoint_distance: 10000, enable_instruction_meter: true, enable_instruction_tracing: true, From 079b37f78210ee71859bf858a109596790bfaefd Mon Sep 17 00:00:00 2001 From: Brennan Date: Wed, 23 Jul 2025 10:44:07 -0700 Subject: [PATCH 55/68] Clean up feature: raise_block_limits_to_60m (#7110) --- cost-model/src/block_cost_limits.rs | 16 +------ runtime/src/bank.rs | 27 +---------- runtime/src/bank/tests.rs | 70 ----------------------------- 3 files changed, 2 insertions(+), 111 deletions(-) diff --git a/cost-model/src/block_cost_limits.rs b/cost-model/src/block_cost_limits.rs index 74c1c1f3307213..aa54a7ce252c03 100644 --- a/cost-model/src/block_cost_limits.rs +++ b/cost-model/src/block_cost_limits.rs @@ -25,8 +25,7 @@ pub const INSTRUCTION_DATA_BYTES_COST: u64 = 140 /*bytes per us*/ / COMPUTE_UNIT /// accumulated by Transactions added to it; A transaction's compute units are /// calculated by cost_model, based on transaction's signatures, write locks, /// data size and built-in and SBF instructions. 
-pub const MAX_BLOCK_UNITS: u64 = MAX_BLOCK_UNITS_SIMD_0207; -pub const MAX_BLOCK_UNITS_SIMD_0207: u64 = 50_000_000; +pub const MAX_BLOCK_UNITS: u64 = MAX_BLOCK_UNITS_SIMD_0256; pub const MAX_BLOCK_UNITS_SIMD_0256: u64 = 60_000_000; /// Number of compute units that a writable account in a block is allowed. The @@ -41,16 +40,3 @@ pub const MAX_VOTE_UNITS: u64 = 36_000_000; /// The maximum allowed size, in bytes, that accounts data can grow, per block. /// This can also be thought of as the maximum size of new allocations per block. pub const MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA: u64 = 100_000_000; - -/// Return the block limits that will be used upon activation of SIMD-0256. -/// Returns as -/// (account_limit, block_limit, vote_limit) -// ^ Above order is used to be consistent with the order of -// `CostTracker::set_limits`. -pub const fn simd_0256_block_limits() -> (u64, u64, u64) { - ( - MAX_WRITABLE_ACCOUNT_UNITS, - MAX_BLOCK_UNITS_SIMD_0256, - MAX_VOTE_UNITS, - ) -} diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index dfe1f066c59aef..203c00b46d4e53 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -94,7 +94,7 @@ use { }, solana_compute_budget::compute_budget::ComputeBudget, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, - solana_cost_model::{block_cost_limits::simd_0256_block_limits, cost_tracker::CostTracker}, + solana_cost_model::cost_tracker::CostTracker, solana_epoch_info::EpochInfo, solana_epoch_schedule::EpochSchedule, solana_feature_gate_interface as feature, @@ -4044,22 +4044,6 @@ impl Bank { debug_do_not_add_builtins, ); - // Cost-Tracker is not serialized in snapshot or any configs. - // We must apply previously activated features related to limits here - // so that the initial bank state is consistent with the feature set. - // Cost-tracker limits are propagated through children banks. 
- if self - .feature_set - .is_active(&feature_set::raise_block_limits_to_60m::id()) - { - let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0256_block_limits(); - self.write_cost_tracker().unwrap().set_limits( - account_cost_limit, - block_cost_limit, - vote_cost_limit, - ); - } - if !debug_do_not_add_builtins { for builtin in BUILTINS .iter() @@ -5337,15 +5321,6 @@ impl Bank { &new_feature_activations, ); } - - if new_feature_activations.contains(&feature_set::raise_block_limits_to_60m::id()) { - let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0256_block_limits(); - self.write_cost_tracker().unwrap().set_limits( - account_cost_limit, - block_cost_limit, - vote_cost_limit, - ); - } } fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 935e33a9e2c70c..d22fffebae717c 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -50,7 +50,6 @@ use { compute_budget::ComputeBudget, compute_budget_limits::ComputeBudgetLimits, }, solana_compute_budget_interface::ComputeBudgetInstruction, - solana_cost_model::block_cost_limits::{MAX_BLOCK_UNITS, MAX_BLOCK_UNITS_SIMD_0256}, solana_cpi::MAX_RETURN_DATA, solana_epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, solana_feature_gate_interface::{self as feature, Feature}, @@ -6711,75 +6710,6 @@ fn test_reserved_account_keys() { ); } -#[test] -fn test_block_limits() { - let (bank0, _bank_forks) = create_simple_test_arc_bank(100_000); - let mut bank = Bank::new_from_parent(bank0, &Pubkey::default(), 1); - - // Ensure increased block limits features are inactive. 
- assert!(!bank - .feature_set - .is_active(&feature_set::raise_block_limits_to_60m::id())); - assert_eq!( - bank.read_cost_tracker().unwrap().get_block_limit(), - MAX_BLOCK_UNITS, - "before activating the feature, bank should have old/default limit" - ); - - // Activate `raise_block_limits_to_60m` feature - bank.store_account( - &feature_set::raise_block_limits_to_60m::id(), - &feature::create_account(&Feature::default(), 42), - ); - // apply_feature_activations for `FinishInit` will not cause the block limit to be updated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::FinishInit, true); - assert_eq!( - bank.read_cost_tracker().unwrap().get_block_limit(), - MAX_BLOCK_UNITS, - "before activating the feature, bank should have old/default limit" - ); - - // apply_feature_activations for `NewFromParent` will cause feature to be activated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); - assert_eq!( - bank.read_cost_tracker().unwrap().get_block_limit(), - MAX_BLOCK_UNITS_SIMD_0256, - "after activating the feature, bank should have new limit" - ); - - // Make sure the limits propagate to the child-bank. - let bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), 2); - assert_eq!( - bank.read_cost_tracker().unwrap().get_block_limit(), - MAX_BLOCK_UNITS_SIMD_0256, - "child bank should have new limit" - ); - - // Test starting from a genesis config with and without feature account - let (mut genesis_config, _keypair) = create_genesis_config(100_000); - // Without feature account in genesis, old limits are used. 
- let bank = Bank::new_for_tests(&genesis_config); - assert_eq!( - bank.read_cost_tracker().unwrap().get_block_limit(), - MAX_BLOCK_UNITS, - "before activating the feature, bank should have old/default limit" - ); - - activate_feature( - &mut genesis_config, - feature_set::raise_block_limits_to_60m::id(), - ); - let bank = Bank::new_for_tests(&genesis_config); - assert!(bank - .feature_set - .is_active(&feature_set::raise_block_limits_to_60m::id())); - assert_eq!( - bank.read_cost_tracker().unwrap().get_block_limit(), - MAX_BLOCK_UNITS_SIMD_0256, - "bank created from genesis config should have new limit" - ); -} - #[test] fn test_program_replacement() { let mut bank = create_simple_test_bank(0); From 385d95e84e040e942bf0b76876b0f1940e7f290e Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 23 Jul 2025 14:10:44 -0400 Subject: [PATCH 56/68] Removes calculate_accounts_delta_hash() (#7093) --- accounts-db/benches/accounts.rs | 18 +------- accounts-db/src/accounts_db.rs | 39 ----------------- accounts-db/src/accounts_db/tests.rs | 64 ---------------------------- runtime/src/serde_snapshot.rs | 4 +- runtime/src/serde_snapshot/tests.rs | 32 +------------- runtime/src/snapshot_minimizer.rs | 1 - runtime/src/snapshot_package.rs | 2 +- 7 files changed, 6 insertions(+), 154 deletions(-) diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index 70776106b6c581..cfb0c89d15fdc6 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -11,10 +11,7 @@ use { solana_accounts_db::{ account_info::{AccountInfo, StorageLocation}, accounts::{AccountAddressFilter, Accounts}, - accounts_db::{ - test_utils::create_test_accounts, AccountFromStorage, AccountsDb, - ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, - }, + accounts_db::{AccountFromStorage, AccountsDb, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS}, accounts_index::ScanConfig, ancestors::Ancestors, }, @@ -42,19 +39,6 @@ fn new_accounts_db(account_paths: Vec) -> AccountsDb { ) } -#[bench] -fn 
bench_accounts_delta_hash(bencher: &mut Bencher) { - solana_logger::setup(); - let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]); - let accounts = Accounts::new(Arc::new(accounts_db)); - let mut pubkeys: Vec = vec![]; - create_test_accounts(&accounts, &mut pubkeys, 100_000, 0); - accounts.accounts_db.add_root_and_flush_write_cache(0); - bencher.iter(|| { - accounts.accounts_db.calculate_accounts_delta_hash(0); - }); -} - #[bench] fn bench_delete_dependencies(bencher: &mut Bencher) { solana_logger::setup(); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 38c7c3741c43e7..df4da64dcfc5c3 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6555,36 +6555,6 @@ impl AccountsDb { } } - /// Calculate accounts delta hash for `slot` - pub fn calculate_accounts_delta_hash_internal( - &self, - slot: Slot, - ignore: Option, - ) -> AccountsDeltaHash { - let (mut hashes, scan_us, mut accumulate) = self.get_pubkey_hash_for_slot(slot); - - if let Some(ignore) = ignore { - hashes.retain(|k| k.0 != ignore); - } - - let accounts_delta_hash = self - .thread_pool - .install(|| AccountsDeltaHash(AccountsHasher::accumulate_account_hashes(hashes))); - accumulate.stop(); - - self.set_accounts_delta_hash(slot, accounts_delta_hash); - - self.stats - .delta_hash_scan_time_total_us - .fetch_add(scan_us, Ordering::Relaxed); - self.stats - .delta_hash_accumulate_time_total_us - .fetch_add(accumulate.as_us(), Ordering::Relaxed); - self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed); - - accounts_delta_hash - } - /// Set the accounts delta hash for `slot` in the `accounts_delta_hashes` map /// /// returns the previous accounts delta hash for `slot` @@ -8190,11 +8160,6 @@ impl AccountsDb { self.flush_root_write_cache(slot); } - /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.) 
- pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { - self.calculate_accounts_delta_hash_internal(slot, None) - } - pub fn load_without_fixed_root( &self, ancestors: &Ancestors, @@ -8210,10 +8175,6 @@ impl AccountsDb { ) } - pub fn accounts_delta_hashes(&self) -> &Mutex> { - &self.accounts_delta_hashes - } - pub fn accounts_hashes(&self) -> &Mutex> { &self.accounts_hashes } diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index bca593f2887858..b30b9b562e847e 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -745,7 +745,6 @@ define_accounts_db_test!(test_accountsdb_count_stores, |db| { db.store_for_tests(1, &[(&pubkey, &account)]); db.store_for_tests(1, &[(&pubkeys[0], &account)]); // adding root doesn't change anything - db.calculate_accounts_delta_hash(1); db.add_root_and_flush_write_cache(1); { let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap(); @@ -1031,7 +1030,6 @@ fn test_lazy_gc_slot() { |(_slot, account_info)| account_info.store_id(), ) .unwrap(); - accounts.calculate_accounts_delta_hash(0); //slot is still there, since gc is lazy assert_eq!(accounts.storage.get_slot_storage_entry(0).unwrap().id(), id); @@ -1039,9 +1037,6 @@ fn test_lazy_gc_slot() { //store causes clean accounts.store_for_tests(1, &[(&pubkey, &account)]); - // generate delta state for slot 1, so clean operates on it. 
- accounts.calculate_accounts_delta_hash(1); - //slot is gone accounts.print_accounts_stats("pre-clean"); accounts.add_root_and_flush_write_cache(1); @@ -1111,11 +1106,8 @@ fn test_clean_zero_lamport_and_dead_slot() { // Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so // slot 1 should be purged - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); - accounts.calculate_accounts_delta_hash(2); accounts.add_root_and_flush_write_cache(2); // Slot 1 should be removed, slot 0 cannot be removed because it still has @@ -1196,11 +1188,9 @@ fn test_remove_zero_lamport_multi_ref_accounts_panic() { let slot = 1; accounts.store_for_tests(slot, &[(&pubkey_zero, &one_lamport_account)]); - accounts.calculate_accounts_delta_hash(slot); accounts.add_root_and_flush_write_cache(slot); accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]); - accounts.calculate_accounts_delta_hash(slot + 1); accounts.add_root_and_flush_write_cache(slot + 1); // This should panic because there are 2 refs for pubkey_zero. 
@@ -1229,7 +1219,6 @@ fn test_remove_zero_lamport_single_ref_accounts_after_shrink() { ); // Simulate rooting the zero-lamport account, writes it to storage - accounts.calculate_accounts_delta_hash(slot); accounts.add_root_and_flush_write_cache(slot); if pass > 0 { @@ -1237,7 +1226,6 @@ fn test_remove_zero_lamport_single_ref_accounts_after_shrink() { accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]); if pass == 2 { // move to a storage (causing ref count to increase) - accounts.calculate_accounts_delta_hash(slot + 1); accounts.add_root_and_flush_write_cache(slot + 1); } } @@ -1350,7 +1338,6 @@ fn test_shrink_zero_lamport_single_ref_account() { // Simulate rooting the zero-lamport account, should be a // candidate for cleaning - accounts.calculate_accounts_delta_hash(slot); accounts.add_root_and_flush_write_cache(slot); // for testing, we need to cause shrink to think this will be productive. @@ -1428,11 +1415,8 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() { accounts.store_for_tests(1, &[(&pubkey1, &zero_lamport_account)]); accounts.store_for_tests(2, &[(&pubkey1, &zero_lamport_account)]); // Root all slots - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); - accounts.calculate_accounts_delta_hash(2); accounts.add_root_and_flush_write_cache(2); // Account ref counts should match how many slots they were stored in @@ -1475,9 +1459,7 @@ fn test_clean_zero_lamport_and_old_roots() { // Simulate rooting the zero-lamport account, should be a // candidate for cleaning - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); // Slot 0 should be removed, and @@ -1513,9 +1495,7 @@ fn test_clean_old_with_normal_account() { accounts.store_for_tests(1, &[(&pubkey, &account)]); // 
simulate slots are rooted after while - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); //even if rooted, old state isn't cleaned up @@ -1545,9 +1525,7 @@ fn test_clean_old_with_zero_lamport_account() { accounts.store_for_tests(1, &[(&pubkey2, &normal_account)]); //simulate slots are rooted after while - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); //even if rooted, old state isn't cleaned up @@ -1596,11 +1574,8 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { accounts.store_for_tests(2, &[(&pubkey2, &normal_account)]); //simulate slots are rooted after while - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); - accounts.calculate_accounts_delta_hash(2); accounts.add_root_and_flush_write_cache(2); //even if rooted, old state isn't cleaned up @@ -1718,9 +1693,7 @@ fn test_clean_max_slot_zero_lamport_account() { accounts.store_for_tests(1, &[(&pubkey, &zero_account)]); // simulate slots are rooted after while - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); - accounts.calculate_accounts_delta_hash(1); accounts.add_root_and_flush_write_cache(1); // Only clean up to account 0, should not purge slot 0 based on @@ -1764,7 +1737,6 @@ fn test_accounts_db_purge_keep_live() { let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); let accounts = AccountsDb::new_single_for_tests(); - accounts.calculate_accounts_delta_hash(0); accounts.add_root_and_flush_write_cache(0); // Step A @@ -1773,7 +1745,6 @@ fn test_accounts_db_purge_keep_live() { // Store another live account to slot 1 which will prevent any purge // 
since the store count will not be zero accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); let (slot1, account_info1) = accounts .accounts_index @@ -1795,13 +1766,11 @@ fn test_accounts_db_purge_keep_live() { current_slot += 1; let zero_lamport_slot = current_slot; accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); current_slot += 1; - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.print_accounts_stats("pre_purge"); @@ -1847,19 +1816,16 @@ fn test_accounts_db_purge1() { let mut current_slot = 1; accounts.store_for_tests(current_slot, &[(&pubkey, &account)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); // Otherwise slot 2 will not be removed current_slot += 1; - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.print_accounts_stats("pre_purge"); @@ -2337,7 +2303,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]); } - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); info!("post A"); @@ -2354,7 +2319,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, 
store1_first: bool, store_si // Stores to same pubkey, same slot only count once towards the // ref count assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.print_accounts_stats("Post-B pre-clean"); @@ -2372,7 +2336,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si accounts.store_for_tests(current_slot, &[(&pubkey3, &account4)]); accounts.add_root_and_flush_write_cache(current_slot); assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.calculate_accounts_delta_hash(current_slot); info!("post C"); @@ -2393,7 +2356,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si info!("post D"); accounts.print_accounts_stats("Post-D"); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.clean_accounts_for_tests(); @@ -2474,7 +2436,6 @@ fn test_shrink_candidate_slots() { accounts.store_for_tests(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; @@ -2484,7 +2445,6 @@ fn test_shrink_candidate_slots() { for pubkey in updated_pubkeys { accounts.store_for_tests(current_slot, &[(pubkey, &account)]); } - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.clean_accounts_for_tests(); @@ -2562,7 +2522,6 @@ fn test_shrink_candidate_slots_with_dead_ancient_account() { current_slot, &[(&modified_account_pubkey, &modified_account)], ); - db.calculate_accounts_delta_hash(current_slot); db.add_root_and_flush_write_cache(current_slot); // This should remove the dead ancient account from the index. 
db.clean_accounts_for_tests(); @@ -2982,7 +2941,6 @@ fn test_store_clean_after_shrink() { accounts.store_cached((1, &[(&pubkey1, &zero_account)][..])); // Add root 0 and flush separately - accounts.calculate_accounts_delta_hash(0); accounts.add_root(0); accounts.flush_accounts_cache(true, None); @@ -2990,7 +2948,6 @@ fn test_store_clean_after_shrink() { accounts.clean_accounts_for_tests(); // flush 1 - accounts.calculate_accounts_delta_hash(1); accounts.add_root(1); accounts.flush_accounts_cache(true, None); @@ -3024,7 +2981,6 @@ fn test_wrapping_storage_id() { keys.iter().enumerate().for_each(|(slot, key)| { let slot = slot as Slot; db.store_for_tests(slot, &[(key, &zero_lamport_account)]); - db.calculate_accounts_delta_hash(slot); db.add_root_and_flush_write_cache(slot); }); assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire)); @@ -3050,7 +3006,6 @@ fn test_reuse_storage_id() { keys.iter().enumerate().for_each(|(slot, key)| { let slot = slot as Slot; db.store_for_tests(slot, &[(key, &zero_lamport_account)]); - db.calculate_accounts_delta_hash(slot); db.add_root_and_flush_write_cache(slot); // reset next_id to what it was previously to cause us to re-use the same id db.next_id.store(AccountsFileId::MAX, Ordering::Release); @@ -3070,9 +3025,7 @@ fn test_zero_lamport_new_root_not_cleaned() { // Store zero lamport account into slots 0 and 1, root both slots db.store_for_tests(0, &[(&account_key, &zero_lamport_account)]); db.store_for_tests(1, &[(&account_key, &zero_lamport_account)]); - db.calculate_accounts_delta_hash(0); db.add_root_and_flush_write_cache(0); - db.calculate_accounts_delta_hash(1); db.add_root_and_flush_write_cache(1); // Only clean zero lamport accounts up to slot 0 @@ -3676,7 +3629,6 @@ fn test_scan_flush_accounts_cache_then_clean_drop() { // Fodder for the scan so that the lock on `account_key` is not held db.store_cached((1, &[(&account_key2, &slot1_account)][..])); db.store_cached((2, &[(&account_key, &slot2_account)][..])); - 
db.calculate_accounts_delta_hash(0); let max_scan_root = 0; db.add_root(max_scan_root); @@ -3686,7 +3638,6 @@ fn test_scan_flush_accounts_cache_then_clean_drop() { // Add a new root 2 let new_root = 2; - db.calculate_accounts_delta_hash(new_root); db.add_root(new_root); // Check that the scan is properly set up @@ -4266,8 +4217,6 @@ fn test_shrink_unref() { db.add_root(1); // Flushes all roots db.flush_accounts_cache(true, None); - db.calculate_accounts_delta_hash(0); - db.calculate_accounts_delta_hash(1); // Clean to remove outdated entry from slot 0 db.clean_accounts(Some(1), false, &EpochSchedule::default()); @@ -4288,7 +4237,6 @@ fn test_shrink_unref() { // Should be one store before clean for slot 0 db.get_and_assert_single_storage(0); - db.calculate_accounts_delta_hash(2); db.clean_accounts(Some(2), false, &EpochSchedule::default()); // No stores should exist for slot 0 after clean @@ -4320,8 +4268,6 @@ fn test_clean_drop_dead_zero_lamport_single_ref_accounts() { accounts_db.add_root(slot); accounts_db.flush_accounts_cache(true, None); - accounts_db.calculate_accounts_delta_hash(0); - accounts_db.calculate_accounts_delta_hash(1); // run clean accounts_db.clean_accounts(Some(1), false, &epoch_schedule); @@ -4351,8 +4297,6 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() { db.add_root(1); // Flushes all roots db.flush_accounts_cache(true, None); - db.calculate_accounts_delta_hash(0); - db.calculate_accounts_delta_hash(1); // Clean should mark slot 0 dead and drop it. During the dropping, it // will find that slot 1 has a single ref zero accounts and mark it. 
@@ -4393,8 +4337,6 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() { db.add_root(1); // Flushes all roots db.flush_accounts_cache(true, None); - db.calculate_accounts_delta_hash(0); - db.calculate_accounts_delta_hash(1); // Clean to remove outdated entry from slot 0 db.clean_accounts(Some(1), false, &EpochSchedule::default()); @@ -4430,7 +4372,6 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() { // Should be one store before clean for slot 0 and slot 1 db.get_and_assert_single_storage(0); db.get_and_assert_single_storage(1); - db.calculate_accounts_delta_hash(2); db.clean_accounts(Some(2), false, &EpochSchedule::default()); // No stores should exist for slot 0 after clean @@ -5164,14 +5105,12 @@ define_accounts_db_test!(test_purge_alive_unrooted_slots_after_clean, |accounts| // Simulate adding dirty pubkeys on bank freeze. Note this is // not a rooted slot - accounts.calculate_accounts_delta_hash(slot0); // On the next *rooted* slot, update the `shared_key` account to zero lamports let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); accounts.store_for_tests(slot1, &[(&shared_key, &zero_lamport_account)]); // Simulate adding dirty pubkeys on bank freeze, set root - accounts.calculate_accounts_delta_hash(slot1); accounts.add_root_and_flush_write_cache(slot1); // The later rooted zero-lamport update to `shared_key` cannot be cleaned @@ -5217,19 +5156,16 @@ define_accounts_db_test!( let slot1: Slot = 1; let account = AccountSharedData::new(111, space, &owner); accounts_db.store_cached((slot1, &[(&pubkey, &account)][..])); - accounts_db.calculate_accounts_delta_hash(slot1); accounts_db.add_root_and_flush_write_cache(slot1); let slot2: Slot = 2; let account = AccountSharedData::new(222, space, &owner); accounts_db.store_cached((slot2, &[(&pubkey, &account)][..])); - accounts_db.calculate_accounts_delta_hash(slot2); accounts_db.add_root_and_flush_write_cache(slot2); let slot3: Slot = 3; let 
account = AccountSharedData::new(0, space, &owner); accounts_db.store_cached((slot3, &[(&pubkey, &account)][..])); - accounts_db.calculate_accounts_delta_hash(slot3); accounts_db.add_root_and_flush_write_cache(slot3); assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3); diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 60f7e9c5ab7dab..c01605e724f7bc 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -703,7 +703,7 @@ impl Serialize for SerializableBankAndStorage<'_> { let mut bank_fields = self.bank.get_fields_to_serialize(); let accounts_db = &self.bank.rc.accounts.accounts_db; let bank_hash_stats = self.bank.get_bank_hash_stats(); - let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); + let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; let write_version = accounts_db.write_version.load(Ordering::Acquire); let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature; @@ -747,7 +747,7 @@ impl Serialize for SerializableBankAndStorageNoExtra<'_> { let bank_fields = self.bank.get_fields_to_serialize(); let accounts_db = &self.bank.rc.accounts.accounts_db; let bank_hash_stats = self.bank.get_bank_hash_stats(); - let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); + let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; let write_version = accounts_db.write_version.load(Ordering::Acquire); ( diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index e00dbcfd14f004..d87e413e8f486a 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -22,7 +22,7 @@ mod serde_snapshot_tests { AccountsDb, AtomicAccountsFileId, }, accounts_file::{AccountsFile, 
AccountsFileError, StorageAccess}, - accounts_hash::AccountsHash, + accounts_hash::{AccountsDeltaHash, AccountsHash}, ancestors::Ancestors, }, solana_clock::Slot, @@ -103,7 +103,7 @@ mod serde_snapshot_tests { W: Write, { let bank_hash_stats = BankHashStats::default(); - let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); + let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let accounts_hash = AccountsHash(Hash::default()); // obsolete, any value works let write_version = accounts_db.write_version.load(Ordering::Acquire); serialize_into( @@ -218,7 +218,6 @@ mod serde_snapshot_tests { create_test_accounts(&accounts, &mut pubkeys, 100, slot); check_accounts_local(&accounts, &pubkeys, 100); accounts.accounts_db.add_root_and_flush_write_cache(slot); - let accounts_delta_hash = accounts.accounts_db.calculate_accounts_delta_hash(slot); let accounts_hash = accounts .accounts_db .calculate_accounts_lt_hash_at_startup_from_index(&Ancestors::default(), slot); @@ -254,8 +253,6 @@ mod serde_snapshot_tests { .unwrap(), )); check_accounts_local(&daccounts, &pubkeys, 100); - let daccounts_delta_hash = daccounts.accounts_db.calculate_accounts_delta_hash(slot); - assert_eq!(accounts_delta_hash, daccounts_delta_hash); let daccounts_hash = accounts .accounts_db .calculate_accounts_lt_hash_at_startup_from_index(&Ancestors::default(), slot); @@ -282,8 +279,6 @@ mod serde_snapshot_tests { db.store_for_tests(new_root, &[(&key2, &account0)]); db.add_root_and_flush_write_cache(new_root); - db.calculate_accounts_delta_hash(new_root); - // Simulate reconstruction from snapshot let db = reconstruct_accounts_db_via_serialization(&db, new_root, storage_access); @@ -321,7 +316,6 @@ mod serde_snapshot_tests { accounts.add_root_and_flush_write_cache(0); accounts.check_storage(0, 100, 100); accounts.check_accounts(&pubkeys, 0, 100, 2); - accounts.calculate_accounts_delta_hash(0); let mut pubkeys1: Vec = vec![]; @@ -339,7 +333,6 @@ 
mod serde_snapshot_tests { // accounts accounts.create_account(&mut pubkeys1, latest_slot, 10, 0, 0); - accounts.calculate_accounts_delta_hash(latest_slot); accounts.add_root_and_flush_write_cache(latest_slot); accounts.check_storage(1, 21, 21); @@ -359,7 +352,6 @@ mod serde_snapshot_tests { // 21 + 10 = 31 accounts accounts.create_account(&mut pubkeys2, latest_slot, 10, 0, 0); - accounts.calculate_accounts_delta_hash(latest_slot); accounts.add_root_and_flush_write_cache(latest_slot); accounts.check_storage(2, 31, 31); @@ -383,15 +375,6 @@ mod serde_snapshot_tests { accounts.write_version.load(Ordering::Acquire) ); - // Get the hashes for the latest slot, which should be the only hashes in the - // map on the deserialized AccountsDb - assert_eq!(daccounts.accounts_delta_hashes().lock().unwrap().len(), 1); - assert_eq!(daccounts.accounts_hashes().lock().unwrap().len(), 1); - assert_eq!( - daccounts.get_accounts_delta_hash(latest_slot).unwrap(), - accounts.get_accounts_delta_hash(latest_slot).unwrap(), - ); - daccounts.print_count_and_status("daccounts"); // Don't check the first 35 accounts which have not been modified on slot 0 @@ -445,7 +428,6 @@ mod serde_snapshot_tests { accounts.print_accounts_stats("accounts_post_purge"); - accounts.calculate_accounts_delta_hash(current_slot); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); @@ -497,7 +479,6 @@ mod serde_snapshot_tests { accounts.add_root_and_flush_write_cache(current_slot); accounts.print_accounts_stats("pre_f"); - accounts.calculate_accounts_delta_hash(current_slot); let accounts = f(accounts, current_slot); @@ -589,7 +570,6 @@ mod serde_snapshot_tests { accounts.add_root_and_flush_write_cache(current_slot); accounts.print_count_and_status("before reconstruct"); - accounts.calculate_accounts_delta_hash(current_slot); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); accounts.print_count_and_status("before purge 
zero"); @@ -628,7 +608,6 @@ mod serde_snapshot_tests { current_slot += 1; accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]); accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root(current_slot); // B: Test multiple updates to pubkey1 in a single slot/storage @@ -643,7 +622,6 @@ mod serde_snapshot_tests { // Stores to same pubkey, same slot only count once towards the // ref count assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.calculate_accounts_delta_hash(current_slot); // C: Yet more update to trigger lazy clean of step A current_slot += 1; @@ -651,7 +629,6 @@ mod serde_snapshot_tests { accounts.store_for_tests(current_slot, &[(&pubkey1, &account3)]); accounts.add_root_and_flush_write_cache(current_slot); assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); // D: Make pubkey1 0-lamport; also triggers clean of step B @@ -683,13 +660,11 @@ mod serde_snapshot_tests { 3, /* == 3 - 1 + 1 */ accounts.ref_count_for_pubkey(&pubkey1) ); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root(current_slot); // E: Avoid missing bank hash error current_slot += 1; accounts.store_for_tests(current_slot, &[(&dummy_pubkey, &dummy_account)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root(current_slot); accounts.assert_load_account(current_slot, pubkey1, zero_lamport); @@ -715,7 +690,6 @@ mod serde_snapshot_tests { // F: Finally, make Step A cleanable current_slot += 1; accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root(current_slot); // Do clean @@ -757,7 +731,6 @@ mod serde_snapshot_tests { accounts.store_for_tests(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; - 
accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; @@ -767,7 +740,6 @@ mod serde_snapshot_tests { for pubkey in updated_pubkeys { accounts.store_for_tests(current_slot, &[(pubkey, &account)]); } - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); accounts.clean_accounts_for_tests(); diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 2ec9704d94a2d1..1d00a1b410a4c1 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -557,7 +557,6 @@ mod tests { minimized_account_set.insert(*pubkey); } } - accounts.calculate_accounts_delta_hash(current_slot); accounts.add_root_and_flush_write_cache(current_slot); } diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 8e48316b4c4afd..03085ca3a71420 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -67,7 +67,7 @@ impl AccountsPackage { let snapshot_info = { let accounts_db = &bank.rc.accounts.accounts_db; let write_version = accounts_db.write_version.load(Ordering::Acquire); - let accounts_delta_hash = AccountsDeltaHash(Hash::default()); + let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let bank_hash_stats = bank.get_bank_hash_stats(); let bank_fields_to_serialize = bank.get_fields_to_serialize(); SupplementalSnapshotInfo { From 3a88a3585d0bea94fcbadc72a5ba30aac36ef774 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 23 Jul 2025 14:50:55 -0500 Subject: [PATCH 57/68] Remove the Default trait impl. for ValidatorConfig (#7101) A Default impl for this config struct is a potential footgun; we should be explicitly specifying all fields in production code. 
Note that the default_for_test() function is left as-is to avoid copy/paste in tests --- core/src/validator.rs | 44 ++++++++++++++----------------------------- 1 file changed, 14 insertions(+), 30 deletions(-) diff --git a/core/src/validator.rs index 8eb790168dcc8a..cb9ba3389d87d0 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -299,8 +299,11 @@ pub struct ValidatorConfig { pub repair_handler_type: RepairHandlerType, } -impl Default for ValidatorConfig { - fn default() -> Self { +impl ValidatorConfig { + pub fn default_for_test() -> Self { + let max_thread_count = + NonZeroUsize::new(num_cpus::get()).expect("thread count is non-zero"); + Self { halt_at_slot: None, expected_genesis_hash: None, @@ -308,10 +311,10 @@ impl Default for ValidatorConfig { expected_shred_version: None, voting_disabled: false, max_ledger_shreds: None, - blockstore_options: BlockstoreOptions::default(), + blockstore_options: BlockstoreOptions::default_for_tests(), account_paths: Vec::new(), account_snapshot_paths: Vec::new(), - rpc_config: JsonRpcConfig::default(), + rpc_config: JsonRpcConfig::default_for_test(), on_start_geyser_plugin_config_files: None, geyser_plugin_always_enabled: false, rpc_addrs: None, @@ -350,49 +353,30 @@ impl Default for ValidatorConfig { validator_exit: Arc::new(RwLock::new(Exit::default())), validator_exit_backpressure: HashMap::default(), no_wait_for_vote_to_start_leader: true, - accounts_db_config: None, + accounts_db_config: Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), wait_to_vote_slot: None, runtime_config: RuntimeConfig::default(), banking_trace_dir_byte_limit: 0, block_verification_method: BlockVerificationMethod::default(), block_production_method: BlockProductionMethod::default(), transaction_struct: TransactionStructure::default(), - enable_block_production_forwarding: false, + // enable forwarding by default for tests + enable_block_production_forwarding: true, generator_config: None, use_snapshot_archives_at_startup: 
UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, wen_restart_coordinator: None, unified_scheduler_handler_threads: None, ip_echo_server_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - rayon_global_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - tvu_shred_sigverify_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - delay_leader_block_for_pending_fork: false, - use_tpu_client_next: true, - retransmit_xdp: None, - repair_handler_type: RepairHandlerType::default(), - } - } -} - -impl ValidatorConfig { - pub fn default_for_test() -> Self { - let max_thread_count = - NonZeroUsize::new(num_cpus::get()).expect("thread count is non-zero"); - - Self { - accounts_db_config: Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - blockstore_options: BlockstoreOptions::default_for_tests(), - rpc_config: JsonRpcConfig::default_for_test(), - block_production_method: BlockProductionMethod::default(), - enable_block_production_forwarding: true, // enable forwarding by default for tests rayon_global_threads: max_thread_count, replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_transactions_threads: max_thread_count, tvu_shred_sigverify_threads: NonZeroUsize::new(get_thread_count()) .expect("thread count is non-zero"), - ..Self::default() + delay_leader_block_for_pending_fork: false, + use_tpu_client_next: true, + retransmit_xdp: None, + repair_handler_type: RepairHandlerType::default(), } } From f674c2a1b8bada49c5cbdf121b71586344bf1597 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 23 Jul 2025 16:18:09 -0400 Subject: [PATCH 58/68] Removes AccountsDb::accounts_delta_hash (#7095) --- accounts-db/src/accounts_db.rs | 54 +++--------------------------- runtime/src/bank/serde_snapshot.rs | 4 --- runtime/src/serde_snapshot.rs | 10 +----- 3 files changed, 5 insertions(+), 63 deletions(-) diff 
--git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index df4da64dcfc5c3..3ef632bacec340 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -44,11 +44,10 @@ use { StorageAccess, }, accounts_hash::{ - AccountHash, AccountLtHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, - AccountsHasher, AccountsLtHash, CalcAccountsHashConfig, CalculateHashIntermediate, - HashStats, IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash, - SerdeIncrementalAccountsHash, ZeroLamportAccounts, ZERO_LAMPORT_ACCOUNT_HASH, - ZERO_LAMPORT_ACCOUNT_LT_HASH, + AccountHash, AccountLtHash, AccountsHash, AccountsHashKind, AccountsHasher, + AccountsLtHash, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, + IncrementalAccountsHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, + ZeroLamportAccounts, ZERO_LAMPORT_ACCOUNT_HASH, ZERO_LAMPORT_ACCOUNT_LT_HASH, }, accounts_index::{ in_mem_accounts_index::StartupStats, AccountSecondaryIndexes, AccountsIndex, @@ -1341,7 +1340,6 @@ pub struct AccountsDb { /// Thread pool for AccountsHashVerifier pub thread_pool_hash: ThreadPool, - accounts_delta_hashes: Mutex>, accounts_hashes: Mutex>, incremental_accounts_hashes: Mutex>, @@ -1839,7 +1837,6 @@ impl AccountsDb { shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()), write_version: AtomicU64::new(0), file_size: DEFAULT_FILE_SIZE, - accounts_delta_hashes: Mutex::new(HashMap::new()), accounts_hashes: Mutex::new(HashMap::new()), incremental_accounts_hashes: Mutex::new(HashMap::new()), external_purge_slots_stats: PurgeStats::default(), @@ -3938,11 +3935,8 @@ impl AccountsDb { &self, dropped_roots: impl Iterator, ) { - let mut accounts_delta_hashes = self.accounts_delta_hashes.lock().unwrap(); - dropped_roots.for_each(|slot| { self.accounts_index.clean_dead_slot(slot); - accounts_delta_hashes.remove(&slot); // the storage has been removed from this slot and recycled or dropped assert!(self.storage.remove(&slot, 
false).is_none()); debug_assert!( @@ -6555,39 +6549,6 @@ impl AccountsDb { } } - /// Set the accounts delta hash for `slot` in the `accounts_delta_hashes` map - /// - /// returns the previous accounts delta hash for `slot` - #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] - fn set_accounts_delta_hash( - &self, - slot: Slot, - accounts_delta_hash: AccountsDeltaHash, - ) -> Option { - self.accounts_delta_hashes - .lock() - .unwrap() - .insert(slot, accounts_delta_hash) - } - - /// After deserializing a snapshot, set the accounts delta hash for the new AccountsDb - pub fn set_accounts_delta_hash_from_snapshot( - &mut self, - slot: Slot, - accounts_delta_hash: SerdeAccountsDeltaHash, - ) -> Option { - self.set_accounts_delta_hash(slot, accounts_delta_hash.into()) - } - - /// Get the accounts delta hash for `slot` in the `accounts_delta_hashes` map - pub fn get_accounts_delta_hash(&self, slot: Slot) -> Option { - self.accounts_delta_hashes - .lock() - .unwrap() - .get(&slot) - .cloned() - } - fn update_index<'a>( &self, infos: Vec, @@ -6813,13 +6774,6 @@ impl AccountsDb { ) { let mut measure = Measure::start("remove_dead_slots_metadata-ms"); self.clean_dead_slots_from_accounts_index(dead_slots_iter.clone()); - - let mut accounts_delta_hashes = self.accounts_delta_hashes.lock().unwrap(); - for slot in dead_slots_iter { - accounts_delta_hashes.remove(slot); - } - drop(accounts_delta_hashes); - measure.stop(); inc_new_counter_info!("remove_dead_slots_metadata-ms", measure.as_ms() as usize); } diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 3088634abf1094..394e266d7d3762 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -234,10 +234,6 @@ mod tests { let mut bank = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); bank.freeze(); add_root_and_flush_write_cache(&bank0); - bank.rc - .accounts - .accounts_db - .set_accounts_delta_hash(bank.slot(), 
AccountsDeltaHash(Hash::new_unique())); bank.rc.accounts.accounts_db.set_accounts_hash( bank.slot(), (AccountsHash(Hash::new_unique()), u64::default()), diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index c01605e724f7bc..41723dc15b17b4 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -1160,7 +1160,7 @@ where let AccountsDbFields( _snapshot_storages, snapshot_version, - snapshot_slot, + _snapshot_slot, snapshot_bank_hash_info, _snapshot_historical_roots, _snapshot_historical_roots_with_hash, @@ -1190,14 +1190,6 @@ where ); // Process deserialized data, set necessary fields in self - let old_accounts_delta_hash = accounts_db.set_accounts_delta_hash_from_snapshot( - snapshot_slot, - snapshot_bank_hash_info.accounts_delta_hash, - ); - assert!( - old_accounts_delta_hash.is_none(), - "There should not already be an AccountsDeltaHash at slot {snapshot_slot}: {old_accounts_delta_hash:?}", - ); accounts_db.storage.initialize(storage); accounts_db .next_id From b6e27a5fa339bdf9bbb0a77e37f150c08e1af054 Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Wed, 23 Jul 2025 22:35:02 +0200 Subject: [PATCH 59/68] scripts: remove obsolete cargo-fmt.sh (#7036) * add sort workspace to cargo fmt script * remove obsolete formatting script --- scripts/cargo-fmt.sh | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100755 scripts/cargo-fmt.sh diff --git a/scripts/cargo-fmt.sh b/scripts/cargo-fmt.sh deleted file mode 100755 index ceab4afac0215f..00000000000000 --- a/scripts/cargo-fmt.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -here="$(dirname "$0")" -cargo="$(readlink -f "${here}/../cargo")" - -if [[ -z $cargo ]]; then - >&2 echo "Failed to find cargo. Mac readlink doesn't support -f. Consider switching - to gnu readlink with 'brew install coreutils' and then symlink greadlink as - /usr/local/bin/readlink." - exit 1 -fi - -fmt_dirs=( - . 
- programs/sbf - platform-tools-sdk/cargo-build-sbf/tests/crates/fail - platform-tools-sdk/cargo-build-sbf/tests/crates/noop - storage-bigtable/build-proto -) - -for fmt_dir in "${fmt_dirs[@]}"; do - ( - manifest_path="$(readlink -f "$here"/../"$fmt_dir"/Cargo.toml)" - set -ex - "$cargo" nightly fmt --all --manifest-path "$manifest_path" - ) -done From 651dab5c1fb6ae14cb68812c88b7dc5bcfde0429 Mon Sep 17 00:00:00 2001 From: Faycel Kouteib Date: Wed, 23 Jul 2025 14:58:10 -0700 Subject: [PATCH 60/68] pubsub-client: Remove deprecated set_node_version (#7117) --- pubsub-client/src/nonblocking/pubsub_client.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index fd26574ccae3b7..47ecfa5b68aaa0 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -302,11 +302,6 @@ impl PubsubClient { self.ws.await.unwrap() // WS future should not be cancelled or panicked } - #[deprecated(since = "2.0.2", note = "PubsubClient::node_version is no longer used")] - pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> { - Ok(()) - } - async fn subscribe<'a, T>(&self, operation: &str, params: Value) -> SubscribeResult<'a, T> where T: DeserializeOwned + Send + 'a, From 7b3f1ca5d950d8af51cda2ee87063de136785b54 Mon Sep 17 00:00:00 2001 From: Faycel Kouteib Date: Wed, 23 Jul 2025 14:58:36 -0700 Subject: [PATCH 61/68] rpc-client: Remove deprecated set_node_version (#7116) --- rpc-client/src/nonblocking/rpc_client.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index 161aff42e3bfaf..e4e1396abcfb19 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -601,11 +601,6 @@ impl RpcClient { self.sender.url() } - #[deprecated(since = "2.0.2", note = "RpcClient::node_version is 
no longer used")] - pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> { - Ok(()) - } - /// Get the configured default [commitment level][cl]. /// /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment From 681e8e3703f31bb3a10da3fddf700a8df533f2e5 Mon Sep 17 00:00:00 2001 From: Faycel Kouteib Date: Wed, 23 Jul 2025 15:28:37 -0700 Subject: [PATCH 62/68] account-decoder: Remove deprecated AccountAdditionalData and AccountAdditionalDataV2 (#7118) Remove deprecated AccountAdditionalData and AccountAdditionalDataV2 --- account-decoder/src/parse_account_data.rs | 52 ----------------------- account-decoder/src/parse_token.rs | 37 +--------------- 2 files changed, 1 insertion(+), 88 deletions(-) diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs index 9f58a0c4fb49b9..6cf54d11c8fd93 100644 --- a/account-decoder/src/parse_account_data.rs +++ b/account-decoder/src/parse_account_data.rs @@ -73,32 +73,11 @@ pub enum ParsableAccount { Vote, } -#[deprecated(since = "2.0.0", note = "Use `AccountAdditionalDataV3` instead")] -#[derive(Clone, Copy, Default)] -pub struct AccountAdditionalData { - pub spl_token_decimals: Option, -} - -#[deprecated(since = "2.2.0", note = "Use `AccountAdditionalDataV3` instead")] -#[derive(Clone, Copy, Default)] -pub struct AccountAdditionalDataV2 { - pub spl_token_additional_data: Option, -} - #[derive(Clone, Copy, Default)] pub struct AccountAdditionalDataV3 { pub spl_token_additional_data: Option, } -#[allow(deprecated)] -impl From for AccountAdditionalDataV3 { - fn from(v: AccountAdditionalDataV2) -> Self { - Self { - spl_token_additional_data: v.spl_token_additional_data.map(Into::into), - } - } -} - #[derive(Clone, Copy, Default)] pub struct SplTokenAdditionalData { pub decimals: u8, @@ -140,37 +119,6 @@ impl SplTokenAdditionalDataV2 { } } -#[deprecated(since = "2.0.0", note = "Use `parse_account_data_v3` instead")] -#[allow(deprecated)] -pub fn 
parse_account_data( - pubkey: &Pubkey, - program_id: &Pubkey, - data: &[u8], - additional_data: Option, -) -> Result { - parse_account_data_v3( - pubkey, - program_id, - data, - additional_data.map(|d| AccountAdditionalDataV3 { - spl_token_additional_data: d - .spl_token_decimals - .map(SplTokenAdditionalDataV2::with_decimals), - }), - ) -} - -#[deprecated(since = "2.2.0", note = "Use `parse_account_data_v3` instead")] -#[allow(deprecated)] -pub fn parse_account_data_v2( - pubkey: &Pubkey, - program_id: &Pubkey, - data: &[u8], - additional_data: Option, -) -> Result { - parse_account_data_v3(pubkey, program_id, data, additional_data.map(Into::into)) -} - pub fn parse_account_data_v3( pubkey: &Pubkey, program_id: &Pubkey, diff --git a/account-decoder/src/parse_token.rs b/account-decoder/src/parse_token.rs index 88354eec97c793..51954246282c27 100644 --- a/account-decoder/src/parse_token.rs +++ b/account-decoder/src/parse_token.rs @@ -1,8 +1,6 @@ use { crate::{ - parse_account_data::{ - ParsableAccount, ParseAccountError, SplTokenAdditionalData, SplTokenAdditionalDataV2, - }, + parse_account_data::{ParsableAccount, ParseAccountError, SplTokenAdditionalDataV2}, parse_token_extension::parse_extension, }, solana_program_option::COption, @@ -23,25 +21,6 @@ pub use { spl_generic_token::{is_known_spl_token_id, spl_token_ids}, }; -#[deprecated(since = "2.0.0", note = "Use `parse_token_v3` instead")] -#[allow(deprecated)] -pub fn parse_token( - data: &[u8], - decimals: Option, -) -> Result { - let additional_data = decimals.map(SplTokenAdditionalData::with_decimals); - parse_token_v2(data, additional_data.as_ref()) -} - -#[deprecated(since = "2.2.0", note = "Use `parse_token_v3` instead")] -pub fn parse_token_v2( - data: &[u8], - additional_data: Option<&SplTokenAdditionalData>, -) -> Result { - let additional_data = additional_data.map(|v| (*v).into()); - parse_token_v3(data, additional_data.as_ref()) -} - pub fn parse_token_v3( data: &[u8], additional_data: 
Option<&SplTokenAdditionalDataV2>, @@ -143,20 +122,6 @@ pub fn convert_account_state(state: AccountState) -> UiAccountState { } } -#[deprecated(since = "2.0.0", note = "Use `token_amount_to_ui_amount_v3` instead")] -#[allow(deprecated)] -pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount { - token_amount_to_ui_amount_v2(amount, &SplTokenAdditionalData::with_decimals(decimals)) -} - -#[deprecated(since = "2.2.0", note = "Use `token_amount_to_ui_amount_v3` instead")] -pub fn token_amount_to_ui_amount_v2( - amount: u64, - additional_data: &SplTokenAdditionalData, -) -> UiTokenAmount { - token_amount_to_ui_amount_v3(amount, &(*additional_data).into()) -} - pub fn token_amount_to_ui_amount_v3( amount: u64, additional_data: &SplTokenAdditionalDataV2, From 354b7bd8d382dd08b187947722625a007e3f53bd Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 23 Jul 2025 19:34:29 -0400 Subject: [PATCH 63/68] Removes accounts delta hash (#7112) --- accounts-db/src/accounts_hash.rs | 20 -------------------- runtime/src/bank/serde_snapshot.rs | 6 ++---- runtime/src/serde_snapshot.rs | 21 +++++---------------- runtime/src/serde_snapshot/tests.rs | 4 +--- runtime/src/snapshot_package.rs | 11 +---------- runtime/src/snapshot_utils.rs | 6 +----- 6 files changed, 10 insertions(+), 58 deletions(-) diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index d842aaa3f31c04..2999d7a4db72cd 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1279,26 +1279,6 @@ pub struct AccountsHash(pub Hash); #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct IncrementalAccountsHash(pub Hash); -/// Hash of accounts written in a single slot -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub struct AccountsDeltaHash(pub Hash); - -/// Snapshot serde-safe accounts delta hash -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)] -pub struct 
SerdeAccountsDeltaHash(pub Hash); - -impl From for AccountsDeltaHash { - fn from(accounts_delta_hash: SerdeAccountsDeltaHash) -> Self { - Self(accounts_delta_hash.0) - } -} -impl From for SerdeAccountsDeltaHash { - fn from(accounts_delta_hash: AccountsDeltaHash) -> Self { - Self(accounts_delta_hash.0) - } -} - /// Snapshot serde-safe accounts hash #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)] diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 394e266d7d3762..273216b5a93a29 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -25,7 +25,7 @@ mod tests { ACCOUNTS_DB_CONFIG_FOR_TESTING, }, accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, - accounts_hash::{AccountsDeltaHash, AccountsHash}, + accounts_hash::AccountsHash, }, solana_epoch_schedule::EpochSchedule, solana_genesis_config::create_genesis_config, @@ -143,7 +143,6 @@ mod tests { &mut writer, bank_fields, bank2.get_bank_hash_stats(), - AccountsDeltaHash(Hash::default()), // obsolete, will be removed next expected_accounts_hash, &get_storages_to_serialize(&bank2.get_snapshot_storages(None)), ExtraFieldsToSerialize { @@ -393,7 +392,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "7F6xtBno4tS6QaD3wP8kQxa9BiRtvmCu8TzFNprFjM7A") + frozen_abi(digest = "RHixw67oBUdJQn9TLES55Nb4Sr1wuAfo7NTJH56oRxb") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { @@ -424,7 +423,6 @@ mod tests { serializer, bank_fields, BankHashStats::default(), - AccountsDeltaHash(Hash::new_unique()), AccountsHash(Hash::new_unique()), &get_storages_to_serialize(&snapshot_storages), ExtraFieldsToSerialize { diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 41723dc15b17b4..6b9351c8fa81b2 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -19,7 +19,7 @@ use { 
AtomicAccountsFileId, DuplicatesLtHash, IndexGenerationInfo, }, accounts_file::{AccountsFile, StorageAccess}, - accounts_hash::{AccountsDeltaHash, AccountsHash, AccountsLtHash}, + accounts_hash::{AccountsHash, AccountsLtHash}, accounts_update_notifier_interface::AccountsUpdateNotifier, ancestors::AncestorsForSerialization, blockhash_queue::BlockhashQueue, @@ -61,9 +61,7 @@ mod types; mod utils; pub(crate) use { - solana_accounts_db::accounts_hash::{ - SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, - }, + solana_accounts_db::accounts_hash::{SerdeAccountsHash, SerdeIncrementalAccountsHash}, storage::{SerializableAccountStorageEntry, SerializedAccountsFileId}, }; @@ -110,7 +108,7 @@ pub struct BankIncrementalSnapshotPersistence { #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)] struct BankHashInfo { - accounts_delta_hash: SerdeAccountsDeltaHash, + obsolete_accounts_delta_hash: [u8; 32], accounts_hash: SerdeAccountsHash, stats: BankHashStats, } @@ -635,7 +633,6 @@ pub fn serialize_bank_snapshot_into( stream: &mut BufWriter, bank_fields: BankFieldsToSerialize, bank_hash_stats: BankHashStats, - accounts_delta_hash: AccountsDeltaHash, accounts_hash: AccountsHash, account_storage_entries: &[Vec>], extra_fields: ExtraFieldsToSerialize, @@ -652,7 +649,6 @@ where &mut serializer, bank_fields, bank_hash_stats, - accounts_delta_hash, accounts_hash, account_storage_entries, extra_fields, @@ -665,7 +661,6 @@ pub fn serialize_bank_snapshot_with( serializer: S, bank_fields: BankFieldsToSerialize, bank_hash_stats: BankHashStats, - accounts_delta_hash: AccountsDeltaHash, accounts_hash: AccountsHash, account_storage_entries: &[Vec>], extra_fields: ExtraFieldsToSerialize, @@ -680,7 +675,6 @@ where slot, account_storage_entries, bank_hash_stats, - accounts_delta_hash, accounts_hash, write_version, }; @@ -703,7 +697,6 @@ impl Serialize for SerializableBankAndStorage<'_> { let mut 
bank_fields = self.bank.get_fields_to_serialize(); let accounts_db = &self.bank.rc.accounts.accounts_db; let bank_hash_stats = self.bank.get_bank_hash_stats(); - let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; let write_version = accounts_db.write_version.load(Ordering::Acquire); let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature; @@ -715,7 +708,6 @@ impl Serialize for SerializableBankAndStorage<'_> { slot, account_storage_entries: self.snapshot_storages, bank_hash_stats, - accounts_delta_hash, accounts_hash, write_version, }, @@ -747,7 +739,6 @@ impl Serialize for SerializableBankAndStorageNoExtra<'_> { let bank_fields = self.bank.get_fields_to_serialize(); let accounts_db = &self.bank.rc.accounts.accounts_db; let bank_hash_stats = self.bank.get_bank_hash_stats(); - let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; let write_version = accounts_db.write_version.load(Ordering::Acquire); ( @@ -756,7 +747,6 @@ impl Serialize for SerializableBankAndStorageNoExtra<'_> { slot, account_storage_entries: self.snapshot_storages, bank_hash_stats, - accounts_delta_hash, accounts_hash, write_version, }, @@ -783,8 +773,7 @@ struct SerializableAccountsDb<'a> { slot: Slot, account_storage_entries: &'a [Vec>], bank_hash_stats: BankHashStats, - accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next - accounts_hash: AccountsHash, // obsolete, will be removed next + accounts_hash: AccountsHash, // obsolete, will be removed next write_version: u64, } @@ -806,7 +795,7 @@ impl Serialize for SerializableAccountsDb<'_> { ) })); let bank_hash_info = BankHashInfo { - accounts_delta_hash: self.accounts_delta_hash.into(), + obsolete_accounts_delta_hash: [0; 32], accounts_hash: self.accounts_hash.into(), stats: 
self.bank_hash_stats.clone(), }; diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index d87e413e8f486a..577d14e58d679f 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -22,7 +22,7 @@ mod serde_snapshot_tests { AccountsDb, AtomicAccountsFileId, }, accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, - accounts_hash::{AccountsDeltaHash, AccountsHash}, + accounts_hash::AccountsHash, ancestors::Ancestors, }, solana_clock::Slot, @@ -103,7 +103,6 @@ mod serde_snapshot_tests { W: Write, { let bank_hash_stats = BankHashStats::default(); - let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let accounts_hash = AccountsHash(Hash::default()); // obsolete, any value works let write_version = accounts_db.write_version.load(Ordering::Acquire); serialize_into( @@ -112,7 +111,6 @@ mod serde_snapshot_tests { slot, account_storage_entries, bank_hash_stats, - accounts_delta_hash, accounts_hash, write_version, }, diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 03085ca3a71420..5fb879d45af555 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -5,9 +5,7 @@ use { }, log::*, solana_accounts_db::{ - accounts::Accounts, - accounts_db::AccountStorageEntry, - accounts_hash::{AccountsDeltaHash, AccountsHash}, + accounts::Accounts, accounts_db::AccountStorageEntry, accounts_hash::AccountsHash, }, solana_clock::Slot, solana_epoch_schedule::EpochSchedule, @@ -67,14 +65,12 @@ impl AccountsPackage { let snapshot_info = { let accounts_db = &bank.rc.accounts.accounts_db; let write_version = accounts_db.write_version.load(Ordering::Acquire); - let accounts_delta_hash = AccountsDeltaHash(Hash::default()); // obsolete, any value works let bank_hash_stats = bank.get_bank_hash_stats(); let bank_fields_to_serialize = bank.get_fields_to_serialize(); SupplementalSnapshotInfo { status_cache_slot_deltas, 
bank_fields_to_serialize, bank_hash_stats, - accounts_delta_hash, write_version, } }; @@ -122,7 +118,6 @@ impl AccountsPackage { status_cache_slot_deltas: Vec::default(), bank_fields_to_serialize: BankFieldsToSerialize::default_for_tests(), bank_hash_stats: BankHashStats::default(), - accounts_delta_hash: AccountsDeltaHash(Hash::default()), write_version: u64::default(), }), enqueued: Instant::now(), @@ -145,7 +140,6 @@ pub struct SupplementalSnapshotInfo { pub status_cache_slot_deltas: Vec, pub bank_fields_to_serialize: BankFieldsToSerialize, pub bank_hash_stats: BankHashStats, - pub accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next pub write_version: u64, } @@ -167,7 +161,6 @@ pub struct SnapshotPackage { pub status_cache_slot_deltas: Vec, pub bank_fields_to_serialize: BankFieldsToSerialize, pub bank_hash_stats: BankHashStats, - pub accounts_delta_hash: AccountsDeltaHash, // obsolete, will be removed next pub accounts_hash: AccountsHash, pub write_version: u64, @@ -199,7 +192,6 @@ impl SnapshotPackage { snapshot_storages: accounts_package.snapshot_storages, status_cache_slot_deltas: snapshot_info.status_cache_slot_deltas, bank_fields_to_serialize: snapshot_info.bank_fields_to_serialize, - accounts_delta_hash: snapshot_info.accounts_delta_hash, bank_hash_stats: snapshot_info.bank_hash_stats, accounts_hash: AccountsHash(Hash::default()), // obsolete, will be removed next write_version: snapshot_info.write_version, @@ -221,7 +213,6 @@ impl SnapshotPackage { snapshot_storages: Vec::default(), status_cache_slot_deltas: Vec::default(), bank_fields_to_serialize: BankFieldsToSerialize::default_for_tests(), - accounts_delta_hash: AccountsDeltaHash(Hash::default()), bank_hash_stats: BankHashStats::default(), accounts_hash: AccountsHash(Hash::default()), write_version: u64::default(), diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 5cb10e68387f5e..3571a4c58d2646 100644 --- a/runtime/src/snapshot_utils.rs +++ 
b/runtime/src/snapshot_utils.rs @@ -25,7 +25,7 @@ use { account_storage_reader::AccountStorageReader, accounts_db::{AccountStorageEntry, AtomicAccountsFileId}, accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, - accounts_hash::{AccountsDeltaHash, AccountsHash}, + accounts_hash::AccountsHash, hardened_unpack::{self, ArchiveChunker, BytesChannelReader, MultiBytes, UnpackError}, utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, }, @@ -828,7 +828,6 @@ pub fn serialize_and_archive_snapshot_package( status_cache_slot_deltas, bank_fields_to_serialize, bank_hash_stats, - accounts_delta_hash, accounts_hash, write_version, enqueued: _, @@ -841,7 +840,6 @@ pub fn serialize_and_archive_snapshot_package( status_cache_slot_deltas.as_slice(), bank_fields_to_serialize, bank_hash_stats, - accounts_delta_hash, accounts_hash, write_version, should_flush_and_hard_link_storages, @@ -902,7 +900,6 @@ fn serialize_snapshot( slot_deltas: &[BankSlotDelta], mut bank_fields: BankFieldsToSerialize, bank_hash_stats: BankHashStats, - accounts_delta_hash: AccountsDeltaHash, accounts_hash: AccountsHash, write_version: u64, should_flush_and_hard_link_storages: bool, @@ -965,7 +962,6 @@ fn serialize_snapshot( stream, bank_fields, bank_hash_stats, - accounts_delta_hash, accounts_hash, &get_storages_to_serialize(snapshot_storages), extra_fields, From 79c6c0577f0f32bdee2d1f4dc1e35fa590e36497 Mon Sep 17 00:00:00 2001 From: Faycel Kouteib Date: Wed, 23 Jul 2025 17:12:10 -0700 Subject: [PATCH 64/68] accounts-db: Remove deprecated is_hash_valid (#7122) accounts-db: Remove deprecated is_hash_valid() --- accounts-db/src/blockhash_queue.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/accounts-db/src/blockhash_queue.rs b/accounts-db/src/blockhash_queue.rs index bc7ff77d4c145a..a971869568c7a4 100644 --- a/accounts-db/src/blockhash_queue.rs +++ b/accounts-db/src/blockhash_queue.rs @@ -69,12 +69,6 @@ impl BlockhashQueue { .map(|hash_age| 
hash_age.fee_calculator.lamports_per_signature) } - /// Check if the age of the hash is within the queue's max age - #[deprecated(since = "2.0.0", note = "Please use `is_hash_valid_for_age` instead")] - pub fn is_hash_valid(&self, hash: &Hash) -> bool { - self.hashes.contains_key(hash) - } - /// Check if the age of the hash is within the specified age pub fn is_hash_valid_for_age(&self, hash: &Hash, max_age: usize) -> bool { self.get_hash_info_if_valid(hash, max_age).is_some() From a44ceb0a2c1ab6d7af942cb32f2b8591725fa877 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Wed, 23 Jul 2025 21:31:23 -0300 Subject: [PATCH 65/68] Configure more SVM conformance tests (#7121) --- svm/tests/conformance.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs index cf0ae545769569..468aa2e7627ead 100644 --- a/svm/tests/conformance.rs +++ b/svm/tests/conformance.rs @@ -112,9 +112,30 @@ fn execute_fixtures() { run_from_folder(&base_dir); base_dir.pop(); + // bpf-loader-v2 tests + base_dir.push("bpf-loader-v2"); + run_from_folder(&base_dir); + base_dir.pop(); + + // bpf-loader-v3 tests + base_dir.push("bpf-loader-v3"); + run_from_folder(&base_dir); + base_dir.pop(); + + // bpf-loader-v3-programs tests + base_dir.push("bpf-loader-v3-programs"); + run_from_folder(&base_dir); + base_dir.pop(); + // System program tests base_dir.push("system"); run_from_folder(&base_dir); + base_dir.pop(); + + // non-builtin-programs tests + base_dir.push("unknown"); + run_from_folder(&base_dir); + base_dir.pop(); cleanup(); } From 700b083401477c7d62d614a4bd678196a7fd0e54 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Thu, 24 Jul 2025 06:28:52 +0300 Subject: [PATCH 66/68] Update more blockstore.rs tests to use merkle shreds (#6163) --- ledger/src/blockstore.rs | 236 +++++++++++++++++---------------- 1 file changed, 103 insertions(+), 133 deletions(-) diff --git a/ledger/src/blockstore.rs
b/ledger/src/blockstore.rs index 8794dc19e0c5f6..c11d4c80cab6a3 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -5339,7 +5339,7 @@ pub mod tests { crate::{ genesis_utils::{create_genesis_config, GenesisConfigInfo}, leader_schedule::{FixedSchedule, IdentityKeyedLeaderSchedule}, - shred::{max_ticks_per_n_shreds, ShredFlags, LEGACY_SHRED_DATA_CAPACITY}, + shred::{max_ticks_per_n_shreds, ShredFlags}, }, assert_matches::assert_matches, bincode::{serialize, Options}, @@ -5366,7 +5366,7 @@ pub mod tests { solana_transaction_status::{ InnerInstruction, InnerInstructions, Reward, Rewards, TransactionTokenBalance, }, - std::{cmp::Ordering, thread::Builder, time::Duration}, + std::{cmp::Ordering, time::Duration}, test_case::test_case, }; @@ -10773,21 +10773,60 @@ pub mod tests { assert!(!blockstore.is_dead(0)); } + /// Prepare two FEC sets of shreds for the same slot index + /// with reasonable shred indices, but in such a way that + /// both FEC sets include a shred with LAST_IN_SLOT flag set. 
+ #[allow(clippy::type_complexity)] + fn setup_duplicate_last_in_slot( + slot: Slot, + ) -> ((Vec, Vec), (Vec, Vec)) { + let entries = make_slot_entries_with_transactions(1); + let leader_keypair = Arc::new(Keypair::new()); + let reed_solomon_cache = ReedSolomonCache::default(); + let shredder = Shredder::new(slot, 0, 0, 0).unwrap(); + let (shreds1, code1): (Vec, Vec) = shredder + .make_merkle_shreds_from_entries( + &leader_keypair, + &entries, + true, // is_last_in_slot + Some(Hash::new_unique()), + 0, // next_shred_index + 0, // next_code_index, + &reed_solomon_cache, + &mut ProcessShredsStats::default(), + ) + .partition(Shred::is_data); + let last_data1 = shreds1.last().unwrap(); + let last_code1 = code1.last().unwrap(); + + let (shreds2, code2) = shredder + .make_merkle_shreds_from_entries( + &leader_keypair, + &entries, + true, // is_last_in_slot + Some(last_data1.chained_merkle_root().unwrap()), + last_data1.index() + 1, // next_shred_index + last_code1.index() + 1, // next_code_index, + &reed_solomon_cache, + &mut ProcessShredsStats::default(), + ) + .partition(Shred::is_data); + ((shreds1, code1), (shreds2, code2)) + } + #[test] fn test_duplicate_last_index() { - let num_shreds = 2; - let num_entries = max_ticks_per_n_shreds(num_shreds, None); let slot = 1; - let (mut shreds, _) = - make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false); + let ((shreds1, _code1), (shreds2, _code2)) = setup_duplicate_last_in_slot(slot); - // Mark both as last shred - shreds[0].set_last_in_slot(); - shreds[1].set_last_in_slot(); + let last_data1 = shreds1.last().unwrap(); + let last_data2 = shreds2.last().unwrap(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore + .insert_shreds(vec![last_data1.clone(), last_data2.clone()], None, false) + .unwrap(); assert!(blockstore.get_duplicate_slot(slot).is_some()); } @@ -10795,16 
+10834,13 @@ pub mod tests { #[test] fn test_duplicate_last_index_mark_dead() { let num_shreds = 10; - let smaller_last_shred_index = 5; + let smaller_last_shred_index = 31; let larger_last_shred_index = 8; let setup_test_shreds = |slot: Slot| -> Vec { - let num_entries = max_ticks_per_n_shreds(num_shreds, Some(LEGACY_SHRED_DATA_CAPACITY)); - let (mut shreds, _) = - make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false); - shreds[smaller_last_shred_index].set_last_in_slot(); - shreds[larger_last_shred_index].set_last_in_slot(); - shreds + let ((mut shreds1, _code1), (mut shreds2, _code2)) = setup_duplicate_last_in_slot(slot); + shreds1.append(&mut shreds2); + shreds1 }; let get_expected_slot_meta_and_index_meta = @@ -10863,38 +10899,6 @@ pub mod tests { assert_eq!(meta, expected_slot_meta); assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index); - // Case 2: Inserting a duplicate with an even smaller last shred index should not - // mark the slot as dead since the Slotmeta is full. 
- let even_smaller_last_shred_duplicate = { - let mut payload = shreds[smaller_last_shred_index - 1].payload().clone(); - // Flip a byte to create a duplicate shred - payload[0] = u8::MAX - payload[0]; - let mut shred = Shred::new_from_serialized_shred(payload).unwrap(); - shred.set_last_in_slot(); - shred - }; - assert!(blockstore - .is_shred_duplicate(&even_smaller_last_shred_duplicate) - .is_some()); - blockstore - .insert_shreds(vec![even_smaller_last_shred_duplicate], None, false) - .unwrap(); - assert!(!blockstore.is_dead(slot)); - for i in 0..num_shreds { - if i <= smaller_last_shred_index as u64 { - assert_eq!( - blockstore.get_data_shred(slot, i).unwrap().unwrap(), - shreds[i as usize].payload().as_ref(), - ); - } else { - assert!(blockstore.get_data_shred(slot, i).unwrap().is_none()); - } - } - let mut meta = blockstore.meta(slot).unwrap().unwrap(); - meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp; - assert_eq!(meta, expected_slot_meta); - assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index); - // Case 3: Insert shreds in reverse so that consumed will not be updated. 
Now on insert, the // the slot should be marked as dead slot += 1; @@ -10964,24 +10968,6 @@ pub mod tests { #[test] fn test_get_slot_entries_dead_slot_race() { - let setup_test_shreds = move |slot: Slot| -> Vec { - let num_shreds = 10; - let middle_shred_index = 5; - let num_entries = max_ticks_per_n_shreds(num_shreds, None); - let (shreds, _) = - make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false); - - // Reverse shreds so that last shred gets inserted first and sets meta.received - let mut shreds: Vec = shreds.into_iter().rev().collect(); - - // Push the real middle shred to the end of the shreds list - shreds.push(shreds[middle_shred_index].clone()); - - // Set the middle shred as a last shred to cause the slot to be marked dead - shreds[middle_shred_index].set_last_in_slot(); - shreds - }; - let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); @@ -10989,79 +10975,63 @@ pub mod tests { let (shred_sender, shred_receiver) = unbounded::>(); let (signal_sender, signal_receiver) = unbounded(); - let t_entry_getter = { - let blockstore = blockstore.clone(); - let signal_sender = signal_sender.clone(); - Builder::new() - .spawn(move || { - while let Ok(slot) = slot_receiver.recv() { - match blockstore.get_slot_entries_with_shred_info(slot, 0, false) { - Ok((_entries, _num_shreds, is_full)) => { - if is_full { - signal_sender - .send(Err(IoError::other( - "got full slot entries for dead slot", - ))) - .unwrap(); - } - } - Err(err) => { - assert_matches!(err, BlockstoreError::DeadSlot); + std::thread::scope(|scope| { + scope.spawn(|| { + while let Ok(slot) = slot_receiver.recv() { + match blockstore.get_slot_entries_with_shred_info(slot, 0, false) { + Ok((_entries, _num_shreds, is_full)) => { + if is_full { + signal_sender + .send(Err(IoError::other( + "got full slot entries for dead slot", + ))) + .unwrap(); } } - signal_sender.send(Ok(())).unwrap(); - } - }) - .unwrap() - }; - - 
let t_shred_inserter = { - let blockstore = blockstore.clone(); - Builder::new() - .spawn(move || { - while let Ok(shreds) = shred_receiver.recv() { - let slot = shreds[0].slot(); - // Grab this lock to block `get_slot_entries` before it fetches completed datasets - // and then mark the slot as dead, but full, by inserting carefully crafted shreds. - - #[allow(clippy::readonly_write_lock)] - // Possible clippy bug, the lock is unused so clippy shouldn't care - // about read vs. write lock - let _lowest_cleanup_slot = - blockstore.lowest_cleanup_slot.write().unwrap(); - blockstore.insert_shreds(shreds, None, false).unwrap(); - assert!(blockstore.get_duplicate_slot(slot).is_some()); - assert!(blockstore.is_dead(slot)); - assert!(blockstore.meta(slot).unwrap().unwrap().is_full()); - signal_sender.send(Ok(())).unwrap(); + Err(err) => { + assert_matches!(err, BlockstoreError::DeadSlot); + } } - }) - .unwrap() - }; - - for slot in 0..100 { - let shreds = setup_test_shreds(slot); + signal_sender.send(Ok(())).unwrap(); + } + }); - // Start a task on each thread to trigger a race condition - slot_sender.send(slot).unwrap(); - shred_sender.send(shreds).unwrap(); + scope.spawn(|| { + while let Ok(shreds) = shred_receiver.recv() { + let slot = shreds[0].slot(); + // Grab this lock to block `get_slot_entries` before it fetches completed datasets + // and then mark the slot as dead, but full, by inserting carefully crafted shreds. + + #[allow(clippy::readonly_write_lock)] + // Possible clippy bug, the lock is unused so clippy shouldn't care + // about read vs. 
write lock + let _lowest_cleanup_slot = blockstore.lowest_cleanup_slot.write().unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + assert!(blockstore.get_duplicate_slot(slot).is_some()); + assert!(blockstore.is_dead(slot)); + signal_sender.send(Ok(())).unwrap(); + } + }); - // Check that each thread processed their task before continuing - for _ in 1..=2 { - let res = signal_receiver.recv().unwrap(); - assert!(res.is_ok(), "race condition: {res:?}"); + for slot in 0..100 { + let ((mut shreds1, _), (mut shreds2, _)) = setup_duplicate_last_in_slot(slot); + // compose shreds in reverse order of FEC sets to + // make sure slot is marked dead + shreds2.append(&mut shreds1); + // Start a task on each thread to trigger a race condition + slot_sender.send(slot).unwrap(); + shred_sender.send(shreds2).unwrap(); + + // Check that each thread processed their task before continuing + for _ in 1..=2 { + let res = signal_receiver.recv().unwrap(); + assert!(res.is_ok(), "race condition: {res:?}"); + } } - } - drop(slot_sender); - drop(shred_sender); - - let handles = vec![t_entry_getter, t_shred_inserter]; - for handle in handles { - assert!(handle.join().is_ok()); - } - - assert!(Arc::strong_count(&blockstore) == 1); + drop(slot_sender); + drop(shred_sender); + }); } } From 753deedc47d82271bb26a45ff1cdf50dfbc07904 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Wed, 23 Jul 2025 22:04:09 -0700 Subject: [PATCH 67/68] svm tests: add loaderv3 program cache tests (#7050) --- svm/tests/integration_test.rs | 353 ++++++++++++++++++++++++++++------ svm/tests/mock_bank.rs | 25 ++- 2 files changed, 312 insertions(+), 66 deletions(-) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 9dd7e60f3e6987..3c6a6f410f0779 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -3,7 +3,7 @@ use { crate::mock_bank::{ - create_custom_loader, deploy_program_with_upgrade_authority, 
program_address, + create_custom_loader, deploy_program_with_upgrade_authority, load_program, program_address, program_data_size, register_builtins, MockBankCallback, MockForkGraph, EXECUTION_EPOCH, EXECUTION_SLOT, WALLCLOCK_TIME, }, @@ -16,19 +16,25 @@ use { solana_hash::Hash, solana_instruction::{AccountMeta, Instruction}, solana_keypair::Keypair, - solana_loader_v3_interface as bpf_loader_upgradeable, + solana_loader_v3_interface::{ + get_program_data_address, instruction as loaderv3_instruction, + state::UpgradeableLoaderState, + }, solana_native_token::LAMPORTS_PER_SOL, solana_nonce::{self as nonce, state::DurableNonce}, solana_program_entrypoint::MAX_PERMITTED_DATA_INCREASE, solana_program_runtime::execution_budget::SVMTransactionExecutionAndFeeBudgetLimits, solana_pubkey::{pubkey, Pubkey}, - solana_sdk_ids::native_loader, + solana_sdk_ids::{bpf_loader_upgradeable, native_loader}, solana_signer::Signer, solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, nonce_info::NonceInfo, transaction_execution_result::TransactionExecutionDetails, - transaction_processing_result::{ProcessedTransaction, TransactionProcessingResult}, + transaction_processing_result::{ + ProcessedTransaction, TransactionProcessingResult, + TransactionProcessingResultExtensions, + }, transaction_processor::{ ExecutionRecordingConfig, LoadAndExecuteSanitizedTransactionsOutput, TransactionBatchProcessor, TransactionProcessingConfig, @@ -250,8 +256,43 @@ impl SvmTestEnvironment<'_> { let mut mock_bank_accounts = self.mock_bank.account_shared_data.write().unwrap(); mock_bank_accounts.extend(final_accounts_actual); + // update global program cache + for processing_result in batch_output.processing_results.iter() { + if let Some(ProcessedTransaction::Executed(executed_tx)) = + processing_result.processed_transaction() + { + let programs_modified_by_tx = &executed_tx.programs_modified_by_tx; + if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() { + 
self.batch_processor + .program_cache + .write() + .unwrap() + .merge(programs_modified_by_tx); + } + } + } + batch_output } + + pub fn is_program_blocked(&self, program_id: &Pubkey) -> bool { + let (_, program_cache_entry) = self + .batch_processor + .program_cache + .read() + .unwrap() + .get_flattened_entries_for_tests() + .into_iter() + .rev() + .find(|(key, _)| key == program_id) + .unwrap(); + + // in the same batch, a new valid loaderv3 program may have a Loaded entry with a later execution slot + // in a later batch, the same loaderv3 program will have a DelayedVisibility tombstone + // a new loaderv1/v2 account will have a FailedVerification tombstone + // and a closed loaderv3 program or any loaderv3 buffer will have a Closed tombstone + program_cache_entry.effective_slot > EXECUTION_SLOT || program_cache_entry.is_tombstone() + } } // container for a transaction batch and all data needed to run and verify it against svm @@ -2267,55 +2308,6 @@ fn simd83_account_reallocate(formalize_loaded_transaction_data_size: bool) -> Ve test_entries } -fn program_cache_update_tombstone() -> Vec { - let mut test_entry = SvmTestEntry::default(); - - let program_name = "hello-solana"; - let program_id = program_address(program_name); - - let fee_payer_keypair = Keypair::new(); - let fee_payer = fee_payer_keypair.pubkey(); - - let mut fee_payer_data = AccountSharedData::default(); - fee_payer_data.set_lamports(LAMPORTS_PER_SOL); - test_entry.add_initial_account(fee_payer, &fee_payer_data); - - test_entry - .initial_programs - .push((program_name.to_string(), DEPLOYMENT_SLOT, Some(fee_payer))); - - // 0: close a deployed program - let instruction = bpf_loader_upgradeable::instruction::close_any( - &bpf_loader_upgradeable::get_program_data_address(&program_id), - &Pubkey::new_unique(), - Some(&fee_payer), - Some(&program_id), - ); - test_entry.push_transaction(Transaction::new_signed_with_payer( - &[instruction], - Some(&fee_payer), - &[&fee_payer_keypair], - Hash::default(), 
- )); - - // 1: attempt to invoke it, which must fail - // this ensures the local program cache reflects the change of state - let instruction = Instruction::new_with_bytes(program_id, &[], vec![]); - test_entry.push_transaction_with_status( - Transaction::new_signed_with_payer( - &[instruction], - Some(&fee_payer), - &[&fee_payer_keypair], - Hash::default(), - ), - ExecutionStatus::ExecutedFailed, - ); - - test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE * 2); - - vec![test_entry] -} - #[test_case(program_medley())] #[test_case(simple_transfer())] #[test_case(simple_nonce(false))] @@ -2327,7 +2319,6 @@ fn program_cache_update_tombstone() -> Vec { #[test_case(simd83_fee_payer_deallocate())] #[test_case(simd83_account_reallocate(false))] #[test_case(simd83_account_reallocate(true))] -#[test_case(program_cache_update_tombstone())] fn svm_integration(test_entries: Vec) { for test_entry in test_entries { let env = SvmTestEnvironment::create(test_entry); @@ -2356,6 +2347,7 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { let new_account_keypair = Keypair::new(); let program_id = new_account_keypair.pubkey(); + // create an account owned by a loader let create_transaction = system_transaction::create_account( &fee_payer_keypair, &new_account_keypair, @@ -2370,6 +2362,7 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { test_entry .decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SOL + LAMPORTS_PER_SIGNATURE * 2); + // attempt to invoke the new account let invoke_transaction = Transaction::new_signed_with_payer( &[Instruction::new_with_bytes(program_id, &[], vec![])], Some(&fee_payer), @@ -2377,6 +2370,8 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { Hash::default(), ); + // fails at load-time for executable flag if feature is disabled + // if feature is enabled fails at execution let expected_status = if remove_accounts_executable_flag_checks { 
ExecutionStatus::ExecutedFailed } else { @@ -2384,10 +2379,7 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { }; test_entry.push_transaction_with_status(invoke_transaction.clone(), expected_status); - - if expected_status != ExecutionStatus::Discarded { - test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); - } + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); let mut env = SvmTestEnvironment::create(test_entry); @@ -2401,10 +2393,7 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { }; test_entry.push_transaction_with_status(invoke_transaction, expected_status); - - if expected_status != ExecutionStatus::Discarded { - test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); - } + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); // test in different entry same slot env.test_entry = test_entry; @@ -2412,6 +2401,246 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { } } +#[test_case(false, false; "close::scan_only")] +#[test_case(false, true; "close::invoke")] +#[test_case(true, false; "upgrade::scan_only")] +#[test_case(true, true; "upgrade::invoke")] +fn program_cache_loaderv3_update_tombstone(upgrade_program: bool, invoke_changed_program: bool) { + let mut test_entry = SvmTestEntry::default(); + + let program_name = "hello-solana"; + let program_id = program_address(program_name); + + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + test_entry + .initial_programs + .push((program_name.to_string(), DEPLOYMENT_SLOT, Some(fee_payer))); + + let buffer_address = Pubkey::new_unique(); + + // upgrade or close a deployed program + let change_instruction = if upgrade_program { + 
let mut data = bincode::serialize(&UpgradeableLoaderState::Buffer { + authority_address: Some(fee_payer), + }) + .unwrap(); + let mut program_bytecode = load_program(program_name.to_string()); + data.append(&mut program_bytecode); + + let buffer_account = AccountSharedData::create( + LAMPORTS_PER_SOL, + data, + bpf_loader_upgradeable::id(), + true, + u64::MAX, + ); + + test_entry.add_initial_account(buffer_address, &buffer_account); + test_entry.drop_expected_account(buffer_address); + + loaderv3_instruction::upgrade( + &program_id, + &buffer_address, + &fee_payer, + &Pubkey::new_unique(), + ) + } else { + loaderv3_instruction::close_any( + &get_program_data_address(&program_id), + &Pubkey::new_unique(), + Some(&fee_payer), + Some(&program_id), + ) + }; + + test_entry.push_transaction(Transaction::new_signed_with_payer( + &[change_instruction], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + )); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + let invoke_transaction = Transaction::new_signed_with_payer( + &[Instruction::new_with_bytes(program_id, &[], vec![])], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + ); + + // attempt to invoke the program, which must fail + // this ensures the local program cache reflects the change of state + // we have cases without this so we can assert the cache *before* the invoke contains the tombstone + if invoke_changed_program { + test_entry.push_transaction_with_status( + invoke_transaction.clone(), + ExecutionStatus::ExecutedFailed, + ); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + } + + let mut env = SvmTestEnvironment::create(test_entry); + + // test in same entry as program change + env.execute(); + assert!(env.is_program_blocked(&program_id)); + + let mut test_entry = SvmTestEntry { + initial_accounts: env.test_entry.final_accounts.clone(), + final_accounts: env.test_entry.final_accounts.clone(), + 
..SvmTestEntry::default() + }; + + test_entry.push_transaction_with_status(invoke_transaction, ExecutionStatus::ExecutedFailed); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + // test in different entry same slot + env.test_entry = test_entry; + env.execute(); + assert!(env.is_program_blocked(&program_id)); +} + +#[test_case(false; "upgrade::scan_only")] +#[test_case(true; "upgrade::invoke")] +fn program_cache_loaderv3_buffer_swap(invoke_changed_program: bool) { + let mut test_entry = SvmTestEntry::default(); + + let program_name = "hello-solana"; + + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL * 10); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + // this account will start as a buffer and then become a program + // buffers make their way into the program cache + // so we test that pathological address reuse is not a problem + let target_keypair = Keypair::new(); + let target = target_keypair.pubkey(); + let programdata_address = get_program_data_address(&target); + + // we have the same buffer ready at a different address to deploy from + let deploy_keypair = Keypair::new(); + let deploy = deploy_keypair.pubkey(); + + let mut buffer_data = bincode::serialize(&UpgradeableLoaderState::Buffer { + authority_address: Some(fee_payer), + }) + .unwrap(); + let mut program_bytecode = load_program(program_name.to_string()); + buffer_data.append(&mut program_bytecode); + + let buffer_account = AccountSharedData::create( + LAMPORTS_PER_SOL, + buffer_data.clone(), + bpf_loader_upgradeable::id(), + true, + u64::MAX, + ); + + test_entry.add_initial_account(target, &buffer_account); + test_entry.add_initial_account(deploy, &buffer_account); + + let program_data = bincode::serialize(&UpgradeableLoaderState::Program { + programdata_address, + }) + .unwrap(); + let program_account = 
AccountSharedData::create( + LAMPORTS_PER_SOL, + program_data, + bpf_loader_upgradeable::id(), + true, + u64::MAX, + ); + test_entry.update_expected_account_data(target, &program_account); + test_entry.drop_expected_account(deploy); + + // close the buffer + let close_instruction = + loaderv3_instruction::close_any(&target, &Pubkey::new_unique(), Some(&fee_payer), None); + + // reopen as a program + #[allow(deprecated)] + let deploy_instruction = loaderv3_instruction::deploy_with_max_program_len( + &fee_payer, + &target, + &deploy, + &fee_payer, + LAMPORTS_PER_SOL, + buffer_data.len(), + ) + .unwrap(); + + test_entry.push_transaction(Transaction::new_signed_with_payer( + &[close_instruction], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + )); + + test_entry.push_transaction(Transaction::new_signed_with_payer( + &deploy_instruction, + Some(&fee_payer), + &[&fee_payer_keypair, &target_keypair], + Hash::default(), + )); + + test_entry.decrease_expected_lamports( + &fee_payer, + Rent::default().minimum_balance( + UpgradeableLoaderState::size_of_programdata_metadata() + buffer_data.len(), + ) + LAMPORTS_PER_SIGNATURE * 3, + ); + + let invoke_transaction = Transaction::new_signed_with_payer( + &[Instruction::new_with_bytes(target, &[], vec![])], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + ); + + if invoke_changed_program { + test_entry.push_transaction_with_status( + invoke_transaction.clone(), + ExecutionStatus::ExecutedFailed, + ); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + } + + let mut env = SvmTestEnvironment::create(test_entry); + + // test in same entry as program change + env.execute(); + assert!(env.is_program_blocked(&target)); + + let mut test_entry = SvmTestEntry { + initial_accounts: env.test_entry.final_accounts.clone(), + final_accounts: env.test_entry.final_accounts.clone(), + ..SvmTestEntry::default() + }; + + test_entry.push_transaction_with_status(invoke_transaction, 
ExecutionStatus::ExecutedFailed); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + // test in different entry same slot + env.test_entry = test_entry; + env.execute(); + assert!(env.is_program_blocked(&target)); +} + #[derive(Clone, PartialEq, Eq)] enum Inspect<'a> { LiveRead(&'a AccountSharedData), diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 25998d41a46be8..ba270445d69858 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -5,11 +5,12 @@ use solana_sysvar::recent_blockhashes::{Entry as BlockhashesEntry, RecentBlockha use { solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, solana_bpf_loader_program::syscalls::{ - SyscallAbort, SyscallGetClockSysvar, SyscallGetRentSysvar, SyscallInvokeSignedRust, - SyscallLog, SyscallMemcmp, SyscallMemcpy, SyscallMemmove, SyscallMemset, - SyscallSetReturnData, + SyscallAbort, SyscallGetClockSysvar, SyscallGetEpochScheduleSysvar, SyscallGetRentSysvar, + SyscallInvokeSignedRust, SyscallLog, SyscallMemcmp, SyscallMemcpy, SyscallMemmove, + SyscallMemset, SyscallSetReturnData, }, solana_clock::{Clock, Slot, UnixTimestamp}, + solana_epoch_schedule::EpochSchedule, solana_fee_structure::{FeeDetails, FeeStructure}, solana_loader_v3_interface::{self as bpf_loader_upgradeable, state::UpgradeableLoaderState}, solana_program_runtime::{ @@ -176,10 +177,20 @@ impl MockBankCallback { .write() .unwrap() .insert(RecentBlockhashes::id(), account_data); + + // EpochSchedule is required for non-mocked LoaderV3 deploy + let epoch_schedule = EpochSchedule::without_warmup(); + + let mut account_data = AccountSharedData::default(); + account_data.set_data(bincode::serialize(&epoch_schedule).unwrap()); + self.account_shared_data + .write() + .unwrap() + .insert(EpochSchedule::id(), account_data); } } -fn load_program(name: String) -> Vec { +pub fn load_program(name: String) -> Vec { // Loading the program file let mut dir = env::current_dir().unwrap(); 
dir.push("tests"); @@ -400,4 +411,10 @@ pub fn create_custom_loader<'a>() -> BuiltinProgram> { .register_function("sol_get_rent_sysvar", SyscallGetRentSysvar::vm) .expect("Registration failed"); loader + .register_function( + "sol_get_epoch_schedule_sysvar", + SyscallGetEpochScheduleSysvar::vm, + ) + .expect("Registration failed"); + loader } From b2dfc6273b585e1ab263fc352248bcece1e74c34 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 24 Jul 2025 08:03:07 -0500 Subject: [PATCH 68/68] ledger: Update make_slot_entries() to make merkle shreds only (#7097) The merkle_variant parameter has been removed as well --- core/src/consensus.rs | 16 ++--- core/src/repair/repair_service.rs | 16 ++--- core/src/replay_stage.rs | 25 +++----- ledger/src/ancestor_iterator.rs | 6 +- ledger/src/blockstore.rs | 75 +++++++---------------- ledger/src/blockstore/blockstore_purge.rs | 4 +- ledger/src/leader_schedule_cache.rs | 4 +- 7 files changed, 50 insertions(+), 96 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index ec4d08c6dd8a61..d1da8af2a105ca 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -3246,11 +3246,11 @@ pub mod test { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(1, 0, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(3, 1, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(4, 1, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(!blockstore.is_root(0)); assert!(!blockstore.is_root(1)); @@ -3282,11 +3282,11 @@ pub mod test { let ledger_path = get_tmp_ledger_path_auto_delete!(); let 
blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(1, 0, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(3, 1, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(4, 1, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); blockstore.set_roots(std::iter::once(&3)).unwrap(); assert!(!blockstore.is_root(0)); @@ -3310,9 +3310,9 @@ pub mod test { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(1, 0, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(3, 1, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(!blockstore.is_root(0)); assert!(!blockstore.is_root(1)); diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 5bfef19cc68127..3b4adb8d171d1d 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -1349,8 +1349,8 @@ mod test { let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Create some orphan slots - let (mut shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true); - let (shreds2, _) = make_slot_entries(5, 2, 1, /*merkle_variant:*/ true); + let (mut shreds, _) = make_slot_entries(1, 0, 1); + let (shreds2, _) = make_slot_entries(5, 2, 1); shreds.extend(shreds2); blockstore.insert_shreds(shreds, None, false).unwrap(); let mut repair_weight = RepairWeight::new(0); @@ -1378,7 +1378,7 @@ 
mod test { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let (shreds, _) = make_slot_entries(2, 0, 1, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(2, 0, 1); // Write this shred to slot 2, should chain to slot 0, which we haven't received // any shreds for @@ -1486,7 +1486,6 @@ mod test { 0, // slot 0, // parent_slot num_entries_per_slot as u64, - true, // merkle_variant ); let num_shreds_per_slot = shreds.len() as u64; @@ -1580,7 +1579,6 @@ mod test { i, // slot parent, num_entries_per_slot as u64, - true, // merkle_variant ); blockstore.insert_shreds(shreds, None, false).unwrap(); @@ -1618,7 +1616,6 @@ mod test { dead_slot, // slot dead_slot - 1, // parent_slot num_entries_per_slot, - true, // merkle_variant ); blockstore .insert_shreds(shreds[..shreds.len() - 1].to_vec(), None, false) @@ -1665,12 +1662,7 @@ mod test { // Insert some shreds to create a SlotMeta, let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1; - let (mut shreds, _) = make_slot_entries( - dead_slot, - dead_slot - 1, - num_entries_per_slot, - true, // merkle_variant - ); + let (mut shreds, _) = make_slot_entries(dead_slot, dead_slot - 1, num_entries_per_slot); blockstore .insert_shreds(shreds[..shreds.len() - 1].to_vec(), None, false) .unwrap(); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index f7fdf71662c1ca..b8b0ab39fd1334 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -4557,7 +4557,6 @@ pub(crate) mod tests { NUM_CONSECUTIVE_LEADER_SLOTS, // slot 1, // parent_slot 8, // num_entries - true, // merkle_variant ); blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(bank_forks @@ -4583,12 +4582,7 @@ pub(crate) mod tests { // Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS, // chaining to slot 1 - let (shreds, _) = make_slot_entries( - 2 * NUM_CONSECUTIVE_LEADER_SLOTS, - 1, - 8, - true, // merkle_variant - ); + let (shreds, _) = 
make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8); blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(bank_forks .read() @@ -6472,17 +6466,15 @@ pub(crate) mod tests { // Simulate repair fixing slot 3 and 5 let (shreds, _) = make_slot_entries( - 3, // slot - 1, // parent_slot - 8, // num_entries - true, // merkle_variant + 3, // slot + 1, // parent_slot + 8, // num_entries ); blockstore.insert_shreds(shreds, None, false).unwrap(); let (shreds, _) = make_slot_entries( - 5, // slot - 3, // parent_slot - 8, // num_entries - true, // merkle_variant + 5, // slot + 3, // parent_slot + 8, // num_entries ); blockstore.insert_shreds(shreds, None, false).unwrap(); @@ -9267,8 +9259,7 @@ pub(crate) mod tests { let dummy_slot = working_bank.slot() + 2; let initial_slot = working_bank.slot(); let num_entries = 10; - let merkle_variant = true; - let (shreds, _) = make_slot_entries(dummy_slot, initial_slot, num_entries, merkle_variant); + let (shreds, _) = make_slot_entries(dummy_slot, initial_slot, num_entries); blockstore.insert_shreds(shreds, None, false).unwrap(); // Reset PoH recorder to the completed bank to ensure consistent state diff --git a/ledger/src/ancestor_iterator.rs b/ledger/src/ancestor_iterator.rs index 94333e737f1d85..d736f578c468d9 100644 --- a/ledger/src/ancestor_iterator.rs +++ b/ledger/src/ancestor_iterator.rs @@ -120,11 +120,11 @@ mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let (shreds, _) = make_slot_entries(0, 0, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(0, 0, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(1, 0, 42); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (shreds, _) = make_slot_entries(2, 1, 42, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(2, 1, 42); 
blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!( diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index c11d4c80cab6a3..9d5259f67953cd 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -5119,9 +5119,9 @@ pub fn make_slot_entries( slot: Slot, parent_slot: Slot, num_entries: u64, - merkle_variant: bool, ) -> (Vec, Vec) { let entries = create_ticks(num_entries, 1, Hash::new_unique()); + let merkle_variant = true; let shreds = entries_to_test_shreds(&entries, slot, parent_slot, true, 0, merkle_variant); (shreds, entries) } @@ -5137,12 +5137,7 @@ pub fn make_many_slot_entries( for slot in start_slot..start_slot + num_slots { let parent_slot = if slot == 0 { 0 } else { slot - 1 }; - let (slot_shreds, slot_entries) = make_slot_entries( - slot, - parent_slot, - entries_per_slot, - true, // merkle_variant - ); + let (slot_shreds, slot_entries) = make_slot_entries(slot, parent_slot, entries_per_slot); shreds.extend(slot_shreds); entries.extend(slot_entries); } @@ -5268,12 +5263,7 @@ pub fn make_chaining_slot_entries( } }; - let result = make_slot_entries( - *slot, - parent_slot, - entries_per_slot, - true, // merkle_variant - ); + let result = make_slot_entries(*slot, parent_slot, entries_per_slot); slots_shreds_and_entries.push(result); } @@ -5392,8 +5382,7 @@ pub mod tests { let (shreds, _) = make_slot_entries( slot, parent_slot, - 100, // num_entries - true, // merkle_variant + 100, // num_entries ); blockstore.insert_shreds(shreds, None, true).unwrap(); @@ -5441,7 +5430,6 @@ pub mod tests { 0, // slot 0, // parent_slot num_entries, - true, // merkle_variant ); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -5641,7 +5629,7 @@ pub mod tests { #[test] fn test_read_shred_bytes() { let slot = 0; - let (shreds, _) = make_slot_entries(slot, 0, 100, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(slot, 0, 100); let num_shreds = shreds.len() as u64; let shred_bufs: Vec<_> = 
shreds.iter().map(Shred::payload).cloned().collect(); @@ -5697,7 +5685,7 @@ pub mod tests { #[test] fn test_shred_cleanup_check() { let slot = 1; - let (shreds, _) = make_slot_entries(slot, 0, 100, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(slot, 0, 100); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5726,7 +5714,6 @@ pub mod tests { 0, // slot 0, // parent_slot num_entries, - true, // merkle_variant ); let num_shreds = shreds.len() as u64; @@ -5774,7 +5761,6 @@ pub mod tests { 0, // slot 0, // parent_slot num_entries, - true, // merkle_variant ); let num_shreds = shreds.len() as u64; @@ -5904,12 +5890,7 @@ pub mod tests { let parent_slot = if i == 0 { 0 } else { i - 1 }; // Write entries let num_entries = min_entries * (i + 1); - let (shreds, original_entries) = make_slot_entries( - slot, - parent_slot, - num_entries, - true, // merkle_variant - ); + let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries); let num_shreds = shreds.len() as u64; assert!(num_shreds > 1); @@ -6007,7 +5988,6 @@ pub mod tests { 0, // slot 0, // parent_slot entries_per_slot, - true, // merkle_variant ); let shreds_per_slot = shreds.len() as u64; @@ -6042,7 +6022,6 @@ pub mod tests { slot, slot - 1, // parent_slot entries_per_slot, - true, // merkle_variant ); let missing_shred = slot_shreds.remove(slot as usize - 1); shreds.extend(slot_shreds); @@ -6087,8 +6066,7 @@ pub mod tests { let entries_per_slot = 10; // Create shreds for slot 0 - let (mut shreds, _) = - make_slot_entries(0, 0, entries_per_slot, /*merkle_variant:*/ true); + let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot); let shred0 = shreds.remove(0); // Insert all but the first shred in the slot, should not be considered complete @@ -6167,7 +6145,6 @@ pub mod tests { disconnected_slot, 1, // parent_slot entries_per_slot, - true, // merkle_variant ); let mut all_shreds: Vec<_> = 
vec![shreds0, shreds1, shreds2, shreds3] @@ -6420,12 +6397,7 @@ pub mod tests { } else { slot.saturating_sub(1) }; - let (shreds, _) = make_slot_entries( - slot, - parent_slot, - entries_per_slot, - true, // merkle_variant - ); + let (shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot); shreds.into_iter() }) .collect(); @@ -6712,8 +6684,8 @@ pub mod tests { // Write some slot that also chains to existing slots and orphan, // nothing should change - let (shred4, _) = make_slot_entries(4, 0, 1, /*merkle_variant:*/ true); - let (shred5, _) = make_slot_entries(5, 1, 1, /*merkle_variant:*/ true); + let (shred4, _) = make_slot_entries(4, 0, 1); + let (shred5, _) = make_slot_entries(5, 1, 1); blockstore.insert_shreds(shred4, None, false).unwrap(); blockstore.insert_shreds(shred5, None, false).unwrap(); assert_eq!( @@ -6744,7 +6716,7 @@ pub mod tests { let mut shreds = vec![]; for slot in 0..num_slots { let parent_slot = slot.saturating_sub(1); - let (slot_shreds, entry) = make_slot_entries(slot, parent_slot, 1, true); + let (slot_shreds, entry) = make_slot_entries(slot, parent_slot, 1); shreds.extend(slot_shreds); entries.extend(entry); } @@ -7249,7 +7221,7 @@ pub mod tests { #[test] fn test_is_data_shred_present() { - let (shreds, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(0, 0, 200); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let index_cf = &blockstore.index_cf; @@ -7743,7 +7715,7 @@ pub mod tests { #[test] fn test_insert_multiple_is_last() { solana_logger::setup(); - let (shreds, _) = make_slot_entries(0, 0, 18, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(0, 0, 18); let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -7756,7 +7728,7 @@ pub mod tests { assert_eq!(slot_meta.last_index, Some(num_shreds 
- 1)); assert!(slot_meta.is_full()); - let (shreds, _) = make_slot_entries(0, 0, 600, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(0, 0, 600); assert!(shreds.len() > num_shreds as usize); blockstore.insert_shreds(shreds, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); @@ -7946,7 +7918,7 @@ pub mod tests { fn test_no_insert_but_modify_slot_meta() { // This tests correctness of the SlotMeta in various cases in which a shred // that gets filtered out by checks - let (shreds0, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ true); + let (shreds0, _) = make_slot_entries(0, 0, 200); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -7958,8 +7930,8 @@ pub mod tests { // Insert a repetitive shred for slot 's', should get ignored, but also // insert shreds that chains to 's', should see the update in the SlotMeta // for 's'. - let (mut shreds2, _) = make_slot_entries(2, 0, 200, /*merkle_variant:*/ true); - let (mut shreds3, _) = make_slot_entries(3, 0, 200, /*merkle_variant:*/ true); + let (mut shreds2, _) = make_slot_entries(2, 0, 200); + let (mut shreds3, _) = make_slot_entries(3, 0, 200); shreds2.push(shreds0[1].clone()); shreds3.insert(0, shreds0[1].clone()); blockstore.insert_shreds(shreds2, None, false).unwrap(); @@ -7976,7 +7948,7 @@ pub mod tests { let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Make shred for slot 1 - let (shreds1, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true); + let (shreds1, _) = make_slot_entries(1, 0, 1); let max_root = 100; blockstore.set_roots(std::iter::once(&max_root)).unwrap(); @@ -9302,7 +9274,7 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let (shreds, _) = make_slot_entries(1, 0, 4, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(1, 0, 4); blockstore.insert_shreds(shreds, 
None, false).unwrap(); fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec { @@ -9989,7 +9961,7 @@ pub mod tests { assert_eq!(blockstore.lowest_slot(), 0); for slot in 0..10 { - let (shreds, _) = make_slot_entries(slot, 0, 1, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(slot, 0, 1); blockstore.insert_shreds(shreds, None, false).unwrap(); } assert_eq!(blockstore.lowest_slot(), 1); @@ -10005,7 +9977,7 @@ pub mod tests { assert_eq!(blockstore.highest_slot().unwrap(), None); for slot in 0..10 { - let (shreds, _) = make_slot_entries(slot, 0, 1, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(slot, 0, 1); blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!(blockstore.highest_slot().unwrap(), Some(slot)); } @@ -10729,8 +10701,7 @@ pub mod tests { // Create enough entries to ensure there are at least two shreds created let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1; - let (mut original_shreds, original_entries) = - make_slot_entries(0, 0, num_unique_entries, /*merkle_variant:*/ true); + let (mut original_shreds, original_entries) = make_slot_entries(0, 0, num_unique_entries); let mut duplicate_shreds = original_shreds.clone(); // Mutate signature so that payloads are not the same as the originals. 
for shred in &mut duplicate_shreds { diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index c5ce36084ce972..bfd4462e58e316 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -1094,9 +1094,9 @@ pub mod tests { let (shreds, _) = make_many_slot_entries(0, 10, 5); blockstore.insert_shreds(shreds, None, false).unwrap(); - let (slot_11, _) = make_slot_entries(11, 4, 5, true); + let (slot_11, _) = make_slot_entries(11, 4, 5); blockstore.insert_shreds(slot_11, None, false).unwrap(); - let (slot_12, _) = make_slot_entries(12, 5, 5, true); + let (slot_12, _) = make_slot_entries(12, 5, 5); blockstore.insert_shreds(slot_12, None, false).unwrap(); blockstore.purge_slot_cleanup_chaining(5).unwrap(); diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs index 7a51d1582cd3c7..af63c76d26c86e 100644 --- a/ledger/src/leader_schedule_cache.rs +++ b/ledger/src/leader_schedule_cache.rs @@ -446,7 +446,7 @@ mod tests { // Write a shred into slot 2 that chains to slot 1, // but slot 1 is empty so should not be skipped - let (shreds, _) = make_slot_entries(2, 1, 1, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(2, 1, 1); blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!( cache @@ -457,7 +457,7 @@ mod tests { ); // Write a shred into slot 1 - let (shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true); + let (shreds, _) = make_slot_entries(1, 0, 1); // Check that slot 1 and 2 are skipped blockstore.insert_shreds(shreds, None, false).unwrap();