From 6b566ba9cd58a4cb08015744f9db1236ff2f2458 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sat, 14 Jun 2025 22:01:13 -0500 Subject: [PATCH 001/124] geyser: update to `ReplicaTransactionInfoV3` (#6515) * geyser: update to ReplicaTransactionInfoV3 * add signature back --- Cargo.lock | 2 + geyser-plugin-interface/Cargo.toml | 1 + .../src/geyser_plugin_interface.rs | 27 ++++++++++++- geyser-plugin-manager/Cargo.toml | 1 + .../src/transaction_notifier.rs | 24 ++++++++---- programs/sbf/Cargo.lock | 2 + rpc/src/transaction_notifier_interface.rs | 8 ++-- rpc/src/transaction_status_service.rs | 39 +++++++++++-------- svm/examples/Cargo.lock | 2 + 9 files changed, 78 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92772e1ce6b17d..16aeb51bf134db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,6 +151,7 @@ version = "3.0.0" dependencies = [ "log", "solana-clock", + "solana-hash", "solana-signature", "solana-transaction", "solana-transaction-status", @@ -8485,6 +8486,7 @@ dependencies = [ "solana-accounts-db", "solana-clock", "solana-entry", + "solana-hash", "solana-ledger", "solana-measure", "solana-metrics", diff --git a/geyser-plugin-interface/Cargo.toml b/geyser-plugin-interface/Cargo.toml index 6a39745cab0c6c..dcd59b815833e4 100644 --- a/geyser-plugin-interface/Cargo.toml +++ b/geyser-plugin-interface/Cargo.toml @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, features = ["std"] } solana-clock = { workspace = true } +solana-hash = { workspace = true } solana-signature = { workspace = true } solana-transaction = { workspace = true } solana-transaction-status = { workspace = true } diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index bdf8694f534388..ca6a7892aac26a 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -4,8 +4,9 @@ /// creates 
the implementation of the plugin. use { solana_clock::{Slot, UnixTimestamp}, + solana_hash::Hash, solana_signature::Signature, - solana_transaction::sanitized::SanitizedTransaction, + solana_transaction::{sanitized::SanitizedTransaction, versioned::VersionedTransaction}, solana_transaction_status::{Reward, RewardsAndNumPartitions, TransactionStatusMeta}, std::{any::Any, error, io}, thiserror::Error, @@ -157,6 +158,29 @@ pub struct ReplicaTransactionInfoV2<'a> { pub index: usize, } +/// Information about a transaction, including index in block +#[derive(Clone, Debug)] +#[repr(C)] +pub struct ReplicaTransactionInfoV3<'a> { + /// The transaction signature, used for identifying the transaction. + pub signature: &'a Signature, + + /// The transaction message hash, used for identifying the transaction. + pub message_hash: &'a Hash, + + /// Indicates if the transaction is a simple vote transaction. + pub is_vote: bool, + + /// The versioned transaction. + pub transaction: &'a VersionedTransaction, + + /// Metadata of the transaction status. + pub transaction_status_meta: &'a TransactionStatusMeta, + + /// The transaction's index in the block + pub index: usize, +} + /// A wrapper to future-proof ReplicaTransactionInfo handling. 
/// If there were a change to the structure of ReplicaTransactionInfo, /// there would be new enum entry for the newer version, forcing @@ -165,6 +189,7 @@ pub struct ReplicaTransactionInfoV2<'a> { pub enum ReplicaTransactionInfoVersions<'a> { V0_0_1(&'a ReplicaTransactionInfo<'a>), V0_0_2(&'a ReplicaTransactionInfoV2<'a>), + V0_0_3(&'a ReplicaTransactionInfoV3<'a>), } #[derive(Clone, Debug)] diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index 2637384dc1a331..17e438af32915a 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -25,6 +25,7 @@ solana-account = { workspace = true } solana-accounts-db = { workspace = true } solana-clock = { workspace = true } solana-entry = { workspace = true } +solana-hash = { workspace = true } solana-ledger = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } diff --git a/geyser-plugin-manager/src/transaction_notifier.rs b/geyser-plugin-manager/src/transaction_notifier.rs index c9818cc1a8f7be..bd57c969ca9be5 100644 --- a/geyser-plugin-manager/src/transaction_notifier.rs +++ b/geyser-plugin-manager/src/transaction_notifier.rs @@ -2,15 +2,16 @@ use { crate::geyser_plugin_manager::GeyserPluginManager, agave_geyser_plugin_interface::geyser_plugin_interface::{ - ReplicaTransactionInfoV2, ReplicaTransactionInfoVersions, + ReplicaTransactionInfoV3, ReplicaTransactionInfoVersions, }, log::*, solana_clock::Slot, + solana_hash::Hash, solana_measure::measure::Measure, solana_metrics::*, solana_rpc::transaction_notifier_interface::TransactionNotifier, solana_signature::Signature, - solana_transaction::sanitized::SanitizedTransaction, + solana_transaction::versioned::VersionedTransaction, solana_transaction_status::TransactionStatusMeta, std::sync::{Arc, RwLock}, }; @@ -29,13 +30,17 @@ impl TransactionNotifier for TransactionNotifierImpl { slot: Slot, index: usize, signature: &Signature, + message_hash: &Hash, + is_vote: bool, 
transaction_status_meta: &TransactionStatusMeta, - transaction: &SanitizedTransaction, + transaction: &VersionedTransaction, ) { let mut measure = Measure::start("geyser-plugin-notify_plugins_of_transaction_info"); let transaction_log_info = Self::build_replica_transaction_info( index, signature, + message_hash, + is_vote, transaction_status_meta, transaction, ); @@ -51,7 +56,7 @@ impl TransactionNotifier for TransactionNotifierImpl { continue; } match plugin.notify_transaction( - ReplicaTransactionInfoVersions::V0_0_2(&transaction_log_info), + ReplicaTransactionInfoVersions::V0_0_3(&transaction_log_info), slot, ) { Err(err) => { @@ -87,13 +92,16 @@ impl TransactionNotifierImpl { fn build_replica_transaction_info<'a>( index: usize, signature: &'a Signature, + message_hash: &'a Hash, + is_vote: bool, transaction_status_meta: &'a TransactionStatusMeta, - transaction: &'a SanitizedTransaction, - ) -> ReplicaTransactionInfoV2<'a> { - ReplicaTransactionInfoV2 { + transaction: &'a VersionedTransaction, + ) -> ReplicaTransactionInfoV3<'a> { + ReplicaTransactionInfoV3 { index, + message_hash, signature, - is_vote: transaction.is_simple_vote_transaction(), + is_vote, transaction, transaction_status_meta, } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7be87b32c6893e..2ece25263e3993 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -89,6 +89,7 @@ version = "3.0.0" dependencies = [ "log", "solana-clock", + "solana-hash", "solana-signature", "solana-transaction", "solana-transaction-status", @@ -6537,6 +6538,7 @@ dependencies = [ "solana-accounts-db", "solana-clock", "solana-entry", + "solana-hash", "solana-ledger", "solana-measure", "solana-metrics", diff --git a/rpc/src/transaction_notifier_interface.rs b/rpc/src/transaction_notifier_interface.rs index 3151875da6643d..232ee64004dc81 100644 --- a/rpc/src/transaction_notifier_interface.rs +++ b/rpc/src/transaction_notifier_interface.rs @@ -1,6 +1,6 @@ use { - solana_clock::Slot, 
solana_signature::Signature, - solana_transaction::sanitized::SanitizedTransaction, + solana_clock::Slot, solana_hash::Hash, solana_signature::Signature, + solana_transaction::versioned::VersionedTransaction, solana_transaction_status::TransactionStatusMeta, std::sync::Arc, }; @@ -10,8 +10,10 @@ pub trait TransactionNotifier { slot: Slot, transaction_slot_index: usize, signature: &Signature, + message_hash: &Hash, + is_vote: bool, transaction_status_meta: &TransactionStatusMeta, - transaction: &SanitizedTransaction, + transaction: &VersionedTransaction, ); } diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index e62ddb16d3047e..0899d7e503df29 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -205,10 +205,16 @@ impl TransactionStatusService { }; if let Some(transaction_notifier) = transaction_notifier.as_ref() { + let is_vote = transaction.is_simple_vote_transaction(); + let message_hash = transaction.message_hash(); + let signature = transaction.signature(); + let transaction = transaction.to_versioned_transaction(); transaction_notifier.notify_transaction( slot, transaction_index, - transaction.signature(), + signature, + message_hash, + is_vote, &transaction_status_meta, &transaction, ); @@ -362,12 +368,12 @@ pub(crate) mod tests { struct TestNotifierKey { slot: Slot, transaction_index: usize, - signature: Signature, + message_hash: Hash, } struct TestNotification { _meta: TransactionStatusMeta, - transaction: SanitizedTransaction, + transaction: VersionedTransaction, } struct TestTransactionNotifier { @@ -387,15 +393,17 @@ pub(crate) mod tests { &self, slot: Slot, transaction_index: usize, - signature: &Signature, + _signature: &Signature, + message_hash: &Hash, + _is_vote: bool, transaction_status_meta: &TransactionStatusMeta, - transaction: &SanitizedTransaction, + transaction: &VersionedTransaction, ) { self.notifications.insert( TestNotifierKey { slot, 
transaction_index, - signature: *signature, + message_hash: *message_hash, }, TestNotification { _meta: transaction_status_meta.clone(), @@ -495,7 +503,7 @@ pub(crate) mod tests { }; let slot = bank.slot(); - let signature = *transaction.signature(); + let message_hash = *transaction.message_hash(); let transaction_index: usize = bank.transaction_count().try_into().unwrap(); let transaction_status_batch = TransactionStatusBatch { slot, @@ -529,14 +537,14 @@ pub(crate) mod tests { let key = TestNotifierKey { slot, transaction_index, - signature, + message_hash, }; assert!(test_notifier.notifications.contains_key(&key)); let result = test_notifier.notifications.get(&key).unwrap(); assert_eq!( expected_transaction.signature(), - result.transaction.signature() + result.transaction.signatures.first().unwrap() ); } @@ -633,12 +641,12 @@ pub(crate) mod tests { let key1 = TestNotifierKey { slot, transaction_index: transaction_index1, - signature: *expected_transaction1.signature(), + message_hash: *expected_transaction1.message_hash(), }; let key2 = TestNotifierKey { slot, transaction_index: transaction_index2, - signature: *expected_transaction2.signature(), + message_hash: *expected_transaction2.message_hash(), }; assert!(test_notifier.notifications.contains_key(&key1)); @@ -649,20 +657,19 @@ pub(crate) mod tests { assert_eq!( expected_transaction1.signature(), - result1.transaction.signature() + result1.transaction.signatures.first().unwrap() ); assert_eq!( expected_transaction1.message_hash(), - result1.transaction.message_hash() + &result1.transaction.message.hash(), ); - assert_eq!( expected_transaction2.signature(), - result2.transaction.signature() + result2.transaction.signatures.first().unwrap() ); assert_eq!( expected_transaction2.message_hash(), - result2.transaction.message_hash() + &result2.transaction.message.hash(), ); } } diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index a51fdf381a4e3b..ad7ad920176ecd 100644 --- a/svm/examples/Cargo.lock 
+++ b/svm/examples/Cargo.lock @@ -89,6 +89,7 @@ version = "3.0.0" dependencies = [ "log", "solana-clock", + "solana-hash", "solana-signature", "solana-transaction", "solana-transaction-status", @@ -6349,6 +6350,7 @@ dependencies = [ "solana-accounts-db", "solana-clock", "solana-entry", + "solana-hash", "solana-ledger", "solana-measure", "solana-metrics", From b98bdce6903c6f9ba378aca99f2fd44f15d20c18 Mon Sep 17 00:00:00 2001 From: FT <140458077+zeevick10@users.noreply.github.com> Date: Mon, 16 Jun 2025 04:54:16 +0200 Subject: [PATCH 002/124] chore: typo fixes (#6594) --- local-cluster/tests/local_cluster.rs | 2 +- storage-bigtable/proto/google.api.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 9314b662b5fcff..5c050914aa1217 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -3388,7 +3388,7 @@ fn do_test_lockout_violation_with_or_without_tower(with_tower: bool) { let a_blockstore = open_blockstore(&val_a_ledger_path); copy_blocks(next_slot_on_a, &b_blockstore, &a_blockstore, false); - // Purge uneccessary slots + // Purge unnecessary slots purge_slots_with_count(&a_blockstore, next_slot_on_a + 1, truncated_slots); } diff --git a/storage-bigtable/proto/google.api.rs b/storage-bigtable/proto/google.api.rs index b21805351dc52c..fd301b7a124c37 100644 --- a/storage-bigtable/proto/google.api.rs +++ b/storage-bigtable/proto/google.api.rs @@ -1208,7 +1208,7 @@ pub struct ResourceReference { /// The routing header consists of one or multiple key-value pairs. Every key /// and value must be percent-encoded, and joined together in the format of /// `key1=value1&key2=value2`. -/// In the examples below I am skipping the percent-encoding for readablity. +/// In the examples below I am skipping the percent-encoding for readability. 
/// /// Example 1 /// From 491660b8ff7c13cd90e4ca5d7966f511069c6b7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 19:49:19 +0800 Subject: [PATCH 003/124] build(deps): bump proptest from 1.6.0 to 1.7.0 (#6604) * build(deps): bump proptest from 1.6.0 to 1.7.0 Bumps [proptest](https://github.com/proptest-rs/proptest) from 1.6.0 to 1.7.0. - [Release notes](https://github.com/proptest-rs/proptest/releases) - [Changelog](https://github.com/proptest-rs/proptest/blob/main/CHANGELOG.md) - [Commits](https://github.com/proptest-rs/proptest/commits) --- updated-dependencies: - dependency-name: proptest dependency-version: 1.7.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 16aeb51bf134db..aae31a2b3a1916 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2613,7 +2613,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5216,17 +5216,17 @@ dependencies = [ [[package]] name = "proptest" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" dependencies = [ "bit-set", "bit-vec", "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand 0.9.0", + "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax", "rusty-fork", @@ -5563,11 
+5563,11 @@ dependencies = [ [[package]] name = "rand_xorshift" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.6.4", + "rand_core 0.9.3", ] [[package]] @@ -5924,7 +5924,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.2", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -6001,7 +6001,7 @@ dependencies = [ "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -12589,7 +12589,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.0.2", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 92e074a1c78c94..543edde8a870fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -325,7 +325,7 @@ predicates = "2.1" pretty-hex = "0.3.0" prio-graph = "0.3.0" proc-macro2 = "1.0.95" -proptest = "1.6" +proptest = "1.7" prost = "0.11.9" prost-build = "0.11.9" prost-types = "0.11.9" From de6ce29e1a7ecbdc6dc39527fce80beea404d314 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 19:49:31 +0800 Subject: [PATCH 004/124] build(deps): bump serde_with from 3.12.0 to 3.13.0 (#6603) * build(deps): bump serde_with from 3.12.0 to 3.13.0 Bumps [serde_with](https://github.com/jonasbb/serde_with) from 3.12.0 to 3.13.0. - [Release notes](https://github.com/jonasbb/serde_with/releases) - [Commits](https://github.com/jonasbb/serde_with/compare/v3.12.0...v3.13.0) --- updated-dependencies: - dependency-name: serde_with dependency-version: 3.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- svm/examples/Cargo.lock | 8 ++++---- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aae31a2b3a1916..f8ed0161bdf7e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6260,9 +6260,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" dependencies = [ "serde", "serde_derive", @@ -6271,9 +6271,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" dependencies = [ "darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 543edde8a870fe..cfe532be7baf6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -355,7 +355,7 @@ serde-big-array = "0.5.1" serde_bytes = "0.11.17" serde_derive = "1.0.219" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.140" -serde_with = { version = "3.12.0", default-features = false } +serde_with = { version = "3.13.0", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" sha2 = "0.10.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2ece25263e3993..97e0286611a8d8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5111,9 +5111,9 @@ dependencies = [ [[package]] name = "serde_with" -version 
= "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" dependencies = [ "serde", "serde_derive", @@ -5122,9 +5122,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" dependencies = [ "darling", "proc-macro2", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index ad7ad920176ecd..ea5cde2fab4b75 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -4959,9 +4959,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" dependencies = [ "serde", "serde_derive", @@ -4970,9 +4970,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" dependencies = [ "darling", "proc-macro2", From 8d235e71791081975eac936318c0a2449e249c64 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 16 Jun 2025 10:31:16 -0500 Subject: [PATCH 005/124] changelog: Move deprecated snapshot archive note to Breaking section (#6598) --- CHANGELOG.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4947c24cb2046c..66d0accd4fe231 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -28,8 +28,6 @@ Release channels have their own copy of this changelog: * `--replay-slots-concurrently` * `--rpc-pubsub-max-connections`, `--rpc-pubsub-max-fragment-size`, `--rpc-pubsub-max-in-buffer-capacity`, `--rpc-pubsub-max-out-buffer-capacity`, `--enable-cpi-and-log-storage`, `--minimal-rpc-api` * `--skip-poh-verify` - -#### Changes * Deprecated snapshot archive formats have been removed and are no longer loadable. ## 2.3.0 From c22b2b5fe47e587f3b9a4ece0977ad141e8cfd70 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 16 Jun 2025 10:31:32 -0500 Subject: [PATCH 006/124] readme: Update agave logo (#6597) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 89d821e088ce30..e1cb6c29c049bb 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

- Anza + Anza

From bbc6a474878a9e4e92fa60c9b5a7c35afd0d831c Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Mon, 16 Jun 2025 20:26:08 +0300 Subject: [PATCH 007/124] streamer: add docs to recv_mmsg (#6494) add docs to recv_mmsg --- streamer/src/packet.rs | 8 ++++++++ streamer/src/recvmmsg.rs | 12 ++++++++++++ 2 files changed, 20 insertions(+) diff --git a/streamer/src/packet.rs b/streamer/src/packet.rs index b0f43d3df0a01c..aa8212f3d9b776 100644 --- a/streamer/src/packet.rs +++ b/streamer/src/packet.rs @@ -18,6 +18,14 @@ pub use { }, }; +/** Receive multiple messages from `sock` into buffer provided in `batch`. +This is a wrapper around recvmmsg(7) call. + + This function is *supposed to* timeout in 1 second and *may* block forever + due to a bug in the linux kernel. + You may want to call `sock.set_read_timeout(Some(Duration::from_secs(1)));` or similar + prior to calling this function if you require this to actually time out after 1 second. +*/ pub(crate) fn recv_from( batch: &mut PinnedPacketBatch, socket: &UdpSocket, diff --git a/streamer/src/recvmmsg.rs b/streamer/src/recvmmsg.rs index 4f7190b572625d..238e57583eb7a0 100644 --- a/streamer/src/recvmmsg.rs +++ b/streamer/src/recvmmsg.rs @@ -78,6 +78,18 @@ fn cast_socket_addr(addr: &sockaddr_storage, hdr: &mmsghdr) -> Option io::Result { // Should never hit this, but bail if the caller didn't provide any Packets From ec3872b1f1b192bbafc8182b7c939857031a8094 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 16 Jun 2025 13:41:16 -0400 Subject: [PATCH 008/124] Boxes rpc client_error::Error's ErrorKind field (#6293) --- rpc-client-api/src/client_error.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/rpc-client-api/src/client_error.rs b/rpc-client-api/src/client_error.rs index 49f2cad8474d8d..fbe2fcb46b93c0 100644 --- a/rpc-client-api/src/client_error.rs +++ b/rpc-client-api/src/client_error.rs @@ -77,14 +77,14 @@ pub struct Error { pub request: Option, #[source] - pub kind: 
ErrorKind, + pub kind: Box, } impl Error { pub fn new_with_request(kind: ErrorKind, request: request::RpcRequest) -> Self { Self { request: Some(request), - kind, + kind: Box::new(kind), } } @@ -100,7 +100,7 @@ impl Error { } pub fn kind(&self) -> &ErrorKind { - &self.kind + self.kind.as_ref() } pub fn get_transaction_error(&self) -> Option { @@ -112,7 +112,7 @@ impl From for Error { fn from(kind: ErrorKind) -> Self { Self { request: None, - kind, + kind: Box::new(kind), } } } @@ -121,14 +121,14 @@ impl From for Error { fn from(err: TransportError) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } impl From for TransportError { fn from(client_error: Error) -> Self { - client_error.kind.into() + (*client_error.kind).into() } } @@ -136,7 +136,7 @@ impl From for Error { fn from(err: std::io::Error) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } @@ -145,7 +145,7 @@ impl From for Error { fn from(err: reqwest::Error) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } @@ -158,7 +158,7 @@ impl From for Error { }; Self { request: None, - kind, + kind: Box::new(kind), } } } @@ -167,7 +167,7 @@ impl From for Error { fn from(err: request::RpcError) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } @@ -176,7 +176,7 @@ impl From for Error { fn from(err: serde_json::error::Error) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } @@ -185,7 +185,7 @@ impl From for Error { fn from(err: SignerError) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } @@ -194,7 +194,7 @@ impl From for Error { fn from(err: TransactionError) -> Self { Self { request: None, - kind: err.into(), + kind: Box::new(err.into()), } } } From 5d393904246819bac30f942aee15eb93e4179948 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Mon, 16 Jun 2025 
13:38:07 -0500 Subject: [PATCH 009/124] Add a TODO for "solBnkNewFlds" threadpool (#6584) add todo comments --- runtime/src/bank.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b1b6cffee88417..0beabad6127ba7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1886,6 +1886,13 @@ impl Bank { bank.transaction_processor = TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch); + // TODO: Only create the thread pool if we need to recalculate rewards, + // i.e. epoch_reward_status is active. Currently, this thread pool is + // always created and used for recalculate_partitioned_rewards and + // lt_hash calculation. Once lt_hash feature is active, lt_hash won't + // need the thread pool. Thereby, after lt_hash feature activation, we + // can change to create the thread pool only when we need to recalculate + // rewards. let thread_pool = ThreadPoolBuilder::new() .thread_name(|i| format!("solBnkNewFlds{i:02}")) .build() From 520f85a56cbe1288c69016cd9ed55e77a042f979 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Mon, 16 Jun 2025 13:42:21 -0500 Subject: [PATCH 010/124] send-transaction-service: use message hash and blockhash to look up committed tx status (#6585) * sts: Use message hash to look up tx commitment status * feedback --- banks-server/src/banks_server.rs | 6 + rpc/src/rpc.rs | 15 ++- runtime/src/bank.rs | 11 ++ .../src/send_transaction_service.rs | 127 ++++++++++++++---- 4 files changed, 134 insertions(+), 25 deletions(-) diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index d5a1e0dfa938d6..c273e77f381e81 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -209,6 +209,7 @@ fn simulate_transaction( #[tarpc::server] impl Banks for BanksServer { async fn send_transaction_with_context(self, _: Context, transaction: VersionedTransaction) { + let message_hash = transaction.message.hash(); let blockhash = 
transaction.message.recent_blockhash(); let last_valid_block_height = self .bank_forks @@ -219,7 +220,9 @@ impl Banks for BanksServer { .unwrap(); let signature = transaction.signatures.first().cloned().unwrap_or_default(); let info = TransactionInfo::new( + message_hash, signature, + *blockhash, serialize(&transaction).unwrap(), last_valid_block_height, None, @@ -322,6 +325,7 @@ impl Banks for BanksServer { return Some(Err(err)); } + let message_hash = sanitized_transaction.message_hash(); let blockhash = transaction.message.recent_blockhash(); let last_valid_block_height = self .bank(commitment) @@ -329,7 +333,9 @@ impl Banks for BanksServer { .unwrap(); let signature = sanitized_transaction.signature(); let info = TransactionInfo::new( + *message_hash, *signature, + *blockhash, serialize(&transaction).unwrap(), last_valid_block_height, None, diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 1464a290e6c943..b1193802da6b60 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -2683,14 +2683,18 @@ fn get_token_program_id_and_mint( fn _send_transaction( meta: JsonRpcRequestProcessor, + message_hash: Hash, signature: Signature, + blockhash: Hash, wire_transaction: Vec, last_valid_block_height: u64, durable_nonce_info: Option<(Pubkey, Hash)>, max_retries: Option, ) -> Result { let transaction_info = TransactionInfo::new( + message_hash, signature, + blockhash, wire_transaction, last_valid_block_height, durable_nonce_info, @@ -3800,6 +3804,7 @@ pub mod rpc_full { Error::internal_error() })?; + let message_hash = transaction.message().hash(); let signature = if !transaction.signatures.is_empty() { transaction.signatures[0] } else { @@ -3808,7 +3813,9 @@ pub mod rpc_full { _send_transaction( meta, + message_hash, signature, + blockhash, wire_transaction, last_valid_block_height, None, @@ -3854,15 +3861,17 @@ pub mod rpc_full { preflight_bank, preflight_bank.get_reserved_account_keys(), )?; + let blockhash = *transaction.message().recent_blockhash(); + let message_hash = 
*transaction.message_hash(); let signature = *transaction.signature(); let mut last_valid_block_height = preflight_bank - .get_blockhash_last_valid_block_height(transaction.message().recent_blockhash()) + .get_blockhash_last_valid_block_height(&blockhash) .unwrap_or(0); let durable_nonce_info = transaction .get_durable_nonce() - .map(|&pubkey| (pubkey, *transaction.message().recent_blockhash())); + .map(|&pubkey| (pubkey, blockhash)); if durable_nonce_info.is_some() || (skip_preflight && last_valid_block_height == 0) { // While it uses a defined constant, this last_valid_block_height value is chosen arbitrarily. // It provides a fallback timeout for durable-nonce transaction retries in case of @@ -3931,7 +3940,9 @@ pub mod rpc_full { _send_transaction( meta, + message_hash, signature, + blockhash, wire_transaction, last_valid_block_height, durable_nonce_info, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0beabad6127ba7..5733634fa3de9f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5273,6 +5273,17 @@ impl Bank { .map(|v| v.1) } + pub fn get_committed_transaction_status_and_slot( + &self, + message_hash: &Hash, + transaction_blockhash: &Hash, + ) -> Option<(Slot, bool)> { + let rcache = self.status_cache.read().unwrap(); + rcache + .get_status(message_hash, transaction_blockhash, &self.ancestors) + .map(|(slot, status)| (slot, status.is_ok())) + } + pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> { let rcache = self.status_cache.read().unwrap(); rcache.get_status_any_blockhash(signature, &self.ancestors) diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 460610890edc17..f4954e5e0cceb8 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -69,7 +69,9 @@ pub struct SendTransactionService { } pub struct TransactionInfo { + pub 
message_hash: Hash, pub signature: Signature, + pub blockhash: Hash, pub wire_transaction: Vec, pub last_valid_block_height: u64, pub durable_nonce_info: Option<(Pubkey, Hash)>, @@ -81,7 +83,9 @@ pub struct TransactionInfo { impl TransactionInfo { pub fn new( + message_hash: Hash, signature: Signature, + blockhash: Hash, wire_transaction: Vec, last_valid_block_height: u64, durable_nonce_info: Option<(Pubkey, Hash)>, @@ -89,7 +93,9 @@ impl TransactionInfo { last_sent_time: Option, ) -> Self { Self { + message_hash, signature, + blockhash, wire_transaction, last_valid_block_height, durable_nonce_info, @@ -434,13 +440,22 @@ impl SendTransactionService { if transaction_info.durable_nonce_info.is_some() { stats.nonced_transactions.fetch_add(1, Ordering::Relaxed); } - if root_bank.has_signature(signature) { + if root_bank + .get_committed_transaction_status_and_slot( + &transaction_info.message_hash, + &transaction_info.blockhash, + ) + .is_some() + { info!("Transaction is rooted: {}", signature); result.rooted += 1; stats.rooted_transactions.fetch_add(1, Ordering::Relaxed); return false; } - let signature_status = working_bank.get_signature_status_slot(signature); + let signature_status = working_bank.get_committed_transaction_status_and_slot( + &transaction_info.message_hash, + &transaction_info.blockhash, + ); if let Some((nonce_pubkey, durable_nonce)) = transaction_info.durable_nonce_info { let nonce_account = working_bank.get_account(&nonce_pubkey).unwrap_or_default(); let now = Instant::now(); @@ -518,7 +533,7 @@ impl SendTransactionService { true } Some((_slot, status)) => { - if status.is_err() { + if !status { info!("Dropping failed transaction: {}", signature); result.failed += 1; stats.failed_transactions.fetch_add(1, Ordering::Relaxed); @@ -624,7 +639,9 @@ mod test { let (sender, receiver) = bounded(0); let dummy_tx_info = || TransactionInfo { + message_hash: Hash::default(), signature: Signature::default(), + blockhash: Hash::default(), wire_transaction: 
vec![0; 128], last_valid_block_height: 0, durable_nonce_info: None, @@ -690,9 +707,17 @@ mod test { .insert(root_bank) .clone_without_scheduler(); - let rooted_signature = root_bank - .transfer(1, &mint_keypair, &mint_keypair.pubkey()) - .unwrap(); + let (rooted_transaction, rooted_signature) = { + let transaction = system_transaction::transfer( + &mint_keypair, + &mint_keypair.pubkey(), + 1, + root_bank.last_blockhash(), + ); + root_bank.process_transaction(&transaction).unwrap(); + let signature = transaction.signatures[0]; + (transaction, signature) + }; let working_bank = bank_forks .write() @@ -704,17 +729,28 @@ mod test { )) .clone_without_scheduler(); - let non_rooted_signature = working_bank - .transfer(2, &mint_keypair, &mint_keypair.pubkey()) - .unwrap(); - - let failed_signature = { - let blockhash = working_bank.last_blockhash(); - let transaction = - system_transaction::transfer(&mint_keypair, &Pubkey::default(), 1, blockhash); + let (non_rooted_transaction, non_rooted_signature) = { + let transaction = system_transaction::transfer( + &mint_keypair, + &mint_keypair.pubkey(), + 2, + working_bank.last_blockhash(), + ); + working_bank.process_transaction(&transaction).unwrap(); let signature = transaction.signatures[0]; + (transaction, signature) + }; + + let (failed_transaction, failed_signature) = { + let transaction = system_transaction::transfer( + &mint_keypair, + &Pubkey::default(), + 1, + working_bank.last_blockhash(), + ); working_bank.process_transaction(&transaction).unwrap_err(); - signature + let signature = transaction.signatures[0]; + (transaction, signature) }; let mut transactions = HashMap::new(); @@ -724,7 +760,9 @@ mod test { transactions.insert( Signature::default(), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], root_bank.block_height() - 1, None, @@ -760,7 +798,9 @@ mod test { transactions.insert( rooted_signature, TransactionInfo::new( + rooted_transaction.message.hash(), rooted_signature, 
+ rooted_transaction.message.recent_blockhash, vec![], working_bank.block_height(), None, @@ -789,7 +829,9 @@ mod test { transactions.insert( failed_signature, TransactionInfo::new( + failed_transaction.message.hash(), failed_signature, + failed_transaction.message.recent_blockhash, vec![], working_bank.block_height(), None, @@ -818,7 +860,9 @@ mod test { transactions.insert( non_rooted_signature, TransactionInfo::new( + non_rooted_transaction.message.hash(), non_rooted_signature, + non_rooted_transaction.message.recent_blockhash, vec![], working_bank.block_height(), None, @@ -848,7 +892,9 @@ mod test { transactions.insert( Signature::default(), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], working_bank.block_height(), None, @@ -879,7 +925,9 @@ mod test { transactions.insert( Signature::from([1; 64]), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], working_bank.block_height(), None, @@ -890,7 +938,9 @@ mod test { transactions.insert( Signature::from([2; 64]), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], working_bank.block_height(), None, @@ -948,9 +998,17 @@ mod test { .insert(root_bank) .clone_without_scheduler(); - let rooted_signature = root_bank - .transfer(1, &mint_keypair, &mint_keypair.pubkey()) - .unwrap(); + let (rooted_transaction, rooted_signature) = { + let transaction = system_transaction::transfer( + &mint_keypair, + &mint_keypair.pubkey(), + 1, + root_bank.last_blockhash(), + ); + root_bank.process_transaction(&transaction).unwrap(); + let signature = transaction.signatures[0]; + (transaction, signature) + }; let nonce_address = Pubkey::new_unique(); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); @@ -970,19 +1028,28 @@ mod test { 2, )) .clone_without_scheduler(); - let non_rooted_signature = working_bank - .transfer(2, &mint_keypair, &mint_keypair.pubkey()) - .unwrap(); + + let 
(non_rooted_transaction, non_rooted_signature) = { + let transaction = system_transaction::transfer( + &mint_keypair, + &mint_keypair.pubkey(), + 2, + working_bank.last_blockhash(), + ); + let signature = transaction.signatures[0]; + working_bank.process_transaction(&transaction).unwrap(); + (transaction, signature) + }; let last_valid_block_height = working_bank.block_height() + 300; - let failed_signature = { + let (failed_transaction, failed_signature) = { let blockhash = working_bank.last_blockhash(); let transaction = system_transaction::transfer(&mint_keypair, &Pubkey::default(), 1, blockhash); let signature = transaction.signatures[0]; working_bank.process_transaction(&transaction).unwrap_err(); - signature + (transaction, signature) }; let mut transactions = HashMap::new(); @@ -991,7 +1058,9 @@ mod test { transactions.insert( rooted_signature, TransactionInfo::new( + rooted_transaction.message.hash(), rooted_signature, + rooted_transaction.message.recent_blockhash, vec![], last_valid_block_height, Some((nonce_address, *durable_nonce.as_hash())), @@ -1026,7 +1095,9 @@ mod test { transactions.insert( rooted_signature, TransactionInfo::new( + rooted_transaction.message.hash(), rooted_signature, + rooted_transaction.message.recent_blockhash, vec![], last_valid_block_height, Some((nonce_address, Hash::new_unique())), @@ -1056,7 +1127,9 @@ mod test { transactions.insert( Signature::default(), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], last_valid_block_height, Some((nonce_address, Hash::new_unique())), @@ -1084,7 +1157,9 @@ mod test { transactions.insert( Signature::default(), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], root_bank.block_height() - 1, Some((nonce_address, *durable_nonce.as_hash())), @@ -1113,7 +1188,9 @@ mod test { transactions.insert( failed_signature, TransactionInfo::new( + failed_transaction.message.hash(), failed_signature, + 
failed_transaction.message.recent_blockhash, vec![], last_valid_block_height, Some((nonce_address, Hash::new_unique())), // runtime should advance nonce on failed transactions @@ -1142,7 +1219,9 @@ mod test { transactions.insert( non_rooted_signature, TransactionInfo::new( + non_rooted_transaction.message.hash(), non_rooted_signature, + non_rooted_transaction.message.recent_blockhash, vec![], last_valid_block_height, Some((nonce_address, Hash::new_unique())), // runtime advances nonce when transaction lands @@ -1173,7 +1252,9 @@ mod test { transactions.insert( Signature::default(), TransactionInfo::new( + Hash::default(), Signature::default(), + Hash::default(), vec![], last_valid_block_height, Some((nonce_address, *durable_nonce.as_hash())), From 51c33c2f385552e6e710f54cfd66fc23a2a6f7f3 Mon Sep 17 00:00:00 2001 From: Mircea Colonescu Date: Mon, 16 Jun 2025 14:55:14 -0400 Subject: [PATCH 011/124] Remove conditional feature as it is not supported (#6577) * remove condition as it is not supported * remove useless whitespace to fix sort --- svm/Cargo.toml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/svm/Cargo.toml b/svm/Cargo.toml index a3888b5f9bb80f..1980c3be9dc541 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -75,15 +75,12 @@ solana-svm-transaction = { workspace = true } solana-system-interface = { workspace = true } solana-sysvar-id = { workspace = true } solana-timings = { workspace = true } -solana-transaction-context = { workspace = true } +solana-transaction-context = { workspace = true, features = ["debug-signature"] } solana-transaction-error = { workspace = true } solana-type-overrides = { workspace = true } spl-generic-token = { workspace = true } thiserror = { workspace = true } -[target.'cfg(debug_assertions)'.dependencies] -solana-transaction-context = { workspace = true, features = ["debug-signature"] } - [dev-dependencies] agave-feature-set = { workspace = true } agave-reserved-account-keys = { workspace = true } From 
d849be8cb2b509eae5d703dd9fb9e5e4e78f237c Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Mon, 16 Jun 2025 13:11:45 -0700 Subject: [PATCH 012/124] Airgap`TransactionError` type from blockstore (#6434) * Add `StoredTransaction` * Nits from review --- Cargo.lock | 1 + storage-proto/Cargo.toml | 1 + storage-proto/src/convert.rs | 16 ++++- storage-proto/src/lib.rs | 119 ++++++++++++++++++++++++++++++++++- 4 files changed, 135 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f8ed0161bdf7e8..2b5c70dc67403d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10670,6 +10670,7 @@ dependencies = [ "solana-transaction-context", "solana-transaction-error", "solana-transaction-status", + "test-case", "tonic-build", ] diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml index e617cddcb82c96..23f166efaa444e 100644 --- a/storage-proto/Cargo.toml +++ b/storage-proto/Cargo.toml @@ -43,3 +43,4 @@ protobuf-src = { workspace = true } [dev-dependencies] enum-iterator = { workspace = true } +test-case = { workspace = true } diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index cd9372c8dc58ca..7d5fd96d589ffc 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -1,5 +1,5 @@ use { - crate::{StoredExtendedRewards, StoredTransactionStatusMeta}, + crate::{StoredExtendedRewards, StoredTransactionError, StoredTransactionStatusMeta}, solana_account_decoder::parse_token::{real_number_string_trimmed, UiTokenAmount}, solana_hash::{Hash, HASH_BYTES}, solana_instruction::error::InstructionError, @@ -286,6 +286,20 @@ impl From for VersionedTransaction { } } +impl From for generated::TransactionError { + fn from(value: TransactionError) -> Self { + let stored_error = StoredTransactionError::from(value).0; + Self { err: stored_error } + } +} + +impl From for TransactionError { + fn from(value: generated::TransactionError) -> Self { + let stored_error = StoredTransactionError(value.err); + stored_error.into() + } +} + impl 
From for generated::Message { fn from(message: LegacyMessage) -> Self { Self { diff --git a/storage-proto/src/lib.rs b/storage-proto/src/lib.rs index b2f22a25b2ff72..c76b46e1bc4b51 100644 --- a/storage-proto/src/lib.rs +++ b/storage-proto/src/lib.rs @@ -7,7 +7,7 @@ use { solana_message::v0::LoadedAddresses, solana_serde::default_on_eof, solana_transaction_context::TransactionReturnData, - solana_transaction_error::TransactionResult as Result, + solana_transaction_error::{TransactionError, TransactionResult as Result}, solana_transaction_status::{ InnerInstructions, Reward, RewardType, TransactionStatusMeta, TransactionTokenBalance, }, @@ -109,6 +109,22 @@ impl From for StoredTokenAmount { } } +struct StoredTransactionError(Vec); + +impl From for TransactionError { + fn from(value: StoredTransactionError) -> Self { + let bytes = value.0; + bincode::deserialize(&bytes).expect("transaction error to deserialize from bytes") + } +} + +impl From for StoredTransactionError { + fn from(value: TransactionError) -> Self { + let bytes = bincode::serialize(&value).expect("transaction error to serialize to bytes"); + StoredTransactionError(bytes) + } +} + #[derive(Serialize, Deserialize)] pub struct StoredTransactionTokenBalance { pub account_index: u8, @@ -265,3 +281,104 @@ impl TryFrom for StoredTransactionStatusMeta { }) } } + +#[cfg(test)] +mod tests { + use { + crate::StoredTransactionError, solana_instruction::error::InstructionError, + solana_transaction_error::TransactionError, test_case::test_case, + }; + + #[test_case(TransactionError::InsufficientFundsForFee; "Named variant error")] + #[test_case(TransactionError::InsufficientFundsForRent { account_index: 42 }; "Struct variant error")] + #[test_case(TransactionError::DuplicateInstruction(42); "Single-value tuple variant error")] + #[test_case(TransactionError::InstructionError(42, InstructionError::Custom(0xdeadbeef)); "`InstructionError`")] + fn 
test_serialize_transaction_error_to_stored_transaction_error_round_trip( + err: TransactionError, + ) { + let serialized: StoredTransactionError = err.clone().into(); + let deserialized: TransactionError = serialized.into(); + assert_eq!(deserialized, err); + } + + #[test_case( + vec![4, 0, 0, 0, /* Fourth enum variant - `InsufficientFundsForFee` */], + TransactionError::InsufficientFundsForFee; + "Named variant error" + )] + #[test_case( + vec![ + 31, 0, 0, 0, /* Thirty-first enum variant - `InsufficientFundsForRent` */ + 42, /* Account index */ + ], + TransactionError::InsufficientFundsForRent { account_index: 42 }; + "Struct variant error" + )] + #[test_case( + vec![ + 30, 0, 0, 0, /* Thirtieth enum variant - `DuplicateInstruction` */ + 42, /* Instruction index */ + ], + TransactionError::DuplicateInstruction(42); + "Single-value tuple variant error" + )] + #[test_case( + vec![ + 8, 0, 0, 0, /* Eighth enum variant - `InstructionError` */ + 42, /* Outer instruction index */ + 25, 0, 0, 0, /* InstructionError::Custom */ + /* 0xdeadbeef */ + 239, 190, 173, 222, + ], + TransactionError::InstructionError(42, InstructionError::Custom(0xdeadbeef)); + "`InstructionError`" + )] + fn test_deserialize_stored_transaction_error( + stored_bytes: Vec, + expected_transaction_error: TransactionError, + ) { + let stored_transaction = StoredTransactionError(stored_bytes); + let deserialized: TransactionError = stored_transaction.into(); + assert_eq!(deserialized, expected_transaction_error); + } + + #[test_case( + vec![4, 0, 0, 0, /* Fourth enum variant - `InsufficientFundsForFee` */], + TransactionError::InsufficientFundsForFee; + "Named variant error" + )] + #[test_case( + vec![ + 31, 0, 0, 0, /* Thirty-first enum variant - `InsufficientFundsForRent` */ + 42, /* Account index */ + ], + TransactionError::InsufficientFundsForRent { account_index: 42 }; + "Struct variant error" + )] + #[test_case( + vec![ + 30, 0, 0, 0, /* Thirtieth enum variant - `DuplicateInstruction` */ + 42, /* 
Instruction index */ + ], + TransactionError::DuplicateInstruction(42); + "Single-value tuple variant error" + )] + #[test_case( + vec![ + 8, 0, 0, 0, /* Eighth enum variant - `InstructionError` */ + 42, /* Outer instruction index */ + 25, 0, 0, 0, /* InstructionError::Custom */ + /* 0xdeadbeef */ + 239, 190, 173, 222, + ], + TransactionError::InstructionError(42, InstructionError::Custom(0xdeadbeef)); + "`InstructionError`" + )] + fn test_seserialize_stored_transaction_error( + expected_serialized_bytes: Vec, + transaction_error: TransactionError, + ) { + let StoredTransactionError(serialized_bytes) = transaction_error.into(); + assert_eq!(serialized_bytes, expected_serialized_bytes); + } +} From 40c7522a754eb44fb0b43450e4a249086edab1e2 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 16 Jun 2025 17:55:05 -0400 Subject: [PATCH 013/124] cli: Full snapshot interval doesn't need to be a multiple of the incremental (#6595) --- validator/src/commands/run/args.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index 8494dc6553efe9..e8120875f9a86b 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -444,8 +444,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .default_value(&default_args.full_snapshot_archive_interval_slots) .help("Number of slots between generating full snapshots") .long_help( - "Number of slots between generating full snapshots. Must be a multiple of the \ - incremental snapshot interval. Only used when incremental snapshots are enabled.", + "Number of slots between generating full snapshots. \ + Only used when incremental snapshots are enabled. 
\ + Must be greater than the incremental snapshot interval.", ), ) .arg( From ffc8065f259acb108e13633752d0f011e04ddede Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 16 Jun 2025 18:54:51 -0400 Subject: [PATCH 014/124] cli: Snapshot interval must be greater than zero (#6596) --- CHANGELOG.md | 1 + clap-utils/src/input_validators.rs | 9 +++++++++ validator/src/commands/run/args.rs | 11 +++++++---- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66d0accd4fe231..6f14b18312c58e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ Release channels have their own copy of this changelog: * `--rpc-pubsub-max-connections`, `--rpc-pubsub-max-fragment-size`, `--rpc-pubsub-max-in-buffer-capacity`, `--rpc-pubsub-max-out-buffer-capacity`, `--enable-cpi-and-log-storage`, `--minimal-rpc-api` * `--skip-poh-verify` * Deprecated snapshot archive formats have been removed and are no longer loadable. +* Using `--snapshot-interval-slots 0` to disable generating snapshots has been removed. Use `--no-snapshots` instead. 
## 2.3.0 diff --git a/clap-utils/src/input_validators.rs b/clap-utils/src/input_validators.rs index 19b1be8b9f7aed..94c5644c66dfa3 100644 --- a/clap-utils/src/input_validators.rs +++ b/clap-utils/src/input_validators.rs @@ -449,6 +449,15 @@ where .map_err(|e| format!("{err_prefix} {e}")) } +pub fn is_non_zero(value: impl AsRef) -> Result<(), String> { + let value = value.as_ref(); + if value.eq("0") { + Err(String::from("cannot be zero")) + } else { + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index e8120875f9a86b..da86b24d0ddd4d 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -4,8 +4,8 @@ use { solana_clap_utils::{ hidden_unless_forced, input_validators::{ - is_keypair_or_ask_keyword, is_parsable, is_pow2, is_pubkey, is_pubkey_or_keypair, - is_slot, is_within_range, validate_cpu_ranges, + is_keypair_or_ask_keyword, is_non_zero, is_parsable, is_pow2, is_pubkey, + is_pubkey_or_keypair, is_slot, is_within_range, validate_cpu_ranges, validate_maximum_full_snapshot_archives_to_retain, validate_maximum_incremental_snapshot_archives_to_retain, }, @@ -428,12 +428,13 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .value_name("NUMBER") .takes_value(true) .default_value(&default_args.incremental_snapshot_archive_interval_slots) + .validator(is_non_zero) .help("Number of slots between generating snapshots") .long_help( "Number of slots between generating snapshots. \ If incremental snapshots are enabled, this sets the incremental snapshot interval. \ If incremental snapshots are disabled, this sets the full snapshot interval. 
\ - To disable all snapshot generation, see --no-snapshots.", + Must be greater than zero.", ), ) .arg( @@ -442,11 +443,13 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .value_name("NUMBER") .takes_value(true) .default_value(&default_args.full_snapshot_archive_interval_slots) + .validator(is_non_zero) .help("Number of slots between generating full snapshots") .long_help( "Number of slots between generating full snapshots. \ Only used when incremental snapshots are enabled. \ - Must be greater than the incremental snapshot interval.", + Must be greater than the incremental snapshot interval. \ + Must be greater than zero.", ), ) .arg( From 64a3969a993908772278ecd2ba6356f683ab4c98 Mon Sep 17 00:00:00 2001 From: Maxim Evtush <154841002+maximevtush@users.noreply.github.com> Date: Tue, 17 Jun 2025 08:40:55 +0300 Subject: [PATCH 015/124] chore: Typo fixes (#6599) --- install/src/config.rs | 2 +- zk-token-sdk/src/instruction/transfer/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/install/src/config.rs b/install/src/config.rs index bfc5e5665ed858..789ed9a12d17a0 100644 --- a/install/src/config.rs +++ b/install/src/config.rs @@ -77,7 +77,7 @@ impl Config { }) }); if result.is_err() { - eprintln!("config upgrade failed! restoring orignal"); + eprintln!("config upgrade failed! restoring original"); let restored = std::fs::copy(&bak_filename, config_file) .and_then(|_| std::fs::remove_file(&bak_filename)); if restored.is_err() { diff --git a/zk-token-sdk/src/instruction/transfer/mod.rs b/zk-token-sdk/src/instruction/transfer/mod.rs index 80d4b1861d173e..8efb9c06a10464 100644 --- a/zk-token-sdk/src/instruction/transfer/mod.rs +++ b/zk-token-sdk/src/instruction/transfer/mod.rs @@ -49,7 +49,7 @@ pub fn split_u64(amount: u64, bit_length: usize) -> (u64, u64) { } /// Takes in a 64-bit number `amount` and a bit length `bit_length`. 
It returns: -/// - the `bit_length` low bits of `amount` interpretted as u64 +/// - the `bit_length` low bits of `amount` interpreted as u64 /// - the `(64 - bit_length)` high bits of `amount` interpretted as u64 #[cfg(not(target_os = "solana"))] pub fn try_split_u64(amount: u64, bit_length: usize) -> Result<(u64, u64), InstructionError> { From 234afe489aa20a04a51b810213b945e297ef38c7 Mon Sep 17 00:00:00 2001 From: Bhaumik Maan <82998871+bhaumikmaan@users.noreply.github.com> Date: Tue, 17 Jun 2025 11:14:54 +0530 Subject: [PATCH 016/124] chore: Fix several crate documentation links (#6601) --- accounts-db/Cargo.toml | 2 +- rpc-client-nonce-utils/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index c0fb8e18e061db..a202c0233594de 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-accounts-db" description = "Solana accounts db" -documentation = "https://docs.rs/solana-acounts-db" +documentation = "https://docs.rs/solana-accounts-db" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/rpc-client-nonce-utils/Cargo.toml b/rpc-client-nonce-utils/Cargo.toml index 955a14300777da..85c732716af6a4 100644 --- a/rpc-client-nonce-utils/Cargo.toml +++ b/rpc-client-nonce-utils/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-rpc-client-nonce-utils" description = "Solana RPC Client Nonce Utilities" -documentation = "https://docs.rs/solana-nonce-client" +documentation = "https://docs.rs/solana-rpc-client-nonce-utils" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } From af69341ee2eaf4ed1d20ed9343f9b2f56ac7254a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 19:34:50 +0800 Subject: [PATCH 017/124] build(deps): bump libc from 0.2.172 to 0.2.173 (#6614) * build(deps): bump 
libc from 0.2.172 to 0.2.173 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.172 to 0.2.173. - [Release notes](https://github.com/rust-lang/libc/releases) - [Changelog](https://github.com/rust-lang/libc/blob/0.2.173/CHANGELOG.md) - [Commits](https://github.com/rust-lang/libc/compare/0.2.172...0.2.173) --- updated-dependencies: - dependency-name: libc dependency-version: 0.2.173 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b5c70dc67403d..d02de913d82c4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2613,7 +2613,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4099,9 +4099,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.173" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" [[package]] name = "libloading" @@ -5924,7 +5924,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.2", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6001,7 +6001,7 @@ dependencies = [ "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12590,7 +12590,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", 
"rustix 1.0.2", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index cfe532be7baf6d..59aca26a231a99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ jsonrpc-http-server = "18.0.0" jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" -libc = "0.2.172" +libc = "0.2.173" libloading = "0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 97e0286611a8d8..507191ca14df0c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3178,9 +3178,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.173" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" [[package]] name = "libloading" diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index ea5cde2fab4b75..ff1d790aa11c17 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -3105,9 +3105,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.173" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" [[package]] name = "libloading" From 64630de411e5a61e1f5d97ff66eafa5846575edd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 19:35:01 +0800 Subject: [PATCH 018/124] build(deps): bump derive-where from 1.4.0 to 1.5.0 (#6615) * build(deps): bump derive-where from 1.4.0 to 1.5.0 Bumps 
[derive-where](https://github.com/ModProg/derive-where) from 1.4.0 to 1.5.0. - [Release notes](https://github.com/ModProg/derive-where/releases) - [Changelog](https://github.com/ModProg/derive-where/blob/main/CHANGELOG.md) - [Commits](https://github.com/ModProg/derive-where/compare/v1.4.0...v1.5.0) --- updated-dependencies: - dependency-name: derive-where dependency-version: 1.5.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d02de913d82c4d..311396532395c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2296,9 +2296,9 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73f2692d4bd3cac41dca28934a39894200c9fabf49586d77d0e5954af1d7902" +checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 59aca26a231a99..8e2d7d4b920f52 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -241,7 +241,7 @@ ctrlc = "3.4.7" curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } -derive-where = "1.4.0" +derive-where = "1.5.0" derive_more = { version = "1.0.0", features = ["full"] } dialoguer = "0.10.4" digest = "0.10.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 507191ca14df0c..b0c980826b9380 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1533,9 +1533,9 @@ dependencies = [ [[package]] name = "derive-where" -version = 
"1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73f2692d4bd3cac41dca28934a39894200c9fabf49586d77d0e5954af1d7902" +checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index ff1d790aa11c17..6c42cc27cfed24 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -1425,9 +1425,9 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73f2692d4bd3cac41dca28934a39894200c9fabf49586d77d0e5954af1d7902" +checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", From 236cca050c7da4c0127ad88d1badfa71773c18a5 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Tue, 17 Jun 2025 09:40:07 -0500 Subject: [PATCH 019/124] TestValidator checks usability of programs added to genesis during startup (#6587) * TestValidator checks usability of programs added to genesis during startup * update lock tree * track programs that have been successfully deployed --- Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + svm/examples/Cargo.lock | 1 + test-validator/Cargo.toml | 1 + test-validator/src/lib.rs | 133 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 137 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 311396532395c9..9663dbab7b59e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11014,6 +11014,7 @@ dependencies = [ "solana-signer", "solana-streamer", "solana-tpu-client", + "solana-transaction", "solana-validator-exit", "tokio", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b0c980826b9380..d7c2330efde54b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -9306,6 +9306,7 @@ dependencies = [ "solana-signer", "solana-streamer", 
"solana-tpu-client", + "solana-transaction", "solana-validator-exit", "tokio", ] diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 6c42cc27cfed24..ed799bb2c49ed6 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -8406,6 +8406,7 @@ dependencies = [ "solana-signer", "solana-streamer", "solana-tpu-client", + "solana-transaction", "solana-validator-exit", "tokio", ] diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 513445e3a3ef66..f55bae37a7be63 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -52,6 +52,7 @@ solana-sdk-ids = { workspace = true } solana-signer = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } +solana-transaction = { workspace = true } solana-validator-exit = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 46ada12caa809f..9cd6fb499eaa58 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -54,6 +54,7 @@ use { solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, + solana_transaction::Transaction, solana_validator_exit::Exit, std::{ collections::{HashMap, HashSet}, @@ -707,6 +708,22 @@ impl TestValidatorGenesis { ) -> (TestValidator, Keypair) { let mint_keypair = Keypair::new(); self.start_with_mint_address(mint_keypair.pubkey(), socket_addr_space) + .inspect(|test_validator| { + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_io() + .enable_time() + .build() + .unwrap(); + let upgradeable_program_ids: Vec<&Pubkey> = self + .upgradeable_programs + .iter() + .map(|p| &p.program_id) + .collect(); + runtime.block_on(test_validator.wait_for_upgradeable_programs_deployed( + &upgradeable_program_ids, + &mint_keypair, + )); + }) .map(|test_validator| (test_validator, mint_keypair)) .unwrap_or_else(|err| panic!("Test 
validator failed to start: {err}")) } @@ -726,6 +743,14 @@ impl TestValidatorGenesis { match TestValidator::start(mint_keypair.pubkey(), self, socket_addr_space, None) { Ok(test_validator) => { test_validator.wait_for_nonzero_fees().await; + let upgradeable_program_ids: Vec<&Pubkey> = self + .upgradeable_programs + .iter() + .map(|p| &p.program_id) + .collect(); + test_validator + .wait_for_upgradeable_programs_deployed(&upgradeable_program_ids, &mint_keypair) + .await; (test_validator, mint_keypair) } Err(err) => panic!("Test validator failed to start: {err}"), @@ -1158,6 +1183,62 @@ impl TestValidator { } } + /// programs added to genesis ain't immediately usable. Actively check "Program + /// is not deployed" error for their availibility. + async fn wait_for_upgradeable_programs_deployed( + &self, + upgradeable_programs: &[&Pubkey], + payer: &Keypair, + ) { + let rpc_client = nonblocking::rpc_client::RpcClient::new_with_commitment( + self.rpc_url.clone(), + CommitmentConfig::processed(), + ); + + let mut deployed = vec![false; upgradeable_programs.len()]; + const MAX_ATTEMPTS: u64 = 10; + + for attempt in 1..=MAX_ATTEMPTS { + let blockhash = rpc_client.get_latest_blockhash().await.unwrap(); + for (program_id, is_deployed) in upgradeable_programs.iter().zip(deployed.iter_mut()) { + if *is_deployed { + continue; + } + + let transaction = Transaction::new_signed_with_payer( + &[Instruction { + program_id: **program_id, + accounts: vec![], + data: vec![], + }], + Some(&payer.pubkey()), + &[&payer], + blockhash, + ); + match rpc_client.send_transaction(&transaction).await { + Ok(_) => *is_deployed = true, + Err(e) => { + if format!("{:?}", e).contains("Program is not deployed") { + debug!("{:?} - not deployed", program_id); + } else { + // Assuming all other other errors could only occur *after* + // program is deployed for usability. 
+ *is_deployed = true; + debug!("{:?} - Unexpected error: {:?}", program_id, e); + } + } + } + } + if deployed.iter().all(|&deployed| deployed) { + return; + } + + println!("Waiting for programs to be fully deployed {} ...", attempt); + sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)).await; + } + panic!("Timeout waiting for program to become usable"); + } + /// Return the validator's TPU address pub fn tpu(&self) -> &SocketAddr { &self.tpu @@ -1252,6 +1333,58 @@ mod test { rpc_client.get_health().await.expect("health"); } + #[test] + fn test_upgradeable_program_deploayment() { + let program_id = Pubkey::new_unique(); + let (test_validator, payer) = TestValidatorGenesis::default() + .add_program("../programs/bpf-loader-tests/noop", program_id) + .start(); + let rpc_client = test_validator.get_rpc_client(); + + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + let transaction = Transaction::new_signed_with_payer( + &[Instruction { + program_id, + accounts: vec![], + data: vec![], + }], + Some(&payer.pubkey()), + &[&payer], + blockhash, + ); + + assert!(rpc_client + .send_and_confirm_transaction(&transaction) + .is_ok()); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_nonblocking_upgradeable_program_deploayment() { + let program_id = Pubkey::new_unique(); + let (test_validator, payer) = TestValidatorGenesis::default() + .add_program("../programs/bpf-loader-tests/noop", program_id) + .start_async() + .await; + let rpc_client = test_validator.get_async_rpc_client(); + + let blockhash = rpc_client.get_latest_blockhash().await.unwrap(); + let transaction = Transaction::new_signed_with_payer( + &[Instruction { + program_id, + accounts: vec![], + data: vec![], + }], + Some(&payer.pubkey()), + &[&payer], + blockhash, + ); + + assert!(rpc_client + .send_and_confirm_transaction(&transaction) + .await + .is_ok()); + } + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[should_panic] async fn document_tokio_panic() { 
From 79f78307ba4cad503e918dbbf59887f26f5ffc91 Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: Tue, 17 Jun 2025 17:42:58 +0300 Subject: [PATCH 020/124] fix: update SOL/lamports doc link to point directly to lamports section (#6608) * fix: update SOL/lamports doc link to point directly to lamports section * Update validator-start.md --- docs/src/operations/guides/validator-start.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md index 9fb1ea5dcfd0c6..6a4f8ef36528ff 100644 --- a/docs/src/operations/guides/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -216,7 +216,7 @@ Or to see in finer detail: solana balance --lamports ``` -Read more about the [difference between SOL and lamports here](https://solana.com/docs/intro#what-are-sols). +Read more about the difference between SOL and lamports here: [What is SOL?](https://solana.com/docs/references/terminology#sol), [What is a lamport?](https://solana.com/docs/references/terminology#lamport). ## Create Authorized Withdrawer Account From 3b89962e8ceb5bd4faafbae33f99a1dca0c264d3 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Tue, 17 Jun 2025 22:48:59 +0800 Subject: [PATCH 021/124] `ProgramCacheForTxBatch::find`: only find in `entries` if not found in `modified_entries` (#6562) * only find in entries if not found in modified_entries * cargo fmt --- program-runtime/src/loaded_programs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 53a577cf1d25ef..2c3ddbab122dc3 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -763,7 +763,7 @@ impl ProgramCacheForTxBatch { // programs that are loaded for the transaction batch. 
self.modified_entries .get(key) - .or(self.entries.get(key)) + .or_else(|| self.entries.get(key)) .map(|entry| { if entry.is_implicit_delay_visibility_tombstone(self.slot) { // Found a program entry on the current fork, but it's not effective From f9a04f708189d534ca18f4d7dddb7cec6bb3ff6c Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Tue, 17 Jun 2025 09:08:40 -0700 Subject: [PATCH 022/124] Multihoming: Udp verify reachability (#6611) multihoming: check all udp sockets are reachable across ips --- net-utils/src/ip_echo_client.rs | 193 +++++++++++++++----------------- 1 file changed, 90 insertions(+), 103 deletions(-) diff --git a/net-utils/src/ip_echo_client.rs b/net-utils/src/ip_echo_client.rs index 1ec2def4af73f5..43d0bf99b23476 100644 --- a/net-utils/src/ip_echo_client.rs +++ b/net-utils/src/ip_echo_client.rs @@ -8,7 +8,7 @@ use { itertools::Itertools, log::*, std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, net::{IpAddr, SocketAddr, TcpListener, TcpStream, UdpSocket}, sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -219,14 +219,13 @@ pub(crate) async fn verify_all_reachable_tcp( ok } -/// Checks if all of the provided UDP ports are reachable by the machine at -/// `ip_echo_server_addr`. +/// Checks if all of the provided UDP ports on all of the provided IPs are +/// reachable by the machine at `ip_echo_server_addr`. /// This function will test a few ports at a time, retrying if necessary. /// Tests must complete within timeout provided, so a longer timeout may be /// necessary if checking many ports. /// A given amount of retries will be made to accommodate packet loss. /// This function may panic. -/// This function assumes that all sockets are bound to the same IP. 
pub(crate) async fn verify_all_reachable_udp( ip_echo_server_addr: SocketAddr, sockets: &[&UdpSocket], @@ -237,119 +236,107 @@ pub(crate) async fn verify_all_reachable_udp( warn!("No ports provided for verify_all_reachable_udp to check"); return true; } - // Extract the bind_address for requests from the first socket, it should be same for all others too - let bind_address = sockets[0] - .local_addr() - .expect("Sockets should be bound") - .ip(); - // This function may get fed multiple sockets bound to the same port. - // In such case we need to know which sockets are bound to each port, - // as only one of them will receive a packet from echo server - let mut ports_to_socks_map: BTreeMap<_, _> = BTreeMap::new(); + let mut ip_to_ports: HashMap>> = HashMap::new(); for &socket in sockets.iter() { - let local_binding = socket.local_addr().expect("Sockets should be bound"); - assert_eq!( - local_binding.ip(), - bind_address, - "All sockets should be bound to the same IP" - ); - let port = local_binding.port(); - ports_to_socks_map - .entry(port) - .or_insert_with(Vec::new) + let local_addr = socket.local_addr().expect("Socket must be bound"); + ip_to_ports + .entry(local_addr.ip()) + .or_default() + .entry(local_addr.port()) + .or_default() .push(socket); } + for (bind_ip, ports_to_socks_map) in ip_to_ports { + let ports: Vec = ports_to_socks_map.keys().copied().collect(); - let ports: Vec<_> = ports_to_socks_map.into_iter().collect(); - - info!( - "Checking that udp ports {:?} are reachable from {:?}", - ports.iter().map(|(port, _)| port).collect::>(), - ip_echo_server_addr - ); + info!( + "Checking that udp ports {:?} are reachable from bind IP {:?}", + ports, bind_ip + ); - 'outer: for chunk_to_check in ports.chunks(MAX_PORT_COUNT_PER_MESSAGE) { - let ports_to_check = chunk_to_check - .iter() - .map(|(port, _)| *port) - .collect::>(); + 'outer: for chunk_to_check in ports.chunks(MAX_PORT_COUNT_PER_MESSAGE) { + let ports_to_check = chunk_to_check.to_vec(); - for 
attempt in 0..retry_count { - if attempt > 0 { - error!("There are some udp ports with no response!! Retrying..."); - } - // clone off the sockets that use ports within our chunk - let sockets_to_check = chunk_to_check.iter().flat_map(|(_, sockets)| { - sockets + for attempt in 0..retry_count { + if attempt > 0 { + error!("There are some udp ports with no response!! Retrying..."); + } + // clone off the sockets that use ports within our chunk + let sockets_to_check: Vec = ports_to_check .iter() - .map(|&s| s.try_clone().expect("Unable to clone udp socket")) - }); - - let _ = ip_echo_server_request_with_binding( - ip_echo_server_addr, - IpEchoServerMessage::new(&[], &ports_to_check), - bind_address, - ) - .await - .map_err(|err| warn!("ip_echo_server request failed: {}", err)); - - let reachable_ports = Arc::new(RwLock::new(HashSet::new())); - // Spawn threads for each socket to check - let mut checkers = JoinSet::new(); - for socket in sockets_to_check { - let port = socket.local_addr().expect("Socket should be bound").port(); - let reachable_ports = reachable_ports.clone(); - - // Use blocking API since we have no idea if sockets given to us are nonblocking or not - checkers.spawn_blocking(move || { - let start = Instant::now(); - - let original_read_timeout = socket.read_timeout().unwrap(); - socket - .set_read_timeout(Some(Duration::from_millis(250))) - .unwrap(); - - loop { - if reachable_ports.read().unwrap().contains(&port) - || Instant::now().duration_since(start) >= timeout - { - break; + .flat_map(|port| ports_to_socks_map.get(port).unwrap()) + .map(|&s| s.try_clone().expect("Unable to clone UDP socket")) + .collect(); + + let _ = ip_echo_server_request_with_binding( + ip_echo_server_addr, + IpEchoServerMessage::new(&[], &ports_to_check), + bind_ip, + ) + .await + .map_err(|err| warn!("ip_echo_server request failed: {}", err)); + + let reachable_ports = Arc::new(RwLock::new(HashSet::new())); + // Spawn threads for each socket to check + let mut checkers = 
JoinSet::new(); + for socket in sockets_to_check { + let port = socket.local_addr().expect("Socket should be bound").port(); + let reachable_ports = reachable_ports.clone(); + + checkers.spawn_blocking(move || { + let start = Instant::now(); + + let original_read_timeout = socket.read_timeout().unwrap(); + socket + .set_read_timeout(Some(Duration::from_millis(250))) + .unwrap(); + + loop { + if reachable_ports.read().unwrap().contains(&port) + || Instant::now().duration_since(start) >= timeout + { + break; + } + + let recv_result = socket.recv(&mut [0; 1]); + debug!( + "Waited for incoming datagram on udp/{}: {:?}", + port, recv_result + ); + + if recv_result.is_ok() { + reachable_ports.write().unwrap().insert(port); + break; + } } - let recv_result = socket.recv(&mut [0; 1]); - debug!( - "Waited for incoming datagram on udp/{}: {:?}", - port, recv_result - ); - - if recv_result.is_ok() { - reachable_ports.write().unwrap().insert(port); - break; - } - } - socket.set_read_timeout(original_read_timeout).unwrap(); - }); - } + socket.set_read_timeout(original_read_timeout).unwrap(); + }); + } - while let Some(r) = checkers.join_next().await { - r.expect("Threads should exit cleanly"); + while let Some(r) = checkers.join_next().await { + r.expect("Threads should exit cleanly"); + } + // Might have lost a UDP packet, check that all ports were reached + let reachable_ports = Arc::into_inner(reachable_ports) + .expect("Single owner expected") + .into_inner() + .expect("No threads should hold the lock"); + info!( + "checked udp ports: {:?}, reachable udp ports: {:?}", + ports_to_check, reachable_ports + ); + if reachable_ports.len() == ports_to_check.len() { + continue 'outer; // starts checking next chunk of ports, if any + } } - // Might have lost a UDP packet, check that all ports were reached - let reachable_ports = Arc::into_inner(reachable_ports) - .expect("Single owner expected") - .into_inner() - .expect("No threads should hold the lock"); - info!( - "checked udp 
ports: {:?}, reachable udp ports: {:?}", - ports_to_check, reachable_ports + error!( + "Maximum retry count reached. Some ports for IP {} unreachable.", + bind_ip ); - if reachable_ports.len() == ports_to_check.len() { - continue 'outer; // starts checking next chunk of ports, if any - } + return false; } - error!("Maximum retry count is reached...."); - return false; } true } From cbd3eb5a734402e807f2d83ec06e4da73b4bf85a Mon Sep 17 00:00:00 2001 From: Rory Harris Date: Tue, 17 Jun 2025 09:14:00 -0700 Subject: [PATCH 023/124] Reclaim support for obsolete accounts (#6501) * Reclaim support for obsolete accounts * Updated mark accounts obsolete to take an array and reduce lock/unlock --- accounts-db/src/account_storage_reader.rs | 15 +++-- accounts-db/src/accounts_db.rs | 58 ++++++++++++++++--- .../src/accounts_db/scan_account_storage.rs | 2 +- accounts-db/src/accounts_db/tests.rs | 17 ++++-- runtime/src/snapshot_bank_utils.rs | 2 +- 5 files changed, 74 insertions(+), 20 deletions(-) diff --git a/accounts-db/src/account_storage_reader.rs b/accounts-db/src/account_storage_reader.rs index af4d5924dbe4c2..dbe5e5cbcc450c 100644 --- a/accounts-db/src/account_storage_reader.rs +++ b/accounts-db/src/account_storage_reader.rs @@ -177,7 +177,7 @@ mod tests { let offset = 0; // Mark the obsolete accounts in storage let mut size = storage.accounts.get_account_data_lens(&[0]); - storage.mark_account_obsolete(offset, size.pop().unwrap(), 0); + storage.mark_accounts_obsolete(vec![(offset, size.pop().unwrap())].into_iter(), 0); _ = AccountStorageReader::new(&storage, None).unwrap(); } @@ -260,10 +260,10 @@ mod tests { assert_eq!(obsolete_account_offset.len(), number_of_accounts_to_remove); // Mark the obsolete accounts in storage - obsolete_account_offset.into_iter().for_each(|offset| { - let mut size = storage.accounts.get_account_data_lens(&[offset]); - storage.mark_account_obsolete(offset, size.pop().unwrap(), 0); - }); + let data_lens = storage + .accounts + 
.get_account_data_lens(&obsolete_account_offset); + storage.mark_accounts_obsolete(obsolete_account_offset.into_iter().zip(data_lens), 0); let storage = storage .reopen_as_readonly(storage_access) @@ -375,7 +375,10 @@ mod tests { let mut slot_marked_dead = 0; obsolete_account_offset.into_iter().for_each(|offset| { let mut size = storage.accounts.get_account_data_lens(&[offset]); - storage.mark_account_obsolete(offset, size.pop().unwrap(), slot_marked_dead); + storage.mark_accounts_obsolete( + vec![(offset, size.pop().unwrap())].into_iter(), + slot_marked_dead, + ); slot_marked_dead += 1; }); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 49880eb7d6def4..dc2d96e7546e31 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1171,12 +1171,18 @@ impl AccountStorageEntry { self.alive_bytes.load(Ordering::Acquire) } - /// Marks the account at the given offset as obsolete - pub fn mark_account_obsolete(&self, offset: Offset, data_len: usize, slot: Slot) { - self.obsolete_accounts - .write() - .unwrap() - .push((offset, data_len, slot)); + /// Marks the accounts at the given offsets as obsolete + pub fn mark_accounts_obsolete( + &self, + newly_obsolete_accounts: impl ExactSizeIterator, + slot: Slot, + ) { + let mut obsolete_accounts_list = self.obsolete_accounts.write().unwrap(); + obsolete_accounts_list.reserve(newly_obsolete_accounts.len()); + + for (offset, data_len) in newly_obsolete_accounts { + obsolete_accounts_list.push((offset, data_len, slot)); + } } /// Returns the accounts that were marked obsolete as of the passed in slot @@ -2091,6 +2097,7 @@ impl AccountsDb { reset_accounts, pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), + MarkAccountsObsolete::No, ); measure.stop(); debug!("{}", measure); @@ -2946,6 +2953,7 @@ impl AccountsDb { reset_accounts, &pubkeys_removed_from_accounts_index, 
HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), + MarkAccountsObsolete::No, ); reclaims_time.stop(); @@ -3123,6 +3131,11 @@ impl AccountsDb { /// cleaned up/removed via `process_dead_slots`. For instance, on store, no slots should /// be cleaned up, but during the background clean accounts purges accounts from old rooted /// slots, so outdated slots may be removed. + /// * 'mark_accounts_obsolete' - Whether to mark accounts as obsolete or not. If `Yes`, then + /// obsolete account entry will be marked in the storage so snapshots/accounts hash can + /// determine the state of the account at a specified slot. This should only be done if the + /// account is already unrefed and removed from the accounts index + /// It must be unrefed and removed to avoid double counting or missed counting in shrink fn handle_reclaims<'a, I>( &'a self, reclaims: Option, @@ -3130,14 +3143,19 @@ impl AccountsDb { reset_accounts: bool, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, handle_reclaims: HandleReclaims<'a>, + mark_accounts_obsolete: MarkAccountsObsolete, ) -> ReclaimResult where I: Iterator, { let mut reclaim_result = ReclaimResult::default(); if let Some(reclaims) = reclaims { - let (dead_slots, reclaimed_offsets) = - self.remove_dead_accounts(reclaims, expected_single_dead_slot, reset_accounts); + let (dead_slots, reclaimed_offsets) = self.remove_dead_accounts( + reclaims, + expected_single_dead_slot, + reset_accounts, + mark_accounts_obsolete, + ); reclaim_result.1 = reclaimed_offsets; if let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims { @@ -5375,6 +5393,7 @@ impl AccountsDb { // storage entries let mut handle_reclaims_elapsed = Measure::start("handle_reclaims_elapsed"); // Slot should be dead after removing all its account entries + // There is no reason to mark accounts obsolete as the slot storage is being purged let expected_dead_slot = Some(remove_slot); self.handle_reclaims( 
(!reclaims.is_empty()).then(|| reclaims.iter()), @@ -5382,6 +5401,7 @@ impl AccountsDb { false, &pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(purge_stats), + MarkAccountsObsolete::No, ); handle_reclaims_elapsed.stop(); purge_stats @@ -7215,6 +7235,7 @@ impl AccountsDb { reclaims: I, expected_slot: Option, reset_accounts: bool, + mark_accounts_obsolete: MarkAccountsObsolete, ) -> (IntSet, SlotOffsets) where I: Iterator, @@ -7270,6 +7291,16 @@ impl AccountsDb { .map(|len| store.accounts.calculate_stored_size(*len)) .sum(); store.remove_accounts(dead_bytes, reset_accounts, offsets.len()); + + if let MarkAccountsObsolete::Yes(slot_marked_obsolete) = + mark_accounts_obsolete + { + store.mark_accounts_obsolete( + offsets.into_iter().zip(data_lens), + slot_marked_obsolete, + ); + } + if Self::is_shrinking_productive(&store) && self.is_candidate_for_shrink(&store) { @@ -7811,6 +7842,7 @@ impl AccountsDb { &HashSet::default(), // this callsite does NOT process dead slots HandleReclaims::DoNotProcessDeadSlots, + MarkAccountsObsolete::No, ); handle_reclaims_time.stop(); handle_reclaims_elapsed = handle_reclaims_time.as_us(); @@ -8675,6 +8707,16 @@ enum HandleReclaims<'a> { DoNotProcessDeadSlots, } +/// Specify whether obsolete accounts should be marked or not during reclaims +/// They should only be marked if they are also getting unreffed in the index +/// Temporariliy allow dead code until the feature is implemented +#[derive(Debug, Copy, Clone)] +enum MarkAccountsObsolete { + #[allow(dead_code)] + Yes(Slot), + No, +} + /// Which accounts hash calculation is being performed? 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum CalcAccountsHashKind { diff --git a/accounts-db/src/accounts_db/scan_account_storage.rs b/accounts-db/src/accounts_db/scan_account_storage.rs index 9370dc7813f73a..6f9afd61fde6c4 100644 --- a/accounts-db/src/accounts_db/scan_account_storage.rs +++ b/accounts-db/src/accounts_db/scan_account_storage.rs @@ -1119,7 +1119,7 @@ mod tests { // Mark each account obsolete at a different slot for (i, offsets) in offsets.unwrap().offsets.iter().enumerate() { - storage.mark_account_obsolete(*offsets, 0, i as Slot); + storage.mark_accounts_obsolete(vec![(*offsets, 0)].into_iter(), i as Slot); } // Perform scans of the storage assuming a different slot and verify the number of accounts found matches diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 20aa73634eaa64..9c3731c7ded572 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -3869,6 +3869,7 @@ impl AccountsDb { define_accounts_db_test!(test_alive_bytes, |accounts_db| { let slot: Slot = 0; let num_keys = 10; + let mut num_obsolete_accounts = 0; for data_size in 0..num_keys { let account = AccountSharedData::new(1, data_size, &Pubkey::default()); @@ -3894,15 +3895,23 @@ define_accounts_db_test!(test_alive_bytes, |accounts_db| { [0]; assert_eq!(account_info.0, slot); let reclaims = [account_info]; - accounts_db.remove_dead_accounts(reclaims.iter(), None, true); + num_obsolete_accounts += reclaims.len(); + accounts_db.remove_dead_accounts( + reclaims.iter(), + None, + true, + MarkAccountsObsolete::Yes(slot), + ); let after_size = storage0.alive_bytes(); - if storage0.count() == 0 - && AccountsFileProvider::HotStorage == accounts_db.accounts_file_provider - { + if storage0.count() == 0 { // when `remove_dead_accounts` reaches 0 accounts, all bytes are marked as dead assert_eq!(after_size, 0); } else { assert_eq!(before_size, after_size + account.stored_size_aligned); + assert_eq!( + 
storage0.get_obsolete_accounts(None).len(), + num_obsolete_accounts + ); } }); }); diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 3d183e7bf43c40..d13b563cb6aba7 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1310,7 +1310,7 @@ mod tests { bank1.fill_bank_with_ticks_for_tests(); // Mark the entry for pubkey1 as obsolete in slot0 - account_storage_entry.mark_account_obsolete(offset, 0, slot); + account_storage_entry.mark_accounts_obsolete(vec![(offset, 0)].into_iter(), slot); let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); From 7510f5d8043a1e3328ad5b8663c441868fcff885 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 18 Jun 2025 01:16:08 +0700 Subject: [PATCH 024/124] Don't set affinity if XDP is not in use (#6618) This fixes the validator not starting on macos, oops --- validator/src/commands/run/execute.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 3209491989c4c4..22fea95c5a96f2 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -751,11 +751,6 @@ pub fn execute( ..ValidatorConfig::default() }; - let available = core_affinity::get_core_ids() - .unwrap_or_default() - .into_iter() - .map(|core_id| core_id.id) - .collect::>(); let reserved = validator_config .retransmit_xdp .as_ref() @@ -764,8 +759,15 @@ pub fn execute( .iter() .cloned() .collect::>(); - let available = available.difference(&reserved); - set_cpu_affinity(available.into_iter().copied()).unwrap(); + if !reserved.is_empty() { + let available = core_affinity::get_core_ids() + .unwrap_or_default() + .into_iter() + .map(|core_id| core_id.id) + .collect::>(); + let available = available.difference(&reserved); + 
set_cpu_affinity(available.into_iter().copied()).unwrap(); + } let vote_account = pubkey_of(matches, "vote_account").unwrap_or_else(|| { if !validator_config.voting_disabled { From df949690d977b276be86dd9e5a3c22b0dcc21c2e Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 17 Jun 2025 15:58:49 -0400 Subject: [PATCH 025/124] Boxes PubsubClientError's ConnectionError and WsError (#6610) --- .../src/nonblocking/pubsub_client.rs | 21 ++++++++++--------- pubsub-client/src/pubsub_client.rs | 19 ++++++++++------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index 71782710031f34..f23b781846e504 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -219,10 +219,10 @@ pub enum PubsubClientError { UrlParseError(#[from] url::ParseError), #[error("unable to connect to server")] - ConnectionError(tokio_tungstenite::tungstenite::Error), + ConnectionError(Box), #[error("websocket error")] - WsError(#[from] tokio_tungstenite::tungstenite::Error), + WsError(#[from] Box), #[error("connection closed (({0})")] ConnectionClosed(String), @@ -276,6 +276,7 @@ impl PubsubClient { let url = Url::parse(url)?; let (ws, _response) = connect_async(url) .await + .map_err(Box::new) .map_err(PubsubClientError::ConnectionError)?; let (subscribe_sender, subscribe_receiver) = mpsc::unbounded_channel(); @@ -505,20 +506,20 @@ impl PubsubClient { // Send close on shutdown signal _ = (&mut shutdown_receiver) => { let frame = CloseFrame { code: CloseCode::Normal, reason: "".into() }; - ws.send(Message::Close(Some(frame))).await?; - ws.flush().await?; + ws.send(Message::Close(Some(frame))).await.map_err(Box::new)?; + ws.flush().await.map_err(Box::new)?; break; }, // Send `Message::Ping` each 10s if no any other communication () = sleep(Duration::from_secs(10)) => { - ws.send(Message::Ping(Vec::new())).await?; + 
ws.send(Message::Ping(Vec::new())).await.map_err(Box::new)?; }, // Read message for subscribe Some((operation, params, response_sender)) = subscribe_receiver.recv() => { request_id += 1; let method = format!("{operation}Subscribe"); let text = json!({"jsonrpc":"2.0","id":request_id,"method":method,"params":params}).to_string(); - ws.send(Message::Text(text)).await?; + ws.send(Message::Text(text)).await.map_err(Box::new)?; requests_subscribe.insert(request_id, (operation, response_sender)); }, // Read message for unsubscribe @@ -527,20 +528,20 @@ impl PubsubClient { request_id += 1; let method = format!("{operation}Unsubscribe"); let text = json!({"jsonrpc":"2.0","id":request_id,"method":method,"params":[sid]}).to_string(); - ws.send(Message::Text(text)).await?; + ws.send(Message::Text(text)).await.map_err(Box::new)?; requests_unsubscribe.insert(request_id, response_sender); }, // Read message for other requests Some((method, params, response_sender)) = request_receiver.recv() => { request_id += 1; let text = json!({"jsonrpc":"2.0","id":request_id,"method":method,"params":params}).to_string(); - ws.send(Message::Text(text)).await?; + ws.send(Message::Text(text)).await.map_err(Box::new)?; other_requests.insert(request_id, response_sender); } // Read incoming WebSocket message next_msg = ws.next() => { let msg = match next_msg { - Some(msg) => msg?, + Some(msg) => msg.map_err(Box::new)?, None => break, }; trace!("ws.next(): {:?}", &msg); @@ -550,7 +551,7 @@ impl PubsubClient { Message::Text(text) => text, Message::Binary(_data) => continue, // Ignore Message::Ping(data) => { - ws.send(Message::Pong(data)).await?; + ws.send(Message::Pong(data)).await.map_err(Box::new)?; continue }, Message::Pong(_data) => continue, diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index a93dd312838951..0c7d789a022748 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -165,13 +165,17 @@ where writable_socket: 
&Arc>>>, body: String, ) -> Result { - writable_socket.write().unwrap().send(Message::Text(body))?; - let message = writable_socket.write().unwrap().read()?; + writable_socket + .write() + .unwrap() + .send(Message::Text(body)) + .map_err(Box::new)?; + let message = writable_socket.write().unwrap().read().map_err(Box::new)?; Self::extract_subscription_id(message) } fn extract_subscription_id(message: Message) -> Result { - let message_text = &message.into_text()?; + let message_text = &message.into_text().map_err(Box::new)?; if let Ok(json_msg) = serde_json::from_str::>(message_text) { if let Some(Number(x)) = json_msg.get("result") { @@ -205,17 +209,18 @@ where }) .to_string(), )) + .map_err(Box::new) .map_err(|err| err.into()) } fn read_message( writable_socket: &Arc>>>, ) -> Result, PubsubClientError> { - let message = writable_socket.write().unwrap().read()?; + let message = writable_socket.write().unwrap().read().map_err(Box::new)?; if message.is_ping() { return Ok(None); } - let message_text = &message.into_text()?; + let message_text = &message.into_text().map_err(Box::new)?; if let Ok(json_msg) = serde_json::from_str::>(message_text) { if let Some(Object(params)) = json_msg.get("params") { if let Some(result) = params.get("result") { @@ -300,7 +305,7 @@ pub struct PubsubClient {} fn connect_with_retry( url: Url, -) -> Result>, tungstenite::Error> { +) -> Result>, Box> { let mut connection_retries = 5; loop { let result = connect(url.clone()).map(|(socket, _)| socket); @@ -327,7 +332,7 @@ fn connect_with_retry( continue; } } - return result; + return result.map_err(Box::new); } } From 3281f61c5f27c0d61c147e8f6f383986033be506 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 17 Jun 2025 15:16:14 -0500 Subject: [PATCH 026/124] Return transaction status from all bank process tx methods (#6561) refactor: rm status lookup in bank process tx --- core/src/banking_stage.rs | 6 ++--- runtime/src/bank.rs | 7 ++--- runtime/src/bank/tests.rs | 55 
++++++++++++++++++++++++++------------- 3 files changed, 42 insertions(+), 26 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 445aca5a8ee3af..68e73a9ab5ba97 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1095,9 +1095,9 @@ mod tests { let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); for entry in entries { - bank.process_entry_transactions(entry.transactions) - .iter() - .for_each(|x| assert_eq!(*x, Ok(()))); + let _ = bank + .try_process_entry_transactions(entry.transactions) + .expect("All transactions should be processed"); } // Assert the user doesn't hold three lamports. If the stage only outputs one diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5733634fa3de9f..2b0c4f0843ed6d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4683,10 +4683,7 @@ impl Bank { /// Process a Transaction. This is used for unit tests and simply calls the vector /// Bank::process_transactions method. pub fn process_transaction(&self, tx: &Transaction) -> Result<()> { - self.try_process_transactions(std::iter::once(tx))?[0].clone()?; - tx.signatures - .first() - .map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap()) + self.try_process_transactions(std::iter::once(tx))?[0].clone() } /// Process a Transaction and store metadata. This is used for tests and the banks services. 
It @@ -4750,7 +4747,7 @@ impl Bank { ) .0 .into_iter() - .map(|commit_result| commit_result.map(|_| ())) + .map(|commit_result| commit_result.and_then(|committed_tx| committed_tx.status)) .collect() } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 09cf9545854566..f8e44058f0c037 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -118,10 +118,11 @@ use { }, solana_transaction_context::TransactionAccount, solana_transaction_error::{TransactionError, TransactionResult as Result}, + solana_vote_interface::state::TowerSync, solana_vote_program::{ vote_instruction, vote_state::{ - self, create_account_with_authorized, BlockTimestamp, Vote, VoteInit, VoteState, + self, create_account_with_authorized, BlockTimestamp, VoteInit, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY, }, }, @@ -503,14 +504,15 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { assert_eq!(bank.last_blockhash(), genesis_config.hash()); - let plenty_of_lamports = 264; + let min_balance = genesis_config.rent.minimum_balance(0); + let plenty_of_lamports = min_balance + 1; let too_few_lamports = 10; // Initialize credit-debit and credit only accounts let accounts = [ AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), // Transaction between these two accounts will fail AccountSharedData::new(too_few_lamports, 0, &Pubkey::default()), AccountSharedData::new(too_few_lamports, 1, &Pubkey::default()), @@ -3087,6 +3089,9 @@ fn test_readonly_accounts(relax_intrabatch_account_locks: bool) { if !relax_intrabatch_account_locks { bank.deactivate_feature(&feature_set::relax_intrabatch_account_locks::id()); } + + 
let next_slot = bank.slot() + 1; + let bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), next_slot); let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); let vote_pubkey0 = solana_pubkey::new_rand(); @@ -3097,12 +3102,27 @@ fn test_readonly_accounts(relax_intrabatch_account_locks: bool) { let payer1 = Keypair::new(); // Create vote accounts - let vote_account0 = - vote_state::create_account(&vote_pubkey0, &authorized_voter.pubkey(), 0, 100); - let vote_account1 = - vote_state::create_account(&vote_pubkey1, &authorized_voter.pubkey(), 0, 100); - let vote_account2 = - vote_state::create_account(&vote_pubkey2, &authorized_voter.pubkey(), 0, 100); + let vote_account0 = vote_state::create_account_with_authorized( + &vote_pubkey0, + &authorized_voter.pubkey(), + &authorized_voter.pubkey(), + 0, + 100, + ); + let vote_account1 = vote_state::create_account_with_authorized( + &vote_pubkey1, + &authorized_voter.pubkey(), + &authorized_voter.pubkey(), + 0, + 100, + ); + let vote_account2 = vote_state::create_account_with_authorized( + &vote_pubkey2, + &authorized_voter.pubkey(), + &authorized_voter.pubkey(), + 0, + 100, + ); bank.store_account(&vote_pubkey0, &vote_account0); bank.store_account(&vote_pubkey1, &vote_account1); bank.store_account(&vote_pubkey2, &vote_account2); @@ -3113,15 +3133,15 @@ fn test_readonly_accounts(relax_intrabatch_account_locks: bool) { bank.transfer(1, &mint_keypair, &authorized_voter.pubkey()) .unwrap(); - let vote = Vote::new(vec![1], Hash::default()); - let ix0 = vote_instruction::vote(&vote_pubkey0, &authorized_voter.pubkey(), vote.clone()); + let vote = TowerSync::new_from_slot(bank.parent_slot, bank.parent_hash); + let ix0 = vote_instruction::tower_sync(&vote_pubkey0, &authorized_voter.pubkey(), vote.clone()); let tx0 = Transaction::new_signed_with_payer( &[ix0], Some(&payer0.pubkey()), &[&payer0, &authorized_voter], bank.last_blockhash(), ); - let ix1 = vote_instruction::vote(&vote_pubkey1, 
&authorized_voter.pubkey(), vote.clone()); + let ix1 = vote_instruction::tower_sync(&vote_pubkey1, &authorized_voter.pubkey(), vote.clone()); let tx1 = Transaction::new_signed_with_payer( &[ix1], Some(&payer1.pubkey()), @@ -3136,7 +3156,7 @@ fn test_readonly_accounts(relax_intrabatch_account_locks: bool) { assert_eq!(results[0], Ok(())); assert_eq!(results[1], Ok(())); - let ix0 = vote_instruction::vote(&vote_pubkey2, &authorized_voter.pubkey(), vote); + let ix0 = vote_instruction::tower_sync(&vote_pubkey2, &authorized_voter.pubkey(), vote); let tx0 = Transaction::new_signed_with_payer( &[ix0], Some(&payer0.pubkey()), @@ -10153,6 +10173,7 @@ fn test_failed_compute_request_instruction() { bank.transfer(10, &mint_keypair, &payer1_keypair.pubkey()) .unwrap(); + const TEST_COMPUTE_UNIT_LIMIT: u32 = 500u32; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = ComputeBudget::from_budget_and_cost( invoke_context.get_compute_budget(), @@ -10161,9 +10182,7 @@ fn test_failed_compute_request_instruction() { assert_eq!( compute_budget, ComputeBudget { - compute_unit_limit: u64::from( - execution_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - ), + compute_unit_limit: u64::from(TEST_COMPUTE_UNIT_LIMIT), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10182,7 +10201,7 @@ fn test_failed_compute_request_instruction() { // This message will be processed successfully let message1 = Message::new( &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::set_compute_unit_limit(TEST_COMPUTE_UNIT_LIMIT), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), ], From 05880341d95e59c5c0e439dfc500e01758540a10 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Wed, 18 Jun 2025 14:12:53 +0300 Subject: [PATCH 027/124] rm tests that make no sense for merkle shreds (#6626) --- ledger/src/sigverify_shreds.rs | 93 +--------------------------------- 1 file changed, 1 
insertion(+), 92 deletions(-) diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index dd3882e53c6b6d..d1fe3ba123b4c8 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -530,7 +530,7 @@ mod tests { use { super::*, crate::{ - shred::{ProcessShredsStats, Shred, ShredFlags, LEGACY_SHRED_DATA_CAPACITY}, + shred::{ProcessShredsStats, Shred, ShredFlags}, shredder::{ReedSolomonCache, Shredder}, }, assert_matches::assert_matches, @@ -707,97 +707,6 @@ mod tests { run_test_sigverify_shreds_gpu(&thread_pool, 0xdead_c0de); } - fn run_test_sigverify_shreds_sign_gpu(thread_pool: &ThreadPool, slot: Slot) { - solana_logger::setup(); - let recycler_cache = RecyclerCache::default(); - let cache = RwLock::new(LruCache::new(/*capacity:*/ 128)); - - let num_packets = 32; - let num_batches = 100; - let mut packet_batch = PinnedPacketBatch::with_capacity(num_packets); - packet_batch.resize(num_packets, Packet::default()); - - for (i, p) in packet_batch.iter_mut().enumerate() { - let shred = Shred::new_from_data( - slot, - 0xc0de, - i as u16, - &[5; LEGACY_SHRED_DATA_CAPACITY], - ShredFlags::LAST_SHRED_IN_SLOT, - 1, - 2, - 0xc0de, - ); - shred.copy_to_packet(p); - } - let packet_batch = PacketBatch::from(packet_batch); - let mut batches = vec![packet_batch; num_batches]; - let keypair = Keypair::new(); - let pinned_keypair = sign_shreds_gpu_pinned_keypair(&keypair, &recycler_cache); - let pinned_keypair = Some(Arc::new(pinned_keypair)); - let pubkeys = HashMap::from([(u64::MAX, Pubkey::default()), (slot, keypair.pubkey())]); - //unsigned - let rv = verify_shreds_gpu(thread_pool, &batches, &pubkeys, &recycler_cache, &cache); - assert_eq!(rv, vec![vec![0; num_packets]; num_batches]); - //signed - sign_shreds_gpu( - thread_pool, - &keypair, - &pinned_keypair, - &mut batches, - &recycler_cache, - ); - let rv = verify_shreds_cpu(thread_pool, &batches, &pubkeys, &cache); - assert_eq!(rv, vec![vec![1; num_packets]; num_batches]); - - let 
rv = verify_shreds_gpu(thread_pool, &batches, &pubkeys, &recycler_cache, &cache); - assert_eq!(rv, vec![vec![1; num_packets]; num_batches]); - } - - #[test] - fn test_sigverify_shreds_sign_gpu() { - let thread_pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap(); - run_test_sigverify_shreds_sign_gpu(&thread_pool, 0xdead_c0de); - } - - fn run_test_sigverify_shreds_sign_cpu(thread_pool: &ThreadPool, slot: Slot) { - solana_logger::setup(); - - let mut batch = PinnedPacketBatch::default(); - let cache = RwLock::new(LruCache::new(/*capacity:*/ 128)); - let keypair = Keypair::new(); - let shred = Shred::new_from_data( - slot, - 0xc0de, - 0xdead, - &[1, 2, 3, 4], - ShredFlags::LAST_SHRED_IN_SLOT, - 0, - 0, - 0xc0de, - ); - batch.resize(1, Packet::default()); - batch[0].buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); - batch[0].meta_mut().size = shred.payload().len(); - let batch = PacketBatch::from(batch); - let mut batches = [batch]; - - let pubkeys = HashMap::from([(slot, keypair.pubkey()), (u64::MAX, Pubkey::default())]); - //unsigned - let rv = verify_shreds_cpu(thread_pool, &batches, &pubkeys, &cache); - assert_eq!(rv, vec![vec![0]]); - //signed - sign_shreds_cpu(thread_pool, &keypair, &mut batches); - let rv = verify_shreds_cpu(thread_pool, &batches, &pubkeys, &cache); - assert_eq!(rv, vec![vec![1]]); - } - - #[test] - fn test_sigverify_shreds_sign_cpu() { - let thread_pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap(); - run_test_sigverify_shreds_sign_cpu(&thread_pool, 0xdead_c0de); - } - fn make_transaction(rng: &mut R) -> Transaction { let block = rng.gen::<[u8; 32]>(); let recent_blockhash = solana_sha256_hasher::hashv(&[&block]); From 9c851237b768eba1d5631d3d22b76f54597a6f5b Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Jun 2025 07:28:34 -0400 Subject: [PATCH 028/124] Uses an enum for the snapshot interval (#6612) --- core/src/validator.rs | 91 ++++++++++--------- core/tests/epoch_accounts_hash.rs | 11 ++- 
core/tests/snapshots.rs | 35 ++++--- local-cluster/src/integration_tests.rs | 25 ++--- local-cluster/tests/local_cluster.rs | 52 ++++++----- rpc/src/rpc_service.rs | 25 +++-- runtime/src/accounts_background_service.rs | 11 ++- runtime/src/snapshot_bank_utils.rs | 4 - runtime/src/snapshot_config.rs | 24 ++--- runtime/src/snapshot_controller.rs | 28 ++++-- runtime/src/snapshot_utils.rs | 9 +- .../src/snapshot_utils/snapshot_interval.rs | 10 ++ test-validator/src/lib.rs | 8 +- validator/src/cli.rs | 20 ++-- validator/src/commands/run/execute.rs | 78 +++++++--------- 15 files changed, 239 insertions(+), 192 deletions(-) create mode 100644 runtime/src/snapshot_utils/snapshot_interval.rs diff --git a/core/src/validator.rs b/core/src/validator.rs index 03a5bccd333255..84eb7185dead34 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -109,11 +109,11 @@ use { prioritization_fee_cache::PrioritizationFeeCache, runtime_config::RuntimeConfig, snapshot_archive_info::SnapshotArchiveInfoGetter, - snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, + snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_controller::SnapshotController, snapshot_hash::StartingSnapshotHashes, - snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, + snapshot_utils::{self, clean_orphaned_account_snapshot_dirs, SnapshotInterval}, }, solana_send_transaction_service::send_transaction_service::Config as SendTransactionServiceConfig, solana_shred_version::compute_shred_version, @@ -2779,16 +2779,18 @@ pub fn is_snapshot_config_valid(snapshot_config: &SnapshotConfig) -> bool { return true; } - let full_snapshot_interval_slots = snapshot_config.full_snapshot_archive_interval_slots; - let incremental_snapshot_interval_slots = - snapshot_config.incremental_snapshot_archive_interval_slots; + let SnapshotInterval::Slots(full_snapshot_interval_slots) = + snapshot_config.full_snapshot_archive_interval + else { + // if we *are* generating snapshots, then the full snapshot 
interval cannot be disabled + return false; + }; - if incremental_snapshot_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { - true - } else if incremental_snapshot_interval_slots == 0 { - false - } else { - full_snapshot_interval_slots > incremental_snapshot_interval_slots + match snapshot_config.incremental_snapshot_archive_interval { + SnapshotInterval::Disabled => true, + SnapshotInterval::Slots(incremental_snapshot_interval_slots) => { + full_snapshot_interval_slots > incremental_snapshot_interval_slots + } } } @@ -2807,7 +2809,7 @@ mod tests { solana_poh_config::PohConfig, solana_sha256_hasher::hash, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, - std::{fs::remove_dir_all, thread, time::Duration}, + std::{fs::remove_dir_all, num::NonZeroU64, thread, time::Duration}, }; #[test] @@ -3163,60 +3165,61 @@ mod tests { } #[test] - fn test_interval_check() { + fn test_is_snapshot_config_valid() { fn new_snapshot_config( full_snapshot_archive_interval_slots: Slot, incremental_snapshot_archive_interval_slots: Slot, ) -> SnapshotConfig { SnapshotConfig { - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots, + full_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(full_snapshot_archive_interval_slots).unwrap(), + ), + incremental_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(incremental_snapshot_archive_interval_slots).unwrap(), + ), ..SnapshotConfig::default() } } - assert!(is_snapshot_config_valid(&new_snapshot_config(300, 200))); - - assert!(is_snapshot_config_valid(&new_snapshot_config( - snapshot_bank_utils::DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - snapshot_bank_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS - ))); - assert!(is_snapshot_config_valid(&new_snapshot_config( - snapshot_bank_utils::DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL - ))); - assert!(is_snapshot_config_valid(&new_snapshot_config( - 
snapshot_bank_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL - ))); - assert!(is_snapshot_config_valid(&new_snapshot_config( - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL - ))); - - // Full snaphot intervals used to be required to be a multiple of - // incremental snapshot intervals, but that's no longer the case - // so test that the check is relaxed. + // default config must be valid + assert!(is_snapshot_config_valid(&SnapshotConfig::default())); + + // disabled incremental snapshot must be valid + assert!(is_snapshot_config_valid(&SnapshotConfig { + incremental_snapshot_archive_interval: SnapshotInterval::Disabled, + ..SnapshotConfig::default() + })); + + // disabled full snapshot must be invalid though (if generating snapshots) + assert!(!is_snapshot_config_valid(&SnapshotConfig { + full_snapshot_archive_interval: SnapshotInterval::Disabled, + ..SnapshotConfig::default() + })); + + // simple config must be valid + assert!(is_snapshot_config_valid(&new_snapshot_config(400, 200))); assert!(is_snapshot_config_valid(&new_snapshot_config(100, 42))); assert!(is_snapshot_config_valid(&new_snapshot_config(444, 200))); assert!(is_snapshot_config_valid(&new_snapshot_config(400, 222))); - assert!(!is_snapshot_config_valid(&new_snapshot_config(0, 100))); - assert!(!is_snapshot_config_valid(&new_snapshot_config(100, 0))); - assert!(!is_snapshot_config_valid(&new_snapshot_config(0, 0))); + // config where full interval is not larger than incremental interval must be invalid assert!(!is_snapshot_config_valid(&new_snapshot_config(42, 100))); assert!(!is_snapshot_config_valid(&new_snapshot_config(100, 100))); assert!(!is_snapshot_config_valid(&new_snapshot_config(100, 200))); + // config with snapshots disabled (or load-only) must be valid + assert!(is_snapshot_config_valid(&SnapshotConfig::new_disabled())); assert!(is_snapshot_config_valid(&SnapshotConfig::new_load_only())); 
assert!(is_snapshot_config_valid(&SnapshotConfig { - full_snapshot_archive_interval_slots: 37, - incremental_snapshot_archive_interval_slots: 41, + full_snapshot_archive_interval: SnapshotInterval::Slots(NonZeroU64::new(37).unwrap()), + incremental_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(41).unwrap() + ), ..SnapshotConfig::new_load_only() })); assert!(is_snapshot_config_valid(&SnapshotConfig { - full_snapshot_archive_interval_slots: DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - incremental_snapshot_archive_interval_slots: DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, + full_snapshot_archive_interval: SnapshotInterval::Disabled, + incremental_snapshot_archive_interval: SnapshotInterval::Disabled, ..SnapshotConfig::new_load_only() })); } diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 587468705face3..9f33a1cdf81030 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -32,7 +32,7 @@ use { snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_controller::SnapshotController, - snapshot_utils, + snapshot_utils::{self, SnapshotInterval}, }, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, @@ -40,6 +40,7 @@ use { solana_time_utils::timestamp, std::{ mem::ManuallyDrop, + num::NonZeroU64, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, RwLock, @@ -79,8 +80,12 @@ impl TestEnvironment { incremental_snapshot_archive_interval_slots: Slot, ) -> TestEnvironment { let snapshot_config = SnapshotConfig { - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots, + full_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(full_snapshot_archive_interval_slots).unwrap(), + ), + incremental_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(incremental_snapshot_archive_interval_slots).unwrap(), + ), ..SnapshotConfig::default() }; Self::_new(snapshot_config) diff --git a/core/tests/snapshots.rs 
b/core/tests/snapshots.rs index c7a41ad21472f0..842a243c72e8e1 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -32,11 +32,11 @@ use { genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, runtime_config::RuntimeConfig, snapshot_archive_info::FullSnapshotArchiveInfo, - snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, + snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_controller::SnapshotController, snapshot_utils::{ - self, + self, SnapshotInterval, SnapshotVersion::{self, V1_2_0}, }, status_cache::MAX_CACHE_ENTRIES, @@ -47,6 +47,7 @@ use { solana_system_transaction as system_transaction, solana_time_utils::timestamp, std::{ + num::NonZeroU64, path::PathBuf, sync::{ atomic::{AtomicBool, Ordering}, @@ -75,8 +76,8 @@ impl SnapshotTestConfig { fn new( snapshot_version: SnapshotVersion, cluster_type: ClusterType, - full_snapshot_archive_interval_slots: Slot, - incremental_snapshot_archive_interval_slots: Slot, + full_snapshot_archive_interval: SnapshotInterval, + incremental_snapshot_archive_interval: SnapshotInterval, ) -> SnapshotTestConfig { let (accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); let bank_snapshots_dir = TempDir::new().unwrap(); @@ -103,8 +104,8 @@ impl SnapshotTestConfig { let bank_forks_arc = BankForks::new_rw_arc(bank0); let snapshot_config = SnapshotConfig { - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots, + full_snapshot_archive_interval, + incremental_snapshot_archive_interval, full_snapshot_archives_dir: full_snapshot_archives_dir.path().to_path_buf(), incremental_snapshot_archives_dir: incremental_snapshot_archives_dir .path() @@ -189,8 +190,8 @@ fn run_bank_forks_snapshot_n( let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, cluster_type, - set_root_interval, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, + SnapshotInterval::Slots(NonZeroU64::new(set_root_interval).unwrap()), + SnapshotInterval::Disabled, 
); let bank_forks = snapshot_test_config.bank_forks.clone(); @@ -310,8 +311,10 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, cluster_type, - (*add_root_interval * num_set_roots * 2) as Slot, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, + SnapshotInterval::Slots( + NonZeroU64::new((*add_root_interval * num_set_roots * 2) as Slot).unwrap(), + ), + SnapshotInterval::Disabled, ); let bank_forks = snapshot_test_config.bank_forks.clone(); let bank_forks_r = bank_forks.read().unwrap(); @@ -445,8 +448,10 @@ fn test_bank_forks_incremental_snapshot( let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, cluster_type, - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + SnapshotInterval::Slots(NonZeroU64::new(FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap()), + SnapshotInterval::Slots( + NonZeroU64::new(INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap(), + ), ); trace!( "SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: \ @@ -682,8 +687,10 @@ fn test_snapshots_with_background_services( let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, cluster_type, - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + SnapshotInterval::Slots(NonZeroU64::new(FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap()), + SnapshotInterval::Slots( + NonZeroU64::new(INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap(), + ), ); let node_keypair = Arc::new(Keypair::new()); diff --git a/local-cluster/src/integration_tests.rs b/local-cluster/src/integration_tests.rs index a35c7d9121e130..04b11d31bf0b66 100644 --- a/local-cluster/src/integration_tests.rs +++ b/local-cluster/src/integration_tests.rs @@ -38,9 +38,7 @@ use { solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, - solana_runtime::{ - 
snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::SnapshotConfig, - }, + solana_runtime::{snapshot_config::SnapshotConfig, snapshot_utils::SnapshotInterval}, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, solana_turbine::broadcast_stage::BroadcastStageType, @@ -48,7 +46,7 @@ use { std::{ collections::HashSet, fs, iter, - num::NonZeroUsize, + num::{NonZeroU64, NonZeroUsize}, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, Ordering}, @@ -536,15 +534,12 @@ pub struct SnapshotValidatorConfig { impl SnapshotValidatorConfig { pub fn new( - full_snapshot_archive_interval_slots: Slot, - incremental_snapshot_archive_interval_slots: Slot, + full_snapshot_archive_interval: SnapshotInterval, + incremental_snapshot_archive_interval: SnapshotInterval, num_account_paths: usize, ) -> SnapshotValidatorConfig { - // Interval values must be nonzero - assert!(full_snapshot_archive_interval_slots > 0); - assert!(incremental_snapshot_archive_interval_slots > 0); // Ensure that some snapshots will be created - assert!(full_snapshot_archive_interval_slots != DISABLED_SNAPSHOT_ARCHIVE_INTERVAL); + assert_ne!(full_snapshot_archive_interval, SnapshotInterval::Disabled); // Create the snapshot config let _ = fs::create_dir_all(farf_dir()); @@ -552,8 +547,8 @@ impl SnapshotValidatorConfig { let full_snapshot_archives_dir = tempfile::tempdir_in(farf_dir()).unwrap(); let incremental_snapshot_archives_dir = tempfile::tempdir_in(farf_dir()).unwrap(); let snapshot_config = SnapshotConfig { - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots, + full_snapshot_archive_interval, + incremental_snapshot_archive_interval, full_snapshot_archives_dir: full_snapshot_archives_dir.path().to_path_buf(), incremental_snapshot_archives_dir: incremental_snapshot_archives_dir .path() @@ -592,12 +587,12 @@ impl SnapshotValidatorConfig { } pub fn setup_snapshot_validator_config( - snapshot_interval_slots: Slot, + snapshot_interval_slots: 
NonZeroU64, num_account_paths: usize, ) -> SnapshotValidatorConfig { SnapshotValidatorConfig::new( - snapshot_interval_slots, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, + SnapshotInterval::Slots(snapshot_interval_slots), + SnapshotInterval::Disabled, num_account_paths, ) } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 5c050914aa1217..3edeccb5d4e205 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -69,9 +69,12 @@ use { response::RpcSignatureResult, }, solana_runtime::{ - commitment::VOTE_THRESHOLD_SIZE, snapshot_archive_info::SnapshotArchiveInfoGetter, - snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_package::SnapshotKind, - snapshot_utils, + commitment::VOTE_THRESHOLD_SIZE, + snapshot_archive_info::SnapshotArchiveInfoGetter, + snapshot_bank_utils, + snapshot_config::SnapshotConfig, + snapshot_package::SnapshotKind, + snapshot_utils::{self, SnapshotInterval}, }, solana_signer::Signer, solana_stake_interface::{self as stake, state::NEW_WARMUP_COOLDOWN_RATE}, @@ -90,6 +93,7 @@ use { fs, io::Read, iter, + num::NonZeroU64, path::Path, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, @@ -479,7 +483,7 @@ fn test_mainnet_beta_cluster_type() { fn test_snapshot_download() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 1 node - let snapshot_interval_slots = 50; + let snapshot_interval_slots = NonZeroU64::new(50).unwrap(); let num_account_paths = 3; let leader_snapshot_test_config = @@ -559,13 +563,13 @@ fn test_incremental_snapshot_download() { let num_account_paths = 3; let leader_snapshot_test_config = SnapshotValidatorConfig::new( - full_snapshot_interval, - incremental_snapshot_interval, + SnapshotInterval::Slots(NonZeroU64::new(full_snapshot_interval).unwrap()), + SnapshotInterval::Slots(NonZeroU64::new(incremental_snapshot_interval).unwrap()), num_account_paths, ); let validator_snapshot_test_config = 
SnapshotValidatorConfig::new( - full_snapshot_interval, - incremental_snapshot_interval, + SnapshotInterval::Slots(NonZeroU64::new(full_snapshot_interval).unwrap()), + SnapshotInterval::Slots(NonZeroU64::new(incremental_snapshot_interval).unwrap()), num_account_paths, ); @@ -730,13 +734,13 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st let num_account_paths = 3; let leader_snapshot_test_config = SnapshotValidatorConfig::new( - full_snapshot_interval, - incremental_snapshot_interval, + SnapshotInterval::Slots(NonZeroU64::new(full_snapshot_interval).unwrap()), + SnapshotInterval::Slots(NonZeroU64::new(incremental_snapshot_interval).unwrap()), num_account_paths, ); let mut validator_snapshot_test_config = SnapshotValidatorConfig::new( - full_snapshot_interval, - incremental_snapshot_interval, + SnapshotInterval::Slots(NonZeroU64::new(full_snapshot_interval).unwrap()), + SnapshotInterval::Slots(NonZeroU64::new(incremental_snapshot_interval).unwrap()), num_account_paths, ); // The test has asserts that require the validator always boots from snapshot archives @@ -756,7 +760,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); info!( - "snapshot config:\n\tfull snapshot interval: {}\n\tincremental snapshot interval: {}", + "snapshot config:\n\tfull snapshot interval: {:?}\n\tincremental snapshot interval: {:?}", full_snapshot_interval, incremental_snapshot_interval, ); debug!( @@ -1174,8 +1178,8 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st // And lastly, startup another node with the new snapshots to ensure they work let final_validator_snapshot_test_config = SnapshotValidatorConfig::new( - full_snapshot_interval, - incremental_snapshot_interval, + SnapshotInterval::Slots(NonZeroU64::new(full_snapshot_interval).unwrap()), + 
SnapshotInterval::Slots(NonZeroU64::new(incremental_snapshot_interval).unwrap()), num_account_paths, ); @@ -1215,7 +1219,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st fn test_snapshot_restart_tower() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes - let snapshot_interval_slots = 10; + let snapshot_interval_slots = NonZeroU64::new(10).unwrap(); let num_account_paths = 2; let leader_snapshot_test_config = @@ -1288,7 +1292,7 @@ fn test_snapshot_restart_tower() { fn test_snapshots_blockstore_floor() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 1 snapshotting leader - let snapshot_interval_slots = 100; + let snapshot_interval_slots = NonZeroU64::new(100).unwrap(); let num_account_paths = 4; let leader_snapshot_test_config = @@ -1400,7 +1404,7 @@ fn test_snapshots_blockstore_floor() { #[serial] fn test_snapshots_restart_validity() { solana_logger::setup_with_default(RUST_LOG_FILTER); - let snapshot_interval_slots = 100; + let snapshot_interval_slots = NonZeroU64::new(100).unwrap(); let num_account_paths = 1; let mut snapshot_test_config = setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths); @@ -4986,8 +4990,10 @@ fn test_duplicate_with_pruned_ancestor() { #[serial] fn test_boot_from_local_state() { solana_logger::setup_with_default("error,local_cluster=info"); - const FULL_SNAPSHOT_INTERVAL: Slot = 100; - const INCREMENTAL_SNAPSHOT_INTERVAL: Slot = 10; + const FULL_SNAPSHOT_INTERVAL: SnapshotInterval = + SnapshotInterval::Slots(NonZeroU64::new(100).unwrap()); + const INCREMENTAL_SNAPSHOT_INTERVAL: SnapshotInterval = + SnapshotInterval::Slots(NonZeroU64::new(10).unwrap()); let validator1_config = SnapshotValidatorConfig::new(FULL_SNAPSHOT_INTERVAL, INCREMENTAL_SNAPSHOT_INTERVAL, 2); @@ -5266,8 +5272,10 @@ fn test_boot_from_local_state() { #[serial] fn test_boot_from_local_state_missing_archive() { 
solana_logger::setup_with_default(RUST_LOG_FILTER); - const FULL_SNAPSHOT_INTERVAL: Slot = 20; - const INCREMENTAL_SNAPSHOT_INTERVAL: Slot = 10; + const FULL_SNAPSHOT_INTERVAL: SnapshotInterval = + SnapshotInterval::Slots(NonZeroU64::new(20).unwrap()); + const INCREMENTAL_SNAPSHOT_INTERVAL: SnapshotInterval = + SnapshotInterval::Slots(NonZeroU64::new(10).unwrap()); let validator_config = SnapshotValidatorConfig::new(FULL_SNAPSHOT_INTERVAL, INCREMENTAL_SNAPSHOT_INTERVAL, 7); diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 4a61a29bfad727..d429ecfffcd985 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -32,12 +32,14 @@ use { solana_poh::poh_recorder::PohRecorder, solana_quic_definitions::NotifyKeyUpdate, solana_runtime::{ - bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache, + bank::Bank, + bank_forks::BankForks, + commitment::BlockCommitmentCache, non_circulating_supply::calculate_non_circulating_supply, prioritization_fee_cache::PrioritizationFeeCache, snapshot_archive_info::SnapshotArchiveInfoGetter, - snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::SnapshotConfig, - snapshot_utils, + snapshot_config::SnapshotConfig, + snapshot_utils::{self, SnapshotInterval}, }, solana_send_transaction_service::{ send_transaction_service::{self, SendTransactionService}, @@ -280,14 +282,17 @@ impl RpcRequestMiddleware { } let snapshot_timeout = self.snapshot_config.as_ref().and_then(|config| { snapshot_type.map(|st| { - let slots = match st { - SnapshotKind::Full => config.full_snapshot_archive_interval_slots, - SnapshotKind::Incremental => config.incremental_snapshot_archive_interval_slots, + let interval = match st { + SnapshotKind::Full => config.full_snapshot_archive_interval, + SnapshotKind::Incremental => config.incremental_snapshot_archive_interval, }; - let computed = if slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { - Duration::ZERO - } else { - 
Duration::from_millis(slots.saturating_mul(solana_clock::DEFAULT_MS_PER_SLOT)) + let computed = match interval { + SnapshotInterval::Disabled => Duration::ZERO, + SnapshotInterval::Slots(slots) => Duration::from_millis( + slots + .get() + .saturating_mul(solana_clock::DEFAULT_MS_PER_SLOT), + ), }; let fallback = match st { SnapshotKind::Full => FALLBACK_FULL_SNAPSHOT_TIMEOUT_SECS, diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 29cd48b79c823c..2e0b7ccdd348ca 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -856,7 +856,7 @@ mod test { super::*, crate::{ bank::epoch_accounts_hash_utils, genesis_utils::create_genesis_config, - snapshot_config::SnapshotConfig, + snapshot_config::SnapshotConfig, snapshot_utils::SnapshotInterval, }, crossbeam_channel::unbounded, solana_account::AccountSharedData, @@ -864,6 +864,7 @@ mod test { solana_epoch_schedule::EpochSchedule, solana_hash::Hash, solana_pubkey::Pubkey, + std::num::NonZeroU64, }; #[test] @@ -910,8 +911,12 @@ mod test { const INCREMENTAL_SNAPSHOT_INTERVAL: Slot = 30; let snapshot_config = SnapshotConfig { - full_snapshot_archive_interval_slots: FULL_SNAPSHOT_INTERVAL, - incremental_snapshot_archive_interval_slots: INCREMENTAL_SNAPSHOT_INTERVAL, + full_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(FULL_SNAPSHOT_INTERVAL).unwrap(), + ), + incremental_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(INCREMENTAL_SNAPSHOT_INTERVAL).unwrap(), + ), ..SnapshotConfig::default() }; diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index d13b563cb6aba7..f3d45c45f4edde 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -53,10 +53,6 @@ use { }, }; -pub const DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = 50_000; -pub const DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = 100; -pub 
const DISABLED_SNAPSHOT_ARCHIVE_INTERVAL: Slot = Slot::MAX; - pub fn serialize_status_cache( slot_deltas: &[BankSlotDelta], status_cache_path: &Path, diff --git a/runtime/src/snapshot_config.rs b/runtime/src/snapshot_config.rs index 04a8c18e064848..f4eac45391644c 100644 --- a/runtime/src/snapshot_config.rs +++ b/runtime/src/snapshot_config.rs @@ -1,9 +1,5 @@ use { - crate::{ - snapshot_bank_utils, - snapshot_utils::{self, ArchiveFormat, SnapshotVersion, ZstdConfig}, - }, - solana_clock::Slot, + crate::snapshot_utils::{self, ArchiveFormat, SnapshotInterval, SnapshotVersion, ZstdConfig}, std::{num::NonZeroUsize, path::PathBuf}, }; @@ -14,10 +10,10 @@ pub struct SnapshotConfig { pub usage: SnapshotUsage, /// Generate a new full snapshot archive every this many slots - pub full_snapshot_archive_interval_slots: Slot, + pub full_snapshot_archive_interval: SnapshotInterval, /// Generate a new incremental snapshot archive every this many slots - pub incremental_snapshot_archive_interval_slots: Slot, + pub incremental_snapshot_archive_interval: SnapshotInterval, /// Path to the directory where full snapshot archives are stored pub full_snapshot_archives_dir: PathBuf, @@ -49,10 +45,12 @@ impl Default for SnapshotConfig { fn default() -> Self { Self { usage: SnapshotUsage::LoadAndGenerate, - full_snapshot_archive_interval_slots: - snapshot_bank_utils::DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - incremental_snapshot_archive_interval_slots: - snapshot_bank_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + full_snapshot_archive_interval: SnapshotInterval::Slots( + snapshot_utils::DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + ), + incremental_snapshot_archive_interval: SnapshotInterval::Slots( + snapshot_utils::DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + ), full_snapshot_archives_dir: PathBuf::default(), incremental_snapshot_archives_dir: PathBuf::default(), bank_snapshots_dir: PathBuf::default(), @@ -74,6 +72,8 @@ impl SnapshotConfig { pub fn 
new_load_only() -> Self { Self { usage: SnapshotUsage::LoadOnly, + full_snapshot_archive_interval: SnapshotInterval::Disabled, + incremental_snapshot_archive_interval: SnapshotInterval::Disabled, ..Self::default() } } @@ -83,6 +83,8 @@ impl SnapshotConfig { pub fn new_disabled() -> Self { Self { usage: SnapshotUsage::Disabled, + full_snapshot_archive_interval: SnapshotInterval::Disabled, + incremental_snapshot_archive_interval: SnapshotInterval::Disabled, ..Self::default() } } diff --git a/runtime/src/snapshot_controller.rs b/runtime/src/snapshot_controller.rs index bf388fa13e7e40..5e98995523d7d5 100644 --- a/runtime/src/snapshot_controller.rs +++ b/runtime/src/snapshot_controller.rs @@ -6,6 +6,7 @@ use { bank::{epoch_accounts_hash_utils, Bank, SquashTiming}, bank_forks::SetRootError, snapshot_config::SnapshotConfig, + snapshot_utils::SnapshotInterval, }, log::*, solana_clock::Slot, @@ -20,8 +21,8 @@ use { }; struct SnapshotGenerationIntervals { - full_snapshot_interval: Slot, - incremental_snapshot_interval: Slot, + full_snapshot_interval: SnapshotInterval, + incremental_snapshot_interval: SnapshotInterval, } pub struct SnapshotController { @@ -80,11 +81,26 @@ impl SnapshotController { }) = self.snapshot_generation_intervals() { if let Some((bank, request_kind)) = banks.iter().find_map(|bank| { + let should_request_full_snapshot = + if let SnapshotInterval::Slots(snapshot_interval) = full_snapshot_interval { + bank.block_height() % snapshot_interval == 0 + } else { + false + }; + let should_request_incremental_snapshot = + if let SnapshotInterval::Slots(snapshot_interval) = + incremental_snapshot_interval + { + bank.block_height() % snapshot_interval == 0 + } else { + false + }; + if bank.slot() <= self.latest_abs_request_slot() { None - } else if bank.block_height() % full_snapshot_interval == 0 { + } else if should_request_full_snapshot { Some((bank, SnapshotRequestKind::FullSnapshot)) - } else if bank.block_height() % incremental_snapshot_interval == 0 { + } 
else if should_request_incremental_snapshot { Some((bank, SnapshotRequestKind::IncrementalSnapshot)) } else { None @@ -132,10 +148,10 @@ impl SnapshotController { self.snapshot_config .should_generate_snapshots() .then_some(SnapshotGenerationIntervals { - full_snapshot_interval: self.snapshot_config.full_snapshot_archive_interval_slots, + full_snapshot_interval: self.snapshot_config.full_snapshot_archive_interval, incremental_snapshot_interval: self .snapshot_config - .incremental_snapshot_archive_interval_slots, + .incremental_snapshot_archive_interval, }) } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 68a1a71fb78359..f72edc195bc1d6 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -39,7 +39,7 @@ use { fmt, fs, io::{BufReader, BufWriter, Error as IoError, Read, Result as IoResult, Seek, Write}, mem, - num::NonZeroUsize, + num::{NonZeroU64, NonZeroUsize}, ops::RangeInclusive, path::{Path, PathBuf}, process::ExitStatus, @@ -58,8 +58,9 @@ use { }; mod archive_format; +mod snapshot_interval; pub mod snapshot_storage_rebuilder; -pub use archive_format::*; +pub use {archive_format::*, snapshot_interval::SnapshotInterval}; pub const SNAPSHOT_STATUS_CACHE_FILENAME: &str = "status_cache"; pub const SNAPSHOT_VERSION_FILENAME: &str = "version"; @@ -77,6 +78,10 @@ pub const BANK_SNAPSHOT_PRE_FILENAME_EXTENSION: &str = "pre"; // - Safe because the values are fixed, known non-zero constants // - Necessary in order to have a plain NonZeroUsize as the constant, NonZeroUsize // returns an Option and we can't .unwrap() at compile time +pub const DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: NonZeroU64 = + NonZeroU64::new(50_000).unwrap(); +pub const DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: NonZeroU64 = + NonZeroU64::new(100).unwrap(); pub const DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN: NonZeroUsize = NonZeroUsize::new(2).unwrap(); pub const DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN: 
NonZeroUsize = diff --git a/runtime/src/snapshot_utils/snapshot_interval.rs b/runtime/src/snapshot_utils/snapshot_interval.rs new file mode 100644 index 00000000000000..72f4f632f5b30c --- /dev/null +++ b/runtime/src/snapshot_utils/snapshot_interval.rs @@ -0,0 +1,10 @@ +use std::num::NonZeroU64; + +/// The interval in between taking snapshots +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum SnapshotInterval { + /// Snapshots are disabled + Disabled, + /// Snapshots are taken every this many slots + Slots(NonZeroU64), +} diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 9cd6fb499eaa58..28840597f074a7 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -49,6 +49,7 @@ use { genesis_utils::{self, create_genesis_config_with_leader_ex_no_features}, runtime_config::RuntimeConfig, snapshot_config::SnapshotConfig, + snapshot_utils::SnapshotInterval, }, solana_sdk_ids::address_lookup_table, solana_signer::Signer, @@ -63,6 +64,7 @@ use { fs::{self, remove_dir_all, File}, io::Read, net::{IpAddr, Ipv4Addr, SocketAddr}, + num::NonZeroU64, path::{Path, PathBuf}, str::FromStr, sync::{Arc, RwLock}, @@ -1086,8 +1088,10 @@ impl TestValidator { ], run_verification: false, // Skip PoH verification of ledger on startup for speed snapshot_config: SnapshotConfig { - full_snapshot_archive_interval_slots: 100, - incremental_snapshot_archive_interval_slots: Slot::MAX, + full_snapshot_archive_interval: SnapshotInterval::Slots( + NonZeroU64::new(100).unwrap(), + ), + incremental_snapshot_archive_interval: SnapshotInterval::Disabled, bank_snapshots_dir: ledger_path.join("snapshot"), full_snapshot_archives_dir: ledger_path.to_path_buf(), incremental_snapshot_archives_dir: ledger_path.to_path_buf(), diff --git a/validator/src/cli.rs b/validator/src/cli.rs index bc15ec493af6a1..7d261c26d43f1d 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -24,16 +24,11 @@ use { solana_rayon_threadlimit::get_thread_count, 
solana_rpc::{rpc::MAX_REQUEST_BODY_SIZE, rpc_pubsub_service::PubSubConfig}, solana_rpc_client_api::request::{DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_MULTIPLE_ACCOUNTS}, - solana_runtime::{ - snapshot_bank_utils::{ - DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - }, - snapshot_utils::{ - SnapshotVersion, DEFAULT_ARCHIVE_COMPRESSION, - DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, - DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, - }, + solana_runtime::snapshot_utils::{ + SnapshotVersion, DEFAULT_ARCHIVE_COMPRESSION, DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, + DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, }, solana_send_transaction_service::send_transaction_service::{self}, solana_streamer::quic::{ @@ -307,9 +302,12 @@ impl DefaultArgs { DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(), snapshot_packager_niceness_adjustment: "0".to_string(), full_snapshot_archive_interval_slots: DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS + .get() .to_string(), incremental_snapshot_archive_interval_slots: - DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS.to_string(), + DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS + .get() + .to_string(), min_snapshot_download_speed: DEFAULT_MIN_SNAPSHOT_DOWNLOAD_SPEED.to_string(), max_snapshot_download_abort: MAX_SNAPSHOT_DOWNLOAD_ABORT.to_string(), snapshot_archive_format: DEFAULT_ARCHIVE_COMPRESSION.to_string(), diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 22fea95c5a96f2..c9c4cd224fe957 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -60,9 +60,8 @@ use { }, solana_runtime::{ runtime_config::RuntimeConfig, - snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::{SnapshotConfig, SnapshotUsage}, - snapshot_utils::{self, 
ArchiveFormat, SnapshotVersion}, + snapshot_utils::{self, ArchiveFormat, SnapshotInterval, SnapshotVersion}, }, solana_send_transaction_service::send_transaction_service, solana_signer::Signer, @@ -76,7 +75,7 @@ use { collections::HashSet, fs::{self, File}, net::{IpAddr, Ipv4Addr, SocketAddr}, - num::NonZeroUsize, + num::{NonZeroU64, NonZeroUsize}, path::{Path, PathBuf}, process::exit, str::FromStr, @@ -906,35 +905,23 @@ pub fn execute( .transpose()? .unwrap_or(SnapshotVersion::default()); - let (full_snapshot_archive_interval_slots, incremental_snapshot_archive_interval_slots) = + let (full_snapshot_archive_interval, incremental_snapshot_archive_interval) = if matches.is_present("no_snapshots") { // snapshots are disabled - ( - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - ) + (SnapshotInterval::Disabled, SnapshotInterval::Disabled) } else { match ( !matches.is_present("no_incremental_snapshots"), - value_t_or_exit!(matches, "snapshot_interval_slots", u64), + value_t_or_exit!(matches, "snapshot_interval_slots", NonZeroU64), ) { - (_, 0) => { - // snapshots are disabled - warn!( - "Snapshot generation was disabled with `--snapshot-interval-slots 0`, \ - which is now deprecated. 
Use `--no-snapshots` instead.", - ); - ( - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, - ) - } (true, incremental_snapshot_interval_slots) => { // incremental snapshots are enabled // use --snapshot-interval-slots for the incremental snapshot interval + let full_snapshot_interval_slots = + value_t_or_exit!(matches, "full_snapshot_interval_slots", NonZeroU64); ( - value_t_or_exit!(matches, "full_snapshot_interval_slots", u64), - incremental_snapshot_interval_slots, + SnapshotInterval::Slots(full_snapshot_interval_slots), + SnapshotInterval::Slots(incremental_snapshot_interval_slots), ) } (false, full_snapshot_interval_slots) => { @@ -943,27 +930,29 @@ pub fn execute( // also warn if --full-snapshot-interval-slots was specified if matches.occurrences_of("full_snapshot_interval_slots") > 0 { warn!( - "Incremental snapshots are disabled, yet --full-snapshot-interval-slots was specified! \ - Note that --full-snapshot-interval-slots is *ignored* when incremental snapshots are disabled. \ + "Incremental snapshots are disabled, yet \ + --full-snapshot-interval-slots was specified! \ + Note that --full-snapshot-interval-slots is *ignored* \ + when incremental snapshots are disabled. 
\ Use --snapshot-interval-slots instead.", ); } ( - full_snapshot_interval_slots, - DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, + SnapshotInterval::Slots(full_snapshot_interval_slots), + SnapshotInterval::Disabled, ) } } }; validator_config.snapshot_config = SnapshotConfig { - usage: if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { + usage: if full_snapshot_archive_interval == SnapshotInterval::Disabled { SnapshotUsage::LoadOnly } else { SnapshotUsage::LoadAndGenerate }, - full_snapshot_archive_interval_slots, - incremental_snapshot_archive_interval_slots, + full_snapshot_archive_interval, + incremental_snapshot_archive_interval, bank_snapshots_dir, full_snapshot_archives_dir: full_snapshot_archives_dir.clone(), incremental_snapshot_archives_dir: incremental_snapshot_archives_dir.clone(), @@ -976,28 +965,27 @@ pub fn execute( info!( "Snapshot configuration: full snapshot interval: {}, incremental snapshot interval: {}", - if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { - "disabled".to_string() - } else { - format!("{full_snapshot_archive_interval_slots} slots") + match full_snapshot_archive_interval { + SnapshotInterval::Disabled => "disabled".to_string(), + SnapshotInterval::Slots(interval) => format!("{interval} slots"), }, - if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { - "disabled".to_string() - } else { - format!("{incremental_snapshot_archive_interval_slots} slots") + match incremental_snapshot_archive_interval { + SnapshotInterval::Disabled => "disabled".to_string(), + SnapshotInterval::Slots(interval) => format!("{interval} slots"), }, ); // It is unlikely that a full snapshot interval greater than an epoch is a good idea. // Minimally we should warn the user in case this was a mistake. 
- if full_snapshot_archive_interval_slots > DEFAULT_SLOTS_PER_EPOCH - && full_snapshot_archive_interval_slots != DISABLED_SNAPSHOT_ARCHIVE_INTERVAL - { - warn!( - "The full snapshot interval is excessively large: {}! This will negatively \ - impact the background cleanup tasks in accounts-db. Consider a smaller value.", - full_snapshot_archive_interval_slots, - ); + if let SnapshotInterval::Slots(full_snapshot_interval_slots) = full_snapshot_archive_interval { + let full_snapshot_interval_slots = full_snapshot_interval_slots.get(); + if full_snapshot_interval_slots > DEFAULT_SLOTS_PER_EPOCH { + warn!( + "The full snapshot interval is excessively large: {}! This will negatively \ + impact the background cleanup tasks in accounts-db. Consider a smaller value.", + full_snapshot_interval_slots, + ); + } } if !is_snapshot_config_valid(&validator_config.snapshot_config) { From b9ce16dbab6a39b4cd166ddd86fd068bd827c8fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 19:33:11 +0800 Subject: [PATCH 029/124] build(deps): bump libc from 0.2.173 to 0.2.174 (#6636) * build(deps): bump libc from 0.2.173 to 0.2.174 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.173 to 0.2.174. - [Release notes](https://github.com/rust-lang/libc/releases) - [Changelog](https://github.com/rust-lang/libc/blob/0.2.174/CHANGELOG.md) - [Commits](https://github.com/rust-lang/libc/compare/0.2.173...0.2.174) --- updated-dependencies: - dependency-name: libc dependency-version: 0.2.174 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9663dbab7b59e2..4e307021ba17d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4099,9 +4099,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.173" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libloading" diff --git a/Cargo.toml b/Cargo.toml index 8e2d7d4b920f52..0efbc92260304a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ jsonrpc-http-server = "18.0.0" jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" -libc = "0.2.173" +libc = "0.2.174" libloading = "0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d7c2330efde54b..19e95f240787c2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3178,9 +3178,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.173" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libloading" diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index ed799bb2c49ed6..9f4c87f33cd949 100644 --- a/svm/examples/Cargo.lock +++ 
b/svm/examples/Cargo.lock @@ -3105,9 +3105,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.173" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libloading" From d52b7f2dd8fe524c610f39afe3f61904f48c6cba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 19:33:21 +0800 Subject: [PATCH 030/124] build(deps): bump rustls from 0.23.27 to 0.23.28 (#6635) * build(deps): bump rustls from 0.23.27 to 0.23.28 Bumps [rustls](https://github.com/rustls/rustls) from 0.23.27 to 0.23.28. - [Release notes](https://github.com/rustls/rustls/releases) - [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustls/rustls/compare/v/0.23.27...v/0.23.28) --- updated-dependencies: - dependency-name: rustls dependency-version: 0.23.28 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 30 +++++++++++++++--------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 28 ++++++++++++++-------------- svm/examples/Cargo.lock | 28 ++++++++++++++-------------- 4 files changed, 44 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e307021ba17d2..bb30193eeac4e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3488,7 +3488,7 @@ dependencies = [ "http 1.1.0", "hyper 1.6.0", "hyper-util", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -5358,7 +5358,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.27", + "rustls 0.23.28", "socket2", "thiserror 2.0.12", "tokio", @@ -5379,7 +5379,7 @@ dependencies = [ "rand 0.9.0", "ring", "rustc-hash 2.0.0", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "rustls-platform-verifier", "slab", @@ -5768,7 +5768,7 @@ dependencies = [ "percent-encoding 2.3.1", "pin-project-lite", "quinn", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "serde", "serde_json", @@ -5941,9 +5941,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "once_cell", "ring", @@ -5994,7 +5994,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.103.2", @@ -7814,7 +7814,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.27", + "rustls 0.23.28", "serde", "serde_bytes", "serde_derive", @@ -9660,7 +9660,7 @@ dependencies = [ "log", 
"quinn", "quinn-proto", - "rustls 0.23.27", + "rustls 0.23.28", "solana-connection-cache", "solana-keypair", "solana-logger", @@ -10697,7 +10697,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.27", + "rustls 0.23.28", "smallvec", "socket2", "solana-keypair", @@ -11066,7 +11066,7 @@ dependencies = [ name = "solana-tls-utils" version = "3.0.0" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "solana-keypair", "solana-pubkey", "solana-signer", @@ -11193,7 +11193,7 @@ dependencies = [ "log", "lru", "quinn", - "rustls 0.23.27", + "rustls 0.23.28", "solana-cli-config", "solana-clock", "solana-commitment-config", @@ -11417,7 +11417,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.27", + "rustls 0.23.28", "solana-clock", "solana-cluster-type", "solana-entry", @@ -11584,7 +11584,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.27", + "rustls 0.23.28", "signal-hook", "smallvec", "socket2", @@ -12885,7 +12885,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 0efbc92260304a..574dfcbf538704 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ reqwest = { version = "0.12.20", default-features = false } reqwest-middleware = "0.4.2" rolling-file = "0.2.0" rpassword = "7.4" -rustls = { version = "0.23.27", features = ["std"], default-features = false } +rustls = { version = "0.23.28", features = ["std"], default-features = false } scopeguard = "1.2.0" semver = "1.0.26" seqlock = "0.2.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 19e95f240787c2..f7137f38d704f9 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2584,7 +2584,7 @@ dependencies = [ "http 1.2.0", "hyper 1.6.0", "hyper-util", - "rustls 0.23.27", + 
"rustls 0.23.28", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -4348,7 +4348,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.27", + "rustls 0.23.28", "socket2", "thiserror 2.0.12", "tokio", @@ -4369,7 +4369,7 @@ dependencies = [ "rand 0.9.0", "ring", "rustc-hash 2.0.0", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "rustls-platform-verifier", "slab", @@ -4687,7 +4687,7 @@ dependencies = [ "percent-encoding 2.3.1", "pin-project-lite", "quinn", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "serde", "serde_json", @@ -4851,9 +4851,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "once_cell", "ring", @@ -4904,7 +4904,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.103.2", @@ -6065,7 +6065,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.27", + "rustls 0.23.28", "serde", "serde_bytes", "serde_derive", @@ -7476,7 +7476,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.27", + "rustls 0.23.28", "solana-connection-cache", "solana-keypair", "solana-measure", @@ -9050,7 +9050,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.27", + "rustls 0.23.28", "smallvec", "socket2", "solana-keypair", @@ -9357,7 +9357,7 @@ dependencies = [ name = "solana-tls-utils" version = "3.0.0" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "solana-keypair", "solana-pubkey", "solana-signer", @@ -9404,7 +9404,7 @@ dependencies = [ "log", "lru", "quinn", - "rustls 0.23.27", + "rustls 0.23.28", "solana-clock", "solana-connection-cache", 
"solana-keypair", @@ -9570,7 +9570,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.27", + "rustls 0.23.28", "solana-clock", "solana-cluster-type", "solana-entry", @@ -10788,7 +10788,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "tokio", ] diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 9f4c87f33cd949..3c06dfbd3ba248 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -2448,7 +2448,7 @@ dependencies = [ "http 1.2.0", "hyper 1.6.0", "hyper-util", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -4206,7 +4206,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.27", + "rustls 0.23.28", "socket2", "thiserror 2.0.12", "tokio", @@ -4227,7 +4227,7 @@ dependencies = [ "rand 0.9.0", "ring", "rustc-hash 2.1.0", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "rustls-platform-verifier", "slab", @@ -4535,7 +4535,7 @@ dependencies = [ "percent-encoding 2.3.1", "pin-project-lite", "quinn", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "serde", "serde_json", @@ -4699,9 +4699,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "once_cell", "ring", @@ -4752,7 +4752,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.103.2", @@ -5912,7 +5912,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.27", + "rustls 0.23.28", 
"serde", "serde_bytes", "serde_derive", @@ -7288,7 +7288,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.27", + "rustls 0.23.28", "solana-connection-cache", "solana-keypair", "solana-measure", @@ -8115,7 +8115,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.27", + "rustls 0.23.28", "smallvec", "socket2", "solana-keypair", @@ -8457,7 +8457,7 @@ dependencies = [ name = "solana-tls-utils" version = "3.0.0" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "solana-keypair", "solana-pubkey", "solana-signer", @@ -8504,7 +8504,7 @@ dependencies = [ "log", "lru", "quinn", - "rustls 0.23.27", + "rustls 0.23.28", "solana-clock", "solana-connection-cache", "solana-keypair", @@ -8670,7 +8670,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.27", + "rustls 0.23.28", "solana-clock", "solana-cluster-type", "solana-entry", @@ -9885,7 +9885,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "tokio", ] From d25252fa23c806e1de22025c66d4fcb4d3004679 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Wed, 18 Jun 2025 20:34:02 +0900 Subject: [PATCH 031/124] Apply minor code reformatting to cleaner_main_loop (#6470) --- unified-scheduler-pool/src/lib.rs | 43 ++++++++++++++++--------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 85071cf3aded59..408a1e11bbef0e 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -431,16 +431,33 @@ where let banking_stage_status = scheduler_pool.banking_stage_status(); + if matches!(banking_stage_status, Some(BankingStageStatus::Inactive)) { + let Ok(inner) = scheduler_pool.block_production_scheduler_inner.lock() else { + break; + }; + + if let Some(pooled) = 
inner.peek_pooled() { + { + pooled.discard_buffer(); + // Prevent replay stage's OpenSubchannel from winning the race by + // holding the inner lock for the duration of discard message sending + // just above. The message (internally SubchanneledPayload::Reset) + // must be sent only during gaps of subchannels of the new task + // channel. + sleepless_testing::at(CheckPoint::DiscardRequested); + drop(inner); + } + } + } + let trashed_inner_count = { - let Ok(mut trashed_scheduler_inners) = - scheduler_pool.trashed_scheduler_inners.lock() + let Ok(mut trashed_inners) = scheduler_pool.trashed_scheduler_inners.lock() else { break; }; - let trashed_inners: Vec<_> = mem::take(&mut *trashed_scheduler_inners); - drop(trashed_scheduler_inners); - let trashed_inner_count = trashed_inners.len(); + let trashed_inners: Vec<_> = mem::take(&mut *trashed_inners); + // drop all the trashded schedulers outside the lock guard drop(trashed_inners); trashed_inner_count }; @@ -469,22 +486,6 @@ where count }; - if matches!(banking_stage_status, Some(BankingStageStatus::Inactive)) { - let inner = scheduler_pool - .block_production_scheduler_inner - .lock() - .unwrap(); - if let Some(pooled) = inner.peek_pooled() { - pooled.discard_buffer(); - // Prevent replay stage's OpenSubchannel from winning the race by holding - // the inner lock for the duration of discard message sending just above. - // The message (internally SubchanneledPayload::Reset) must be sent only - // during gaps of subchannels of the new task channel. 
- sleepless_testing::at(CheckPoint::DiscardRequested); - drop(inner); - } - } - info!( "Scheduler pool cleaner: dropped {} idle inners, {} trashed inners, triggered {} timeout listeners", idle_inner_count, trashed_inner_count, triggered_timeout_listener_count, From cf5b94d61778610558b99a19d42b7c604343a152 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 18 Jun 2025 19:54:13 +0800 Subject: [PATCH 032/124] agave-validator: add args tests for run (part 1) (#5792) * extract identity to args * extract logfile to args * fmt * remove solana-sdk * identity -> identity_keypair * get_run_command_matches -> create_app_and_get_matches * combine identity tests * combine log tests * wrap verify_args_struct_by_command * remove unnecessary key clone * improve test_run_command_with_identity_setup * fmt --- validator/src/commands/run/args.rs | 172 +++++++++++++++++++++++++- validator/src/commands/run/execute.rs | 31 ++--- 2 files changed, 180 insertions(+), 23 deletions(-) diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index da86b24d0ddd4d..ff00da32eb8fe5 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -1,8 +1,12 @@ use { - crate::cli::{hash_validator, port_range_validator, port_validator, DefaultArgs}, - clap::{App, Arg}, + crate::{ + cli::{hash_validator, port_range_validator, port_validator, DefaultArgs}, + commands::{FromClapArgMatches, Result}, + }, + clap::{App, Arg, ArgMatches}, solana_clap_utils::{ hidden_unless_forced, + input_parsers::keypair_of, input_validators::{ is_keypair_or_ask_keyword, is_non_zero, is_parsable, is_pow2, is_pubkey, is_pubkey_or_keypair, is_slot, is_within_range, validate_cpu_ranges, @@ -15,11 +19,13 @@ use { banking_trace::DirByteLimit, validator::{BlockProductionMethod, BlockVerificationMethod, TransactionStructure}, }, + solana_keypair::Keypair, solana_ledger::use_snapshot_archives_at_startup, solana_runtime::snapshot_utils::{SnapshotVersion, 
SUPPORTED_ARCHIVE_COMPRESSION}, solana_send_transaction_service::send_transaction_service::{ MAX_BATCH_SEND_RATE_MS, MAX_TRANSACTION_BATCH_SIZE, }, + solana_signer::Signer, solana_unified_scheduler_pool::DefaultSchedulerPool, std::str::FromStr, }; @@ -27,6 +33,32 @@ use { const EXCLUDE_KEY: &str = "account-index-exclude-key"; const INCLUDE_KEY: &str = "account-index-include-key"; +#[derive(Debug, PartialEq)] +pub struct RunArgs { + pub identity_keypair: Keypair, + pub logfile: String, +} + +impl FromClapArgMatches for RunArgs { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + let identity_keypair = + keypair_of(matches, "identity").ok_or(clap::Error::with_description( + "The --identity argument is required", + clap::ErrorKind::ArgumentNotFound, + ))?; + + let logfile = matches + .value_of("logfile") + .map(|s| s.into()) + .unwrap_or_else(|| format!("agave-validator-{}.log", identity_keypair.pubkey())); + + Ok(RunArgs { + identity_keypair, + logfile, + }) + } +} + pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, 'a> { app .arg( @@ -1665,3 +1697,139 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, ), ) } + +#[cfg(test)] +mod tests { + use super::*; + + impl Default for RunArgs { + fn default() -> Self { + let identity_keypair = Keypair::new(); + let logfile = format!("agave-validator-{}.log", identity_keypair.pubkey()); + + RunArgs { + identity_keypair, + logfile, + } + } + } + + impl Clone for RunArgs { + fn clone(&self) -> Self { + RunArgs { + identity_keypair: self.identity_keypair.insecure_clone(), + logfile: self.logfile.clone(), + } + } + } + + fn verify_args_struct_by_command( + default_args: &DefaultArgs, + args: Vec<&str>, + expected_args: RunArgs, + ) { + crate::commands::tests::verify_args_struct_by_command::( + add_args(App::new("run_command"), default_args), + [&["run_command"], &args[..]].concat(), + expected_args, + ); + } + + #[test] + fn 
verify_args_struct_by_command_run_with_identity() { + let default_args = DefaultArgs::default(); + let default_run_args = RunArgs::default(); + + // generate a keypair + let tmp_dir = tempfile::tempdir().unwrap(); + let file = tmp_dir.path().join("id.json"); + let keypair = default_run_args.identity_keypair.insecure_clone(); + solana_keypair::write_keypair_file(&keypair, &file).unwrap(); + + let expected_args = RunArgs { + identity_keypair: keypair.insecure_clone(), + ..default_run_args + }; + + // short arg + { + verify_args_struct_by_command( + &default_args, + vec!["-i", file.to_str().unwrap()], + expected_args.clone(), + ); + } + + // long arg + { + verify_args_struct_by_command( + &default_args, + vec!["--identity", file.to_str().unwrap()], + expected_args.clone(), + ); + } + } + + fn verify_args_struct_by_command_run_with_identity_setup( + default_run_args: RunArgs, + args: Vec<&str>, + expected_args: RunArgs, + ) { + let default_args = DefaultArgs::default(); + + // generate a keypair + let tmp_dir = tempfile::tempdir().unwrap(); + let file = tmp_dir.path().join("id.json"); + let keypair = default_run_args.identity_keypair.insecure_clone(); + solana_keypair::write_keypair_file(&keypair, &file).unwrap(); + + let args = [&["--identity", file.to_str().unwrap()], &args[..]].concat(); + verify_args_struct_by_command(&default_args, args, expected_args); + } + + #[test] + fn verify_args_struct_by_command_run_with_log() { + let default_run_args = RunArgs::default(); + + // default + { + let expected_args = RunArgs { + logfile: "agave-validator-".to_string() + + &default_run_args.identity_keypair.pubkey().to_string() + + ".log", + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args.clone(), + vec![], + expected_args, + ); + } + + // short arg + { + let expected_args = RunArgs { + logfile: "-".to_string(), + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + 
default_run_args.clone(), + vec!["-o", "-"], + expected_args, + ); + } + + // long arg + { + let expected_args = RunArgs { + logfile: "custom_log.log".to_string(), + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args.clone(), + vec!["--log", "custom_log.log"], + expected_args, + ); + } + } +} diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index c9c4cd224fe957..8a1e1eab9455c3 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -3,6 +3,7 @@ use { admin_rpc_service::{self, load_staked_nodes_overrides, StakedNodesOverrides}, bootstrap, cli::{self}, + commands::{run::args::RunArgs, FromClapArgMatches}, ledger_lockfile, lock_ledger, }, clap::{crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, ArgMatches}, @@ -99,6 +100,8 @@ pub fn execute( ledger_path: &Path, operation: Operation, ) -> Result<(), Box> { + let run_args = RunArgs::from_clap_arg_match(matches)?; + let cli::thread_args::NumThreadConfig { accounts_db_clean_threads, accounts_db_foreground_threads, @@ -115,26 +118,14 @@ pub fn execute( tvu_sigverify_threads, } = cli::thread_args::parse_num_threads_args(matches); - let identity_keypair = keypair_of(matches, "identity").unwrap_or_else(|| { - clap::Error::with_description( - "The --identity argument is required", - clap::ErrorKind::ArgumentNotFound, - ) - .exit(); - }); + let identity_keypair = Arc::new(run_args.identity_keypair); - let logfile = { - let logfile = matches - .value_of("logfile") - .map(|s| s.into()) - .unwrap_or_else(|| format!("agave-validator-{}.log", identity_keypair.pubkey())); - - if logfile == "-" { - None - } else { - println!("log file: {logfile}"); - Some(logfile) - } + let logfile = run_args.logfile; + let logfile = if logfile == "-" { + None + } else { + println!("log file: {logfile}"); + Some(logfile) }; let use_progress_bar = logfile.is_none(); let _logger_thread = 
redirect_stderr_to_file(logfile); @@ -1211,8 +1202,6 @@ pub fn execute( snapshot_utils::remove_tmp_snapshot_archives(&full_snapshot_archives_dir); snapshot_utils::remove_tmp_snapshot_archives(&incremental_snapshot_archives_dir); - let identity_keypair = Arc::new(identity_keypair); - let should_check_duplicate_instance = true; if !cluster_entrypoints.is_empty() { bootstrap::rpc_bootstrap( From 0ac3649d32115b1a6174b70309ad3cb8b0dbac4d Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 18 Jun 2025 10:26:25 -0500 Subject: [PATCH 033/124] blockstore: Remove deprecated last_root function (#6634) --- ledger/src/blockstore.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index dacfbe120f3ca2..f8ae798b090945 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -4125,14 +4125,6 @@ impl Blockstore { self.max_root.load(Ordering::Relaxed) } - #[deprecated( - since = "1.18.0", - note = "Please use `solana_ledger::blockstore::Blockstore::max_root()` instead" - )] - pub fn last_root(&self) -> Slot { - self.max_root() - } - // find the first available slot in blockstore that has some data in it pub fn lowest_slot(&self) -> Slot { for (slot, meta) in self From e5772bd9ca5c3ad7bdceeacdada3280d839a4823 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Jun 2025 12:19:40 -0400 Subject: [PATCH 034/124] Removes outdated comments on snapshot NonZero constants (#6641) --- runtime/src/snapshot_utils.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index f72edc195bc1d6..22e102dff51276 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -74,10 +74,6 @@ const MAX_SNAPSHOT_VERSION_FILE_SIZE: u64 = 8; // byte const VERSION_STRING_V1_2_0: &str = "1.2.0"; pub const TMP_SNAPSHOT_ARCHIVE_PREFIX: &str = "tmp-snapshot-archive-"; pub const BANK_SNAPSHOT_PRE_FILENAME_EXTENSION: &str = "pre"; -// The following unsafes are -// 
- Safe because the values are fixed, known non-zero constants -// - Necessary in order to have a plain NonZeroUsize as the constant, NonZeroUsize -// returns an Option and we can't .unwrap() at compile time pub const DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: NonZeroU64 = NonZeroU64::new(50_000).unwrap(); pub const DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: NonZeroU64 = From 7a753dbeeb021f0588b6172eba3cdc15c5672c2c Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Wed, 18 Jun 2025 13:36:18 -0300 Subject: [PATCH 035/124] Bump platform tools to v1.49 (#6628) --- platform-tools-sdk/cargo-build-sbf/src/main.rs | 2 +- .../cargo-build-sbf/tests/crates/package-metadata/Cargo.toml | 2 +- .../cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml | 2 +- platform-tools-sdk/sbf/scripts/install.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/platform-tools-sdk/cargo-build-sbf/src/main.rs b/platform-tools-sdk/cargo-build-sbf/src/main.rs index 979bac43e362fa..ea833760f4ad3c 100644 --- a/platform-tools-sdk/cargo-build-sbf/src/main.rs +++ b/platform-tools-sdk/cargo-build-sbf/src/main.rs @@ -23,7 +23,7 @@ use { tar::Archive, }; -const DEFAULT_PLATFORM_TOOLS_VERSION: &str = "v1.48"; +const DEFAULT_PLATFORM_TOOLS_VERSION: &str = "v1.49"; #[derive(Debug)] pub struct Config<'a> { diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml index d3bfe3ab5518e7..0ecb9c1b27e9c8 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" publish = false [package.metadata.solana] -tools-version = "v1.48" +tools-version = "v1.49" program-id = "MyProgram1111111111111111111111111111111111" [dependencies] diff --git 
a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml index 755dd6e8856f36..5231ee9454fa58 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml @@ -27,4 +27,4 @@ check-cfg = [ [workspace] [workspace.metadata.solana] -tools-version = "v1.48" +tools-version = "v1.49" diff --git a/platform-tools-sdk/sbf/scripts/install.sh b/platform-tools-sdk/sbf/scripts/install.sh index f2dd65e21fa0fe..89b93be5f1b3b9 100755 --- a/platform-tools-sdk/sbf/scripts/install.sh +++ b/platform-tools-sdk/sbf/scripts/install.sh @@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install platform tools -version=v1.48 +version=v1.49 if [[ ! -e platform-tools-$version.md || ! -e platform-tools ]]; then ( set -e From 3e6f0e9f9ec38c258dbb70d12adb41eef4d77a20 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 18 Jun 2025 18:17:32 -0400 Subject: [PATCH 036/124] consensus: remove early return in OC loop to fix RPC notifications (#6645) --- core/src/cluster_info_vote_listener.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index dfa71139daeea0..ce2b0a29782e7f 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -560,7 +560,7 @@ impl ClusterInfoVoteListener { // Note gossip votes will always be processed because those should be unique // and we need to update the gossip-only stake in the `VoteTracker`. 
- return; + break; } is_new_vote = is_new; From edaa0c05d7e4ea9358f7168691158b316b48e72d Mon Sep 17 00:00:00 2001 From: Rory Harris Date: Wed, 18 Jun 2025 15:59:42 -0700 Subject: [PATCH 037/124] Removing Reset Accounts (#6627) --- accounts-db/src/accounts_db.rs | 45 +++----------------------- accounts-db/src/accounts_db/tests.rs | 23 ++++--------- accounts-db/src/ancient_append_vecs.rs | 6 +--- 3 files changed, 12 insertions(+), 62 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index dc2d96e7546e31..82678688265a67 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1267,16 +1267,11 @@ impl AccountStorageEntry { } /// returns # of accounts remaining in the storage - fn remove_accounts( - &self, - num_bytes: usize, - reset_accounts: bool, - num_accounts: usize, - ) -> usize { + fn remove_accounts(&self, num_bytes: usize, num_accounts: usize) -> usize { let mut count_and_status = self.count_and_status.lock_write(); let (mut count, mut status) = *count_and_status; - if count == num_accounts && status == AccountStorageStatus::Full && reset_accounts { + if count == num_accounts && status == AccountStorageStatus::Full { // this case arises when we remove the last account from the // storage, but we've learned from previous write attempts that // the storage is full @@ -2087,14 +2082,9 @@ impl AccountsDb { ) -> ReclaimResult { let mut measure = Measure::start("clean_old_root_reclaims"); - // Don't reset from clean, since the pubkeys in those stores may need to be unref'ed - // and those stores may be used for background hashing. 
- let reset_accounts = false; - let reclaim_result = self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), None, - reset_accounts, pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), MarkAccountsObsolete::No, @@ -2944,13 +2934,9 @@ impl AccountsDb { self.purge_keys_exact(pubkey_to_slot_set.iter()); pubkeys_removed_from_accounts_index.extend(pubkeys_removed_from_accounts_index2); - // Don't reset from clean, since the pubkeys in those stores may need to be unref'ed - // and those stores may be used for background hashing. - let reset_accounts = false; self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), None, - reset_accounts, &pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), MarkAccountsObsolete::No, @@ -3118,9 +3104,6 @@ impl AccountsDb { /// is the slot == `S`. This is true for instance when `handle_reclaims` is called /// from store or slot shrinking, as those should only touch the slot they are /// currently storing to or shrinking. - /// * `reset_accounts` - Reset the append_vec store when the store is dead (count==0) - /// From the clean and shrink paths it should be false since there may be an in-progress - /// hash operation and the stores may hold accounts that need to be unref'ed. /// * `pubkeys_removed_from_accounts_index` - These keys have already been removed from the /// accounts index and should not be unref'd. If they exist in the accounts index, /// they are NEW. 
@@ -3140,7 +3123,6 @@ impl AccountsDb { &'a self, reclaims: Option, expected_single_dead_slot: Option, - reset_accounts: bool, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, handle_reclaims: HandleReclaims<'a>, mark_accounts_obsolete: MarkAccountsObsolete, @@ -3153,7 +3135,6 @@ impl AccountsDb { let (dead_slots, reclaimed_offsets) = self.remove_dead_accounts( reclaims, expected_single_dead_slot, - reset_accounts, mark_accounts_obsolete, ); reclaim_result.1 = reclaimed_offsets; @@ -5398,7 +5379,6 @@ impl AccountsDb { self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), expected_dead_slot, - false, &pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(purge_stats), MarkAccountsObsolete::No, @@ -7234,7 +7214,6 @@ impl AccountsDb { &'a self, reclaims: I, expected_slot: Option, - reset_accounts: bool, mark_accounts_obsolete: MarkAccountsObsolete, ) -> (IntSet, SlotOffsets) where @@ -7276,7 +7255,7 @@ impl AccountsDb { ); if offsets.len() == store.count() { // all remaining alive accounts in the storage are being removed, so the entire storage/slot is dead - store.remove_accounts(store.alive_bytes(), reset_accounts, offsets.len()); + store.remove_accounts(store.alive_bytes(), offsets.len()); self.dirty_stores.insert(*slot, store); dead_slots.insert(*slot); } else { @@ -7290,7 +7269,7 @@ impl AccountsDb { .iter() .map(|len| store.accounts.calculate_stored_size(*len)) .sum(); - store.remove_accounts(dead_bytes, reset_accounts, offsets.len()); + store.remove_accounts(dead_bytes, offsets.len()); if let MarkAccountsObsolete::Yes(slot_marked_obsolete) = mark_accounts_obsolete @@ -7716,14 +7695,6 @@ impl AccountsDb { transactions: Option<&'a [&'a SanitizedTransaction]>, update_index_thread_selection: UpdateIndexThreadSelection, ) { - // This path comes from a store to a non-frozen slot. - // If a store is dead here, then a newer update for - // each pubkey in the store must exist in another - // store in the slot. 
Thus it is safe to reset the store and - // re-use it for a future store op. The pubkey ref counts should still - // hold just 1 ref from this slot. - let reset_accounts = true; - // We are storing accounts unfrozen accounts which // will always be stored in the cache let store_to = StoreTo::Cache; @@ -7734,7 +7705,6 @@ impl AccountsDb { self.store_accounts_custom( accounts, &store_to, - reset_accounts, transactions, reclaim, update_index_thread_selection, @@ -7747,14 +7717,9 @@ impl AccountsDb { accounts: impl StorableAccounts<'a>, storage: &Arc, ) -> StoreAccountsTiming { - // stores on a frozen slot should not reset - // the append vec so that hashing could happen on the store - // and accounts in the append_vec can be unrefed correctly - let reset_accounts = false; self.store_accounts_custom( accounts, &StoreTo::Storage(storage), - reset_accounts, None, StoreReclaims::Ignore, UpdateIndexThreadSelection::PoolWithThreshold, @@ -7766,7 +7731,6 @@ impl AccountsDb { &self, accounts: impl StorableAccounts<'a>, store_to: &StoreTo, - reset_accounts: bool, transactions: Option<&'a [&'a SanitizedTransaction]>, reclaim: StoreReclaims, update_index_thread_selection: UpdateIndexThreadSelection, @@ -7838,7 +7802,6 @@ impl AccountsDb { self.handle_reclaims( (!reclaims.is_empty()).then(|| reclaims.iter()), expected_single_dead_slot, - reset_accounts, &HashSet::default(), // this callsite does NOT process dead slots HandleReclaims::DoNotProcessDeadSlots, diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 9c3731c7ded572..70cc08d57bf03d 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -2378,7 +2378,7 @@ fn test_get_snapshot_storages_exclude_empty() { db.storage .get_slot_storage_entry(0) .unwrap() - .remove_accounts(0, true, 1); + .remove_accounts(0, 1); assert!(db.get_storages(..=after_slot).0.is_empty()); } @@ -2405,8 +2405,8 @@ define_accounts_db_test!( accounts.store_for_tests(0, 
&[(&pubkey, &account)]); accounts.add_root_and_flush_write_cache(0); let storage_entry = accounts.storage.get_slot_storage_entry(0).unwrap(); - storage_entry.remove_accounts(0, true, 1); - storage_entry.remove_accounts(0, true, 1); + storage_entry.remove_accounts(0, 1); + storage_entry.remove_accounts(0, 1); } ); @@ -3896,12 +3896,7 @@ define_accounts_db_test!(test_alive_bytes, |accounts_db| { assert_eq!(account_info.0, slot); let reclaims = [account_info]; num_obsolete_accounts += reclaims.len(); - accounts_db.remove_dead_accounts( - reclaims.iter(), - None, - true, - MarkAccountsObsolete::Yes(slot), - ); + accounts_db.remove_dead_accounts(reclaims.iter(), None, MarkAccountsObsolete::Yes(slot)); let after_size = storage0.alive_bytes(); if storage0.count() == 0 { // when `remove_dead_accounts` reaches 0 accounts, all bytes are marked as dead @@ -5070,7 +5065,7 @@ fn test_shrink_productive() { )); store.add_account(file_size as usize / 2); store.add_account(file_size as usize / 4); - store.remove_accounts(file_size as usize / 4, false, 1); + store.remove_accounts(file_size as usize / 4, 1); assert!(AccountsDb::is_shrinking_productive(&store)); store.add_account(file_size as usize / 2); @@ -6817,12 +6812,8 @@ fn populate_index(db: &AccountsDb, slots: Range) { }) } -pub(crate) fn remove_account_for_tests( - storage: &AccountStorageEntry, - num_bytes: usize, - reset_accounts: bool, -) { - storage.remove_accounts(num_bytes, reset_accounts, 1); +pub(crate) fn remove_account_for_tests(storage: &AccountStorageEntry, num_bytes: usize) { + storage.remove_accounts(num_bytes, 1); } pub(crate) fn create_storages_and_update_index_with_customized_account_size_per_slot( diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index ce7eee1a630ee6..7386373bdff6d8 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -2547,11 +2547,7 @@ pub mod tests { let alive = alives[slot as usize]; if !alive { // 
make this storage not alive - remove_account_for_tests( - storage, - storage.written_bytes() as usize, - false, - ); + remove_account_for_tests(storage, storage.written_bytes() as usize); } }); let alive_storages = storages From 75c3f67ccf4a73cb550ea0f564237b8ee613497e Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 19 Jun 2025 10:53:08 +0800 Subject: [PATCH 038/124] bump solana-program-entrypoint from 2.2.1 to 2.3.0 (#6643) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- .../cargo-build-sbf/tests/crates/fail/Cargo.toml | 2 +- .../cargo-build-sbf/tests/crates/noop/Cargo.toml | 2 +- .../cargo-build-sbf/tests/crates/package-metadata/Cargo.toml | 2 +- .../tests/crates/workspace-metadata/Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- programs/sbf/Cargo.toml | 4 ++-- svm/examples/Cargo.lock | 4 ++-- svm/examples/json-rpc/program/Cargo.toml | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb30193eeac4e6..45e17c410ae30a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9429,9 +9429,9 @@ dependencies = [ [[package]] name = "solana-program-entrypoint" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473ffe73c68d93e9f2aa726ad2985fe52760052709aaab188100a42c618060ec" +checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd" dependencies = [ "solana-account-info", "solana-msg", diff --git a/Cargo.toml b/Cargo.toml index 574dfcbf538704..5e984f504c6a01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -471,7 +471,7 @@ solana-poseidon = { path = "poseidon", version = "=3.0.0" } solana-precompile-error = "2.2.2" solana-presigner = "2.2.1" solana-program = { version = "2.3.0", default-features = false } -solana-program-entrypoint = "2.2.1" +solana-program-entrypoint = "2.3.0" solana-program-error = "2.2.2" solana-program-memory = "2.3.1" solana-program-option = "2.2.1" diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml 
b/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index 43be8bcdac4b0d..a038f50da83b19 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] solana-account-info = "=2.3.0" -solana-program-entrypoint = "=2.2.1" +solana-program-entrypoint = "=2.3.0" solana-program-error = "=2.2.2" solana-pubkey = "=2.4.0" diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index fe40eb10d8f2e1..9e05e35e6af7d8 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] solana-account-info = "=2.3.0" -solana-program-entrypoint = "=2.2.1" +solana-program-entrypoint = "=2.3.0" solana-program-error = "=2.2.2" solana-pubkey = "=2.4.0" diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml index 0ecb9c1b27e9c8..59c4088a1d5050 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml @@ -16,7 +16,7 @@ program-id = "MyProgram1111111111111111111111111111111111" [dependencies] solana-account-info = "=2.3.0" solana-package-metadata = "=2.2.1" -solana-program-entrypoint = "=2.2.1" +solana-program-entrypoint = "=2.3.0" solana-program-error = "=2.2.2" solana-pubkey = "=2.4.0" diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml index 5231ee9454fa58..c03078bf813f15 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml +++ 
b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] solana-account-info = "=2.3.0" -solana-program-entrypoint = "=2.2.1" +solana-program-entrypoint = "=2.3.0" solana-program-error = "=2.2.2" solana-pubkey = "=2.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f7137f38d704f9..ce6b8aa1e308f9 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7260,9 +7260,9 @@ dependencies = [ [[package]] name = "solana-program-entrypoint" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473ffe73c68d93e9f2aa726ad2985fe52760052709aaab188100a42c618060ec" +checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd" dependencies = [ "solana-account-info", "solana-msg", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 8800af592a2732..17f3bdfbe5b10f 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -136,7 +136,7 @@ solana-measure = { path = "../../measure", version = "=3.0.0" } solana-msg = "=2.2.1" solana-poseidon = { path = "../../poseidon/", version = "=3.0.0" } solana-program = "=2.2.1" -solana-program-entrypoint = "=2.2.1" +solana-program-entrypoint = "=2.3.0" solana-program-error = "=2.2.2" solana-program-memory = "=2.2.1" solana-program-runtime = { path = "../../program-runtime", version = "=3.0.0" } @@ -219,7 +219,7 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-message = "2.3.0" solana-program = { workspace = true } -solana-program-entrypoint = "2.2.1" +solana-program-entrypoint = "2.3.0" solana-program-runtime = { workspace = true } solana-pubkey = "2.4.0" solana-rent = "2.2.1" diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 3c06dfbd3ba248..5f3c3864851cb7 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -7072,9 +7072,9 @@ dependencies = [ [[package]] name = 
"solana-program-entrypoint" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473ffe73c68d93e9f2aa726ad2985fe52760052709aaab188100a42c618060ec" +checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd" dependencies = [ "solana-account-info", "solana-msg", diff --git a/svm/examples/json-rpc/program/Cargo.toml b/svm/examples/json-rpc/program/Cargo.toml index a46d473ee48924..469e603318dcff 100644 --- a/svm/examples/json-rpc/program/Cargo.toml +++ b/svm/examples/json-rpc/program/Cargo.toml @@ -11,7 +11,7 @@ frozen-abi = [] [dependencies] borsh = "0.9" solana-account-info = "2.3.0" -solana-program-entrypoint = "2.2.1" +solana-program-entrypoint = "2.3.0" solana-msg = "2.2.1" solana-pubkey = "2.4.0" From e3606ec9acb1483df4883c6c1145da4b6407cc22 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 19 Jun 2025 12:48:24 +0800 Subject: [PATCH 039/124] Simplify geyser notifier initialization in `Validator::new` (#6638) --- core/src/validator.rs | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 84eb7185dead34..9b8bb27023fb3f 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -744,25 +744,23 @@ impl Validator { .register_exit(Box::new(move || cancel_tpu_client_next.cancel())); } - let accounts_update_notifier = geyser_plugin_service - .as_ref() - .and_then(|geyser_plugin_service| geyser_plugin_service.get_accounts_update_notifier()); - - let transaction_notifier = geyser_plugin_service - .as_ref() - .and_then(|geyser_plugin_service| geyser_plugin_service.get_transaction_notifier()); - - let entry_notifier = geyser_plugin_service - .as_ref() - .and_then(|geyser_plugin_service| geyser_plugin_service.get_entry_notifier()); - - let block_metadata_notifier = geyser_plugin_service - .as_ref() - .and_then(|geyser_plugin_service| 
geyser_plugin_service.get_block_metadata_notifier()); - - let slot_status_notifier = geyser_plugin_service - .as_ref() - .and_then(|geyser_plugin_service| geyser_plugin_service.get_slot_status_notifier()); + let ( + accounts_update_notifier, + transaction_notifier, + entry_notifier, + block_metadata_notifier, + slot_status_notifier, + ) = if let Some(service) = &geyser_plugin_service { + ( + service.get_accounts_update_notifier(), + service.get_transaction_notifier(), + service.get_entry_notifier(), + service.get_block_metadata_notifier(), + service.get_slot_status_notifier(), + ) + } else { + (None, None, None, None, None) + }; info!( "Geyser plugin: accounts_update_notifier: {}, transaction_notifier: {}, \ From 61545a0db947e6477a31818182080afc064156b9 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Thu, 19 Jun 2025 08:43:43 -0500 Subject: [PATCH 040/124] Optimized Internal Index Search for Storable Accounts (#5484) * wip * opt: get_interal_index * clippy * format * fix build * clippy * special case single storage * pr * pr --- accounts-db/src/storable_accounts.rs | 117 ++++++++++++++++++++++++--- 1 file changed, 104 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index bbb5692488da17..b342dc0911986d 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -8,7 +8,10 @@ use { solana_account::{AccountSharedData, ReadableAccount}, solana_clock::{Epoch, Slot}, solana_pubkey::Pubkey, - std::sync::{Arc, RwLock}, + std::{ + cmp::Ordering, + sync::{Arc, RwLock}, + }, }; /// hold a ref to an account to store. 
The account could be represented in memory a few different ways @@ -216,7 +219,7 @@ pub struct StorableAccountsBySlot<'a> { /// cumulative offset of all account slices prior to this one /// starting_offsets[0] is the starting offset of slots_and_accounts[1] /// The starting offset of slots_and_accounts[0] is always 0 - starting_offsets: Vec, + starting_offsets_for_slots_accounts_slice: Vec, /// true if there is more than 1 slot represented in slots_and_accounts contains_multiple_slots: bool, /// total len of all accounts, across all slots_and_accounts @@ -248,30 +251,42 @@ impl<'a> StorableAccountsBySlot<'a> { Self { target_slot, slots_and_accounts, - starting_offsets, + starting_offsets_for_slots_accounts_slice: starting_offsets, contains_multiple_slots, len: cumulative_len, db, cached_storage: RwLock::default(), } } - /// given an overall index for all accounts in self: - /// return (slots_and_accounts index, index within those accounts) + + /// given an overall index for all accounts in self: return + /// (slots_and_accounts index, index within those accounts) + /// This implementation is optimized for performance by using binary search + /// on the starting_offsets based on the assumption that the + /// starting_offsets are always sorted. fn find_internal_index(&self, index: usize) -> (usize, usize) { - // search offsets for the accounts slice that contains 'index'. - // This could be a binary search. - for (offset_index, next_offset) in self.starting_offsets.iter().enumerate() { - if next_offset > &index { - // offset of prior entry + // special case for when there is only one slot - just return the first index without searching. + // This happens when we are just shrinking a single slot storage, which happens very often. 
+ if !self.contains_multiple_slots { + return (0, index); + } + let upper_bound = self + .starting_offsets_for_slots_accounts_slice + .binary_search_by(|offset| match offset.cmp(&index) { + Ordering::Equal => Ordering::Less, + ord => ord, + }); + match upper_bound { + Ok(offset_index) => unreachable!("we shouldn't reach here: {}", offset_index), + Err(offset_index) => { let prior_offset = if offset_index > 0 { - self.starting_offsets[offset_index.saturating_sub(1)] + self.starting_offsets_for_slots_accounts_slice[offset_index - 1] } else { 0 }; - return (offset_index, index - prior_offset); + (offset_index, index - prior_offset) } } - panic!("failed"); } } @@ -350,11 +365,40 @@ pub mod tests { accounts_hash::AccountHash, append_vec::{AccountMeta, StoredAccountMeta, StoredMeta}, }, + rand::Rng, solana_account::{accounts_equal, AccountSharedData, WritableAccount}, solana_hash::Hash, std::sync::Arc, }; + impl StorableAccountsBySlot<'_> { + /// given an overall index for all accounts in self: + /// return (slots_and_accounts index, index within those accounts) + /// This is the baseline unoptimized implementation. It is not used in the validator. It + /// is used for testing an optimized version - `find_internal_index`, in the actual implementation. + fn find_internal_index_loop(&self, index: usize) -> (usize, usize) { + // search offsets for the accounts slice that contains 'index'. + // This could be a binary search. + for (offset_index, next_offset) in self + .starting_offsets_for_slots_accounts_slice + .iter() + .enumerate() + { + if next_offset > &index { + // offset of prior entry + let prior_offset = if offset_index > 0 { + self.starting_offsets_for_slots_accounts_slice + [offset_index.saturating_sub(1)] + } else { + 0 + }; + return (offset_index, index - prior_offset); + } + } + panic!("failed"); + } + } + /// this is used in the test for generation of storages /// this is no longer used in the validator. /// It is very tricky to get these right. 
There are already tests for this. It is likely worth it to leave this here for a while until everything has settled. @@ -803,4 +847,51 @@ pub mod tests { } } } + + #[test] + fn test_find_internal_index() { + let db = AccountsDb::new_single_for_tests(); + let account_from_storage = AccountFromStorage::new(&StoredAccountMeta { + meta: &StoredMeta { + write_version_obsolete: 0, + pubkey: Pubkey::new_unique(), + data_len: 0, + }, + account_meta: &AccountMeta { + lamports: 0, + owner: Pubkey::new_unique(), + executable: false, + rent_epoch: 0, + }, + data: &[], + offset: 0, + stored_size: 0, + hash: &AccountHash(Hash::new_unique()), + }); + + let mut slot_accounts = Vec::new(); + let mut all_accounts = Vec::new(); + let mut total = 0; + let num_slots = 10_u64; + // generate accounts for 10 slots + // each slot has a random number of accounts, between 1 and 10 + for _slot in 0..num_slots { + // generate random accounts per slot + let n = rand::thread_rng().gen_range(1..10); + total += n; + let accounts = (0..n).map(|_| &account_from_storage).collect::>(); + all_accounts.push(accounts); + } + for slot in 0..num_slots { + slot_accounts.push((slot, &all_accounts[slot as usize][..])); + } + let storable_accounts = StorableAccountsBySlot::new(0, &slot_accounts[..], &db); + // check that the optimized version is correct by comparing it to the unoptimized version + for i in 0..total { + let (slot_index, account_index) = storable_accounts.find_internal_index_loop(i); + let (slot_index2, account_index2) = storable_accounts.find_internal_index(i); + assert_eq!(slot_index, slot_index2); + assert_eq!(account_index, account_index2); + } + } } From a621a434cc4df83c5ea230f35472e621597be3ce Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 19 Jun 2025 13:40:20 -0500 Subject: [PATCH 041/124] validator: Add CLI args to control TPU QUIC receive pools (#6633) The default value for these new args matches the preexisting behavior of creating num_cpus::get() threads per tokio runtime --- 
Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + streamer/Cargo.toml | 1 + streamer/src/nonblocking/quic.rs | 1 + streamer/src/nonblocking/testing_utilities.rs | 4 ++ streamer/src/quic.rs | 43 +++++++++--- svm/examples/Cargo.lock | 1 + validator/src/cli/thread_args.rs | 70 +++++++++++++++++++ validator/src/commands/run/execute.rs | 6 ++ 9 files changed, 120 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45e17c410ae30a..b31d2dca44c29d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10692,6 +10692,7 @@ dependencies = [ "libc", "log", "nix", + "num_cpus", "pem", "percentage", "quinn", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ce6b8aa1e308f9..f63678ae0d6b0f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -9045,6 +9045,7 @@ dependencies = [ "libc", "log", "nix", + "num_cpus", "pem", "percentage", "quinn", diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 761881e9816280..b3ad45df3bdbba 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -33,6 +33,7 @@ itertools = { workspace = true } libc = { workspace = true } log = { workspace = true } nix = { workspace = true, features = ["net"] } +num_cpus = { workspace = true } pem = { workspace = true } percentage = { workspace = true } quinn = { workspace = true } diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index cfc5468d601d8e..d3e2fb69b75a31 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -190,6 +190,7 @@ pub fn spawn_server_multi( wait_for_chunk_timeout, coalesce, coalesce_channel_size, + num_threads: _, } = quic_server_params; let concurrent_connections = max_staked_connections + max_unstaked_connections; let max_concurrent_connections = concurrent_connections + concurrent_connections / 4; diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index d16516e7179246..564718b8b9e347 100644 --- 
a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -27,12 +27,15 @@ use { solana_tls_utils::{new_dummy_x509_certificate, tls_client_config_builder}, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + num::NonZeroUsize, sync::{atomic::AtomicBool, Arc, RwLock}, time::{Duration, Instant}, }, tokio::{task::JoinHandle, time::sleep}, }; +pub(crate) const DEFAULT_NUM_SERVER_THREADS_FOR_TEST: NonZeroUsize = NonZeroUsize::new(8).unwrap(); + pub fn get_client_config(keypair: &Keypair) -> ClientConfig { let (cert, key) = new_dummy_x509_certificate(keypair); @@ -137,6 +140,7 @@ pub fn setup_quic_server_with_sockets( wait_for_chunk_timeout: DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, coalesce: DEFAULT_TPU_COALESCE, coalesce_channel_size, + num_threads: DEFAULT_NUM_SERVER_THREADS_FOR_TEST, }; let SpawnNonBlockingServerResult { endpoints: _, diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 22ccdc00ba2519..f607e328b47716 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -19,6 +19,7 @@ use { solana_tls_utils::{new_dummy_x509_certificate, tls_server_config_builder}, std::{ net::UdpSocket, + num::NonZeroUsize, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, RwLock, @@ -61,6 +62,18 @@ pub const DEFAULT_QUIC_ENDPOINTS: usize = 1; pub const DEFAULT_TPU_COALESCE: Duration = Duration::from_millis(5); +pub fn default_num_tpu_transaction_forward_receive_threads() -> usize { + num_cpus::get() +} + +pub fn default_num_tpu_transaction_receive_threads() -> usize { + num_cpus::get() +} + +pub fn default_num_tpu_vote_transaction_receive_threads() -> usize { + num_cpus::get() +} + pub struct SpawnServerResult { pub endpoints: Vec, pub thread: thread::JoinHandle<()>, @@ -114,9 +127,10 @@ pub(crate) fn configure_server( Ok((server_config, cert_chain_pem)) } -pub fn rt(name: String) -> Runtime { +pub fn rt(name: String, num_threads: NonZeroUsize) -> Runtime { 
tokio::runtime::Builder::new_multi_thread() .thread_name(name) + .worker_threads(num_threads.get()) .enable_all() .build() .unwrap() @@ -612,6 +626,7 @@ pub struct QuicServerParams { pub wait_for_chunk_timeout: Duration, pub coalesce: Duration, pub coalesce_channel_size: usize, + pub num_threads: NonZeroUsize, } impl Default for QuicServerParams { @@ -625,6 +640,7 @@ impl Default for QuicServerParams { wait_for_chunk_timeout: DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, coalesce: DEFAULT_TPU_COALESCE, coalesce_channel_size: DEFAULT_MAX_COALESCE_CHANNEL_SIZE, + num_threads: NonZeroUsize::new(num_cpus::get().max(1)).expect("1 is non-zero"), } } } @@ -639,7 +655,7 @@ pub fn spawn_server_multi( staked_nodes: Arc>, quic_server_params: QuicServerParams, ) -> Result { - let runtime = rt(format!("{thread_name}Rt")); + let runtime = rt(format!("{thread_name}Rt"), quic_server_params.num_threads); let result = { let _guard = runtime.enter(); crate::nonblocking::quic::spawn_server_multi( @@ -674,12 +690,22 @@ pub fn spawn_server_multi( mod test { use { super::*, - crate::nonblocking::{quic::test::*, testing_utilities::check_multiple_streams}, + crate::nonblocking::{ + quic::test::*, + testing_utilities::{check_multiple_streams, DEFAULT_NUM_SERVER_THREADS_FOR_TEST}, + }, crossbeam_channel::unbounded, solana_net_utils::bind_to_localhost, std::net::SocketAddr, }; + fn rt_for_test() -> Runtime { + rt( + "solQuicTestRt".to_string(), + DEFAULT_NUM_SERVER_THREADS_FOR_TEST, + ) + } + fn setup_quic_server() -> ( std::thread::JoinHandle<()>, Arc, @@ -706,6 +732,7 @@ mod test { staked_nodes, QuicServerParams { coalesce_channel_size: 100_000, // smaller channel size for faster test + num_threads: DEFAULT_NUM_SERVER_THREADS_FOR_TEST, ..Default::default() }, ) @@ -724,7 +751,7 @@ mod test { fn test_quic_timeout() { solana_logger::setup(); let (t, exit, receiver, server_address) = setup_quic_server(); - let runtime = rt("solQuicTestRt".to_string()); + let runtime = rt_for_test();
runtime.block_on(check_timeout(receiver, server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -735,7 +762,7 @@ mod test { solana_logger::setup(); let (t, exit, _receiver, server_address) = setup_quic_server(); - let runtime = rt("solQuicTestRt".to_string()); + let runtime = rt_for_test(); runtime.block_on(check_block_multiple_connections(server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -770,7 +797,7 @@ mod test { ) .unwrap(); - let runtime = rt("solQuicTestRt".to_string()); + let runtime = rt_for_test(); runtime.block_on(check_multiple_streams(receiver, server_address, None)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -781,7 +808,7 @@ mod test { solana_logger::setup(); let (t, exit, receiver, server_address) = setup_quic_server(); - let runtime = rt("solQuicTestRt".to_string()); + let runtime = rt_for_test(); runtime.block_on(check_multiple_writes(receiver, server_address, None)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); @@ -816,7 +843,7 @@ mod test { ) .unwrap(); - let runtime = rt("solQuicTestRt".to_string()); + let runtime = rt_for_test(); runtime.block_on(check_unstaked_node_connect_failure(server_address)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 5f3c3864851cb7..aab877dbf91205 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -8110,6 +8110,7 @@ dependencies = [ "libc", "log", "nix", + "num_cpus", "pem", "percentage", "quinn", diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs index fa01b1c4b23735..bc2e03381efddb 100644 --- a/validator/src/cli/thread_args.rs +++ b/validator/src/cli/thread_args.rs @@ -20,6 +20,9 @@ pub struct DefaultThreadArgs { pub replay_transactions_threads: String, pub rocksdb_compaction_threads: String, pub rocksdb_flush_threads: String, + pub tpu_transaction_forward_receive_threads: String, + pub 
tpu_transaction_receive_threads: String, + pub tpu_vote_transaction_receive_threads: String, pub tvu_receive_threads: String, pub tvu_retransmit_threads: String, pub tvu_sigverify_threads: String, @@ -41,6 +44,12 @@ impl Default for DefaultThreadArgs { .to_string(), rocksdb_compaction_threads: RocksdbCompactionThreadsArg::bounded_default().to_string(), rocksdb_flush_threads: RocksdbFlushThreadsArg::bounded_default().to_string(), + tpu_transaction_forward_receive_threads: + TpuTransactionForwardReceiveThreadArgs::bounded_default().to_string(), + tpu_transaction_receive_threads: TpuTransactionReceiveThreads::bounded_default() + .to_string(), + tpu_vote_transaction_receive_threads: + TpuVoteTransactionReceiveThreads::bounded_default().to_string(), tvu_receive_threads: TvuReceiveThreadsArg::bounded_default().to_string(), tvu_retransmit_threads: TvuRetransmitThreadsArg::bounded_default().to_string(), tvu_sigverify_threads: TvuShredSigverifyThreadsArg::bounded_default().to_string(), @@ -60,6 +69,13 @@ pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec> { new_thread_arg::(&defaults.replay_transactions_threads), new_thread_arg::(&defaults.rocksdb_compaction_threads), new_thread_arg::(&defaults.rocksdb_flush_threads), + new_thread_arg::( + &defaults.tpu_transaction_forward_receive_threads, + ), + new_thread_arg::(&defaults.tpu_transaction_receive_threads), + new_thread_arg::( + &defaults.tpu_vote_transaction_receive_threads, + ), new_thread_arg::(&defaults.tvu_receive_threads), new_thread_arg::(&defaults.tvu_retransmit_threads), new_thread_arg::(&defaults.tvu_sigverify_threads), @@ -88,6 +104,9 @@ pub struct NumThreadConfig { pub replay_transactions_threads: NonZeroUsize, pub rocksdb_compaction_threads: NonZeroUsize, pub rocksdb_flush_threads: NonZeroUsize, + pub tpu_transaction_forward_receive_threads: NonZeroUsize, + pub tpu_transaction_receive_threads: NonZeroUsize, + pub tpu_vote_transaction_receive_threads: NonZeroUsize, pub tvu_receive_threads: NonZeroUsize, 
pub tvu_retransmit_threads: NonZeroUsize, pub tvu_sigverify_threads: NonZeroUsize, @@ -137,6 +156,21 @@ pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { RocksdbFlushThreadsArg::NAME, NonZeroUsize ), + tpu_transaction_forward_receive_threads: value_t_or_exit!( + matches, + TpuTransactionForwardReceiveThreadArgs::NAME, + NonZeroUsize + ), + tpu_transaction_receive_threads: value_t_or_exit!( + matches, + TpuTransactionReceiveThreads::NAME, + NonZeroUsize + ), + tpu_vote_transaction_receive_threads: value_t_or_exit!( + matches, + TpuVoteTransactionReceiveThreads::NAME, + NonZeroUsize + ), tvu_receive_threads: value_t_or_exit!(matches, TvuReceiveThreadsArg::NAME, NonZeroUsize), tvu_retransmit_threads: value_t_or_exit!( matches, @@ -302,6 +336,42 @@ impl ThreadArg for RocksdbFlushThreadsArg { } } +struct TpuTransactionForwardReceiveThreadArgs; +impl ThreadArg for TpuTransactionForwardReceiveThreadArgs { + const NAME: &'static str = "tpu_transaction_forward_receive_threads"; + const LONG_NAME: &'static str = "tpu-transaction-forward-receive-threads"; + const HELP: &'static str = + "Number of threads to use for receiving transactions on the TPU forwards port"; + + fn default() -> usize { + solana_streamer::quic::default_num_tpu_transaction_forward_receive_threads() + } +} + +struct TpuTransactionReceiveThreads; +impl ThreadArg for TpuTransactionReceiveThreads { + const NAME: &'static str = "tpu_transaction_receive_threads"; + const LONG_NAME: &'static str = "tpu-transaction-receive-threads"; + const HELP: &'static str = + "Number of threads to use for receiving transactions on the TPU port"; + + fn default() -> usize { + solana_streamer::quic::default_num_tpu_transaction_receive_threads() + } +} + +struct TpuVoteTransactionReceiveThreads; +impl ThreadArg for TpuVoteTransactionReceiveThreads { + const NAME: &'static str = "tpu_vote_transaction_receive_threads"; + const LONG_NAME: &'static str = "tpu-vote-transaction-receive-threads"; + const HELP:
&'static str = + "Number of threads to use for receiving transactions on the TPU vote port"; + + fn default() -> usize { + solana_streamer::quic::default_num_tpu_vote_transaction_receive_threads() + } +} + struct TvuReceiveThreadsArg; impl ThreadArg for TvuReceiveThreadsArg { const NAME: &'static str = "tvu_receive_threads"; diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 8a1e1eab9455c3..f8fcdd823dd34d 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -113,6 +113,9 @@ pub fn execute( replay_transactions_threads, rocksdb_compaction_threads, rocksdb_flush_threads, + tpu_transaction_forward_receive_threads, + tpu_transaction_receive_threads, + tpu_vote_transaction_receive_threads, tvu_receive_threads, tvu_retransmit_threads, tvu_sigverify_threads, @@ -1246,6 +1249,7 @@ pub fn execute( max_streams_per_ms, max_connections_per_ipaddr_per_min: tpu_max_connections_per_ipaddr_per_minute, coalesce: tpu_coalesce, + num_threads: tpu_transaction_receive_threads, ..Default::default() }; @@ -1256,6 +1260,7 @@ pub fn execute( max_streams_per_ms, max_connections_per_ipaddr_per_min: tpu_max_connections_per_ipaddr_per_minute, coalesce: tpu_coalesce, + num_threads: tpu_transaction_forward_receive_threads, ..Default::default() }; @@ -1264,6 +1269,7 @@ pub fn execute( let mut vote_quic_server_config = tpu_fwd_quic_server_config.clone(); vote_quic_server_config.max_connections_per_peer = 1; vote_quic_server_config.max_unstaked_connections = 0; + vote_quic_server_config.num_threads = tpu_vote_transaction_receive_threads; let validator = match Validator::new( node, From 354071ec054d39407c875916ae5a6c429e36fd8b Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 19 Jun 2025 15:36:30 -0500 Subject: [PATCH 042/124] streamer: Remove unnecessary TestServerConfig struct (#6649) This struct was used as an intermediate between some test functions and QuicServerParams. 
Instead of the extra struct and conversion, simply implement a default_for_tests() function on QuickServerParams directly --- quic-client/Cargo.toml | 1 + quic-client/tests/quic_client.rs | 33 ++-------- streamer/src/nonblocking/quic.rs | 32 +++++----- streamer/src/nonblocking/testing_utilities.rs | 62 ++----------------- streamer/src/quic.rs | 33 +++++----- .../connection_workers_scheduler_test.rs | 29 ++++----- 6 files changed, 60 insertions(+), 130 deletions(-) diff --git a/quic-client/Cargo.toml b/quic-client/Cargo.toml index 6dcc9bd7eac247..92b537ebd1713a 100644 --- a/quic-client/Cargo.toml +++ b/quic-client/Cargo.toml @@ -39,3 +39,4 @@ solana-logger = { workspace = true } solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-packet = { workspace = true } solana-perf = { workspace = true } +solana-streamer = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index 7290170c3fac6f..4c0193e72c8310 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -80,13 +80,7 @@ mod tests { sender, exit.clone(), staked_nodes, - QuicServerParams { - max_connections_per_peer: 1, - max_staked_connections: 10, - max_unstaked_connections: 10, - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() - }, + QuicServerParams::default_for_tests(), ) .unwrap(); @@ -166,14 +160,7 @@ mod tests { sender, exit.clone(), staked_nodes, - QuicServerParams { - max_connections_per_peer: 1, - max_staked_connections: 10, - max_unstaked_connections: 10, - wait_for_chunk_timeout: Duration::from_secs(1), - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() - }, + QuicServerParams::default_for_tests(), ) .unwrap(); @@ -231,13 +218,7 @@ mod tests { sender, request_recv_exit.clone(), staked_nodes.clone(), - QuicServerParams { - 
max_connections_per_peer: 1, - max_staked_connections: 10, - max_unstaked_connections: 10, - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() - }, + QuicServerParams::default_for_tests(), ) .unwrap(); @@ -261,13 +242,7 @@ mod tests { sender2, response_recv_exit.clone(), staked_nodes, - QuicServerParams { - max_connections_per_peer: 1, - max_staked_connections: 10, - max_unstaked_connections: 10, - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() - }, + QuicServerParams::default_for_tests(), ) .unwrap(); diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index d3e2fb69b75a31..7de3640303edb8 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -1563,7 +1563,7 @@ pub mod test { quic::compute_max_allowed_uni_streams, testing_utilities::{ check_multiple_streams, get_client_config, make_client_endpoint, - setup_quic_server, SpawnTestServerResult, TestServerConfig, + setup_quic_server, SpawnTestServerResult, }, }, quic::DEFAULT_TPU_COALESCE, @@ -1687,7 +1687,7 @@ pub mod test { receiver: _, server_address: _, stats: _, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); } @@ -1701,7 +1701,7 @@ pub mod test { receiver, server_address, stats: _, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_timeout(receiver, server_address).await; exit.store(true, Ordering::Relaxed); @@ -1768,7 +1768,7 @@ pub mod test { receiver: _, server_address, stats, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); let conn1 = make_client_endpoint(&server_address, None).await; 
assert_eq!(stats.total_streams.load(Ordering::Relaxed), 0); @@ -1803,7 +1803,7 @@ pub mod test { receiver: _, server_address, stats: _, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_block_multiple_connections(server_address).await; exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); @@ -1821,9 +1821,9 @@ pub mod test { stats, } = setup_quic_server( None, - TestServerConfig { + QuicServerParams { max_connections_per_peer: 2, - ..Default::default() + ..QuicServerParams::default_for_tests() }, ); @@ -1894,7 +1894,7 @@ pub mod test { receiver, server_address, stats: _, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, None).await; exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); @@ -1916,7 +1916,7 @@ pub mod test { receiver, server_address, stats, - } = setup_quic_server(Some(staked_nodes), TestServerConfig::default()); + } = setup_quic_server(Some(staked_nodes), QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, Some(&client_keypair)).await; exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); @@ -1948,7 +1948,7 @@ pub mod test { receiver, server_address, stats, - } = setup_quic_server(Some(staked_nodes), TestServerConfig::default()); + } = setup_quic_server(Some(staked_nodes), QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, Some(&client_keypair)).await; exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); @@ -1972,7 +1972,7 @@ pub mod test { receiver, server_address, stats, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, None).await; exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); 
@@ -2010,8 +2010,7 @@ pub mod test { staked_nodes, QuicServerParams { max_unstaked_connections: 0, // Do not allow any connection from unstaked clients/nodes - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() + ..QuicServerParams::default_for_tests() }, ) .unwrap(); @@ -2044,8 +2043,7 @@ pub mod test { staked_nodes, QuicServerParams { max_connections_per_peer: 2, - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() + ..QuicServerParams::default_for_tests() }, ) .unwrap(); @@ -2397,7 +2395,7 @@ pub mod test { receiver, server_address, stats, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); let client_connection = make_client_endpoint(&server_address, None).await; @@ -2456,7 +2454,7 @@ pub mod test { stats, exit, .. - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); let client_connection = make_client_endpoint(&server_address, None).await; diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index 564718b8b9e347..1e82687f1d9104 100644 --- a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -1,15 +1,8 @@ //! Contains utility functions to create server and client for test purposes. 
use { - super::quic::{ - spawn_server_multi, SpawnNonBlockingServerResult, ALPN_TPU_PROTOCOL_ID, - DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, - }, + super::quic::{spawn_server_multi, SpawnNonBlockingServerResult, ALPN_TPU_PROTOCOL_ID}, crate::{ - quic::{ - QuicServerParams, StreamerStats, DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, - DEFAULT_MAX_STAKED_CONNECTIONS, DEFAULT_MAX_STREAMS_PER_MS, - DEFAULT_MAX_UNSTAKED_CONNECTIONS, DEFAULT_TPU_COALESCE, - }, + quic::{QuicServerParams, StreamerStats}, streamer::StakedNodes, }, crossbeam_channel::{unbounded, Receiver}, @@ -27,15 +20,12 @@ use { solana_tls_utils::{new_dummy_x509_certificate, tls_client_config_builder}, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, - num::NonZeroUsize, sync::{atomic::AtomicBool, Arc, RwLock}, time::{Duration, Instant}, }, tokio::{task::JoinHandle, time::sleep}, }; -pub(crate) const DEFAULT_NUM_SERVER_THREADS_FOR_TEST: NonZeroUsize = NonZeroUsize::new(8).unwrap(); - pub fn get_client_config(keypair: &Keypair) -> ClientConfig { let (cert, key) = new_dummy_x509_certificate(keypair); @@ -58,29 +48,6 @@ pub fn get_client_config(keypair: &Keypair) -> ClientConfig { config } -#[derive(Debug, Clone)] -pub struct TestServerConfig { - pub max_connections_per_peer: usize, - pub max_staked_connections: usize, - pub max_unstaked_connections: usize, - pub max_streams_per_ms: u64, - pub max_connections_per_ipaddr_per_min: u64, - pub coalesce_channel_size: usize, -} - -impl Default for TestServerConfig { - fn default() -> Self { - Self { - max_connections_per_peer: 1, - max_staked_connections: DEFAULT_MAX_STAKED_CONNECTIONS, - max_unstaked_connections: DEFAULT_MAX_UNSTAKED_CONNECTIONS, - max_streams_per_ms: DEFAULT_MAX_STREAMS_PER_MS, - max_connections_per_ipaddr_per_min: DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, - coalesce_channel_size: 100_000, // use a smaller value for test as create a huge bounded channel can take time - } - } -} - pub struct SpawnTestServerResult { pub join_handle: 
JoinHandle<()>, pub exit: Arc, @@ -108,40 +75,23 @@ pub fn create_quic_server_sockets() -> Vec { pub fn setup_quic_server( option_staked_nodes: Option, - config: TestServerConfig, + quic_server_params: QuicServerParams, ) -> SpawnTestServerResult { let sockets = create_quic_server_sockets(); - setup_quic_server_with_sockets(sockets, option_staked_nodes, config) + setup_quic_server_with_sockets(sockets, option_staked_nodes, quic_server_params) } pub fn setup_quic_server_with_sockets( sockets: Vec, option_staked_nodes: Option, - TestServerConfig { - max_connections_per_peer, - max_staked_connections, - max_unstaked_connections, - max_streams_per_ms, - max_connections_per_ipaddr_per_min, - coalesce_channel_size, - }: TestServerConfig, + quic_server_params: QuicServerParams, ) -> SpawnTestServerResult { let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); let server_address = sockets[0].local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(option_staked_nodes.unwrap_or_default())); - let quic_server_params = QuicServerParams { - max_connections_per_peer, - max_staked_connections, - max_unstaked_connections, - max_streams_per_ms, - max_connections_per_ipaddr_per_min, - wait_for_chunk_timeout: DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, - coalesce: DEFAULT_TPU_COALESCE, - coalesce_channel_size, - num_threads: DEFAULT_NUM_SERVER_THREADS_FOR_TEST, - }; + let SpawnNonBlockingServerResult { endpoints: _, stats, diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index f607e328b47716..61dee5db493a4d 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -645,6 +645,20 @@ impl Default for QuicServerParams { } } +#[cfg(feature = "dev-context-only-utils")] +impl QuicServerParams { + pub const DEFAULT_NUM_SERVER_THREADS_FOR_TEST: NonZeroUsize = NonZeroUsize::new(8).unwrap(); + + pub fn default_for_tests() -> Self { + // Shrink the channel size to avoid a massive allocation for tests + Self { + 
coalesce_channel_size: 100_000, + num_threads: Self::DEFAULT_NUM_SERVER_THREADS_FOR_TEST, + ..Self::default() + } + } +} + pub fn spawn_server_multi( thread_name: &'static str, metrics_name: &'static str, @@ -690,10 +704,7 @@ pub fn spawn_server_multi( mod test { use { super::*, - crate::nonblocking::{ - quic::test::*, - testing_utilities::{check_multiple_streams, DEFAULT_NUM_SERVER_THREADS_FOR_TEST}, - }, + crate::nonblocking::{quic::test::*, testing_utilities::check_multiple_streams}, crossbeam_channel::unbounded, solana_net_utils::bind_to_localhost, std::net::SocketAddr, @@ -702,7 +713,7 @@ mod test { fn rt_for_test() -> Runtime { rt( "solQuicTestRt".to_string(), - DEFAULT_NUM_SERVER_THREADS_FOR_TEST, + QuicServerParams::DEFAULT_NUM_SERVER_THREADS_FOR_TEST, ) } @@ -730,11 +741,7 @@ mod test { sender, exit.clone(), staked_nodes, - QuicServerParams { - coalesce_channel_size: 100_000, // smaller channel size for faster test - num_threads: DEFAULT_NUM_SERVER_THREADS_FOR_TEST, - ..Default::default() - }, + QuicServerParams::default_for_tests(), ) .unwrap(); (t, exit, receiver, server_address) @@ -791,8 +798,7 @@ mod test { staked_nodes, QuicServerParams { max_connections_per_peer: 2, - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() + ..QuicServerParams::default_for_tests() }, ) .unwrap(); @@ -837,8 +843,7 @@ mod test { staked_nodes, QuicServerParams { max_unstaked_connections: 0, - coalesce_channel_size: 100_000, // smaller channel size for faster test - ..QuicServerParams::default() + ..QuicServerParams::default_for_tests() }, ) .unwrap(); diff --git a/tpu-client-next/tests/connection_workers_scheduler_test.rs b/tpu-client-next/tests/connection_workers_scheduler_test.rs index 746659df4e070c..4e3f29dfc9e315 100644 --- a/tpu-client-next/tests/connection_workers_scheduler_test.rs +++ b/tpu-client-next/tests/connection_workers_scheduler_test.rs @@ -9,9 +9,10 @@ use { solana_signer::Signer, solana_streamer::{ 
nonblocking::testing_utilities::{ - make_client_endpoint, setup_quic_server, SpawnTestServerResult, TestServerConfig, + make_client_endpoint, setup_quic_server, SpawnTestServerResult, }, packet::PacketBatch, + quic::QuicServerParams, streamer::StakedNodes, }, solana_tpu_client_next::{ @@ -195,7 +196,7 @@ async fn test_basic_transactions_sending() { receiver, server_address, stats: _stats, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); // Setup sending txs let tx_size = 1; @@ -287,7 +288,7 @@ async fn test_connection_denied_until_allowed() { receiver, server_address, stats: _stats, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); // To prevent server from accepting a new connection, we use the following observation. // Since max_connections_per_peer == 1 (< max_unstaked_connections == 500), if we create a first @@ -349,10 +350,10 @@ async fn test_connection_pruned_and_reopened() { stats: _stats, } = setup_quic_server( None, - TestServerConfig { + QuicServerParams { max_connections_per_peer: 100, max_unstaked_connections: 1, - ..Default::default() + ..QuicServerParams::default_for_tests() }, ); @@ -408,13 +409,13 @@ async fn test_staked_connection() { stats: _stats, } = setup_quic_server( Some(staked_nodes), - TestServerConfig { + QuicServerParams { // Must use at least the number of endpoints (10) because // `max_staked_connections` and `max_unstaked_connections` are // cumulative for all the endpoints. 
max_staked_connections: 10, max_unstaked_connections: 0, - ..Default::default() + ..QuicServerParams::default_for_tests() }, ); @@ -461,7 +462,7 @@ async fn test_connection_throttling() { receiver, server_address, stats: _stats, - } = setup_quic_server(None, TestServerConfig::default()); + } = setup_quic_server(None, QuicServerParams::default_for_tests()); // Setup sending txs let tx_size = 1; @@ -550,10 +551,10 @@ async fn test_rate_limiting() { stats: _stats, } = setup_quic_server( None, - TestServerConfig { + QuicServerParams { max_connections_per_peer: 100, max_connections_per_ipaddr_per_min: 1, - ..Default::default() + ..QuicServerParams::default_for_tests() }, ); @@ -608,10 +609,10 @@ async fn test_rate_limiting_establish_connection() { stats: _stats, } = setup_quic_server( None, - TestServerConfig { + QuicServerParams { max_connections_per_peer: 100, max_connections_per_ipaddr_per_min: 1, - ..Default::default() + ..QuicServerParams::default_for_tests() }, ); @@ -691,14 +692,14 @@ async fn test_update_identity() { stats: _stats, } = setup_quic_server( Some(staked_nodes), - TestServerConfig { + QuicServerParams { // Must use at least the number of endpoints (10) because // `max_staked_connections` and `max_unstaked_connections` are // cumulative for all the endpoints. max_staked_connections: 10, // Deny all unstaked connections. max_unstaked_connections: 0, - ..Default::default() + ..QuicServerParams::default_for_tests() }, ); From 8b6d80fb5791c1fd1ac96d4bdb2faca15dd03adb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 11:05:35 +0800 Subject: [PATCH 043/124] build(deps): bump slab from 0.4.9 to 0.4.10 (#6651) * build(deps): bump slab from 0.4.9 to 0.4.10 Bumps [slab](https://github.com/tokio-rs/slab) from 0.4.9 to 0.4.10. 
- [Release notes](https://github.com/tokio-rs/slab/releases) - [Changelog](https://github.com/tokio-rs/slab/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/slab/compare/v0.4.9...v0.4.10) --- updated-dependencies: - dependency-name: slab dependency-version: 0.4.10 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 7 ++----- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 7 ++----- svm/examples/Cargo.lock | 7 ++----- 4 files changed, 7 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b31d2dca44c29d..a96ca96d1321ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6507,12 +6507,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" diff --git a/Cargo.toml b/Cargo.toml index 5e984f504c6a01..51e1a232d4edd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -363,7 +363,7 @@ sha3 = "0.10.8" shuttle = "0.7.1" signal-hook = "0.3.18" siphasher = "1.0.1" -slab = "0.4.9" +slab = "0.4.10" smallvec = "1.15.1" smpl_jwt = "0.7.1" socket2 = "0.5.10" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f63678ae0d6b0f..362925eb1124bd 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5279,12 +5279,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = 
"04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index aab877dbf91205..52bf4c6553f7af 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -5127,12 +5127,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" From 7e90df184de86d47ff7f88592d828d7c2dd73a26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 18:42:25 +0800 Subject: [PATCH 044/124] build(deps): bump io-uring from 0.7.7 to 0.7.8 (#6666) * build(deps): bump io-uring from 0.7.7 to 0.7.8 Bumps [io-uring](https://github.com/tokio-rs/io-uring) from 0.7.7 to 0.7.8. - [Commits](https://github.com/tokio-rs/io-uring/commits) --- updated-dependencies: - dependency-name: io-uring dependency-version: 0.7.8 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- svm/examples/Cargo.lock | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a96ca96d1321ac..78892fec819933 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3809,9 +3809,9 @@ dependencies = [ [[package]] name = "io-uring" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ebb93303c65a11753dd0e45cd6bfa5c65ee1f0b9f8e2178b6998ddc8b284f04" +checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" dependencies = [ "bitflags 2.9.1", "cfg-if 1.0.1", diff --git a/Cargo.toml b/Cargo.toml index 51e1a232d4edd6..29bfb52594cbf4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -279,7 +279,7 @@ hyper-proxy = "0.9.1" im = "15.1.0" indexmap = "2.9.0" indicatif = "0.17.11" -io-uring = "0.7.4" +io-uring = "0.7.8" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.6.0", features = [ "unprefixed_malloc_on_supported_platforms", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 362925eb1124bd..3ee31857cc2a77 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2905,9 +2905,9 @@ dependencies = [ [[package]] name = "io-uring" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ebb93303c65a11753dd0e45cd6bfa5c65ee1f0b9f8e2178b6998ddc8b284f04" +checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" dependencies = [ "bitflags 2.9.1", "cfg-if 1.0.0", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 52bf4c6553f7af..982680712547f2 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -2779,9 +2779,9 @@ dependencies = [ [[package]] 
name = "io-uring" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ebb93303c65a11753dd0e45cd6bfa5c65ee1f0b9f8e2178b6998ddc8b284f04" +checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" dependencies = [ "bitflags 2.9.1", "cfg-if 1.0.0", From 557bfcb1028103d6383535f374c358dbd10e6e92 Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Fri, 20 Jun 2025 13:59:08 +0200 Subject: [PATCH 045/124] runtime: Use the same deserialized snapshot for storage and banks rebuild (#6540) Previously, storage and banks rebuilds were retrieving and deserializing snapshots the same snapshot separately, on their own. That means, deserialization of the same snapshot was done twice. Fix that by parsing the accounts and banks fields beforehand and then using them for rebuilding the storage and banks. Fixes #6539 --- runtime/src/serde_snapshot.rs | 30 +- runtime/src/serde_snapshot/storage.rs | 2 +- runtime/src/snapshot_bank_utils.rs | 323 +++++++----------- runtime/src/snapshot_utils.rs | 290 +++++++++++++--- .../snapshot_storage_rebuilder.rs | 148 +------- 5 files changed, 397 insertions(+), 396 deletions(-) diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 3ac4399dd8c24c..68efcccdee2fa1 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -5,7 +5,6 @@ use { bank::{Bank, BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankRc}, epoch_stakes::VersionedEpochStakes, runtime_config::RuntimeConfig, - serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{SnapshotError, StorageAndNextAccountsFileId}, stake_account::StakeAccount, stakes::{serialize_stake_accounts_to_delegation_format, Stakes}, @@ -64,7 +63,7 @@ pub(crate) use { solana_accounts_db::accounts_hash::{ SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, }, - storage::SerializedAccountsFileId, + 
storage::{SerializableAccountStorageEntry, SerializedAccountsFileId}, }; const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; @@ -301,6 +300,13 @@ pub struct SnapshotBankFields { } impl SnapshotBankFields { + pub fn new( + full: BankFieldsToDeserialize, + incremental: Option, + ) -> Self { + Self { full, incremental } + } + /// Collapse the SnapshotBankFields into a single (the latest) BankFieldsToDeserialize. pub fn collapse_into(self) -> BankFieldsToDeserialize { self.incremental.unwrap_or(self.full) @@ -316,11 +322,21 @@ pub struct SnapshotAccountsDbFields { } impl SnapshotAccountsDbFields { + pub fn new( + full_snapshot_accounts_db_fields: AccountsDbFields, + incremental_snapshot_accounts_db_fields: Option>, + ) -> Self { + Self { + full_snapshot_accounts_db_fields, + incremental_snapshot_accounts_db_fields, + } + } + /// Collapse the SnapshotAccountsDbFields into a single AccountsDbFields. If there is no /// incremental snapshot, this returns the AccountsDbFields from the full snapshot. /// Otherwise, use the AccountsDbFields from the incremental snapshot, and a combination /// of the storages from both the full and incremental snapshots. 
- fn collapse_into(self) -> Result, Error> { + pub fn collapse_into(self) -> Result, Error> { match self.incremental_snapshot_accounts_db_fields { None => Ok(self.full_snapshot_accounts_db_fields), Some(AccountsDbFields( @@ -513,6 +529,7 @@ pub(crate) fn fields_from_stream( deserialize_bank_fields(snapshot_stream) } +#[cfg(feature = "dev-context-only-utils")] pub(crate) fn fields_from_streams( snapshot_streams: &mut SnapshotStreams, ) -> std::result::Result< @@ -550,6 +567,7 @@ pub struct BankFromStreamsInfo { } #[allow(clippy::too_many_arguments)] +#[cfg(test)] pub(crate) fn bank_from_streams( snapshot_streams: &mut SnapshotStreams, account_paths: &[PathBuf], @@ -836,12 +854,12 @@ impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccount /// This struct contains side-info while reconstructing the bank from fields #[derive(Debug)] -struct ReconstructedBankInfo { - duplicates_lt_hash: Option>, +pub(crate) struct ReconstructedBankInfo { + pub(crate) duplicates_lt_hash: Option>, } #[allow(clippy::too_many_arguments)] -fn reconstruct_bank_from_fields( +pub(crate) fn reconstruct_bank_from_fields( bank_fields: SnapshotBankFields, snapshot_accounts_db_fields: SnapshotAccountsDbFields, genesis_config: &GenesisConfig, diff --git a/runtime/src/serde_snapshot/storage.rs b/runtime/src/serde_snapshot/storage.rs index 6cbc1510d4637c..931b9f26b56f69 100644 --- a/runtime/src/serde_snapshot/storage.rs +++ b/runtime/src/serde_snapshot/storage.rs @@ -33,7 +33,7 @@ impl SerializableAccountStorageEntry { } } -pub(super) trait SerializableStorage { +pub(crate) trait SerializableStorage { fn id(&self) -> SerializedAccountsFileId; fn current_len(&self) -> usize; } diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index f3d45c45f4edde..6f4c282acfd248 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -1,6 +1,13 @@ #[cfg(feature = "dev-context-only-utils")] use { - 
crate::{bank::BankFieldsToDeserialize, serde_snapshot::fields_from_streams}, + crate::{ + bank::BankFieldsToDeserialize, + serde_snapshot::fields_from_streams, + snapshot_utils::{ + deserialize_snapshot_data_files, verify_unpacked_snapshots_dir_and_version, + SnapshotRootPaths, UnpackedSnapshotsDirAndVersion, + }, + }, solana_accounts_db::accounts_file::StorageAccess, tempfile::TempDir, }; @@ -9,7 +16,10 @@ use { bank::{Bank, BankSlotDelta}, epoch_stakes::VersionedEpochStakes, runtime_config::RuntimeConfig, - serde_snapshot::{bank_from_streams, BankIncrementalSnapshotPersistence}, + serde_snapshot::{ + reconstruct_bank_from_fields, BankIncrementalSnapshotPersistence, + SnapshotAccountsDbFields, SnapshotBankFields, + }, snapshot_archive_info::{ FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfoGetter, }, @@ -17,13 +27,12 @@ use { snapshot_hash::SnapshotHash, snapshot_package::{AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage}, snapshot_utils::{ - self, deserialize_snapshot_data_file, deserialize_snapshot_data_files, - get_highest_bank_snapshot_post, get_highest_full_snapshot_archive_info, - get_highest_incremental_snapshot_archive_info, rebuild_storages_from_snapshot_dir, - serialize_snapshot_data_file, verify_and_unarchive_snapshots, - verify_unpacked_snapshots_dir_and_version, ArchiveFormat, BankSnapshotInfo, - SnapshotError, SnapshotRootPaths, SnapshotVersion, StorageAndNextAccountsFileId, - UnpackedSnapshotsDirAndVersion, VerifyEpochStakesError, VerifySlotDeltasError, + self, deserialize_snapshot_data_file, get_highest_bank_snapshot_post, + get_highest_full_snapshot_archive_info, get_highest_incremental_snapshot_archive_info, + rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, + verify_and_unarchive_snapshots, ArchiveFormat, BankSnapshotInfo, SnapshotError, + SnapshotVersion, StorageAndNextAccountsFileId, UnarchivedSnapshots, + VerifyEpochStakesError, VerifySlotDeltasError, }, status_cache, }, @@ -32,8 
+41,7 @@ use { log::*, solana_accounts_db::{ accounts_db::{ - AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId, - CalcAccountsHashDataSource, DuplicatesLtHash, + AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId, CalcAccountsHashDataSource, }, accounts_hash::MerkleOrLatticeAccountsHash, accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -100,22 +108,24 @@ pub fn bank_fields_from_snapshot_archives( let account_paths = vec![temp_accounts_dir.path().to_path_buf()]; - let (unarchived_full_snapshot, unarchived_incremental_snapshot, _next_append_vec_id) = - verify_and_unarchive_snapshots( - &temp_unpack_dir, - &full_snapshot_archive_info, - incremental_snapshot_archive_info.as_ref(), - &account_paths, - storage_access, - )?; + let ( + UnarchivedSnapshots { + full_unpacked_snapshots_dir_and_version, + incremental_unpacked_snapshots_dir_and_version, + .. + }, + _guard, + ) = verify_and_unarchive_snapshots( + &temp_unpack_dir, + &full_snapshot_archive_info, + incremental_snapshot_archive_info.as_ref(), + &account_paths, + storage_access, + )?; bank_fields_from_snapshots( - &unarchived_full_snapshot.unpacked_snapshots_dir_and_version, - unarchived_incremental_snapshot - .as_ref() - .map(|unarchive_preparation_result| { - &unarchive_preparation_result.unpacked_snapshots_dir_and_version - }), + &full_unpacked_snapshots_dir_and_version, + incremental_unpacked_snapshots_dir_and_version.as_ref(), ) } @@ -178,23 +188,33 @@ pub fn bank_from_snapshot_archives( ) ); - let (unarchived_full_snapshot, mut unarchived_incremental_snapshot, next_append_vec_id) = - verify_and_unarchive_snapshots( - bank_snapshots_dir, - full_snapshot_archive_info, - incremental_snapshot_archive_info, - account_paths, - accounts_db_config - .as_ref() - .map(|config| config.storage_access) - .unwrap_or_default(), - )?; + let ( + UnarchivedSnapshots { + full_storage: mut storage, + incremental_storage, + bank_fields, + accounts_db_fields, + 
full_unpacked_snapshots_dir_and_version, + incremental_unpacked_snapshots_dir_and_version, + full_measure_untar, + incremental_measure_untar, + next_append_vec_id, + .. + }, + _guard, + ) = verify_and_unarchive_snapshots( + bank_snapshots_dir, + full_snapshot_archive_info, + incremental_snapshot_archive_info, + account_paths, + accounts_db_config + .as_ref() + .map(|config| config.storage_access) + .unwrap_or_default(), + )?; - let mut storage = unarchived_full_snapshot.storage; - if let Some(ref mut unarchive_preparation_result) = unarchived_incremental_snapshot { - let incremental_snapshot_storages = - std::mem::take(&mut unarchive_preparation_result.storage); - storage.extend(incremental_snapshot_storages); + if let Some(incremental_storage) = incremental_storage { + storage.extend(incremental_storage); } let storage_and_next_append_vec_id = StorageAndNextAccountsFileId { @@ -203,17 +223,13 @@ pub fn bank_from_snapshot_archives( }; let mut measure_rebuild = Measure::start("rebuild bank from snapshots"); - let (bank, info) = rebuild_bank_from_unarchived_snapshots( - &unarchived_full_snapshot.unpacked_snapshots_dir_and_version, - unarchived_incremental_snapshot - .as_ref() - .map(|unarchive_preparation_result| { - &unarchive_preparation_result.unpacked_snapshots_dir_and_version - }), - account_paths, - storage_and_next_append_vec_id, + let (bank, info) = reconstruct_bank_from_fields( + bank_fields, + accounts_db_fields, genesis_config, runtime_config, + account_paths, + storage_and_next_append_vec_id, debug_keys, additional_builtins, limit_load_slot_count_from_snapshot, @@ -225,6 +241,31 @@ pub fn bank_from_snapshot_archives( measure_rebuild.stop(); info!("{}", measure_rebuild); + verify_epoch_stakes(&bank)?; + + // The status cache is rebuilt from the latest snapshot. So, if there's an incremental + // snapshot, use that. Otherwise use the full snapshot. 
+ let status_cache_path = incremental_unpacked_snapshots_dir_and_version + .as_ref() + .map_or_else( + || { + full_unpacked_snapshots_dir_and_version + .unpacked_snapshots_dir + .as_path() + }, + |unarchived_incremental_snapshot| { + unarchived_incremental_snapshot + .unpacked_snapshots_dir + .as_path() + }, + ) + .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); + let slot_deltas = deserialize_status_cache(&status_cache_path)?; + + verify_slot_deltas(slot_deltas.as_slice(), &bank)?; + + bank.status_cache.write().unwrap().append(&slot_deltas); + let snapshot_archive_info = incremental_snapshot_archive_info.map_or_else( || full_snapshot_archive_info.snapshot_archive_info(), |incremental_snapshot_archive_info| { @@ -268,10 +309,10 @@ pub fn bank_from_snapshot_archives( measure_verify.stop(); let timings = BankFromArchivesTimings { - untar_full_snapshot_archive_us: unarchived_full_snapshot.measure_untar.as_us(), - untar_incremental_snapshot_archive_us: unarchived_incremental_snapshot - .map_or(0, |unarchive_preparation_result| { - unarchive_preparation_result.measure_untar.as_us() + untar_full_snapshot_archive_us: full_measure_untar.as_us(), + untar_incremental_snapshot_archive_us: incremental_measure_untar + .map_or(0, |incremental_measure_untar| { + incremental_measure_untar.as_us() }), rebuild_bank_us: measure_rebuild.as_us(), verify_bank_us: measure_verify.as_us(), @@ -389,7 +430,7 @@ pub fn bank_from_snapshot_dir( .map(|config| config.storage_access) .unwrap_or_default(); - let (storage, measure_rebuild_storages) = measure_time!( + let ((storage, bank_fields, accounts_db_fields), measure_rebuild_storages) = measure_time!( rebuild_storages_from_snapshot_dir( bank_snapshot, account_paths, @@ -406,13 +447,16 @@ pub fn bank_from_snapshot_dir( storage, next_append_vec_id, }; + let snapshot_bank_fields = SnapshotBankFields::new(bank_fields, None); + let snapshot_accounts_db_fields = SnapshotAccountsDbFields::new(accounts_db_fields, None); let ((bank, info), 
measure_rebuild_bank) = measure_time!( - rebuild_bank_from_snapshot( - bank_snapshot, - account_paths, - storage_and_next_append_vec_id, + reconstruct_bank_from_fields( + snapshot_bank_fields, + snapshot_accounts_db_fields, genesis_config, runtime_config, + account_paths, + storage_and_next_append_vec_id, debug_keys, additional_builtins, limit_load_slot_count_from_snapshot, @@ -425,6 +469,17 @@ pub fn bank_from_snapshot_dir( ); info!("{}", measure_rebuild_bank); + verify_epoch_stakes(&bank)?; + + let status_cache_path = bank_snapshot + .snapshot_dir + .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); + let slot_deltas = deserialize_status_cache(&status_cache_path)?; + + verify_slot_deltas(slot_deltas.as_slice(), &bank)?; + + bank.status_cache.write().unwrap().append(&slot_deltas); + if bank .feature_set .is_active(&feature_set::accounts_lt_hash::id()) @@ -542,6 +597,7 @@ fn verify_bank_against_expected_slot_hash( } /// Returns the validated version and root paths for the given snapshots. 
+#[cfg(feature = "dev-context-only-utils")] fn snapshot_version_and_root_paths( full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, incremental_snapshot_unpacked_snapshots_dir_and_version: Option< @@ -591,155 +647,6 @@ fn deserialize_status_cache( }) } -/// This struct contains side-info from rebuilding the bank -#[derive(Debug)] -struct RebuiltBankInfo { - duplicates_lt_hash: Option>, -} - -#[allow(clippy::too_many_arguments)] -fn rebuild_bank_from_unarchived_snapshots( - full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, - incremental_snapshot_unpacked_snapshots_dir_and_version: Option< - &UnpackedSnapshotsDirAndVersion, - >, - account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAccountsFileId, - genesis_config: &GenesisConfig, - runtime_config: &RuntimeConfig, - debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, - limit_load_slot_count_from_snapshot: Option, - verify_index: bool, - accounts_db_config: Option, - accounts_update_notifier: Option, - exit: Arc, -) -> snapshot_utils::Result<(Bank, RebuiltBankInfo)> { - let (snapshot_version, snapshot_root_paths) = snapshot_version_and_root_paths( - full_snapshot_unpacked_snapshots_dir_and_version, - incremental_snapshot_unpacked_snapshots_dir_and_version, - )?; - - info!( - "Rebuilding bank from full snapshot {} and incremental snapshot {:?}", - snapshot_root_paths.full_snapshot_root_file_path.display(), - snapshot_root_paths.incremental_snapshot_root_file_path, - ); - - let (bank, info) = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { - Ok(match snapshot_version { - SnapshotVersion::V1_2_0 => bank_from_streams( - snapshot_streams, - account_paths, - storage_and_next_append_vec_id, - genesis_config, - runtime_config, - debug_keys, - additional_builtins, - limit_load_slot_count_from_snapshot, - verify_index, - accounts_db_config, - accounts_update_notifier, - exit, - ), - }?) 
- })?; - - verify_epoch_stakes(&bank)?; - - // The status cache is rebuilt from the latest snapshot. So, if there's an incremental - // snapshot, use that. Otherwise use the full snapshot. - let status_cache_path = incremental_snapshot_unpacked_snapshots_dir_and_version - .map_or_else( - || { - full_snapshot_unpacked_snapshots_dir_and_version - .unpacked_snapshots_dir - .as_path() - }, - |unpacked_snapshots_dir_and_version| { - unpacked_snapshots_dir_and_version - .unpacked_snapshots_dir - .as_path() - }, - ) - .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); - let slot_deltas = deserialize_status_cache(&status_cache_path)?; - - verify_slot_deltas(slot_deltas.as_slice(), &bank)?; - - bank.status_cache.write().unwrap().append(&slot_deltas); - - info!("Rebuilt bank for slot: {}", bank.slot()); - Ok(( - bank, - RebuiltBankInfo { - duplicates_lt_hash: info.duplicates_lt_hash, - }, - )) -} - -#[allow(clippy::too_many_arguments)] -fn rebuild_bank_from_snapshot( - bank_snapshot: &BankSnapshotInfo, - account_paths: &[PathBuf], - storage_and_next_append_vec_id: StorageAndNextAccountsFileId, - genesis_config: &GenesisConfig, - runtime_config: &RuntimeConfig, - debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, - limit_load_slot_count_from_snapshot: Option, - verify_index: bool, - accounts_db_config: Option, - accounts_update_notifier: Option, - exit: Arc, -) -> snapshot_utils::Result<(Bank, RebuiltBankInfo)> { - info!( - "Rebuilding bank from snapshot {}", - bank_snapshot.snapshot_dir.display(), - ); - - let snapshot_root_paths = SnapshotRootPaths { - full_snapshot_root_file_path: bank_snapshot.snapshot_path(), - incremental_snapshot_root_file_path: None, - }; - - let (bank, info) = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { - Ok(bank_from_streams( - snapshot_streams, - account_paths, - storage_and_next_append_vec_id, - genesis_config, - runtime_config, - debug_keys, - additional_builtins, - 
limit_load_slot_count_from_snapshot, - verify_index, - accounts_db_config, - accounts_update_notifier, - exit, - )?) - })?; - - verify_epoch_stakes(&bank)?; - - let status_cache_path = bank_snapshot - .snapshot_dir - .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); - let slot_deltas = deserialize_status_cache(&status_cache_path)?; - - verify_slot_deltas(slot_deltas.as_slice(), &bank)?; - - bank.status_cache.write().unwrap().append(&slot_deltas); - - info!("Rebuilt bank for slot: {}", bank.slot()); - Ok(( - bank, - RebuiltBankInfo { - duplicates_lt_hash: info.duplicates_lt_hash, - }, - )) -} - /// Verify that the snapshot's slot deltas are not corrupt/invalid fn verify_slot_deltas( slot_deltas: &[BankSlotDelta], diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 22e102dff51276..44a7084e9de82a 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1,8 +1,10 @@ use { crate::{ - bank::{BankFieldsToSerialize, BankHashStats, BankSlotDelta}, + bank::{BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankSlotDelta}, serde_snapshot::{ - self, BankIncrementalSnapshotPersistence, ExtraFieldsToSerialize, SnapshotStreams, + self, AccountsDbFields, BankIncrementalSnapshotPersistence, ExtraFieldsToSerialize, + SerializableAccountStorageEntry, SnapshotAccountsDbFields, SnapshotBankFields, + SnapshotStreams, }, snapshot_archive_info::{ FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfo, @@ -13,10 +15,10 @@ use { snapshot_hash::SnapshotHash, snapshot_package::{SnapshotKind, SnapshotPackage}, snapshot_utils::snapshot_storage_rebuilder::{ - RebuiltSnapshotStorage, SnapshotStorageRebuilder, + get_slot_and_append_vec_id, SnapshotStorageRebuilder, }, }, - crossbeam_channel::Sender, + crossbeam_channel::{Receiver, Sender}, log::*, regex::Regex, solana_accounts_db::{ @@ -44,7 +46,7 @@ use { path::{Path, PathBuf}, process::ExitStatus, str::FromStr, - sync::Arc, + sync::{Arc, LazyLock}, 
thread::{Builder, JoinHandle}, }, tar::{self, Archive}, @@ -281,10 +283,34 @@ pub struct UnarchivedSnapshot { #[allow(dead_code)] unpack_dir: TempDir, pub storage: AccountStorageMap, + pub bank_fields: BankFieldsToDeserialize, + pub accounts_db_fields: AccountsDbFields, pub unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion, pub measure_untar: Measure, } +/// Helper type to bundle up the results from `verify_and_unarchive_snapshots()`. +#[derive(Debug)] +pub struct UnarchivedSnapshots { + pub full_storage: AccountStorageMap, + pub incremental_storage: Option, + pub bank_fields: SnapshotBankFields, + pub accounts_db_fields: SnapshotAccountsDbFields, + pub full_unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion, + pub incremental_unpacked_snapshots_dir_and_version: Option, + pub full_measure_untar: Measure, + pub incremental_measure_untar: Option, + pub next_append_vec_id: AtomicAccountsFileId, +} + +/// Guard type that keeps the unpack directories of snapshots alive. +/// Once dropped, the unpack directories are removed. 
+#[allow(dead_code)] +#[derive(Debug)] +pub struct UnarchivedSnapshotsGuard { + full_unpack_dir: TempDir, + incremental_unpack_dir: Option, +} /// Helper type for passing around the unpacked snapshots dir and the snapshot version together #[derive(Debug)] pub struct UnpackedSnapshotsDirAndVersion { @@ -1566,11 +1592,7 @@ pub fn verify_and_unarchive_snapshots( incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>, account_paths: &[PathBuf], storage_access: StorageAccess, -) -> Result<( - UnarchivedSnapshot, - Option, - AtomicAccountsFileId, -)> { +) -> Result<(UnarchivedSnapshots, UnarchivedSnapshotsGuard)> { check_are_snapshots_compatible( full_snapshot_archive_info, incremental_snapshot_archive_info, @@ -1579,7 +1601,14 @@ pub fn verify_and_unarchive_snapshots( let parallel_divisions = (num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT); let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0)); - let unarchived_full_snapshot = unarchive_snapshot( + let UnarchivedSnapshot { + unpack_dir: full_unpack_dir, + storage: full_storage, + bank_fields: full_bank_fields, + accounts_db_fields: full_accounts_db_fields, + unpacked_snapshots_dir_and_version: full_unpacked_snapshots_dir_and_version, + measure_untar: full_measure_untar, + } = unarchive_snapshot( &bank_snapshots_dir, TMP_SNAPSHOT_ARCHIVE_PREFIX, full_snapshot_archive_info.path(), @@ -1591,28 +1620,65 @@ pub fn verify_and_unarchive_snapshots( storage_access, )?; - let unarchived_incremental_snapshot = - if let Some(incremental_snapshot_archive_info) = incremental_snapshot_archive_info { - let unarchived_incremental_snapshot = unarchive_snapshot( - &bank_snapshots_dir, - TMP_SNAPSHOT_ARCHIVE_PREFIX, - incremental_snapshot_archive_info.path(), - "incremental snapshot untar", - account_paths, - incremental_snapshot_archive_info.archive_format(), - parallel_divisions, - next_append_vec_id.clone(), - storage_access, - )?; - Some(unarchived_incremental_snapshot) - } else { - None - 
}; + let ( + incremental_unpack_dir, + incremental_storage, + incremental_bank_fields, + incremental_accounts_db_fields, + incremental_unpacked_snapshots_dir_and_version, + incremental_measure_untar, + ) = if let Some(incremental_snapshot_archive_info) = incremental_snapshot_archive_info { + let UnarchivedSnapshot { + unpack_dir, + storage, + bank_fields, + accounts_db_fields, + unpacked_snapshots_dir_and_version, + measure_untar, + } = unarchive_snapshot( + &bank_snapshots_dir, + TMP_SNAPSHOT_ARCHIVE_PREFIX, + incremental_snapshot_archive_info.path(), + "incremental snapshot untar", + account_paths, + incremental_snapshot_archive_info.archive_format(), + parallel_divisions, + next_append_vec_id.clone(), + storage_access, + )?; + ( + Some(unpack_dir), + Some(storage), + Some(bank_fields), + Some(accounts_db_fields), + Some(unpacked_snapshots_dir_and_version), + Some(measure_untar), + ) + } else { + (None, None, None, None, None, None) + }; + + let bank_fields = SnapshotBankFields::new(full_bank_fields, incremental_bank_fields); + let accounts_db_fields = + SnapshotAccountsDbFields::new(full_accounts_db_fields, incremental_accounts_db_fields); + let next_append_vec_id = Arc::try_unwrap(next_append_vec_id).unwrap(); Ok(( - unarchived_full_snapshot, - unarchived_incremental_snapshot, - Arc::try_unwrap(next_append_vec_id).unwrap(), + UnarchivedSnapshots { + full_storage, + incremental_storage, + bank_fields, + accounts_db_fields, + full_unpacked_snapshots_dir_and_version, + incremental_unpacked_snapshots_dir_and_version, + full_measure_untar, + incremental_measure_untar, + next_append_vec_id, + }, + UnarchivedSnapshotsGuard { + full_unpack_dir, + incremental_unpack_dir, + }, )) } @@ -1682,6 +1748,111 @@ fn streaming_unarchive_snapshot( .collect() } +/// Used to determine if a filename is structured like a version file, bank file, or storage file +#[derive(PartialEq, Debug)] +enum SnapshotFileKind { + Version, + BankFields, + Storage, +} + +/// Determines 
`SnapshotFileKind` for `filename` if any +fn get_snapshot_file_kind(filename: &str) -> Option { + static VERSION_FILE_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^version$").unwrap()); + static BANK_FIELDS_FILE_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^[0-9]+(\.pre)?$").unwrap()); + + if VERSION_FILE_REGEX.is_match(filename) { + Some(SnapshotFileKind::Version) + } else if BANK_FIELDS_FILE_REGEX.is_match(filename) { + Some(SnapshotFileKind::BankFields) + } else if get_slot_and_append_vec_id(filename).is_ok() { + Some(SnapshotFileKind::Storage) + } else { + None + } +} + +/// Waits for snapshot file +/// Due to parallel unpacking, we may receive some append_vec files before the snapshot file +/// This function will push append_vec files into a buffer until we receive the snapshot file +fn get_version_and_snapshot_files( + file_receiver: &Receiver, +) -> (PathBuf, PathBuf, Vec) { + let mut append_vec_files = Vec::with_capacity(1024); + let mut snapshot_version_path = None; + let mut snapshot_file_path = None; + + loop { + if let Ok(path) = file_receiver.recv() { + let filename = path.file_name().unwrap().to_str().unwrap(); + match get_snapshot_file_kind(filename) { + Some(SnapshotFileKind::Version) => { + snapshot_version_path = Some(path); + + // break if we have both the snapshot file and the version file + if snapshot_file_path.is_some() { + break; + } + } + Some(SnapshotFileKind::BankFields) => { + snapshot_file_path = Some(path); + + // break if we have both the snapshot file and the version file + if snapshot_version_path.is_some() { + break; + } + } + Some(SnapshotFileKind::Storage) => { + append_vec_files.push(path); + } + None => {} // do nothing for other kinds of files + } + } else { + panic!("did not receive snapshot file from unpacking threads"); + } + } + let snapshot_version_path = snapshot_version_path.unwrap(); + let snapshot_file_path = snapshot_file_path.unwrap(); + + (snapshot_version_path, snapshot_file_path, append_vec_files) +} + 
+/// Fields and information parsed from the snapshot. +struct SnapshotFieldsBundle { + snapshot_version: SnapshotVersion, + bank_fields: BankFieldsToDeserialize, + accounts_db_fields: AccountsDbFields, + append_vec_files: Vec, +} + +/// Parses fields and information from the snapshot files provided by +/// `file_receiver`. +fn snapshot_fields_from_files(file_receiver: &Receiver) -> Result { + let (snapshot_version_path, snapshot_file_path, append_vec_files) = + get_version_and_snapshot_files(file_receiver); + let snapshot_version_str = snapshot_version_from_file(snapshot_version_path)?; + let snapshot_version = snapshot_version_str.parse().map_err(|err| { + IoError::other(format!( + "unsupported snapshot version '{snapshot_version_str}': {err}", + )) + })?; + + let snapshot_file = fs::File::open(snapshot_file_path).unwrap(); + let mut snapshot_stream = BufReader::new(snapshot_file); + let (bank_fields, accounts_db_fields) = match snapshot_version { + SnapshotVersion::V1_2_0 => serde_snapshot::fields_from_stream(&mut snapshot_stream)?, + }; + + Ok(SnapshotFieldsBundle { + snapshot_version, + bank_fields, + accounts_db_fields, + append_vec_files, + }) +} + /// BankSnapshotInfo::new_from_dir() requires a few meta files to accept a snapshot dir /// as a valid one. A dir unpacked from an archive lacks these files. Fill them here to /// allow new_from_dir() checks to pass. These checks are not needed for unpacked dirs, @@ -1746,8 +1917,17 @@ fn unarchive_snapshot( let num_rebuilder_threads = num_cpus::get_physical() .saturating_sub(parallel_divisions) .max(1); - let (version_and_storages, measure_untar) = measure_time!( + let SnapshotFieldsBundle { + snapshot_version, + bank_fields, + accounts_db_fields, + append_vec_files, + .. 
+ } = snapshot_fields_from_files(&file_receiver)?; + let (storage, measure_untar) = measure_time!( SnapshotStorageRebuilder::rebuild_storage( + &accounts_db_fields, + append_vec_files, file_receiver, num_rebuilder_threads, next_append_vec_id, @@ -1760,13 +1940,11 @@ fn unarchive_snapshot( create_snapshot_meta_files_for_unarchived_snapshot(&unpack_dir)?; - let RebuiltSnapshotStorage { - snapshot_version, - storage, - } = version_and_storages; Ok(UnarchivedSnapshot { unpack_dir, storage, + bank_fields, + accounts_db_fields, unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion { unpacked_snapshots_dir, snapshot_version, @@ -1804,7 +1982,11 @@ pub fn rebuild_storages_from_snapshot_dir( account_paths: &[PathBuf], next_append_vec_id: Arc, storage_access: StorageAccess, -) -> Result { +) -> Result<( + AccountStorageMap, + BankFieldsToDeserialize, + AccountsDbFields, +)> { let bank_snapshot_dir = &snapshot_info.snapshot_dir; let accounts_hardlinks = bank_snapshot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); let account_run_paths: HashSet<_> = HashSet::from_iter(account_paths); @@ -1870,8 +2052,17 @@ pub fn rebuild_storages_from_snapshot_dir( account_paths, )?; + let SnapshotFieldsBundle { + bank_fields, + accounts_db_fields, + append_vec_files, + .. + } = snapshot_fields_from_files(&file_receiver)?; + let num_rebuilder_threads = num_cpus::get_physical().saturating_sub(1).max(1); - let version_and_storages = SnapshotStorageRebuilder::rebuild_storage( + let storage = SnapshotStorageRebuilder::rebuild_storage( + &accounts_db_fields, + append_vec_files, file_receiver, num_rebuilder_threads, next_append_vec_id, @@ -1879,11 +2070,7 @@ pub fn rebuild_storages_from_snapshot_dir( storage_access, )?; - let RebuiltSnapshotStorage { - snapshot_version: _, - storage, - } = version_and_storages; - Ok(storage) + Ok((storage, bank_fields, accounts_db_fields)) } /// Reads the `snapshot_version` from a file. 
Before opening the file, its size @@ -3538,6 +3725,23 @@ mod tests { ); } + #[test] + fn test_get_snapshot_file_kind() { + assert_eq!(None, get_snapshot_file_kind("file.txt")); + assert_eq!( + Some(SnapshotFileKind::Version), + get_snapshot_file_kind(SNAPSHOT_VERSION_FILENAME) + ); + assert_eq!( + Some(SnapshotFileKind::BankFields), + get_snapshot_file_kind("1234") + ); + assert_eq!( + Some(SnapshotFileKind::Storage), + get_snapshot_file_kind("1000.999") + ); + } + #[test] fn test_full_snapshot_slot_file_good() { let slot_written = 123_456_789; diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs index 9368877c4aa8ce..5c84082982680b 100644 --- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs +++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs @@ -1,10 +1,11 @@ //! Provides interfaces for rebuilding snapshot storages use { - super::{snapshot_version_from_file, SnapshotError, SnapshotFrom, SnapshotVersion}, + super::{SnapshotError, SnapshotFrom}, crate::serde_snapshot::{ - self, reconstruct_single_storage, remap_and_reconstruct_single_storage, - snapshot_storage_lengths_from_fields, SerializedAccountsFileId, + reconstruct_single_storage, remap_and_reconstruct_single_storage, + snapshot_storage_lengths_from_fields, AccountsDbFields, SerializableAccountStorageEntry, + SerializedAccountsFileId, }, crossbeam_channel::{select, unbounded, Receiver, Sender}, dashmap::DashMap, @@ -13,7 +14,6 @@ use { iter::{IntoParallelIterator, ParallelIterator}, ThreadPool, ThreadPoolBuilder, }, - regex::Regex, solana_accounts_db::{ account_storage::AccountStorageMap, accounts_db::{AccountsFileId, AtomicAccountsFileId}, @@ -23,8 +23,6 @@ use { solana_nohash_hasher::BuildNoHashHasher, std::{ collections::HashMap, - fs::File, - io::{BufReader, Error as IoError}, path::PathBuf, str::FromStr as _, sync::{ @@ -35,14 +33,6 @@ use { }, }; -/// Convenient wrapper for snapshot version and rebuilt 
storages -pub(crate) struct RebuiltSnapshotStorage { - /// Snapshot version - pub snapshot_version: SnapshotVersion, - /// Rebuilt storages - pub storage: AccountStorageMap, -} - /// Stores state for rebuilding snapshot storages #[derive(Debug)] pub(crate) struct SnapshotStorageRebuilder { @@ -71,22 +61,15 @@ pub(crate) struct SnapshotStorageRebuilder { impl SnapshotStorageRebuilder { /// Synchronously spawns threads to rebuild snapshot storages pub(crate) fn rebuild_storage( + accounts_db_fields: &AccountsDbFields, + append_vec_files: Vec, file_receiver: Receiver, num_threads: usize, next_append_vec_id: Arc, snapshot_from: SnapshotFrom, storage_access: StorageAccess, - ) -> Result { - let (snapshot_version_path, snapshot_file_path, append_vec_files) = - Self::get_version_and_snapshot_files(&file_receiver); - let snapshot_version_str = snapshot_version_from_file(snapshot_version_path)?; - let snapshot_version = snapshot_version_str.parse().map_err(|err| { - IoError::other(format!( - "unsupported snapshot version '{snapshot_version_str}': {err}", - )) - })?; - let snapshot_storage_lengths = - Self::process_snapshot_file(snapshot_version, snapshot_file_path)?; + ) -> Result { + let snapshot_storage_lengths = snapshot_storage_lengths_from_fields(accounts_db_fields); let account_storage_map = Self::spawn_rebuilder_threads( file_receiver, @@ -98,10 +81,7 @@ impl SnapshotStorageRebuilder { storage_access, )?; - Ok(RebuiltSnapshotStorage { - snapshot_version, - storage: account_storage_map, - }) + Ok(account_storage_map) } /// Create the SnapshotStorageRebuilder for storing state during rebuilding @@ -138,68 +118,6 @@ impl SnapshotStorageRebuilder { } } - /// Waits for snapshot file - /// Due to parallel unpacking, we may receive some append_vec files before the snapshot file - /// This function will push append_vec files into a buffer until we receive the snapshot file - fn get_version_and_snapshot_files( - file_receiver: &Receiver, - ) -> (PathBuf, PathBuf, Vec) { - let 
mut append_vec_files = Vec::with_capacity(1024); - let mut snapshot_version_path = None; - let mut snapshot_file_path = None; - - loop { - if let Ok(path) = file_receiver.recv() { - let filename = path.file_name().unwrap().to_str().unwrap(); - match get_snapshot_file_kind(filename) { - Some(SnapshotFileKind::Version) => { - snapshot_version_path = Some(path); - - // break if we have both the snapshot file and the version file - if snapshot_file_path.is_some() { - break; - } - } - Some(SnapshotFileKind::BankFields) => { - snapshot_file_path = Some(path); - - // break if we have both the snapshot file and the version file - if snapshot_version_path.is_some() { - break; - } - } - Some(SnapshotFileKind::Storage) => { - append_vec_files.push(path); - } - None => {} // do nothing for other kinds of files - } - } else { - panic!("did not receive snapshot file from unpacking threads"); - } - } - let snapshot_version_path = snapshot_version_path.unwrap(); - let snapshot_file_path = snapshot_file_path.unwrap(); - - (snapshot_version_path, snapshot_file_path, append_vec_files) - } - - /// Process the snapshot file to get the size of each snapshot storage file - fn process_snapshot_file( - snapshot_version: SnapshotVersion, - snapshot_file_path: PathBuf, - ) -> Result>, bincode::Error> { - let snapshot_file = File::open(snapshot_file_path).unwrap(); - let mut snapshot_stream = BufReader::new(snapshot_file); - match snapshot_version { - SnapshotVersion::V1_2_0 => { - let (_bank_fields, accounts_fields) = - serde_snapshot::fields_from_stream(&mut snapshot_stream)?; - - Ok(snapshot_storage_lengths_from_fields(&accounts_fields)) - } - } - } - /// Spawn threads for processing buffered append_vec_files, and then received files fn spawn_rebuilder_threads( file_receiver: Receiver, @@ -398,32 +316,6 @@ impl SnapshotStorageRebuilder { } } -/// Used to determine if a filename is structured like a version file, bank file, or storage file -#[derive(PartialEq, Debug)] -enum SnapshotFileKind 
{ - Version, - BankFields, - Storage, -} - -/// Determines `SnapshotFileKind` for `filename` if any -fn get_snapshot_file_kind(filename: &str) -> Option { - static VERSION_FILE_REGEX: std::sync::LazyLock = - std::sync::LazyLock::new(|| Regex::new(r"^version$").unwrap()); - static BANK_FIELDS_FILE_REGEX: std::sync::LazyLock = - std::sync::LazyLock::new(|| Regex::new(r"^[0-9]+(\.pre)?$").unwrap()); - - if VERSION_FILE_REGEX.is_match(filename) { - Some(SnapshotFileKind::Version) - } else if BANK_FIELDS_FILE_REGEX.is_match(filename) { - Some(SnapshotFileKind::BankFields) - } else if get_slot_and_append_vec_id(filename).is_ok() { - Some(SnapshotFileKind::Storage) - } else { - None - } -} - /// Get the slot and append vec id from the filename pub(crate) fn get_slot_and_append_vec_id(filename: &str) -> Result<(Slot, usize), SnapshotError> { let mut parts = filename.splitn(2, '.'); @@ -436,27 +328,7 @@ pub(crate) fn get_slot_and_append_vec_id(filename: &str) -> Result<(Slot, usize) #[cfg(test)] mod tests { - use { - super::*, crate::snapshot_utils::SNAPSHOT_VERSION_FILENAME, - solana_accounts_db::accounts_file::AccountsFile, - }; - - #[test] - fn test_get_snapshot_file_kind() { - assert_eq!(None, get_snapshot_file_kind("file.txt")); - assert_eq!( - Some(SnapshotFileKind::Version), - get_snapshot_file_kind(SNAPSHOT_VERSION_FILENAME) - ); - assert_eq!( - Some(SnapshotFileKind::BankFields), - get_snapshot_file_kind("1234") - ); - assert_eq!( - Some(SnapshotFileKind::Storage), - get_snapshot_file_kind("1000.999") - ); - } + use {super::*, solana_accounts_db::accounts_file::AccountsFile}; #[test] fn test_get_slot_and_append_vec_id() { From bd8ccc6a2242f2b0d422f6faa2e078d18759a568 Mon Sep 17 00:00:00 2001 From: Mircea Colonescu Date: Fri, 20 Jun 2025 10:02:39 -0400 Subject: [PATCH 046/124] Remove debug-signature feature from transaction-context (#6647) * remove debug-signature feat from transaction-context * missed one usage of the feature * remove from svm examples deps * one 
more missing lock file * missed usage of set_signature * sort deps for tx context --- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 1 - svm/Cargo.toml | 6 ++--- svm/examples/Cargo.lock | 1 - svm/src/transaction_processor.rs | 2 -- transaction-context/Cargo.toml | 7 +----- transaction-context/src/lib.rs | 39 -------------------------------- 7 files changed, 5 insertions(+), 53 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 29bfb52594cbf4..1fe38507375163 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -540,7 +540,7 @@ solana-tps-client = { path = "tps-client", version = "=3.0.0" } solana-tpu-client = { path = "tpu-client", version = "=3.0.0", default-features = false } solana-tpu-client-next = { path = "tpu-client-next", version = "=3.0.0" } solana-transaction = "2.2.3" -solana-transaction-context = { path = "transaction-context", version = "=3.0.0", features = ["bincode", "debug-signature"] } +solana-transaction-context = { path = "transaction-context", version = "=3.0.0", features = ["bincode"] } solana-transaction-error = "2.2.1" solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=3.0.0" } solana-transaction-status = { path = "transaction-status", version = "=3.0.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3ee31857cc2a77..63080b1d2b142e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -9457,7 +9457,6 @@ dependencies = [ "solana-pubkey", "solana-rent", "solana-sdk-ids", - "solana-signature", ] [[package]] diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 1980c3be9dc541..a52e5d9ac39a7d 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -75,7 +75,7 @@ solana-svm-transaction = { workspace = true } solana-system-interface = { workspace = true } solana-sysvar-id = { workspace = true } solana-timings = { workspace = true } -solana-transaction-context = { workspace = true, features = ["debug-signature"] } +solana-transaction-context = { workspace = true } solana-transaction-error 
= { workspace = true } solana-type-overrides = { workspace = true } spl-generic-token = { workspace = true } @@ -105,12 +105,12 @@ solana-logger = { workspace = true } solana-native-token = { workspace = true } solana-precompile-error = { workspace = true } solana-program-runtime = { workspace = true, features = ["dev-context-only-utils"] } -solana-pubkey = { workspace = true, features = [ "rand" ] } +solana-pubkey = { workspace = true, features = ["rand"] } solana-rent = { workspace = true } solana-sbpf = { workspace = true } solana-secp256k1-program = { workspace = true } solana-secp256r1-program = { workspace = true, features = ["openssl-vendored"] } -solana-signature = { workspace = true, features = [ "rand" ] } +solana-signature = { workspace = true, features = ["rand"] } solana-signer = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-svm = { path = ".", features = ["dev-context-only-utils", "svm-internal"] } diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 982680712547f2..0863ca58c9cd9b 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -8557,7 +8557,6 @@ dependencies = [ "solana-pubkey", "solana-rent", "solana-sdk-ids", - "solana-signature", ] [[package]] diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 7d00785a6fa492..c5daeae3f681bf 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -860,8 +860,6 @@ impl TransactionBatchProcessor { .feature_set .remove_accounts_executable_flag_checks, ); - #[cfg(debug_assertions)] - transaction_context.set_signature(tx.signature()); let pre_account_state_info = TransactionAccountStateInfo::new(&transaction_context, tx, rent_collector); diff --git a/transaction-context/Cargo.toml b/transaction-context/Cargo.toml index 9a76f58ae22c17..f88b7d447d9b4d 100644 --- a/transaction-context/Cargo.toml +++ b/transaction-context/Cargo.toml @@ -16,12 +16,7 @@ rustdoc-args = 
["--cfg=docsrs"] [features] bincode = ["dep:bincode", "serde", "solana-account/bincode"] -debug-signature = ["dep:solana-signature"] -dev-context-only-utils = [ - "bincode", - "debug-signature", - "solana-account/dev-context-only-utils", -] +dev-context-only-utils = ["bincode", "solana-account/dev-context-only-utils"] serde = ["dep:serde", "dep:serde_derive"] [dependencies] diff --git a/transaction-context/src/lib.rs b/transaction-context/src/lib.rs index 8d4196e04ff9c2..5864d77fae8482 100644 --- a/transaction-context/src/lib.rs +++ b/transaction-context/src/lib.rs @@ -2,12 +2,6 @@ #![deny(clippy::indexing_slicing)] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -#[cfg(all( - not(target_os = "solana"), - feature = "debug-signature", - debug_assertions -))] -use solana_signature::Signature; #[cfg(not(target_os = "solana"))] use {solana_account::WritableAccount, solana_rent::Rent, std::mem::MaybeUninit}; use { @@ -177,13 +171,6 @@ pub struct TransactionContext { remove_accounts_executable_flag_checks: bool, #[cfg(not(target_os = "solana"))] rent: Rent, - /// Useful for debugging to filter by or to look it up on the explorer - #[cfg(all( - not(target_os = "solana"), - feature = "debug-signature", - debug_assertions - ))] - signature: Signature, } impl TransactionContext { @@ -210,12 +197,6 @@ impl TransactionContext { return_data: TransactionReturnData::default(), remove_accounts_executable_flag_checks: true, rent, - #[cfg(all( - not(target_os = "solana"), - feature = "debug-signature", - debug_assertions - ))] - signature: Signature::default(), } } @@ -244,26 +225,6 @@ impl TransactionContext { &self.accounts } - /// Stores the signature of the current transaction - #[cfg(all( - not(target_os = "solana"), - feature = "debug-signature", - debug_assertions - ))] - pub fn set_signature(&mut self, signature: &Signature) { - self.signature = *signature; - } - - /// Returns the signature of the current transaction - #[cfg(all( - not(target_os = "solana"), - feature = 
"debug-signature", - debug_assertions - ))] - pub fn get_signature(&self) -> &Signature { - &self.signature - } - /// Returns the total number of accounts loaded in this Transaction pub fn get_number_of_accounts(&self) -> IndexOfAccount { self.accounts.len() as IndexOfAccount From e36fdb83eb1c2f8ffdfd4a3316f1f7fc8d09d7b4 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 20 Jun 2025 09:22:39 -0500 Subject: [PATCH 047/124] clean up `disable_rent_fees_collection` feature (#6622) * clean up disable_rent_fees_collection feature * feedback * more cleanup and fix test * feedback --- Cargo.lock | 13 - Cargo.toml | 1 - feature-set/src/lib.rs | 1 - ledger/src/blockstore_processor.rs | 6 +- programs/sbf/Cargo.lock | 12 - rpc/Cargo.toml | 1 - rpc/src/transaction_status_service.rs | 21 +- runtime/Cargo.toml | 2 - runtime/benches/accounts.rs | 3 +- runtime/src/account_saver.rs | 11 - runtime/src/bank.rs | 194 +--- runtime/src/bank/fee_distribution.rs | 365 +----- runtime/src/bank/tests.rs | 1012 +---------------- runtime/src/rent_collector.rs | 6 +- runtime/src/serde_snapshot.rs | 5 +- svm-feature-set/src/lib.rs | 2 - svm-rent-collector/src/svm_rent_collector.rs | 4 - .../src/svm_rent_collector/rent_collector.rs | 10 +- svm/Cargo.toml | 1 - svm/examples/Cargo.lock | 12 - svm/src/account_loader.rs | 169 +-- svm/src/rollback_accounts.rs | 36 +- svm/src/transaction_commit_result.rs | 4 +- svm/src/transaction_processor.rs | 37 +- 24 files changed, 110 insertions(+), 1818 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78892fec819933..f75d959d29bcf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9750,16 +9750,6 @@ dependencies = [ "solana-sdk-ids", ] -[[package]] -name = "solana-rent-debits" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f6f9113c6003492e74438d1288e30cffa8ccfdc2ef7b49b9e816d8034da18cd" -dependencies = [ - "solana-pubkey", - "solana-reward-info", -] - [[package]] name = "solana-reward-info" version = 
"2.2.1" @@ -9835,7 +9825,6 @@ dependencies = [ "solana-quic-definitions", "solana-rayon-threadlimit", "solana-rent", - "solana-rent-debits", "solana-rpc", "solana-rpc-client-api", "solana-runtime", @@ -10134,7 +10123,6 @@ dependencies = [ "solana-rayon-threadlimit", "solana-rent", "solana-rent-collector", - "solana-rent-debits", "solana-reward-info", "solana-runtime", "solana-runtime-transaction", @@ -10774,7 +10762,6 @@ dependencies = [ "solana-pubkey", "solana-rent", "solana-rent-collector", - "solana-rent-debits", "solana-sbpf", "solana-sdk-ids", "solana-secp256k1-program", diff --git a/Cargo.toml b/Cargo.toml index 1fe38507375163..a402737570731e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -486,7 +486,6 @@ solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=3.0.0" } solana-remote-wallet = { path = "remote-wallet", version = "=3.0.0", default-features = false } solana-rent = "2.2.1" solana-rent-collector = "2.2.1" -solana-rent-debits = "2.2.1" solana-reward-info = "2.2.1" solana-rpc = { path = "rpc", version = "=3.0.0" } solana-rpc-client = { path = "rpc-client", version = "=3.0.0", default-features = false } diff --git a/feature-set/src/lib.rs b/feature-set/src/lib.rs index fa23dd4c4c8d2d..6d30154e916d7a 100644 --- a/feature-set/src/lib.rs +++ b/feature-set/src/lib.rs @@ -151,7 +151,6 @@ impl FeatureSet { .is_active(&fix_alt_bn128_multiplication_input_length::id()), loosen_cpi_size_restriction: self.is_active(&loosen_cpi_size_restriction::id()), increase_tx_account_lock_limit: self.is_active(&increase_tx_account_lock_limit::id()), - disable_rent_fees_collection: self.is_active(&disable_rent_fees_collection::id()), enable_extend_program_checked: self.is_active(&enable_extend_program_checked::id()), formalize_loaded_transaction_data_size: self .is_active(&formalize_loaded_transaction_data_size::id()), diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 73f0e568b04861..93830624b09c78 100644 --- 
a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -3441,9 +3441,9 @@ pub mod tests { assert_eq!(bank.get_balance(&keypair1.pubkey()), 3); } - #[test_case(true, true; "rent_collected")] - #[test_case(false, true; "rent_not_collected")] - #[test_case(true, false; "rent_not-collected_part_rent_disabled")] + #[test_case(true, true; "fee_payer_in_rent_partition")] + #[test_case(false, true; "fee_payer_not_in_rent_partition")] + #[test_case(true, false; "fee_payer_in_rent_partition-partitioned_rent_disabled")] fn test_transaction_result_does_not_affect_bankhash( fee_payer_in_rent_partition: bool, should_run_partitioned_rent_collection: bool, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 63080b1d2b142e..727f10533d6691 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7558,16 +7558,6 @@ dependencies = [ "solana-sdk-ids", ] -[[package]] -name = "solana-rent-debits" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f6f9113c6003492e74438d1288e30cffa8ccfdc2ef7b49b9e816d8034da18cd" -dependencies = [ - "solana-pubkey", - "solana-reward-info", -] - [[package]] name = "solana-reward-info" version = "2.2.1" @@ -7848,7 +7838,6 @@ dependencies = [ "solana-rayon-threadlimit", "solana-rent", "solana-rent-collector", - "solana-rent-debits", "solana-reward-info", "solana-runtime-transaction", "solana-sdk-ids", @@ -9102,7 +9091,6 @@ dependencies = [ "solana-pubkey", "solana-rent", "solana-rent-collector", - "solana-rent-debits", "solana-sdk-ids", "solana-slot-hashes", "solana-svm-callback", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 93748850d8a779..5b3e918062cfd1 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -114,7 +114,6 @@ solana-nonce-account = { workspace = true } solana-program-option = { workspace = true } solana-program-runtime = { workspace = true } solana-rent = { workspace = true } -solana-rent-debits = { workspace = true } solana-rpc = { path 
= ".", features = ["dev-context-only-utils"] } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-runtime-transaction = { workspace = true, features = [ diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 0899d7e503df29..cfe418105d0b7a 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -164,7 +164,6 @@ impl TransactionStatusService { return_data, executed_units, fee_details, - rent_debits, .. } = committed_tx; @@ -175,18 +174,7 @@ impl TransactionStatusService { let pre_token_balances = Some(pre_token_balances); let post_token_balances = Some(post_token_balances); - let rewards = Some( - rent_debits - .into_unordered_rewards_iter() - .map(|(pubkey, reward_info)| Reward { - pubkey: pubkey.to_string(), - lamports: reward_info.lamports, - post_balance: reward_info.post_balance, - reward_type: Some(reward_info.reward_type), - commission: reward_info.commission, - }) - .collect(), - ); + let rewards = Some(vec![]); let loaded_addresses = transaction.get_loaded_addresses(); let mut transaction_status_meta = TransactionStatusMeta { status, @@ -346,7 +334,6 @@ pub(crate) mod tests { solana_nonce::{self as nonce, state::DurableNonce}, solana_nonce_account as nonce_account, solana_pubkey::Pubkey, - solana_rent_debits::RentDebits, solana_runtime::bank::{Bank, TransactionBalancesSet}, solana_signature::Signature, solana_signer::Signer, @@ -443,7 +430,6 @@ pub(crate) mod tests { .unwrap(); let expected_transaction = transaction.clone(); - let pubkey = Pubkey::new_unique(); let mut nonce_account = nonce_account::create_account(1).into_inner(); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_from_array([42u8; 32])); @@ -454,9 +440,6 @@ pub(crate) mod tests { )) .unwrap(); - let mut rent_debits = RentDebits::default(); - rent_debits.insert(&pubkey, 123, 456); - let commit_result = Ok(CommittedTransaction { status: Ok(()), log_messages: None, @@ 
-464,7 +447,6 @@ pub(crate) mod tests { return_data: None, executed_units: 0, fee_details: FeeDetails::default(), - rent_debits, loaded_account_stats: TransactionLoadedAccountsStats::default(), }); @@ -591,7 +573,6 @@ pub(crate) mod tests { return_data: None, executed_units: 0, fee_details: FeeDetails::default(), - rent_debits: RentDebits::default(), loaded_account_stats: TransactionLoadedAccountsStats::default(), }); diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index b7d39f182b1f0e..4823fcaa060c00 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -138,7 +138,6 @@ solana-pubkey = { workspace = true, features = ["rand"] } solana-rayon-threadlimit = { workspace = true } solana-rent = { workspace = true } solana-rent-collector = { workspace = true, features = ["serde"] } -solana-rent-debits = { workspace = true } solana-reward-info = { workspace = true } solana-runtime-transaction = { workspace = true } solana-sdk-ids = { workspace = true } @@ -192,7 +191,6 @@ rand_chacha = { workspace = true } solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } solana-builtins = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } -solana-rent-debits = { workspace = true, features = ["dev-context-only-utils"] } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-runtime = { path = ".", features = ["dev-context-only-utils"] } solana-runtime-transaction = { workspace = true, features = [ diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 33ec1332e08685..00aaf7fa3d0c4d 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -40,8 +40,7 @@ fn bench_accounts_create(bencher: &mut Bencher) { #[bench] fn bench_accounts_squash(bencher: &mut Bencher) { - let (mut genesis_config, _) = create_genesis_config(100_000); - genesis_config.rent.burn_percent = 100; // Avoid triggering an assert in Bank::distribute_rent_to_validators() + 
let (genesis_config, _) = create_genesis_config(100_000); let mut prev_bank = Arc::new(Bank::new_with_paths_for_benches( &genesis_config, vec![PathBuf::from("bench_a1")], diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index ca364ae9aa714f..e921a7c828636e 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -189,7 +189,6 @@ mod tests { }, solana_nonce_account as nonce_account, solana_program_runtime::execution_budget::SVMTransactionExecutionBudget, - solana_rent_debits::RentDebits, solana_sdk_ids::native_loader, solana_signer::{signers::Signers, Signer}, solana_svm::{ @@ -280,8 +279,6 @@ mod tests { fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; @@ -291,8 +288,6 @@ mod tests { fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; @@ -354,8 +349,6 @@ mod tests { fee_payer_account: from_account_pre.clone(), }, compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; @@ -449,8 +442,6 @@ mod tests { fee_payer_account: from_account_pre.clone(), }, compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; @@ -557,8 +548,6 @@ mod tests { nonce: nonce.clone(), }, compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2b0c4f0843ed6d..becd3e3b53921a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -86,7 +86,7 @@ use { IncrementalAccountsHash, 
MerkleOrLatticeAccountsHash, }, accounts_index::{IndexKey, ScanConfig, ScanResult}, - accounts_partition::{self, Partition, PartitionIndex}, + accounts_partition::{self, Partition}, accounts_update_notifier_interface::AccountsUpdateNotifier, ancestors::{Ancestors, AncestorsForSerialization}, blockhash_queue::BlockhashQueue, @@ -126,8 +126,7 @@ use { invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, }, solana_pubkey::Pubkey, - solana_rent_collector::{CollectedInfo, RentCollector}, - solana_rent_debits::RentDebits, + solana_rent_collector::RentCollector, solana_reward_info::RewardInfo, solana_runtime_transaction::{ runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta, @@ -139,7 +138,7 @@ use { solana_slot_history::{Check, SlotHistory}, solana_stake_interface::state::Delegation, solana_svm::{ - account_loader::{collect_rent_from_account, LoadedTransaction}, + account_loader::{update_rent_exempt_status_for_account, LoadedTransaction}, account_overrides::AccountOverrides, program_loader::load_program_with_pubkey, transaction_balances::BalanceCollector, @@ -446,7 +445,6 @@ pub struct BankFieldsToDeserialize { pub(crate) collector_id: Pubkey, pub(crate) collector_fees: u64, pub(crate) fee_rate_governor: FeeRateGovernor, - pub(crate) collected_rent: u64, pub(crate) rent_collector: RentCollector, pub(crate) epoch_schedule: EpochSchedule, pub(crate) inflation: Inflation, @@ -493,7 +491,6 @@ pub struct BankFieldsToSerialize { pub collector_id: Pubkey, pub collector_fees: u64, pub fee_rate_governor: FeeRateGovernor, - pub collected_rent: u64, pub rent_collector: RentCollector, pub epoch_schedule: EpochSchedule, pub inflation: Inflation, @@ -545,7 +542,6 @@ impl PartialEq for Bank { collector_id, collector_fees, fee_rate_governor, - collected_rent, rent_collector, epoch_schedule, inflation, @@ -610,7 +606,6 @@ impl PartialEq for Bank { && collector_id == &other.collector_id && collector_fees.load(Relaxed) == 
other.collector_fees.load(Relaxed) && fee_rate_governor == &other.fee_rate_governor - && collected_rent.load(Relaxed) == other.collected_rent.load(Relaxed) && rent_collector == &other.rent_collector && epoch_schedule == &other.epoch_schedule && *inflation.read().unwrap() == *other.inflation.read().unwrap() @@ -655,7 +650,6 @@ impl BankFieldsToSerialize { collector_id: Pubkey::default(), collector_fees: u64::default(), fee_rate_governor: FeeRateGovernor::default(), - collected_rent: u64::default(), rent_collector: RentCollector::default(), epoch_schedule: EpochSchedule::default(), inflation: Inflation::default(), @@ -827,9 +821,6 @@ pub struct Bank { /// Track cluster signature throughput and adjust fee rate pub(crate) fee_rate_governor: FeeRateGovernor, - /// Rent that has been collected - collected_rent: AtomicU64, - /// latest rent collector, knows the epoch rent_collector: RentCollector, @@ -1107,7 +1098,6 @@ impl Bank { collector_id: Pubkey::default(), collector_fees: AtomicU64::default(), fee_rate_governor: FeeRateGovernor::default(), - collected_rent: AtomicU64::default(), rent_collector: RentCollector::default(), epoch_schedule: EpochSchedule::default(), inflation: Arc::>::default(), @@ -1330,7 +1320,6 @@ impl Bank { genesis_creation_time: parent.genesis_creation_time, slots_per_year: parent.slots_per_year, epoch_schedule, - collected_rent: AtomicU64::new(0), rent_collector: Self::get_rent_collector_from(&parent.rent_collector, epoch), max_tick_height: slot .checked_add(1) @@ -1840,7 +1829,6 @@ impl Bank { collector_id: fields.collector_id, collector_fees: AtomicU64::new(fields.collector_fees), fee_rate_governor: fields.fee_rate_governor, - collected_rent: AtomicU64::new(fields.collected_rent), // clone()-ing is needed to consider a gated behavior in rent_collector rent_collector: Self::get_rent_collector_from(&fields.rent_collector, fields.epoch), epoch_schedule: fields.epoch_schedule, @@ -2040,7 +2028,6 @@ impl Bank { collector_id: self.collector_id, 
collector_fees: self.collector_fees.load(Relaxed), fee_rate_governor: self.fee_rate_governor.clone(), - collected_rent: self.collected_rent.load(Relaxed), rent_collector: self.rent_collector.clone(), epoch_schedule: self.epoch_schedule.clone(), inflation: *self.inflation.read().unwrap(), @@ -2654,9 +2641,8 @@ impl Bank { let mut hash = self.hash.write().unwrap(); if *hash == Hash::default() { // finish up any deferred changes to account state - self.collect_rent_eagerly(); + self.run_partitioned_rent_exempt_status_updates(); self.distribute_transaction_fee_details(); - self.distribute_rent_fees(); self.update_slot_history(); self.run_incinerator(); @@ -3796,8 +3782,6 @@ impl Bank { .store_cached(to_store, transactions.as_deref()); }); - self.collect_rent(&processing_results); - // Cached vote and stake accounts are synchronized with accounts-db // after each transaction. let ((), update_stakes_cache_us) = @@ -3867,19 +3851,11 @@ impl Bank { ProcessedTransaction::Executed(executed_tx) => { let execution_details = executed_tx.execution_details; let LoadedTransaction { - rent_debits, accounts: loaded_accounts, fee_details, .. 
} = executed_tx.loaded_transaction; - // Rent is only collected for successfully executed transactions - let rent_debits = if execution_details.was_successful() { - rent_debits - } else { - RentDebits::default() - }; - Ok(CommittedTransaction { status: execution_details.status, log_messages: execution_details.log_messages, @@ -3887,7 +3863,6 @@ impl Bank { return_data: execution_details.return_data, executed_units, fee_details, - rent_debits, loaded_account_stats: TransactionLoadedAccountsStats { loaded_accounts_count: loaded_accounts.len(), loaded_accounts_data_size, @@ -3900,7 +3875,6 @@ impl Bank { inner_instructions: None, return_data: None, executed_units, - rent_debits: RentDebits::default(), fee_details: fees_only_tx.fee_details, loaded_account_stats: TransactionLoadedAccountsStats { loaded_accounts_count: fees_only_tx.rollback_accounts.count(), @@ -3912,17 +3886,6 @@ impl Bank { .collect() } - fn collect_rent(&self, processing_results: &[TransactionProcessingResult]) { - let collected_rent = processing_results - .iter() - .filter_map(|processing_result| processing_result.processed_transaction()) - .filter_map(|processed_tx| processed_tx.executed_transaction()) - .filter(|executed_tx| executed_tx.was_successful()) - .map(|executed_tx| executed_tx.loaded_transaction.rent) - .sum(); - self.collected_rent.fetch_add(collected_rent, Relaxed); - } - fn run_incinerator(&self) { if let Some((account, _)) = self.get_account_modified_since_parent_with_fixed_root(&incinerator::id()) @@ -4060,7 +4023,7 @@ impl Bank { accounts_written_this_slot } - fn collect_rent_eagerly(&self) { + fn run_partitioned_rent_exempt_status_updates(&self) { if self.lazy_rent_collection.load(Relaxed) { return; } @@ -4111,17 +4074,17 @@ impl Bank { if parallel { let thread_pool = &self.rc.accounts.accounts_db.thread_pool; thread_pool.install(|| { - ranges.into_par_iter().for_each(|range| { - self.collect_rent_in_range(range.0, range.1, &rent_metrics) + ranges.into_par_iter().for_each(|(_, 
subrange_full)| { + self.update_rent_exempt_status_in_range(subrange_full, &rent_metrics) }); }); } } if !parallel { // collect serially - partitions - .into_iter() - .for_each(|partition| self.collect_rent_in_partition(partition, &rent_metrics)); + partitions.into_iter().for_each(|partition| { + self.update_rent_exempt_status_in_partition(partition, &rent_metrics) + }); } measure.stop(); datapoint_info!( @@ -4170,14 +4133,7 @@ impl Bank { .is_active(&feature_set::skip_rent_rewrites::id()) } - /// true if rent fees should be collected (i.e. disable_rent_fees_collection is NOT enabled) - fn should_collect_rent(&self) -> bool { - !self - .feature_set - .is_active(&feature_set::disable_rent_fees_collection::id()) - } - - /// Collect rent from `accounts` + /// Update rent exempt status for `accounts` /// /// This fn is called inside a parallel loop from `collect_rent_in_partition()`. Avoid adding /// any code that causes contention on shared memory/data (i.e. do not update atomic metrics). @@ -4186,14 +4142,10 @@ impl Bank { /// reduce at the end of its parallel loop. If possible, place data/computation that cause /// contention/take locks in the return struct and process them in /// `collect_rent_from_partition()` after reducing the parallel loop. 
- fn collect_rent_from_accounts( + fn update_rent_exempt_status_for_accounts( &self, mut accounts: Vec<(Pubkey, AccountSharedData, Slot)>, - rent_paying_pubkeys: Option<&HashSet>, - partition_index: PartitionIndex, ) -> CollectRentFromAccountsInfo { - let mut rent_debits = RentDebits::default(); - let mut total_rent_collected_info = CollectedInfo::default(); let mut accounts_to_store = Vec::<(&Pubkey, &AccountSharedData)>::with_capacity(accounts.len()); let mut time_collecting_rent_us = 0; @@ -4207,18 +4159,15 @@ impl Bank { let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let rent_epoch_pre = account.rent_epoch(); - let (rent_collected_info, collect_rent_us) = measure_us!(collect_rent_from_account( - &self.feature_set.runtime_features(), + let ((), collect_rent_us) = measure_us!(update_rent_exempt_status_for_account( &self.rent_collector, - pubkey, account )); time_collecting_rent_us += collect_rent_us; let rent_epoch_post = account.rent_epoch(); // did the account change in any way due to rent collection? - let rent_epoch_changed = rent_epoch_post != rent_epoch_pre; - let account_changed = rent_collected_info.rent_amount != 0 || rent_epoch_changed; + let account_changed = rent_epoch_post != rent_epoch_pre; // always store the account, regardless if it changed or not let always_store_accounts = @@ -4230,45 +4179,15 @@ impl Bank { // ensures we verify the whole on-chain state (= all accounts) // via the bank delta hash slowly once per an epoch. 
if account_changed || always_store_accounts { - if rent_collected_info.rent_amount > 0 { - if let Some(rent_paying_pubkeys) = rent_paying_pubkeys { - if !rent_paying_pubkeys.contains(pubkey) { - let partition_from_pubkey = accounts_partition::partition_from_pubkey( - pubkey, - self.epoch_schedule.slots_per_epoch, - ); - // Submit datapoint instead of assert while we verify this is correct - datapoint_warn!( - "bank-unexpected_rent_paying_pubkey", - ("slot", self.slot(), i64), - ("pubkey", pubkey.to_string(), String), - ("partition_index", partition_index, i64), - ("partition_from_pubkey", partition_from_pubkey, i64) - ); - warn!( - "Collecting rent from unexpected pubkey: {}, slot: {}, parent_slot: {:?}, \ - partition_index: {}, partition_from_pubkey: {}", - pubkey, - self.slot(), - self.parent().map(|bank| bank.slot()), - partition_index, - partition_from_pubkey, - ); - } - } - } else { - debug_assert_eq!(rent_collected_info.rent_amount, 0); - if rent_epoch_changed { - datapoint_info!( - "bank-rent_collection_updated_only_rent_epoch", - ("slot", self.slot(), i64), - ("pubkey", pubkey.to_string(), String), - ("rent_epoch_pre", rent_epoch_pre, i64), - ("rent_epoch_post", rent_epoch_post, i64), - ); - } + if account_changed { + datapoint_info!( + "bank-rent_collection_updated_only_rent_epoch", + ("slot", self.slot(), i64), + ("pubkey", pubkey.to_string(), String), + ("rent_epoch_pre", rent_epoch_pre, i64), + ("rent_epoch_post", rent_epoch_post, i64), + ); } - total_rent_collected_info += rent_collected_info; accounts_to_store.push((pubkey, account)); } else if !account_changed && !can_skip_rewrites @@ -4281,7 +4200,6 @@ impl Bank { let hash = AccountsDb::hash_account(account, pubkey); skipped_rewrites.push((*pubkey, hash)); } - rent_debits.insert(pubkey, rent_collected_info.rent_amount, account.lamports()); } if !accounts_to_store.is_empty() { @@ -4294,48 +4212,24 @@ impl Bank { CollectRentFromAccountsInfo { skipped_rewrites, - rent_collected_info: 
total_rent_collected_info, - rent_rewards: rent_debits.into_unordered_rewards_iter().collect(), time_collecting_rent_us, time_storing_accounts_us, num_accounts: accounts.len(), } } - /// convert 'partition' to a pubkey range and 'collect_rent_in_range' - fn collect_rent_in_partition(&self, partition: Partition, metrics: &RentMetrics) { + /// convert 'partition' to a pubkey range and 'update_rent_exempt_status_in_range' + fn update_rent_exempt_status_in_partition(&self, partition: Partition, metrics: &RentMetrics) { let subrange_full = accounts_partition::pubkey_range_from_partition(partition); - self.collect_rent_in_range(partition, subrange_full, metrics) + self.update_rent_exempt_status_in_range(subrange_full, metrics) } - /// get all pubkeys that we expect to be rent-paying or None, if this was not initialized at load time (that should only exist in test cases) - fn get_rent_paying_pubkeys(&self, partition: &Partition) -> Option> { - self.rc - .accounts - .accounts_db - .accounts_index - .rent_paying_accounts_by_partition - .get() - .and_then(|rent_paying_accounts| { - rent_paying_accounts.is_initialized().then(|| { - accounts_partition::get_partition_end_indexes(partition) - .into_iter() - .flat_map(|end_index| { - rent_paying_accounts.get_pubkeys_in_partition_index(end_index) - }) - .cloned() - .collect::>() - }) - }) - } - - /// load accounts with pubkeys in 'subrange_full' - /// collect rent and update 'account.rent_epoch' as necessary - /// store accounts, whether rent was collected or not (depending on whether we skipping rewrites is enabled) + /// load accounts with pubkeys in 'subrange_full', update + /// 'account.rent_epoch' as necessary, and store accounts, whether rent was + /// collected or not (depending on whether we skipping rewrites is enabled) /// update bank's rewrites set for all rewrites that were skipped - fn collect_rent_in_range( + fn update_rent_exempt_status_in_range( &self, - partition: Partition, subrange_full: RangeInclusive, metrics: 
&RentMetrics, ) { @@ -4348,9 +4242,6 @@ impl Bank { hold_range.stop(); metrics.hold_range_us.fetch_add(hold_range.as_us(), Relaxed); - let rent_paying_pubkeys_ = self.get_rent_paying_pubkeys(&partition); - let rent_paying_pubkeys = rent_paying_pubkeys_.as_ref(); - // divide the range into num_threads smaller ranges and process in parallel // Note that 'pubkey_range_from_partition' cannot easily be re-used here to break the range smaller. // It has special handling of 0..0 and partition_count changes affect all ranges unevenly. @@ -4360,7 +4251,7 @@ impl Bank { let end_prefix_inclusive = accounts_partition::prefix_from_pubkey(subrange_full.end()); let range = end_prefix_inclusive - start_prefix; let increment = range / num_threads; - let mut results = (0..num_threads) + let results = (0..num_threads) .into_par_iter() .map(|chunk| { let offset = |chunk| start_prefix + chunk * increment; @@ -4385,7 +4276,7 @@ impl Bank { .load_to_collect_rent_eagerly(&self.ancestors, subrange) }); CollectRentInPartitionInfo::new( - self.collect_rent_from_accounts(accounts, rent_paying_pubkeys, partition.1), + self.update_rent_exempt_status_for_accounts(accounts), Duration::from_nanos(measure_load_accounts.as_ns()), ) }) @@ -4406,16 +4297,6 @@ impl Bank { .accounts .hold_range_in_memory(&subrange_full, false, thread_pool); - self.collected_rent - .fetch_add(results.rent_collected, Relaxed); - self.update_accounts_data_size_delta_off_chain( - -(results.accounts_data_size_reclaimed as i64), - ); - self.rewards - .write() - .unwrap() - .append(&mut results.rent_rewards); - metrics .load_us .fetch_add(results.time_loading_accounts_us, Relaxed); @@ -7297,8 +7178,6 @@ enum ApplyFeatureActivationsCaller { #[derive(Debug, Default)] struct CollectRentFromAccountsInfo { skipped_rewrites: Vec<(Pubkey, AccountHash)>, - rent_collected_info: CollectedInfo, - rent_rewards: Vec<(Pubkey, RewardInfo)>, time_collecting_rent_us: u64, time_storing_accounts_us: u64, num_accounts: usize, @@ -7309,9 +7188,6 
@@ struct CollectRentFromAccountsInfo { #[derive(Debug, Default)] struct CollectRentInPartitionInfo { skipped_rewrites: Vec<(Pubkey, AccountHash)>, - rent_collected: u64, - accounts_data_size_reclaimed: u64, - rent_rewards: Vec<(Pubkey, RewardInfo)>, time_loading_accounts_us: u64, time_collecting_rent_us: u64, time_storing_accounts_us: u64, @@ -7325,9 +7201,6 @@ impl CollectRentInPartitionInfo { fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self { Self { skipped_rewrites: info.skipped_rewrites, - rent_collected: info.rent_collected_info.rent_amount, - accounts_data_size_reclaimed: info.rent_collected_info.account_data_len_reclaimed, - rent_rewards: info.rent_rewards, time_loading_accounts_us: time_loading_accounts.as_micros() as u64, time_collecting_rent_us: info.time_collecting_rent_us, time_storing_accounts_us: info.time_storing_accounts_us, @@ -7343,11 +7216,6 @@ impl CollectRentInPartitionInfo { fn reduce(lhs: Self, rhs: Self) -> Self { Self { skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(), - rent_collected: lhs.rent_collected.saturating_add(rhs.rent_collected), - accounts_data_size_reclaimed: lhs - .accounts_data_size_reclaimed - .saturating_add(rhs.accounts_data_size_reclaimed), - rent_rewards: [lhs.rent_rewards, rhs.rent_rewards].concat(), time_loading_accounts_us: lhs .time_loading_accounts_us .saturating_add(rhs.time_loading_accounts_us), diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index be7bb6e44a6aca..39274665be0816 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,7 +1,7 @@ use { super::Bank, crate::bank::CollectorFeeDetails, - log::{debug, warn}, + log::debug, solana_account::{ReadableAccount, WritableAccount}, solana_fee::FeeFeatures, solana_fee_structure::FeeBudgetLimits, @@ -10,7 +10,6 @@ use { solana_runtime_transaction::transaction_with_meta::TransactionWithMeta, 
solana_svm_rent_collector::svm_rent_collector::SVMRentCollector, solana_system_interface::program as system_program, - solana_vote::vote_account::VoteAccountsHashMap, std::{result::Result, sync::atomic::Ordering::Relaxed}, thiserror::Error, }; @@ -48,10 +47,6 @@ impl Bank { // earning transaction fees are fairly distributed by stake. And missing the opportunity // (not producing a block as a leader) earns nothing. So, being online is incentivized as a // form of transaction fees as well. - // - // On the other hand, rent fees are distributed under slightly different philosophy, while - // still being stake-weighted. - // Ref: distribute_rent_to_validators pub(super) fn distribute_transaction_fee_details(&self) { let fee_details = self.collector_fee_details.read().unwrap(); if fee_details.total() == 0 { @@ -175,177 +170,17 @@ impl Bank { self.store_account(pubkey, &account); Ok(account.lamports()) } - - // Distribute collected rent fees for this slot to staked validators (excluding stakers) - // according to stake. - // - // The nature of rent fee is the cost of doing business, every validator has to hold (or have - // access to) the same list of accounts, so we pay according to stake, which is a rough proxy for - // value to the network. - // - // Currently, rent distribution doesn't consider given validator's uptime at all (this might - // change). That's because rent should be rewarded for the storage resource utilization cost. - // It's treated differently from transaction fees, which is for the computing resource - // utilization cost. - // - // We can't use collector_id (which is rotated according to stake-weighted leader schedule) - // as an approximation to the ideal rent distribution to simplify and avoid this per-slot - // computation for the distribution (time: N log N, space: N acct. stores; N = # of - // validators). 
- // The reason is that rent fee doesn't need to be incentivized for throughput unlike transaction - // fees - // - // Ref: distribute_transaction_fee_details - fn distribute_rent_to_validators( - &self, - vote_accounts: &VoteAccountsHashMap, - rent_to_be_distributed: u64, - ) { - let mut total_staked = 0; - - // Collect the stake associated with each validator. - // Note that a validator may be present in this vector multiple times if it happens to have - // more than one staked vote account somehow - let mut validator_stakes = vote_accounts - .iter() - .filter_map(|(_vote_pubkey, (staked, account))| { - if *staked == 0 { - None - } else { - total_staked += *staked; - Some((*account.node_pubkey(), *staked)) - } - }) - .collect::>(); - - #[cfg(test)] - if validator_stakes.is_empty() { - // some tests bank.freezes() with bad staking state - self.capitalization - .fetch_sub(rent_to_be_distributed, Relaxed); - return; - } - #[cfg(not(test))] - assert!(!validator_stakes.is_empty()); - - // Sort first by stake and then by validator identity pubkey for determinism. - // If two items are still equal, their relative order does not matter since - // both refer to the same validator. 
- validator_stakes.sort_unstable_by(|(pubkey1, staked1), (pubkey2, staked2)| { - (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse() - }); - - let mut rent_distributed_in_initial_round = 0; - let validator_rent_shares = validator_stakes - .into_iter() - .map(|(pubkey, staked)| { - let rent_share = (((staked as u128) * (rent_to_be_distributed as u128)) - / (total_staked as u128)) - .try_into() - .unwrap(); - rent_distributed_in_initial_round += rent_share; - (pubkey, rent_share) - }) - .collect::>(); - - // Leftover lamports after fraction calculation, will be paid to validators starting from highest stake - // holder - let mut leftover_lamports = rent_to_be_distributed - rent_distributed_in_initial_round; - - let mut rent_to_burn: u64 = 0; - let mut rewards = vec![]; - validator_rent_shares - .into_iter() - .for_each(|(pubkey, rent_share)| { - let rent_to_be_paid = if leftover_lamports > 0 { - leftover_lamports -= 1; - rent_share + 1 - } else { - rent_share - }; - if rent_to_be_paid > 0 { - match self.deposit_fees(&pubkey, rent_to_be_paid) { - Ok(post_balance) => { - rewards.push(( - pubkey, - RewardInfo { - reward_type: RewardType::Rent, - lamports: rent_to_be_paid as i64, - post_balance, - commission: None, - }, - )); - } - Err(err) => { - debug!( - "Burned {} lamport rent fee instead of sending to {} due to {}", - rent_to_be_paid, pubkey, err - ); - - // overflow adding lamports or resulting account is invalid - // so burn lamports and track lamports burned per slot - rent_to_burn = rent_to_burn.saturating_add(rent_to_be_paid); - } - } - } - }); - self.rewards.write().unwrap().append(&mut rewards); - - if rent_to_burn > 0 { - self.capitalization.fetch_sub(rent_to_burn, Relaxed); - datapoint_warn!( - "bank-burned_rent", - ("slot", self.slot(), i64), - ("num_lamports", rent_to_burn, i64) - ); - } - - assert_eq!(leftover_lamports, 0); - } - - pub(super) fn distribute_rent_fees(&self) { - let total_rent_collected = self.collected_rent.load(Relaxed); - - if 
!self.should_collect_rent() { - if total_rent_collected != 0 { - warn!("Rent fees collection is disabled, yet total rent collected was non zero! Total rent collected: {total_rent_collected}"); - } - return; - } - - let (burned_portion, rent_to_be_distributed) = self - .rent_collector - .rent - .calculate_burn(total_rent_collected); - - debug!( - "distributed rent: {} (rounded from: {}, burned: {})", - rent_to_be_distributed, total_rent_collected, burned_portion - ); - self.capitalization.fetch_sub(burned_portion, Relaxed); - - if rent_to_be_distributed == 0 { - return; - } - - self.distribute_rent_to_validators(&self.vote_accounts(), rent_to_be_distributed); - } } #[cfg(test)] pub mod tests { use { super::*, - crate::genesis_utils::{ - create_genesis_config, create_genesis_config_with_leader, - create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, - }, + crate::genesis_utils::{create_genesis_config, create_genesis_config_with_leader}, solana_account::AccountSharedData, - solana_native_token::sol_to_lamports, solana_pubkey as pubkey, solana_rent::Rent, solana_signer::Signer, - solana_svm_rent_collector::rent_state::RentState, std::sync::RwLock, }; @@ -513,202 +348,6 @@ pub mod tests { ); } - #[test] - fn test_distribute_rent_to_validators_rent_paying() { - solana_logger::setup(); - - const RENT_PER_VALIDATOR: u64 = 55; - const TOTAL_RENT: u64 = RENT_PER_VALIDATOR * 4; - - let empty_validator = ValidatorVoteKeypairs::new_rand(); - let rent_paying_validator = ValidatorVoteKeypairs::new_rand(); - let becomes_rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); - let rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); - let keypairs = vec![ - &empty_validator, - &rent_paying_validator, - &becomes_rent_exempt_validator, - &rent_exempt_validator, - ]; - let genesis_config_info = create_genesis_config_with_vote_accounts( - sol_to_lamports(1000.), - &keypairs, - vec![sol_to_lamports(1000.); 4], - ); - let mut genesis_config = 
genesis_config_info.genesis_config; - genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default - - let bank = Bank::new_for_tests(&genesis_config); - let rent_exempt_minimum = bank.rent_collector().get_rent().minimum_balance(0); - - // Make one validator have an empty identity account - let mut empty_validator_account = bank - .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) - .unwrap(); - empty_validator_account.set_lamports(0); - bank.store_account( - &empty_validator.node_keypair.pubkey(), - &empty_validator_account, - ); - - // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR - let mut becomes_rent_exempt_validator_account = bank - .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - becomes_rent_exempt_validator_account - .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); - bank.store_account( - &becomes_rent_exempt_validator.node_keypair.pubkey(), - &becomes_rent_exempt_validator_account, - ); - - // Make one validator rent-exempt - let mut rent_exempt_validator_account = bank - .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - rent_exempt_validator_account.set_lamports(rent_exempt_minimum); - bank.store_account( - &rent_exempt_validator.node_keypair.pubkey(), - &rent_exempt_validator_account, - ); - - let get_rent_state = |bank: &Bank, address: &Pubkey| -> RentState { - let account = bank - .get_account_with_fixed_root(address) - .unwrap_or_default(); - bank.rent_collector().get_account_rent_state(&account) - }; - - // Assert starting RentStates - assert_eq!( - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - RentState::Uninitialized - ); - assert_eq!( - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: 42, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, 
&becomes_rent_exempt_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - RentState::RentExempt - ); - - let old_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); - let old_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let old_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let old_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); - - let new_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); - let new_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let new_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let new_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - // Assert ending balances; rent should be withheld if test is active and ending RentState - // is RentPaying, ie. 
empty_validator and rent_paying_validator - assert_eq!(old_empty_validator_lamports, new_empty_validator_lamports); - - assert_eq!( - old_rent_paying_validator_lamports, - new_rent_paying_validator_lamports - ); - - assert_eq!( - old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_becomes_rent_exempt_validator_lamports - ); - - assert_eq!( - old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_rent_exempt_validator_lamports - ); - - // Assert ending RentStates - assert_eq!( - RentState::Uninitialized, - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentPaying { - lamports: old_rent_paying_validator_lamports, - data_size: 0, - }, - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - ); - } - - #[test] - fn test_distribute_rent_to_validators_invalid_owner() { - struct TestCase { - use_invalid_owner: bool, - } - - impl TestCase { - fn new(use_invalid_owner: bool) -> Self { - Self { use_invalid_owner } - } - } - - for test_case in [TestCase::new(false), TestCase::new(true)] { - let genesis_config_info = - create_genesis_config_with_leader(0, &Pubkey::new_unique(), 100); - let mut genesis_config = genesis_config_info.genesis_config; - genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default - - let bank = Bank::new_for_tests(&genesis_config); - - let initial_balance = 1_000_000; - let account_owner = if test_case.use_invalid_owner { - Pubkey::new_unique() - } else { - system_program::id() - }; - let account = AccountSharedData::new(initial_balance, 0, &account_owner); - bank.store_account(bank.collector_id(), &account); - - let initial_capitalization = bank.capitalization(); - let 
rent_fees = 100; - bank.distribute_rent_to_validators(&bank.vote_accounts(), rent_fees); - let new_capitalization = bank.capitalization(); - let new_balance = bank.get_balance(bank.collector_id()); - - if test_case.use_invalid_owner { - assert_eq!(initial_balance, new_balance); - assert_eq!(initial_capitalization - rent_fees, new_capitalization); - assert_eq!(bank.rewards.read().unwrap().len(), 0); - } else { - assert_eq!(initial_balance + rent_fees, new_balance); - assert_eq!(initial_capitalization, new_capitalization); - assert_eq!(bank.rewards.read().unwrap().len(), 1); - } - } - } - #[test] fn test_distribute_transaction_fee_details_normal() { let genesis = create_genesis_config(0); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index f8e44058f0c037..0bbbf289e6ee43 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -43,7 +43,7 @@ use { accounts_index::{ AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ITER_BATCH_SIZE, }, - accounts_partition::{self, PartitionIndex, RentPayingAccountsByPartition}, + accounts_partition, ancestors::Ancestors, }, solana_client_traits::SyncClient, @@ -111,12 +111,10 @@ use { MAX_PERMITTED_DATA_LENGTH, }, solana_system_transaction as system_transaction, solana_sysvar as sysvar, - solana_time_utils::years_as_slots, solana_timings::ExecuteTimings, solana_transaction::{ sanitized::SanitizedTransaction, Transaction, TransactionVerificationMode, }, - solana_transaction_context::TransactionAccount, solana_transaction_error::{TransactionError, TransactionResult as Result}, solana_vote_interface::state::TowerSync, solana_vote_program::{ @@ -463,257 +461,6 @@ fn test_bank_capitalization() { ); } -fn rent_with_exemption_threshold(exemption_threshold: f64) -> Rent { - Rent { - lamports_per_byte_year: 1, - exemption_threshold, - burn_percent: 10, - } -} - -#[test] -/// one thing being tested here is that a failed tx (due to rent collection using up all lamports) followed by rent 
collection -/// results in the same state as if just rent collection ran (and emptied the accounts that have too few lamports) -fn test_credit_debit_rent_no_side_effect_on_hash() { - solana_logger::setup(); - - let (mut genesis_config, _mint_keypair) = create_genesis_config_no_tx_fee(10); - - genesis_config.rent = rent_with_exemption_threshold(21.0); - - let slot = years_as_slots( - 2.0, - &genesis_config.poh_config.target_tick_duration, - genesis_config.ticks_per_slot, - ) as u64; - let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank = new_bank_from_parent_with_bank_forks( - bank_forks_1.as_ref(), - root_bank, - &Pubkey::default(), - slot, - ); - - let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank_with_success_txs = new_bank_from_parent_with_bank_forks( - bank_forks_2.as_ref(), - root_bank_2, - &Pubkey::default(), - slot, - ); - - assert_eq!(bank.last_blockhash(), genesis_config.hash()); - - let min_balance = genesis_config.rent.minimum_balance(0); - let plenty_of_lamports = min_balance + 1; - let too_few_lamports = 10; - // Initialize credit-debit and credit only accounts - let accounts = [ - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - // Transaction between these two accounts will fail - AccountSharedData::new(too_few_lamports, 0, &Pubkey::default()), - AccountSharedData::new(too_few_lamports, 1, &Pubkey::default()), - ]; - - let keypairs = accounts.iter().map(|_| Keypair::new()).collect::>(); - { - // make sure rent and epoch change are such that we collect all lamports in accounts 4 & 5 - let mut account_copy = accounts[4].clone(); - let expected_rent = bank - .rent_collector() - .collect_from_existing_account(&keypairs[4].pubkey(), &mut 
account_copy); - assert_eq!(expected_rent.rent_amount, too_few_lamports); - assert_eq!(account_copy.lamports(), 0); - } - - for i in 0..accounts.len() { - let account = &accounts[i]; - bank.store_account(&keypairs[i].pubkey(), account); - bank_with_success_txs.store_account(&keypairs[i].pubkey(), account); - } - - // Make builtin instruction loader rent exempt - let system_program_id = system_program::id(); - let mut system_program_account = bank.get_account(&system_program_id).unwrap(); - system_program_account.set_lamports( - bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()), - ); - bank.store_account(&system_program_id, &system_program_account); - bank_with_success_txs.store_account(&system_program_id, &system_program_account); - - let t1 = system_transaction::transfer( - &keypairs[0], - &keypairs[1].pubkey(), - 1, - genesis_config.hash(), - ); - let t2 = system_transaction::transfer( - &keypairs[2], - &keypairs[3].pubkey(), - 1, - genesis_config.hash(), - ); - // the idea is this transaction will result in both accounts being drained of all lamports due to rent collection - let t3 = system_transaction::transfer( - &keypairs[4], - &keypairs[5].pubkey(), - 1, - genesis_config.hash(), - ); - - let txs = vec![t1.clone(), t2.clone(), t3]; - let res = bank.process_transactions(txs.iter()); - - assert_eq!(res.len(), 3); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); - assert_eq!(res[2], Err(TransactionError::AccountNotFound)); - - bank.freeze(); - - let rwlockguard_bank_hash = bank.hash.read().unwrap(); - let bank_hash = rwlockguard_bank_hash.as_ref(); - - let txs = vec![t2, t1]; - let res = bank_with_success_txs.process_transactions(txs.iter()); - - assert_eq!(res.len(), 2); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); - - bank_with_success_txs.freeze(); - - let rwlockguard_bank_with_success_txs_hash = bank_with_success_txs.hash.read().unwrap(); - let bank_with_success_txs_hash = 
rwlockguard_bank_with_success_txs_hash.as_ref(); - - assert_eq!(bank_with_success_txs_hash, bank_hash); -} - -fn store_accounts_for_rent_test( - bank: &Bank, - keypairs: &[Keypair], - mock_program_id: Pubkey, - generic_rent_due_for_system_account: u64, -) { - let mut account_pairs: Vec = Vec::with_capacity(keypairs.len() - 1); - account_pairs.push(( - keypairs[0].pubkey(), - AccountSharedData::new( - generic_rent_due_for_system_account + 2, - 0, - &Pubkey::default(), - ), - )); - account_pairs.push(( - keypairs[1].pubkey(), - AccountSharedData::new( - generic_rent_due_for_system_account + 2, - 0, - &Pubkey::default(), - ), - )); - account_pairs.push(( - keypairs[2].pubkey(), - AccountSharedData::new( - generic_rent_due_for_system_account + 2, - 0, - &Pubkey::default(), - ), - )); - account_pairs.push(( - keypairs[3].pubkey(), - AccountSharedData::new( - generic_rent_due_for_system_account + 2, - 0, - &Pubkey::default(), - ), - )); - account_pairs.push(( - keypairs[4].pubkey(), - AccountSharedData::new(10, 0, &Pubkey::default()), - )); - account_pairs.push(( - keypairs[5].pubkey(), - AccountSharedData::new(10, 0, &Pubkey::default()), - )); - account_pairs.push(( - keypairs[6].pubkey(), - AccountSharedData::new( - (2 * generic_rent_due_for_system_account) + 24, - 0, - &Pubkey::default(), - ), - )); - - account_pairs.push(( - keypairs[8].pubkey(), - AccountSharedData::new( - generic_rent_due_for_system_account + 2 + 929, - 0, - &Pubkey::default(), - ), - )); - account_pairs.push(( - keypairs[9].pubkey(), - AccountSharedData::new(10, 0, &Pubkey::default()), - )); - - // Feeding to MockProgram to test read only rent behaviour - account_pairs.push(( - keypairs[10].pubkey(), - AccountSharedData::new( - generic_rent_due_for_system_account + 3, - 0, - &Pubkey::default(), - ), - )); - account_pairs.push(( - keypairs[11].pubkey(), - AccountSharedData::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id), - )); - account_pairs.push(( - keypairs[12].pubkey(), - 
AccountSharedData::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id), - )); - account_pairs.push(( - keypairs[13].pubkey(), - AccountSharedData::new(14, 22, &mock_program_id), - )); - - for account_pair in account_pairs.iter() { - bank.store_account(&account_pair.0, &account_pair.1); - } -} - -fn create_child_bank_for_rent_test( - root_bank: Arc, - genesis_config: &GenesisConfig, - bank_forks: &RwLock, - mock_builtin: Option<(Pubkey, BuiltinFunctionWithContext)>, -) -> Arc { - let mut bank = Bank::new_from_parent( - root_bank, - &Pubkey::default(), - years_as_slots( - 2.0, - &genesis_config.poh_config.target_tick_duration, - genesis_config.ticks_per_slot, - ) as u64, - ); - bank.rent_collector.slots_per_year = 421_812.0; - if let Some((program_id, builtin_function)) = mock_builtin { - bank.add_mockup_builtin(program_id, builtin_function); - } - bank_forks - .write() - .unwrap() - .insert(bank) - .clone_without_scheduler() -} - /// if asserter returns true, check the capitalization /// Checking the capitalization requires that the bank be a root and the slot be flushed. /// All tests are getting converted to use the write cache, so over time, each caller will be visited to throttle this input. @@ -818,488 +565,6 @@ fn test_store_account_and_update_capitalization_unchanged() { assert_eq!(account, bank.get_account(&pubkey).unwrap()); } -#[test] -#[ignore] -fn test_rent_distribution() { - solana_logger::setup(); - - let bootstrap_validator_pubkey = solana_pubkey::new_rand(); - let bootstrap_validator_stake_lamports = 30; - let mut genesis_config = create_genesis_config_with_leader( - 10, - &bootstrap_validator_pubkey, - bootstrap_validator_stake_lamports, - ) - .genesis_config; - // While we are preventing new accounts left in a rent-paying state, not quite ready to rip - // out all the rent assessment tests. Just deactivate the feature for now. 
- genesis_config - .accounts - .remove(&feature_set::require_rent_exempt_accounts::id()) - .unwrap(); - - genesis_config.epoch_schedule = EpochSchedule::custom( - MINIMUM_SLOTS_PER_EPOCH, - genesis_config.epoch_schedule.leader_schedule_slot_offset, - false, - ); - - genesis_config.rent = rent_with_exemption_threshold(2.0); - - let rent = Rent::free(); - - let validator_1_pubkey = solana_pubkey::new_rand(); - let validator_1_stake_lamports = 20; - let validator_1_staking_keypair = Keypair::new(); - let validator_1_voting_keypair = Keypair::new(); - - let validator_1_vote_account = vote_state::create_account( - &validator_1_voting_keypair.pubkey(), - &validator_1_pubkey, - 0, - validator_1_stake_lamports, - ); - - let validator_1_stake_account = stake_state::create_account( - &validator_1_staking_keypair.pubkey(), - &validator_1_voting_keypair.pubkey(), - &validator_1_vote_account, - &rent, - validator_1_stake_lamports, - ); - - genesis_config.accounts.insert( - validator_1_pubkey, - Account::new(42, 0, &system_program::id()), - ); - genesis_config.accounts.insert( - validator_1_staking_keypair.pubkey(), - Account::from(validator_1_stake_account), - ); - genesis_config.accounts.insert( - validator_1_voting_keypair.pubkey(), - Account::from(validator_1_vote_account), - ); - - let validator_2_pubkey = solana_pubkey::new_rand(); - let validator_2_stake_lamports = 20; - let validator_2_staking_keypair = Keypair::new(); - let validator_2_voting_keypair = Keypair::new(); - - let validator_2_vote_account = vote_state::create_account( - &validator_2_voting_keypair.pubkey(), - &validator_2_pubkey, - 0, - validator_2_stake_lamports, - ); - - let validator_2_stake_account = stake_state::create_account( - &validator_2_staking_keypair.pubkey(), - &validator_2_voting_keypair.pubkey(), - &validator_2_vote_account, - &rent, - validator_2_stake_lamports, - ); - - genesis_config.accounts.insert( - validator_2_pubkey, - Account::new(42, 0, &system_program::id()), - ); - 
genesis_config.accounts.insert( - validator_2_staking_keypair.pubkey(), - Account::from(validator_2_stake_account), - ); - genesis_config.accounts.insert( - validator_2_voting_keypair.pubkey(), - Account::from(validator_2_vote_account), - ); - - let validator_3_pubkey = solana_pubkey::new_rand(); - let validator_3_stake_lamports = 30; - let validator_3_staking_keypair = Keypair::new(); - let validator_3_voting_keypair = Keypair::new(); - - let validator_3_vote_account = vote_state::create_account( - &validator_3_voting_keypair.pubkey(), - &validator_3_pubkey, - 0, - validator_3_stake_lamports, - ); - - let validator_3_stake_account = stake_state::create_account( - &validator_3_staking_keypair.pubkey(), - &validator_3_voting_keypair.pubkey(), - &validator_3_vote_account, - &rent, - validator_3_stake_lamports, - ); - - genesis_config.accounts.insert( - validator_3_pubkey, - Account::new(42, 0, &system_program::id()), - ); - genesis_config.accounts.insert( - validator_3_staking_keypair.pubkey(), - Account::from(validator_3_stake_account), - ); - genesis_config.accounts.insert( - validator_3_voting_keypair.pubkey(), - Account::from(validator_3_vote_account), - ); - - genesis_config.rent = rent_with_exemption_threshold(10.0); - - let mut bank = Bank::new_for_tests(&genesis_config); - // Enable rent collection - bank.rent_collector.epoch = 5; - bank.rent_collector.slots_per_year = 192.0; - let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); - - let payer = Keypair::new(); - let payer_account = AccountSharedData::new(400, 0, &system_program::id()); - bank.store_account_and_update_capitalization(&payer.pubkey(), &payer_account); - - let payee = Keypair::new(); - let payee_account = AccountSharedData::new(70, 1, &system_program::id()); - bank.store_account_and_update_capitalization(&payee.pubkey(), &payee_account); - - let bootstrap_validator_initial_balance = bank.get_balance(&bootstrap_validator_pubkey); - - let tx = system_transaction::transfer(&payer, 
&payee.pubkey(), 180, genesis_config.hash()); - - let result = bank.process_transaction(&tx); - assert_eq!(result, Ok(())); - - let mut total_rent_deducted = 0; - - // 400 - 128(Rent) - 180(Transfer) - assert_eq!(bank.get_balance(&payer.pubkey()), 92); - total_rent_deducted += 128; - - // 70 - 70(Rent) + 180(Transfer) - 21(Rent) - assert_eq!(bank.get_balance(&payee.pubkey()), 159); - total_rent_deducted += 70 + 21; - - let previous_capitalization = bank.capitalization.load(Relaxed); - - bank.freeze(); - - assert_eq!(bank.collected_rent.load(Relaxed), total_rent_deducted); - - let burned_portion = - total_rent_deducted * u64::from(bank.rent_collector.rent.burn_percent) / 100; - let rent_to_be_distributed = total_rent_deducted - burned_portion; - - let bootstrap_validator_portion = - ((bootstrap_validator_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + 1; // Leftover lamport - assert_eq!( - bank.get_balance(&bootstrap_validator_pubkey), - bootstrap_validator_portion + bootstrap_validator_initial_balance - ); - - // Since, validator 1 and validator 2 has equal smallest stake, it comes down to comparison - // between their pubkey. - let tweak_1 = u64::from(validator_1_pubkey > validator_2_pubkey); - let validator_1_portion = - ((validator_1_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + tweak_1; - assert_eq!( - bank.get_balance(&validator_1_pubkey), - validator_1_portion + 42 - tweak_1, - ); - - // Since, validator 1 and validator 2 has equal smallest stake, it comes down to comparison - // between their pubkey. 
- let tweak_2 = u64::from(validator_2_pubkey > validator_1_pubkey); - let validator_2_portion = - ((validator_2_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + tweak_2; - assert_eq!( - bank.get_balance(&validator_2_pubkey), - validator_2_portion + 42 - tweak_2, - ); - - let validator_3_portion = - ((validator_3_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + 1; - assert_eq!( - bank.get_balance(&validator_3_pubkey), - validator_3_portion + 42 - ); - - let current_capitalization = bank.capitalization.load(Relaxed); - - // only slot history is newly created - let sysvar_and_builtin_program_delta = - min_rent_exempt_balance_for_sysvars(&bank, &[sysvar::slot_history::id()]); - assert_eq!( - previous_capitalization - (current_capitalization - sysvar_and_builtin_program_delta), - burned_portion - ); - - assert!(bank.calculate_and_verify_capitalization(true)); - - assert_eq!( - rent_to_be_distributed, - bank.rewards - .read() - .unwrap() - .iter() - .map(|(address, reward)| { - if reward.lamports > 0 { - assert_eq!(reward.reward_type, RewardType::Rent); - if *address == validator_2_pubkey { - assert_eq!(reward.post_balance, validator_2_portion + 42 - tweak_2); - } else if *address == validator_3_pubkey { - assert_eq!(reward.post_balance, validator_3_portion + 42); - } - reward.lamports as u64 - } else { - 0 - } - }) - .sum::() - ); -} - -#[test] -fn test_rent_exempt_executable_account() { - let (mut genesis_config, mint_keypair) = create_genesis_config(100_000); - genesis_config.rent = rent_with_exemption_threshold(1000.0); - - let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank = - create_child_bank_for_rent_test(root_bank, &genesis_config, bank_forks.as_ref(), None); - - let account_pubkey = solana_pubkey::new_rand(); - let account_balance = 1; - let mut account = AccountSharedData::new(account_balance, 0, &solana_pubkey::new_rand()); - account.set_executable(true); - 
account.set_owner(bpf_loader_upgradeable::id()); - bank.store_account(&account_pubkey, &account); - - let transfer_lamports = 1; - let tx = system_transaction::transfer( - &mint_keypair, - &account_pubkey, - transfer_lamports, - genesis_config.hash(), - ); - assert_matches!( - bank.process_transaction(&tx), - Err(TransactionError::InstructionError(0, _)) - ); - assert_eq!(bank.get_balance(&account_pubkey), account_balance); -} - -#[test] -#[ignore] -#[allow(clippy::cognitive_complexity)] -fn test_rent_complex() { - solana_logger::setup(); - let mock_program_id = Pubkey::from([2u8; 32]); - - #[derive(Serialize, Deserialize)] - enum MockInstruction { - Deduction, - } - - declare_process_instruction!(MockBuiltin, 1, |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let instruction_data = instruction_context.get_instruction_data(); - if let Ok(instruction) = bincode::deserialize(instruction_data) { - match instruction { - MockInstruction::Deduction => { - instruction_context - .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(1)?; - instruction_context - .try_borrow_instruction_account(transaction_context, 2)? - .checked_sub_lamports(1)?; - Ok(()) - } - } - } else { - Err(InstructionError::InvalidInstructionData) - } - }); - - let (mut genesis_config, _mint_keypair) = create_genesis_config(10); - let mut keypairs: Vec = Vec::with_capacity(14); - for _i in 0..14 { - keypairs.push(Keypair::new()); - } - - genesis_config.rent = rent_with_exemption_threshold(1000.0); - - let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - // until we completely transition to the eager rent collection, - // we must ensure lazy rent collection doesn't get broken! 
- root_bank.restore_old_behavior_for_fragile_tests(); - let bank = create_child_bank_for_rent_test( - root_bank, - &genesis_config, - bank_forks.as_ref(), - Some((mock_program_id, MockBuiltin::vm)), - ); - - assert_eq!(bank.last_blockhash(), genesis_config.hash()); - - let slots_elapsed: u64 = (0..=bank.epoch) - .map(|epoch| { - bank.rent_collector - .epoch_schedule - .get_slots_in_epoch(epoch + 1) - }) - .sum(); - let generic_rent_due_for_system_account = bank - .rent_collector - .rent - .due( - bank.get_minimum_balance_for_rent_exemption(0) - 1, - 0, - slots_elapsed as f64 / bank.rent_collector.slots_per_year, - ) - .lamports(); - - store_accounts_for_rent_test( - &bank, - &keypairs, - mock_program_id, - generic_rent_due_for_system_account, - ); - - let magic_rent_number = 131; // yuck, derive this value programmatically one day - - let t1 = system_transaction::transfer( - &keypairs[0], - &keypairs[1].pubkey(), - 1, - genesis_config.hash(), - ); - let t2 = system_transaction::transfer( - &keypairs[2], - &keypairs[3].pubkey(), - 1, - genesis_config.hash(), - ); - let t3 = system_transaction::transfer( - &keypairs[4], - &keypairs[5].pubkey(), - 1, - genesis_config.hash(), - ); - let t4 = system_transaction::transfer( - &keypairs[6], - &keypairs[7].pubkey(), - generic_rent_due_for_system_account + 1, - genesis_config.hash(), - ); - let t5 = system_transaction::transfer( - &keypairs[8], - &keypairs[9].pubkey(), - 929, - genesis_config.hash(), - ); - - let account_metas = vec![ - AccountMeta::new(keypairs[10].pubkey(), true), - AccountMeta::new(keypairs[11].pubkey(), true), - AccountMeta::new(keypairs[12].pubkey(), true), - AccountMeta::new_readonly(keypairs[13].pubkey(), false), - ]; - let deduct_instruction = - Instruction::new_with_bincode(mock_program_id, &MockInstruction::Deduction, account_metas); - let t6 = Transaction::new_signed_with_payer( - &[deduct_instruction], - Some(&keypairs[10].pubkey()), - &[&keypairs[10], &keypairs[11], &keypairs[12]], - 
genesis_config.hash(), - ); - - let txs = vec![t6, t5, t1, t2, t3, t4]; - let res = bank.process_transactions(txs.iter()); - - assert_eq!(res.len(), 6); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); - assert_eq!(res[2], Ok(())); - assert_eq!(res[3], Ok(())); - assert_eq!(res[4], Err(TransactionError::AccountNotFound)); - assert_eq!(res[5], Ok(())); - - bank.freeze(); - - let mut rent_collected = 0; - - // 48992 - generic_rent_due_for_system_account(Rent) - 1(transfer) - assert_eq!(bank.get_balance(&keypairs[0].pubkey()), 1); - rent_collected += generic_rent_due_for_system_account; - - // 48992 - generic_rent_due_for_system_account(Rent) + 1(transfer) - assert_eq!(bank.get_balance(&keypairs[1].pubkey()), 3); - rent_collected += generic_rent_due_for_system_account; - - // 48992 - generic_rent_due_for_system_account(Rent) - 1(transfer) - assert_eq!(bank.get_balance(&keypairs[2].pubkey()), 1); - rent_collected += generic_rent_due_for_system_account; - - // 48992 - generic_rent_due_for_system_account(Rent) + 1(transfer) - assert_eq!(bank.get_balance(&keypairs[3].pubkey()), 3); - rent_collected += generic_rent_due_for_system_account; - - // No rent deducted - assert_eq!(bank.get_balance(&keypairs[4].pubkey()), 10); - assert_eq!(bank.get_balance(&keypairs[5].pubkey()), 10); - - // 98004 - generic_rent_due_for_system_account(Rent) - 48991(transfer) - assert_eq!(bank.get_balance(&keypairs[6].pubkey()), 23); - rent_collected += generic_rent_due_for_system_account; - - // 0 + 48990(transfer) - magic_rent_number(Rent) - assert_eq!( - bank.get_balance(&keypairs[7].pubkey()), - generic_rent_due_for_system_account + 1 - magic_rent_number - ); - - // Epoch should be updated - // Rent deducted on store side - let account8 = bank.get_account(&keypairs[7].pubkey()).unwrap(); - // Epoch should be set correctly. 
- assert_eq!(account8.rent_epoch(), bank.epoch + 1); - rent_collected += magic_rent_number; - - // 49921 - generic_rent_due_for_system_account(Rent) - 929(Transfer) - assert_eq!(bank.get_balance(&keypairs[8].pubkey()), 2); - rent_collected += generic_rent_due_for_system_account; - - let account10 = bank.get_account(&keypairs[9].pubkey()).unwrap(); - // Account was overwritten at load time, since it didn't have sufficient balance to pay rent - // Then, at store time we deducted `magic_rent_number` rent for the current epoch, once it has balance - assert_eq!(account10.rent_epoch(), bank.epoch + 1); - // account data is blank now - assert_eq!(account10.data().len(), 0); - // 10 - 10(Rent) + 929(Transfer) - magic_rent_number(Rent) - assert_eq!(account10.lamports(), 929 - magic_rent_number); - rent_collected += magic_rent_number + 10; - - // 48993 - generic_rent_due_for_system_account(Rent) - assert_eq!(bank.get_balance(&keypairs[10].pubkey()), 3); - rent_collected += generic_rent_due_for_system_account; - - // 48993 - generic_rent_due_for_system_account(Rent) + 1(Addition by program) - assert_eq!(bank.get_balance(&keypairs[11].pubkey()), 4); - rent_collected += generic_rent_due_for_system_account; - - // 48993 - generic_rent_due_for_system_account(Rent) - 1(Deduction by program) - assert_eq!(bank.get_balance(&keypairs[12].pubkey()), 2); - rent_collected += generic_rent_due_for_system_account; - - // No rent for read-only account - assert_eq!(bank.get_balance(&keypairs[13].pubkey()), 14); - - // Bank's collected rent should be sum of rent collected from all accounts - assert_eq!(bank.collected_rent.load(Relaxed), rent_collected); -} - #[test] fn test_rent_eager_across_epoch_without_gap() { let (mut bank, _bank_forks) = create_simple_test_arc_bank(1); @@ -1654,9 +919,8 @@ impl Bank { } } -#[test_case(true; "enable rent fees collection")] -#[test_case(false; "disable rent fees collection")] -fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { 
+#[test] +fn test_rent_eager_collect_rent_in_partition() { solana_logger::setup(); let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); activate_all_features(&mut genesis_config); @@ -1668,30 +932,18 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { .accounts .remove(&feature_set::disable_partitioned_rent_collection::id()) .unwrap(); - if should_collect_rent { - genesis_config - .accounts - .remove(&feature_set::disable_rent_fees_collection::id()) - .unwrap(); - } let zero_lamport_pubkey = solana_pubkey::new_rand(); let rent_due_pubkey = solana_pubkey::new_rand(); let rent_exempt_pubkey = solana_pubkey::new_rand(); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - assert_eq!(should_collect_rent, bank.should_collect_rent()); + let genesis_slot = 0; let zero_lamports = 0; let little_lamports = 1234; let large_lamports = 123_456_789; // genesis_config.epoch_schedule.slots_per_epoch == 432_000 and is unsuitable for this test let some_slot = MINIMUM_SLOTS_PER_EPOCH; // chosen to cause epoch to be +1 - let rent_collected = if bank.should_collect_rent() { - 1 /* this is a function of 'some_slot' */ - } else { - 0 - }; bank.store_account( &zero_lamport_pubkey, @@ -1706,19 +958,6 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { &AccountSharedData::new(large_lamports, 0, &Pubkey::default()), ); - let genesis_slot = 0; - - let previous_epoch = bank.epoch(); - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), some_slot)); - let current_epoch = bank.epoch(); - assert_eq!(previous_epoch + 1, current_epoch); - - assert_eq!(bank.collected_rent.load(Relaxed), 0); - assert_eq!( - bank.get_account(&rent_due_pubkey).unwrap().lamports(), - little_lamports - ); - assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch(), 0); assert_eq!(bank.slots_by_pubkey(&rent_due_pubkey), vec![genesis_slot]); assert_eq!( bank.slots_by_pubkey(&rent_exempt_pubkey), @@ -1729,21 +968,16 
@@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { vec![genesis_slot] ); - assert_eq!(bank.collected_rent.load(Relaxed), 0); - bank.collect_rent_in_partition((0, 0, 1), &RentMetrics::default()); // all range + let previous_epoch = bank.epoch(); + bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), some_slot)); + bank.freeze(); + let current_epoch = bank.epoch(); + assert_eq!(previous_epoch + 1, current_epoch); - assert_eq!(bank.collected_rent.load(Relaxed), rent_collected); + // Rent epoch is not updated for rent paying accounts assert_eq!( - bank.get_account(&rent_due_pubkey).unwrap().lamports(), - little_lamports - rent_collected - ); - assert!( - bank.get_account(&rent_due_pubkey).unwrap().rent_epoch() == current_epoch + 1 - || !bank.should_collect_rent() - ); - assert_eq!( - bank.get_account(&rent_exempt_pubkey).unwrap().lamports(), - large_lamports + bank.get_account(&rent_due_pubkey).unwrap().rent_epoch(), + previous_epoch ); assert_eq!( bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch(), @@ -1816,15 +1050,11 @@ fn test_collect_rent_from_accounts() { account3.set_rent_epoch(0); // stake accounts in genesis have a rent epoch of 0 // loaded from previous slot, so we skip rent collection on it - let _result = later_bank.collect_rent_from_accounts( - vec![ - (address1, account1, later_slot - 1), - (address2, account2, later_slot - 1), - (address3, account3, later_slot - 1), - ], - None, - PartitionIndex::default(), - ); + let _result = later_bank.update_rent_exempt_status_for_accounts(vec![ + (address1, account1, later_slot - 1), + (address2, account2, later_slot - 1), + (address3, account3, later_slot - 1), + ]); let deltas = later_bank .rc @@ -1901,8 +1131,8 @@ fn test_rent_eager_collect_rent_zero_lamport_deterministic() { assert_eq!(hash1_with_zero, hash1_without_zero); assert_ne!(hash1_with_zero, Hash::default()); - bank2_with_zero.collect_rent_in_partition((0, 0, 1), &RentMetrics::default()); // all - 
bank2_without_zero.collect_rent_in_partition((0, 0, 1), &RentMetrics::default()); // all + bank2_with_zero.update_rent_exempt_status_in_partition((0, 0, 1), &RentMetrics::default()); // all + bank2_without_zero.update_rent_exempt_status_in_partition((0, 0, 1), &RentMetrics::default()); // all bank2_with_zero.freeze(); let hash2_with_zero = bank2_with_zero.hash(); @@ -3304,7 +2534,6 @@ fn test_load_and_execute_commit_transactions_fees_only() { return_data: None, executed_units: 0, fee_details: FeeDetails::new(5000, 0), - rent_debits: RentDebits::default(), loaded_account_stats: TransactionLoadedAccountsStats { loaded_accounts_count: 2, loaded_accounts_data_size: nonce_size as u32, @@ -6562,26 +5791,26 @@ fn test_bank_hash_consistency() { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "5b72TRrdMhGED3boghe55CyX8hmnpYt7RTMrwrHTrNpP", + "CTg8Vq5RjXhfp332YC9DHQjAfFueLPimszv9i6xBFgPW", ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "2k9XFkra1XyobQb4Z73xSEgFsUdp87cmftYfWEpXQoah" + "CK1siD9yP37R4ErCECKg1rofsEAk9fdGpsfpMQnSvHBL" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "GpePwzXm6nomkj9CKPU4qwvrFnBcjYRrs3hSoRgHN5CP" + "5h8yw8oU78G4JeVB28U9ZjZpV5fCgm9gA8LfVJF8YD8W" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "8GjxSMXwRe7AyFZoF9R7XVcTC5wYoVKvdawU8ysRcBid" + "87cnbyVPkbfpQkjuQ5sCKXNYhvUbpzHNac6GJv1BnqDM" ); break; } @@ -9667,69 +8896,6 @@ fn test_tx_return_data() { } } -#[test] -fn test_load_and_execute_commit_transactions_rent_debits() { - let (mut genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - genesis_config.rent = Rent::default(); - let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank = Bank::new_from_parent( - bank, - &Pubkey::new_unique(), - genesis_config.epoch_schedule.get_first_slot_in_epoch(1), - ); - let amount = genesis_config.rent.minimum_balance(0); - - // Make sure that rent debits are tracked for successful transactions - 
{ - let alice = Keypair::new(); - test_utils::deposit(&bank, &alice.pubkey(), amount - 1).unwrap(); - let tx = system_transaction::transfer( - &mint_keypair, - &alice.pubkey(), - amount, - genesis_config.hash(), - ); - - let batch = bank.prepare_batch_for_tests(vec![tx]); - let commit_result = bank - .load_execute_and_commit_transactions( - &batch, - MAX_PROCESSING_AGE, - ExecutionRecordingConfig::new_single_setting(false), - &mut ExecuteTimings::default(), - None, - ) - .0 - .remove(0); - assert!(commit_result.is_ok()); - assert!(commit_result.was_executed_successfully()); - assert!(!commit_result.ok().unwrap().rent_debits.is_empty()); - } - - // Make sure that rent debits are ignored for failed transactions - { - let bob = Keypair::new(); - test_utils::deposit(&bank, &bob.pubkey(), amount - 1).unwrap(); - let tx = - system_transaction::transfer(&mint_keypair, &bob.pubkey(), 1, genesis_config.hash()); - - let batch = bank.prepare_batch_for_tests(vec![tx]); - let commit_result = bank - .load_execute_and_commit_transactions( - &batch, - MAX_PROCESSING_AGE, - ExecutionRecordingConfig::new_single_setting(false), - &mut ExecuteTimings::default(), - None, - ) - .0 - .remove(0); - assert!(commit_result.is_ok()); - assert!(!commit_result.was_executed_successfully()); - assert!(commit_result.ok().unwrap().rent_debits.is_empty()); - } -} - #[test] fn test_get_largest_accounts() { let GenesisConfigInfo { genesis_config, .. 
} = @@ -10038,21 +9204,6 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { ); } -#[test] -fn test_rent_debits() { - let mut rent_debits = RentDebits::default(); - - // No entry for 0 rewards - rent_debits.insert(&Pubkey::new_unique(), 0, 0); - assert_eq!(rent_debits.len(), 0); - - // Some that actually work - rent_debits.insert(&Pubkey::new_unique(), 1, 0); - assert_eq!(rent_debits.len(), 1); - rent_debits.insert(&Pubkey::new_unique(), i64::MAX as u64, 0); - assert_eq!(rent_debits.len(), 2); -} - #[test] fn test_compute_budget_program_noop() { solana_logger::setup(); @@ -11876,63 +11027,6 @@ fn test_accounts_data_size_and_resize_transactions() { } } -#[test] -fn test_get_rent_paying_pubkeys() { - let lamports = 1; - let bank = create_simple_test_bank(lamports); - - let n = 432_000; - assert!(bank.get_rent_paying_pubkeys(&(0, 1, n)).is_none()); - assert!(bank.get_rent_paying_pubkeys(&(0, 2, n)).is_none()); - assert!(bank.get_rent_paying_pubkeys(&(0, 0, n)).is_none()); - - let pk1 = Pubkey::from([2; 32]); - let pk2 = Pubkey::from([3; 32]); - let index1 = accounts_partition::partition_from_pubkey(&pk1, n); - let index2 = accounts_partition::partition_from_pubkey(&pk2, n); - assert!(index1 > 0, "{}", index1); - assert!(index2 > index1, "{index2}, {index1}"); - - let epoch_schedule = EpochSchedule::custom(n, 0, false); - - let mut rent_paying_accounts_by_partition = RentPayingAccountsByPartition::new(&epoch_schedule); - rent_paying_accounts_by_partition.add_account(&pk1); - rent_paying_accounts_by_partition.add_account(&pk2); - - bank.rc - .accounts - .accounts_db - .accounts_index - .rent_paying_accounts_by_partition - .set(rent_paying_accounts_by_partition) - .unwrap(); - - assert_eq!( - bank.get_rent_paying_pubkeys(&(0, 1, n)), - Some(HashSet::default()) - ); - assert_eq!( - bank.get_rent_paying_pubkeys(&(0, 2, n)), - Some(HashSet::default()) - ); - assert_eq!( - bank.get_rent_paying_pubkeys(&(index1.saturating_sub(1), index1, n)), - 
Some(HashSet::from([pk1])) - ); - assert_eq!( - bank.get_rent_paying_pubkeys(&(index2.saturating_sub(1), index2, n)), - Some(HashSet::from([pk2])) - ); - assert_eq!( - bank.get_rent_paying_pubkeys(&(index1.saturating_sub(1), index2, n)), - Some(HashSet::from([pk2, pk1])) - ); - assert_eq!( - bank.get_rent_paying_pubkeys(&(0, 0, n)), - Some(HashSet::default()) - ); -} - /// Ensure that accounts rent epoch is updated correctly by rent collection #[test_case(true; "enable partitioned rent fees collection")] #[test_case(false; "disable partitioned rent fees collection")] @@ -11966,7 +11060,7 @@ fn test_partitioned_rent_collection(should_run_partitioned_rent_collection: bool // Run partitioned rent collection. If enabled, partitioned rent collection // will update the rent epoch for any rent exempt accounts whose rent epoch // is not already set to RENT_EXEMPT_RENT_EPOCH. - bank.collect_rent_eagerly(); + bank.run_partitioned_rent_exempt_status_updates(); let updated_account = bank.get_account(&account_pubkey).unwrap(); if should_run_partitioned_rent_collection { assert_eq!(updated_account.rent_epoch(), RENT_EXEMPT_RENT_EPOCH); @@ -11975,62 +11069,6 @@ fn test_partitioned_rent_collection(should_run_partitioned_rent_collection: bool } } -/// Ensure that accounts data size is updated correctly by rent collection -#[test_case(true; "enable rent fees collection")] -#[test_case(false; "disable rent fees collection")] -fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { - let GenesisConfigInfo { - mut genesis_config, .. 
- } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); - genesis_config.rent = Rent::default(); - if should_collect_rent { - genesis_config - .accounts - .remove(&agave_feature_set::disable_rent_fees_collection::id()); - genesis_config - .accounts - .remove(&agave_feature_set::disable_partitioned_rent_collection::id()); - } - - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - let slot = bank.slot() + bank.slot_count_per_normal_epoch(); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - - // make another bank so that any reclaimed accounts from the previous bank do not impact - // this test - let slot = bank.slot() + bank.slot_count_per_normal_epoch(); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - - // Store an account into the bank that is rent-paying and has data - let data_size = 123; - let mut account = AccountSharedData::new(1, data_size, &Pubkey::default()); - let keypair = Keypair::new(); - bank.store_account(&keypair.pubkey(), &account); - - // Ensure if we collect rent from the account that it will be reclaimed - { - let info = bank - .rent_collector - .collect_from_existing_account(&keypair.pubkey(), &mut account); - assert_eq!(info.account_data_len_reclaimed, data_size as u64); - } - - // Collect rent for real - assert_eq!(should_collect_rent, bank.should_collect_rent()); - let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); - bank.collect_rent_eagerly(); - let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); - - let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent - - accounts_data_size_delta_before_collecting_rent; - assert!(!should_collect_rent || accounts_data_size_delta_delta < 0); - let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize; - - // Ensure the account is reclaimed by rent collection - assert!(!should_collect_rent 
|| reclaimed_data_size == data_size); -} - #[test] fn test_accounts_data_size_with_default_bank() { let bank = Bank::default_for_tests(); @@ -13038,7 +12076,7 @@ fn test_rebuild_skipped_rewrites() { // This fn is called within freeze(), but freeze() *consumes* Self::skipped_rewrites! // For testing, we want to know what's in the skipped rewrites, so we perform // rent collection manually. - bank.collect_rent_eagerly(); + bank.run_partitioned_rent_exempt_status_updates(); let actual_skipped_rewrites = bank.skipped_rewrites.lock().unwrap().clone(); // Ensure skipped rewrites now includes the account we stored above assert!(actual_skipped_rewrites.contains_key(&pubkey)); diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs index 31da6303137cb2..7392d49df430e7 100644 --- a/runtime/src/rent_collector.rs +++ b/runtime/src/rent_collector.rs @@ -14,7 +14,7 @@ use { solana_clock::Epoch, solana_pubkey::Pubkey, solana_rent::{Rent, RentDue}, - solana_rent_collector::{CollectedInfo, RentCollector}, + solana_rent_collector::RentCollector, solana_svm_rent_collector::{rent_state::RentState, svm_rent_collector::SVMRentCollector}, solana_transaction_context::IndexOfAccount, solana_transaction_error::{TransactionError, TransactionResult as Result}, @@ -35,10 +35,6 @@ impl RentCollectorWithMetrics { } impl SVMRentCollector for RentCollectorWithMetrics { - fn collect_rent(&self, address: &Pubkey, account: &mut AccountSharedData) -> CollectedInfo { - self.0.collect_rent(address, account) - } - fn get_rent(&self) -> &Rent { self.0.get_rent() } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 68efcccdee2fa1..fa3ac5cf5858a6 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -151,7 +151,7 @@ struct DeserializableVersionedBank { collector_fees: u64, _fee_calculator: FeeCalculator, fee_rate_governor: FeeRateGovernor, - collected_rent: u64, + _collected_rent: u64, rent_collector: RentCollector, epoch_schedule: 
EpochSchedule, inflation: Inflation, @@ -188,7 +188,6 @@ impl From for BankFieldsToDeserialize { collector_id: dvb.collector_id, collector_fees: dvb.collector_fees, fee_rate_governor: dvb.fee_rate_governor, - collected_rent: dvb.collected_rent, rent_collector: dvb.rent_collector, epoch_schedule: dvb.epoch_schedule, inflation: dvb.inflation, @@ -269,7 +268,7 @@ impl From for SerializableVersionedBank { collector_fees: rhs.collector_fees, fee_calculator: FeeCalculator::default(), fee_rate_governor: rhs.fee_rate_governor, - collected_rent: rhs.collected_rent, + collected_rent: u64::default(), rent_collector: rhs.rent_collector, epoch_schedule: rhs.epoch_schedule, inflation: rhs.inflation, diff --git a/svm-feature-set/src/lib.rs b/svm-feature-set/src/lib.rs index 1fa0109ccb1e6d..4f2cc899a980db 100644 --- a/svm-feature-set/src/lib.rs +++ b/svm-feature-set/src/lib.rs @@ -34,7 +34,6 @@ pub struct SVMFeatureSet { pub fix_alt_bn128_multiplication_input_length: bool, pub loosen_cpi_size_restriction: bool, pub increase_tx_account_lock_limit: bool, - pub disable_rent_fees_collection: bool, pub enable_extend_program_checked: bool, pub formalize_loaded_transaction_data_size: bool, pub disable_zk_elgamal_proof_program: bool, @@ -78,7 +77,6 @@ impl SVMFeatureSet { fix_alt_bn128_multiplication_input_length: true, loosen_cpi_size_restriction: true, increase_tx_account_lock_limit: true, - disable_rent_fees_collection: true, enable_extend_program_checked: true, formalize_loaded_transaction_data_size: true, disable_zk_elgamal_proof_program: true, diff --git a/svm-rent-collector/src/svm_rent_collector.rs b/svm-rent-collector/src/svm_rent_collector.rs index bba32248bac907..867bf9d69dc759 100644 --- a/svm-rent-collector/src/svm_rent_collector.rs +++ b/svm-rent-collector/src/svm_rent_collector.rs @@ -6,7 +6,6 @@ use { solana_clock::Epoch, solana_pubkey::Pubkey, solana_rent::{Rent, RentDue}, - solana_rent_collector::CollectedInfo, solana_transaction_context::{IndexOfAccount, 
TransactionContext}, solana_transaction_error::{TransactionError, TransactionResult}, }; @@ -74,9 +73,6 @@ pub trait SVMRentCollector { } } - /// Collect rent from an account. - fn collect_rent(&self, address: &Pubkey, account: &mut AccountSharedData) -> CollectedInfo; - /// Determine the rent state of an account. /// /// This method has a default implementation that treats accounts with zero diff --git a/svm-rent-collector/src/svm_rent_collector/rent_collector.rs b/svm-rent-collector/src/svm_rent_collector/rent_collector.rs index f308ef11fd64e1..c4beb67a49cc6e 100644 --- a/svm-rent-collector/src/svm_rent_collector/rent_collector.rs +++ b/svm-rent-collector/src/svm_rent_collector/rent_collector.rs @@ -3,18 +3,12 @@ use { crate::svm_rent_collector::SVMRentCollector, - solana_account::AccountSharedData, solana_clock::Epoch, - solana_pubkey::Pubkey, solana_rent::{Rent, RentDue}, - solana_rent_collector::{CollectedInfo, RentCollector}, + solana_rent_collector::RentCollector, }; impl SVMRentCollector for RentCollector { - fn collect_rent(&self, address: &Pubkey, account: &mut AccountSharedData) -> CollectedInfo { - self.collect_from_existing_account(address, account) - } - fn get_rent(&self) -> &Rent { &self.rent } @@ -29,7 +23,7 @@ mod tests { use { super::*, crate::rent_state::RentState, - solana_account::ReadableAccount, + solana_account::{AccountSharedData, ReadableAccount}, solana_clock::Epoch, solana_epoch_schedule::EpochSchedule, solana_pubkey::Pubkey, diff --git a/svm/Cargo.toml b/svm/Cargo.toml index a52e5d9ac39a7d..99a9c9031c10a6 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -65,7 +65,6 @@ solana-program-runtime = { workspace = true, features = ["metrics"] } solana-pubkey = { workspace = true } solana-rent = { workspace = true } solana-rent-collector = { workspace = true } -solana-rent-debits = { workspace = true } solana-sdk-ids = { workspace = true } solana-slot-hashes = { workspace = true } solana-svm-callback = { workspace = true } diff --git 
a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 0863ca58c9cd9b..17691ef9bee661 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -7370,16 +7370,6 @@ dependencies = [ "solana-sdk-ids", ] -[[package]] -name = "solana-rent-debits" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f6f9113c6003492e74438d1288e30cffa8ccfdc2ef7b49b9e816d8034da18cd" -dependencies = [ - "solana-pubkey", - "solana-reward-info", -] - [[package]] name = "solana-reward-info" version = "2.2.1" @@ -7660,7 +7650,6 @@ dependencies = [ "solana-rayon-threadlimit", "solana-rent", "solana-rent-collector", - "solana-rent-debits", "solana-reward-info", "solana-runtime-transaction", "solana-sdk-ids", @@ -8166,7 +8155,6 @@ dependencies = [ "solana-pubkey", "solana-rent", "solana-rent-collector", - "solana-rent-debits", "solana-sdk-ids", "solana-slot-hashes", "solana-svm-callback", diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 8608a8d4dad311..804f384f0d0ee7 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -22,8 +22,7 @@ use { }, solana_pubkey::Pubkey, solana_rent::RentDue, - solana_rent_collector::{CollectedInfo, RENT_EXEMPT_RENT_EPOCH}, - solana_rent_debits::RentDebits, + solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, solana_sdk_ids::{ bpf_loader_upgradeable, native_loader, sysvar::{self, slot_history}, @@ -47,7 +46,6 @@ pub(crate) const TRANSACTION_ACCOUNT_BASE_SIZE: usize = 64; const ADDRESS_LOOKUP_TABLE_BASE_SIZE: usize = 8248; // for the load instructions -pub(crate) type TransactionRent = u64; pub(crate) type TransactionProgramIndices = Vec>; pub type TransactionCheckResult = Result; type TransactionValidationResult = Result; @@ -127,7 +125,6 @@ impl Default for ValidatedTransactionDetails { pub(crate) struct LoadedTransactionAccount { pub(crate) account: AccountSharedData, pub(crate) loaded_size: usize, - pub(crate) rent_collected: u64, } #[derive(PartialEq, Eq, Debug, 
Clone)] @@ -142,8 +139,6 @@ pub struct LoadedTransaction { pub fee_details: FeeDetails, pub rollback_accounts: RollbackAccounts, pub(crate) compute_budget: SVMTransactionExecutionBudget, - pub rent: TransactionRent, - pub rent_debits: RentDebits, pub loaded_accounts_data_size: u32, } @@ -223,7 +218,6 @@ impl<'a, CB: TransactionProcessingCallback> AccountLoader<'a, CB> { account.map(|account| LoadedTransactionAccount { loaded_size: base_account_size.saturating_add(account.data().len()), account, - rent_collected: 0, }) } @@ -355,33 +349,24 @@ impl solana_svm_callback::InvokeContextCallba { } -/// Collect rent from an account if rent is still enabled and regardless of -/// whether rent is enabled, set the rent epoch to u64::MAX if the account is -/// rent exempt. -pub fn collect_rent_from_account( - feature_set: &SVMFeatureSet, +/// Set the rent epoch to u64::MAX if the account is rent exempt. +pub fn update_rent_exempt_status_for_account( rent_collector: &dyn SVMRentCollector, - address: &Pubkey, account: &mut AccountSharedData, -) -> CollectedInfo { - if !feature_set.disable_rent_fees_collection { - rent_collector.collect_rent(address, account) - } else { - // When rent fee collection is disabled, we won't collect rent for any account. If there - // are any rent paying accounts, their `rent_epoch` won't change either. However, if the - // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its - // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. - if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && rent_collector.get_rent_due( - account.lamports(), - account.data().len(), - account.rent_epoch(), - ) == RentDue::Exempt - { - account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - - CollectedInfo::default() +) { + // Now that rent fee collection is disabled, we won't collect rent for any + // account. If there are any rent paying accounts, their `rent_epoch` won't + // change either. 
However, if the account itself is rent-exempted but its + // `rent_epoch` is not u64::MAX, we will set its `rent_epoch` to u64::MAX. + // In such case, the behavior stays the same as before. + if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && rent_collector.get_rent_due( + account.lamports(), + account.data().len(), + account.rent_epoch(), + ) == RentDue::Exempt + { + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } } @@ -465,8 +450,6 @@ pub(crate) fn load_transaction( accounts: loaded_tx_accounts.accounts, program_indices: loaded_tx_accounts.program_indices, fee_details: tx_details.fee_details, - rent: loaded_tx_accounts.rent, - rent_debits: loaded_tx_accounts.rent_debits, rollback_accounts: tx_details.rollback_accounts, compute_budget: tx_details.compute_budget, loaded_accounts_data_size: loaded_tx_accounts.loaded_accounts_data_size, @@ -485,8 +468,6 @@ pub(crate) fn load_transaction( struct LoadedTransactionAccounts { pub(crate) accounts: Vec, pub(crate) program_indices: TransactionProgramIndices, - pub(crate) rent: TransactionRent, - pub(crate) rent_debits: RentDebits, pub(crate) loaded_accounts_data_size: u32, } @@ -561,8 +542,6 @@ fn load_transaction_accounts_simd186( let mut loaded_transaction_accounts = LoadedTransactionAccounts { accounts: Vec::with_capacity(account_keys.len()), program_indices: Vec::with_capacity(message.num_instructions()), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; @@ -576,11 +555,10 @@ fn load_transaction_accounts_simd186( )?; let mut collect_loaded_account = - |account_loader: &mut AccountLoader, key, loaded_account| -> Result<()> { + |account_loader: &mut AccountLoader, key: &Pubkey, loaded_account| -> Result<()> { let LoadedTransactionAccount { account, loaded_size, - rent_collected, } = loaded_account; loaded_transaction_accounts.increase_calculated_data_size( @@ -589,14 +567,6 @@ fn load_transaction_accounts_simd186( error_metrics, )?; - loaded_transaction_accounts.rent = 
loaded_transaction_accounts - .rent - .saturating_add(rent_collected); - - loaded_transaction_accounts - .rent_debits - .insert(key, rent_collected, account.lamports()); - // This has been annotated branch-by-branch because collapsing the logic is infeasible. // Its purpose is to ensure programdata accounts are counted once and *only* once per // transaction. By checking account_keys, we never double-count a programdata account @@ -696,18 +666,15 @@ fn load_transaction_accounts_old( error_metrics: &mut TransactionErrorMetrics, rent_collector: &dyn SVMRentCollector, ) -> Result { - let mut tx_rent: TransactionRent = 0; let account_keys = message.account_keys(); let mut accounts = Vec::with_capacity(account_keys.len()); let mut validated_loaders = AHashSet::with_capacity(PROGRAM_OWNERS.len()); - let mut rent_debits = RentDebits::default(); let mut accumulated_accounts_data_size: Saturating = Saturating(0); - let mut collect_loaded_account = |key, loaded_account| -> Result<()> { + let mut collect_loaded_account = |key: &Pubkey, loaded_account| -> Result<()> { let LoadedTransactionAccount { account, loaded_size, - rent_collected, } = loaded_account; accumulate_and_check_loaded_account_data_size( @@ -717,9 +684,6 @@ fn load_transaction_accounts_old( error_metrics, )?; - tx_rent += rent_collected; - rent_debits.insert(key, rent_collected, account.lamports()); - accounts.push((*key, account)); Ok(()) }; @@ -800,8 +764,6 @@ fn load_transaction_accounts_old( Ok(LoadedTransactionAccounts { accounts, program_indices, - rent: tx_rent, - rent_debits, loaded_accounts_data_size: accumulated_accounts_data_size.0, }) } @@ -820,23 +782,13 @@ fn load_transaction_account( LoadedTransactionAccount { loaded_size: 0, account: construct_instructions_account(message), - rent_collected: 0, } } else if let Some(mut loaded_account) = account_loader.load_transaction_account(account_key, is_writable) { - loaded_account.rent_collected = if is_writable { - collect_rent_from_account( - 
account_loader.feature_set, - rent_collector, - account_key, - &mut loaded_account.account, - ) - .rent_amount - } else { - 0 - }; - + if is_writable { + update_rent_exempt_status_for_account(rent_collector, &mut loaded_account.account); + } loaded_account } else { let mut default_account = AccountSharedData::default(); @@ -847,7 +799,6 @@ fn load_transaction_account( LoadedTransactionAccount { loaded_size: default_account.data().len(), account: default_account, - rent_collected: 0, } }; @@ -934,7 +885,6 @@ mod tests { solana_pubkey::Pubkey, solana_rent::Rent, solana_rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, - solana_rent_debits::RentDebits, solana_sdk_ids::{ bpf_loader, bpf_loader_upgradeable, native_loader, system_program, sysvar, }, @@ -1013,9 +963,8 @@ mod tests { accounts: &[TransactionAccount], rent_collector: &RentCollector, error_metrics: &mut TransactionErrorMetrics, - mut feature_set: SVMFeatureSet, + feature_set: SVMFeatureSet, ) -> TransactionLoadResult { - feature_set.disable_rent_fees_collection = false; let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); let fee_payer_account = accounts[0].1.clone(); let mut accounts_map = HashMap::new(); @@ -1636,7 +1585,6 @@ mod tests { .accounts_map .insert(fee_payer_address, fee_payer_account.clone()); let mut account_loader = (&mock_bank).into(); - let fee_payer_rent_debit = 42; let mut error_metrics = TransactionErrorMetrics::default(); @@ -1651,25 +1599,16 @@ mod tests { LoadedTransactionAccount { loaded_size: fee_payer_account.data().len(), account: fee_payer_account.clone(), - rent_collected: fee_payer_rent_debit, }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut error_metrics, &RentCollector::default(), ); - - let expected_rent_debits = { - let mut rent_debits = RentDebits::default(); - rent_debits.insert(&fee_payer_address, fee_payer_rent_debit, fee_payer_balance); - rent_debits - }; assert_eq!( result.unwrap(), LoadedTransactionAccounts { accounts: vec![(fee_payer_address, 
fee_payer_account)], program_indices: vec![], - rent: fee_payer_rent_debit, - rent_debits: expected_rent_debits, loaded_accounts_data_size: 0, } ); @@ -1724,7 +1663,6 @@ mod tests { LoadedTransactionAccount { account: fee_payer_account.clone(), loaded_size: base_account_size, - ..LoadedTransactionAccount::default() }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut error_metrics, @@ -1749,8 +1687,6 @@ mod tests { ) ], program_indices: vec![vec![]], - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size, } ); @@ -1900,7 +1836,6 @@ mod tests { LoadedTransactionAccount { account: fee_payer_account.clone(), loaded_size: base_account_size, - ..LoadedTransactionAccount::default() }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut error_metrics, @@ -1920,8 +1855,6 @@ mod tests { ), ], program_indices: vec![vec![1]], - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size, } ); @@ -2092,7 +2025,6 @@ mod tests { LoadedTransactionAccount { account: fee_payer_account.clone(), loaded_size: base_account_size, - ..LoadedTransactionAccount::default() }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut error_metrics, @@ -2112,8 +2044,6 @@ mod tests { ), ], program_indices: vec![vec![1]], - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size, } ); @@ -2191,7 +2121,6 @@ mod tests { LoadedTransactionAccount { account: fee_payer_account.clone(), loaded_size: base_account_size, - ..LoadedTransactionAccount::default() }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut error_metrics, @@ -2214,8 +2143,6 @@ mod tests { (key3.pubkey(), account_data), ], program_indices: vec![vec![1], vec![1]], - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size, } ); @@ -2354,7 +2281,6 @@ mod tests { loaded_fee_payer_account: LoadedTransactionAccount { account: fee_payer_account, loaded_size: base_account_size, - ..LoadedTransactionAccount::default() }, ..ValidatedTransactionDetails::default() }); @@ -2393,8 +2319,6 @@ mod tests { fee_details: 
FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size, } ); @@ -2458,76 +2382,39 @@ mod tests { } #[test] - fn test_collect_rent_from_account() { - let feature_set = SVMFeatureSet::all_enabled(); + fn test_update_rent_exempt_status_for_account() { let rent_collector = RentCollector { epoch: 1, ..RentCollector::default() }; - let address = Pubkey::new_unique(); let min_exempt_balance = rent_collector.rent.minimum_balance(0); let mut account = AccountSharedData::from(Account { lamports: min_exempt_balance, ..Account::default() }); - assert_eq!( - collect_rent_from_account(&feature_set, &rent_collector, &address, &mut account), - CollectedInfo::default() - ); + update_rent_exempt_status_for_account(&rent_collector, &mut account); assert_eq!(account.rent_epoch(), RENT_EXEMPT_RENT_EPOCH); } #[test] - fn test_collect_rent_from_account_rent_paying() { - let feature_set = SVMFeatureSet::all_enabled(); + fn test_update_rent_exempt_status_for_rent_paying_account() { let rent_collector = RentCollector { epoch: 1, ..RentCollector::default() }; - let address = Pubkey::new_unique(); let mut account = AccountSharedData::from(Account { lamports: 1, ..Account::default() }); - assert_eq!( - collect_rent_from_account(&feature_set, &rent_collector, &address, &mut account), - CollectedInfo::default() - ); + update_rent_exempt_status_for_account(&rent_collector, &mut account); assert_eq!(account.rent_epoch(), 0); assert_eq!(account.lamports(), 1); } - #[test] - fn test_collect_rent_from_account_rent_enabled() { - let mut feature_set = SVMFeatureSet::all_enabled(); - feature_set.disable_rent_fees_collection = false; - let rent_collector = RentCollector { - epoch: 1, - ..RentCollector::default() - }; - - let address = Pubkey::new_unique(); - let mut account = AccountSharedData::from(Account { - lamports: 1, - data: vec![0], - 
..Account::default() - }); - - assert_eq!( - collect_rent_from_account(&feature_set, &rent_collector, &address, &mut account), - CollectedInfo { - rent_amount: 1, - account_data_len_reclaimed: 1 - } - ); - assert_eq!(account.rent_epoch(), 0); - assert_eq!(account.lamports(), 0); - } - // Ensure `TransactionProcessingCallback::inspect_account()` is called when // loading accounts for transaction processing. #[test] @@ -2718,7 +2605,6 @@ mod tests { LoadedTransactionAccount { account: fee_payer_account.clone(), loaded_size: fee_payer_size as usize, - rent_collected: 0, }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut TransactionErrorMetrics::default(), @@ -3160,7 +3046,6 @@ mod tests { LoadedTransactionAccount { loaded_size: TRANSACTION_ACCOUNT_BASE_SIZE + fee_payer_account.data().len(), account: fee_payer_account, - rent_collected: 0, }, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, &mut TransactionErrorMetrics::default(), diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs index e25f507499463d..850d0beb1c997d 100644 --- a/svm/src/rollback_accounts.rs +++ b/svm/src/rollback_accounts.rs @@ -35,18 +35,8 @@ impl RollbackAccounts { nonce: Option, fee_payer_address: Pubkey, mut fee_payer_account: AccountSharedData, - fee_payer_rent_debit: u64, fee_payer_loaded_rent_epoch: Epoch, ) -> Self { - // When the fee payer account is rolled back due to transaction failure, - // rent should not be charged so credit the previously debited rent - // amount. 
- fee_payer_account.set_lamports( - fee_payer_account - .lamports() - .saturating_add(fee_payer_rent_debit), - ); - if let Some(nonce) = nonce { if &fee_payer_address == nonce.address() { // `nonce` contains an AccountSharedData which has already been advanced to the current DurableNonce @@ -120,10 +110,9 @@ mod tests { let fee_payer_account = AccountSharedData::new(100, 0, &Pubkey::default()); let fee_payer_rent_epoch = fee_payer_account.rent_epoch(); - const TEST_RENT_DEBIT: u64 = 1; - let rent_collected_fee_payer_account = { + let rent_epoch_updated_fee_payer_account = { let mut account = fee_payer_account.clone(); - account.set_lamports(fee_payer_account.lamports() - TEST_RENT_DEBIT); + account.set_lamports(fee_payer_account.lamports()); account.set_rent_epoch(fee_payer_rent_epoch + 1); account }; @@ -131,8 +120,7 @@ mod tests { let rollback_accounts = RollbackAccounts::new( None, fee_payer_address, - rent_collected_fee_payer_account, - TEST_RENT_DEBIT, + rent_epoch_updated_fee_payer_account, fee_payer_rent_epoch, ); @@ -161,19 +149,17 @@ mod tests { ) .unwrap(); - const TEST_RENT_DEBIT: u64 = 1; - let rent_collected_nonce_account = { + let rent_epoch_updated_fee_payer_account = { let mut account = nonce_account.clone(); - account.set_lamports(nonce_account.lamports() - TEST_RENT_DEBIT); + account.set_lamports(nonce_account.lamports()); account }; - let nonce = NonceInfo::new(nonce_address, rent_collected_nonce_account.clone()); + let nonce = NonceInfo::new(nonce_address, rent_epoch_updated_fee_payer_account.clone()); let rollback_accounts = RollbackAccounts::new( Some(nonce), nonce_address, - rent_collected_nonce_account, - TEST_RENT_DEBIT, + rent_epoch_updated_fee_payer_account, u64::MAX, // ignored ); @@ -205,10 +191,9 @@ mod tests { let fee_payer_address = Pubkey::new_unique(); let fee_payer_account = AccountSharedData::new(44, 0, &Pubkey::default()); - const TEST_RENT_DEBIT: u64 = 1; - let rent_collected_fee_payer_account = { + let 
rent_epoch_updated_fee_payer_account = { let mut account = fee_payer_account.clone(); - account.set_lamports(fee_payer_account.lamports() - TEST_RENT_DEBIT); + account.set_lamports(fee_payer_account.lamports()); account }; @@ -216,8 +201,7 @@ mod tests { let rollback_accounts = RollbackAccounts::new( Some(nonce), fee_payer_address, - rent_collected_fee_payer_account.clone(), - TEST_RENT_DEBIT, + rent_epoch_updated_fee_payer_account.clone(), u64::MAX, // ignored ); diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs index 671d2052e7427c..cf7dee4334017b 100644 --- a/svm/src/transaction_commit_result.rs +++ b/svm/src/transaction_commit_result.rs @@ -1,8 +1,7 @@ use { crate::transaction_execution_result::TransactionLoadedAccountsStats, solana_fee_structure::FeeDetails, solana_message::inner_instruction::InnerInstructionsList, - solana_rent_debits::RentDebits, solana_transaction_context::TransactionReturnData, - solana_transaction_error::TransactionResult, + solana_transaction_context::TransactionReturnData, solana_transaction_error::TransactionResult, }; pub type TransactionCommitResult = TransactionResult; @@ -16,7 +15,6 @@ pub struct CommittedTransaction { pub return_data: Option, pub executed_units: u64, pub fee_details: FeeDetails, - pub rent_debits: RentDebits, pub loaded_account_stats: TransactionLoadedAccountsStats, } diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index c5daeae3f681bf..4257f9c76e9799 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -3,8 +3,8 @@ use qualifier_attr::{field_qualifiers, qualifiers}; use { crate::{ account_loader::{ - collect_rent_from_account, load_transaction, validate_fee_payer, AccountLoader, - CheckedTransactionDetails, LoadedTransaction, TransactionCheckResult, + load_transaction, update_rent_exempt_status_for_account, validate_fee_payer, + AccountLoader, CheckedTransactionDetails, LoadedTransaction, 
TransactionCheckResult, TransactionLoadResult, ValidatedTransactionDetails, }, account_overrides::AccountOverrides, @@ -582,13 +582,7 @@ impl TransactionBatchProcessor { }; let fee_payer_loaded_rent_epoch = loaded_fee_payer.account.rent_epoch(); - loaded_fee_payer.rent_collected = collect_rent_from_account( - account_loader.feature_set, - rent_collector, - fee_payer_address, - &mut loaded_fee_payer.account, - ) - .rent_amount; + update_rent_exempt_status_for_account(rent_collector, &mut loaded_fee_payer.account); let fee_payer_index = 0; validate_fee_payer( @@ -606,7 +600,6 @@ impl TransactionBatchProcessor { nonce, *fee_payer_address, loaded_fee_payer.account.clone(), - loaded_fee_payer.rent_collected, fee_payer_loaded_rent_epoch, ); @@ -1123,7 +1116,6 @@ mod tests { }, solana_rent::Rent, solana_rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, - solana_rent_debits::RentDebits, solana_sdk_ids::{bpf_loader, system_program, sysvar}, solana_signature::Signature, solana_svm_callback::{AccountState, InvokeContextCallback}, @@ -1370,8 +1362,6 @@ mod tests { fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 32, }; @@ -1467,8 +1457,6 @@ mod tests { fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), - rent: 0, - rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, }; @@ -2080,7 +2068,6 @@ mod tests { ); let fee_payer_rent_epoch = current_epoch; - let fee_payer_rent_debit = 0; let fee_payer_account = AccountSharedData::new_rent_epoch( starting_balance, 0, @@ -2136,7 +2123,6 @@ mod tests { None, // nonce *fee_payer_address, post_validation_fee_payer_account.clone(), - fee_payer_rent_debit, fee_payer_rent_epoch ), compute_budget: compute_budget_and_limits.budget, @@ -2146,7 +2132,6 @@ mod tests { 
loaded_fee_payer_account: LoadedTransactionAccount { loaded_size: base_account_size + fee_payer_account.data().len(), account: post_validation_fee_payer_account, - rent_collected: fee_payer_rent_debit, }, }) ); @@ -2170,14 +2155,6 @@ mod tests { let transaction_fee = lamports_per_signature; let starting_balance = min_balance - 1; let fee_payer_account = AccountSharedData::new(starting_balance, 0, &Pubkey::default()); - let fee_payer_rent_debit = rent_collector - .get_rent_due( - fee_payer_account.lamports(), - fee_payer_account.data().len(), - fee_payer_account.rent_epoch(), - ) - .lamports(); - assert!(fee_payer_rent_debit > 0); let mut mock_accounts = HashMap::new(); mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); @@ -2187,7 +2164,6 @@ mod tests { }; mock_bank.feature_set.formalize_loaded_transaction_data_size = formalize_loaded_transaction_data_size; - mock_bank.feature_set.disable_rent_fees_collection = false; let mut account_loader = (&mock_bank).into(); let mut error_counters = TransactionErrorMetrics::default(); @@ -2206,8 +2182,7 @@ mod tests { let post_validation_fee_payer_account = { let mut account = fee_payer_account.clone(); - account.set_rent_epoch(1); - account.set_lamports(starting_balance - transaction_fee - fee_payer_rent_debit); + account.set_lamports(starting_balance - transaction_fee); account }; @@ -2224,7 +2199,6 @@ mod tests { None, // nonce *fee_payer_address, post_validation_fee_payer_account.clone(), - fee_payer_rent_debit, 0, // rent epoch ), compute_budget: compute_budget_and_limits.budget, @@ -2234,7 +2208,6 @@ mod tests { loaded_fee_payer_account: LoadedTransactionAccount { loaded_size: base_account_size + fee_payer_account.data().len(), account: post_validation_fee_payer_account, - rent_collected: fee_payer_rent_debit, } }) ); @@ -2507,7 +2480,6 @@ mod tests { Some(future_nonce), *fee_payer_address, post_validation_fee_payer_account.clone(), - 0, // fee_payer_rent_debit 0, // fee_payer_rent_epoch ), 
compute_budget: compute_budget_and_limits.budget, @@ -2517,7 +2489,6 @@ mod tests { loaded_fee_payer_account: LoadedTransactionAccount { loaded_size: base_account_size + fee_payer_account.data().len(), account: post_validation_fee_payer_account, - rent_collected: 0, } }) ); From 19d0027463ea8c574d36a9d7921844caeb094a5b Mon Sep 17 00:00:00 2001 From: Kamil Skalski Date: Fri, 20 Jun 2025 17:27:18 +0200 Subject: [PATCH 048/124] Untar snapshot using io_uring readahead reader and unpacking on worker threads (#6535) * Impl untar chunking with io_uring readahead file * Address PR comments * Address PR comments * Unwrap directly in test util for better stacktrace * Test setting ulimit -l on CI * Try disabling seccomp for docker * Try setting docker ulimit flag * Also set hard limit * Update comment * Set ulimit in run again * Try with -1 * Address PR comments * Asset entry is not sparse file * Don't replace buf from original op * More PR comments addressed * Use NonNull for PageAlignedMemory. * Join handles from streaming_unarchive_snapshot * Make storages rebuilder fail more gracefully when file_receiver gets disconnected. 
--- Cargo.lock | 2 + accounts-db/Cargo.toml | 2 + accounts-db/src/buffered_reader.rs | 33 +- accounts-db/src/hardened_unpack.rs | 286 +++++- accounts-db/src/io_uring/memory.rs | 168 ++++ accounts-db/src/io_uring/mod.rs | 2 + .../src/io_uring/sequential_file_reader.rs | 507 ++++++++++ accounts-db/src/lib.rs | 2 +- accounts-db/src/shared_buffer_reader.rs | 913 ------------------ ci/docker-run.sh | 5 + programs/sbf/Cargo.lock | 26 +- runtime/src/snapshot_utils.rs | 258 +++-- runtime/src/snapshot_utils/archive_format.rs | 27 + svm/examples/Cargo.lock | 16 +- 14 files changed, 1128 insertions(+), 1119 deletions(-) create mode 100644 accounts-db/src/io_uring/memory.rs create mode 100644 accounts-db/src/io_uring/sequential_file_reader.rs delete mode 100644 accounts-db/src/shared_buffer_reader.rs diff --git a/Cargo.lock b/Cargo.lock index f75d959d29bcf4..6e3c5707df204e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6733,6 +6733,7 @@ dependencies = [ "indexmap 2.9.0", "io-uring", "itertools 0.12.1", + "libc", "libsecp256k1", "log", "lz4", @@ -6771,6 +6772,7 @@ dependencies = [ "solana-message", "solana-metrics", "solana-nohash-hasher", + "solana-perf", "solana-pubkey", "solana-rayon-threadlimit", "solana-rent", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index a202c0233594de..28f590fc8df429 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -47,6 +47,7 @@ crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } indexmap = { workspace = true } itertools = { workspace = true } +libc = { workspace = true } log = { workspace = true } lz4 = { workspace = true } memmap2 = { workspace = true } @@ -84,6 +85,7 @@ solana-measure = { workspace = true } solana-message = { workspace = true } solana-metrics = { workspace = true } solana-nohash-hasher = { workspace = true } +solana-perf = { workspace = true } solana-pubkey = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-rent = { 
workspace = true, optional = true } diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs index 341593ef040b92..f0c36e00e10d35 100644 --- a/accounts-db/src/buffered_reader.rs +++ b/accounts-db/src/buffered_reader.rs @@ -10,7 +10,14 @@ //! be returned. use { crate::{append_vec::ValidSlice, file_io::read_more_buffer}, - std::{fs::File, mem::MaybeUninit, ops::Range, slice}, + std::{ + fs::File, + io::{BufRead, BufReader, Result as IoResult}, + mem::MaybeUninit, + ops::Range, + path::Path, + slice, + }, }; /// A trait that abstracts over the backing storage of the buffer. @@ -121,7 +128,7 @@ where T: Backing, { /// read to make sure we have the minimum amount of data - pub fn read(&mut self) -> std::io::Result { + pub fn read(&mut self) -> IoResult { let must_read = self .read_requirements .unwrap_or(self.default_min_read_requirement); @@ -179,6 +186,28 @@ impl<'a, const N: usize> BufferedReader<'a, Stack> { } } +/// Open file at `path` with buffering reader using `buf_size` memory and doing +/// read-ahead IO reads (if `io_uring` is supported by the host) +pub fn large_file_buf_reader( + path: impl AsRef, + buf_size: usize, +) -> IoResult> { + #[cfg(target_os = "linux")] + if agave_io_uring::io_uring_supported() { + use crate::io_uring::sequential_file_reader::SequentialFileReader; + + let io_uring_reader = SequentialFileReader::with_capacity(buf_size, path.as_ref()); + match io_uring_reader { + Ok(reader) => return Ok(Box::new(reader)), + Err(error) => { + log::warn!("unable to create io_uring reader: {error}"); + } + } + } + let file = File::open(path)?; + Ok(Box::new(BufReader::with_capacity(buf_size, file))) +} + #[cfg(all(unix, test))] mod tests { use {super::*, std::io::Write, tempfile::tempfile, test_case::test_case}; diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index 4ba48140173f17..2e0adaa42d32f1 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ 
-3,10 +3,11 @@ use { log::*, rand::{thread_rng, Rng}, solana_genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE}, + solana_perf::packet::bytes::{Buf, Bytes, BytesMut}, std::{ - collections::HashMap, + collections::{HashMap, VecDeque}, fs::{self, File}, - io::{BufReader, Read}, + io::{BufReader, Read, Result as IoResult}, path::{ Component::{self, CurDir, Normal}, Path, PathBuf, @@ -45,6 +46,226 @@ const MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT: u64 = 5_000_000; pub const MAX_GENESIS_ARCHIVE_UNPACKED_SIZE: u64 = 10 * 1024 * 1024; // 10 MiB const MAX_GENESIS_ARCHIVE_UNPACKED_COUNT: u64 = 100; +/// Collection of shareable byte slices forming a chain of bytes to read (using `std::io::Read`) +pub struct MultiBytes(VecDeque); + +impl MultiBytes { + pub fn new() -> Self { + // Typically we expect 2 entries: + // archive spanning until end of decode buffer + + // short continuation of last entry from next buffer + Self(VecDeque::with_capacity(2)) + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn push(&mut self, bytes: Bytes) { + self.0.push_back(bytes); + } +} + +impl Default for MultiBytes { + fn default() -> Self { + Self::new() + } +} + +impl Read for MultiBytes { + fn read(&mut self, mut buf: &mut [u8]) -> IoResult { + let mut copied_len = 0; + while let Some(bytes) = self.0.front_mut() { + let to_copy_len = bytes.len().min(buf.len()); + let (to_copy_dst_buf, remaining_buf) = buf.split_at_mut(to_copy_len); + bytes.copy_to_slice(to_copy_dst_buf); + copied_len += to_copy_len; + if bytes.is_empty() { + self.0.pop_front(); + } + if remaining_buf.is_empty() { + break; + } + buf = remaining_buf; + } + Ok(copied_len) + } +} + +pub struct BytesChannelReader { + current_bytes: MultiBytes, + receiver: crossbeam_channel::Receiver, +} + +impl BytesChannelReader { + pub fn new(receiver: crossbeam_channel::Receiver) -> Self { + Self { + current_bytes: MultiBytes::new(), + receiver, + } + } +} + +impl Read for BytesChannelReader { + fn 
read(&mut self, buf: &mut [u8]) -> IoResult { + while self.current_bytes.is_empty() { + let Ok(new_bytes) = self.receiver.recv() else { + return Ok(0); + }; + self.current_bytes = new_bytes; + } + self.current_bytes.read(buf) + } +} + +#[derive(Debug)] +pub struct ArchiveChunker { + input: R, + /// Intermediate buffer with tar contents to seek and split on entry boundaries + current_decoded: Bytes, + /// Number of bytes from last entry that were not available in decoded buffer + num_started_entry_bytes: usize, + mempool: VecDeque, +} + +impl ArchiveChunker { + const TAR_BLOCK_SIZE: usize = size_of::(); + // Buffer size will influence typical amount of bytes sent as single work item. + // Pick value significantly larger than majority of entries, yet not too large to keep + // the work-queue non-empty as much as possible. + const DECODE_BUF_SIZE: usize = 64 * 1024 * 1024; + + pub fn new(input: R) -> Self { + Self { + input, + current_decoded: Bytes::new(), + num_started_entry_bytes: 0, + mempool: VecDeque::new(), + } + } + + /// Read `self.input`, split it at TAR archive boundaries and send chunks consisting + /// of complete, independent tar archives into `chunk_sender`. + pub fn decode_and_send_chunks( + mut self, + chunk_sender: crossbeam_channel::Sender, + ) -> IoResult<()> { + // Bytes for chunk of archive to be sent to workers for unpacking + let mut current_chunk = MultiBytes::new(); + while self.refill_decoded_buf()? 
{ + let (new_bytes, was_archive_completion) = if self.has_started_entry() { + let started_entry_bytes = self.take_started_entry_bytes(); + let did_finish_entry = !self.has_started_entry(); + (started_entry_bytes, did_finish_entry) + } else { + (self.take_complete_archive()?, true) + }; + if !new_bytes.is_empty() { + current_chunk.push(new_bytes); + if was_archive_completion { + let chunk = std::mem::take(&mut current_chunk); + if chunk_sender.send(chunk).is_err() { + break; + } + } + } + } + Ok(()) + } + + /// Take as many bytes as possible from decoded data until last entry boundary. + fn take_complete_archive(&mut self) -> IoResult { + let mut archive = Archive::new(self.current_decoded.as_ref()); + + let mut completed_entry_end = 0; + let mut entry_end = 0; + for entry in archive.entries()? { + let entry = entry?; + // End of file data + assert_ne!(tar::EntryType::GNUSparse, entry.header().entry_type()); + entry_end = (entry.raw_file_position() + entry.size()) as usize; + + // Padding to block size + entry_end = Self::TAR_BLOCK_SIZE * entry_end.div_ceil(Self::TAR_BLOCK_SIZE); + if entry_end <= self.current_decoded.len() { + // Entry ends within decoded input, we can consume it + completed_entry_end = entry_end; + } + if entry_end + Self::TAR_BLOCK_SIZE > self.current_decoded.len() { + // Next entry's header spans beyond input - can't decode it, + // so terminate at last completed entry and keep remaining input after it + break; + } + } + // Either we run out of entries or last entry crosses input + let completed_entry = self.current_decoded.split_to(completed_entry_end); + if completed_entry.is_empty() && entry_end == completed_entry_end { + // Archive ended, clear any tar footer from remaining input + assert!( + self.current_decoded.len() <= 1024, + "Footer should be at most 1024 len" + ); + self.current_decoded.clear(); + } + self.num_started_entry_bytes = entry_end - completed_entry_end; + Ok(completed_entry) + } + + fn has_started_entry(&self) -> bool { + 
self.num_started_entry_bytes > 0 + } + + fn take_started_entry_bytes(&mut self) -> Bytes { + let num_bytes = self.num_started_entry_bytes.min(self.current_decoded.len()); + self.num_started_entry_bytes -= num_bytes; + self.current_decoded.split_to(num_bytes) + } + + /// Re-fill decoded buffer such that it has minimum bytes to decode TAR header. + /// + /// Return `false` on EOF + fn refill_decoded_buf(&mut self) -> IoResult { + if self.current_decoded.len() < Self::TAR_BLOCK_SIZE { + let mut next_buffer = self.get_next_buffer(); + if !self.current_decoded.is_empty() { + next_buffer.extend_from_slice(&self.current_decoded); + } + self.current_decoded = self.decode_bytes(next_buffer)?; + } + Ok(!self.current_decoded.is_empty()) + } + + /// Acquire memory buffer for decoding input reusing already consumed chunks. + fn get_next_buffer(&mut self) -> BytesMut { + if self.mempool.front().is_some_and(Bytes::is_unique) { + let mut reclaimed: BytesMut = self.mempool.pop_front().unwrap().into(); + reclaimed.clear(); + reclaimed + } else { + BytesMut::with_capacity(Self::DECODE_BUF_SIZE) + } + } + + /// Fill `decode_buf` with data from `self.input`. 
+ fn decode_bytes(&mut self, mut decode_buf: BytesMut) -> IoResult { + let mut_slice = unsafe { + std::slice::from_raw_parts_mut(decode_buf.as_mut_ptr(), decode_buf.capacity()) + }; + let mut current_len = decode_buf.len(); + while current_len < decode_buf.capacity() { + let new_bytes = self.input.read(&mut mut_slice[current_len..])?; + if new_bytes == 0 { + break; + } + current_len += new_bytes; + } + unsafe { decode_buf.set_len(current_len) }; + let bytes: Bytes = decode_buf.into(); + self.mempool.push_back(bytes.clone()); + Ok(bytes) + } +} + fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> Result { trace!( "checked_total_size_sum: {} + {} < {}", @@ -86,7 +307,7 @@ pub enum UnpackPath<'a> { } fn unpack_archive<'a, A, C, D>( - archive: &mut Archive, + mut archive: Archive, apparent_limit_size: u64, actual_limit_size: u64, limit_count: u64, @@ -197,7 +418,7 @@ where return Ok(()); #[cfg(unix)] - fn set_perms(dst: &Path, mode: u32) -> std::io::Result<()> { + fn set_perms(dst: &Path, mode: u32) -> IoResult<()> { use std::os::unix::fs::PermissionsExt; let perm = fs::Permissions::from_mode(mode as _); @@ -205,7 +426,7 @@ where } #[cfg(windows)] - fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { + fn set_perms(dst: &Path, _mode: u32) -> IoResult<()> { let mut perm = fs::metadata(dst)?.permissions(); // This is OK for Windows, but clippy doesn't realize we're doing this // only on Windows. @@ -300,32 +521,17 @@ fn validate_inside_dst(dst: &Path, file_dst: &Path) -> Result { /// Map from AppendVec file name to unpacked file system location pub type UnpackedAppendVecMap = HashMap; -// select/choose only 'index' out of each # of 'divisions' of total items. 
-pub struct ParallelSelector { - pub index: usize, - pub divisions: usize, -} - -impl ParallelSelector { - pub fn select_index(&self, index: usize) -> bool { - index % self.divisions == self.index - } -} - /// Unpacks snapshot and collects AppendVec file names & paths pub fn unpack_snapshot( - archive: &mut Archive, + archive: Archive, ledger_dir: &Path, account_paths: &[PathBuf], - parallel_selector: Option, ) -> Result { let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); - unpack_snapshot_with_processors( archive, ledger_dir, account_paths, - parallel_selector, |file, path| { unpacked_append_vec_map.insert(file.to_string(), path.join("accounts").join(file)); }, @@ -334,19 +540,18 @@ pub fn unpack_snapshot( .map(|_| unpacked_append_vec_map) } -/// Unpacks snapshots and sends entry file paths through the `sender` channel +/// Unpacks snapshot from (potentially partial) `archive` and +/// sends entry file paths through the `sender` channel pub fn streaming_unpack_snapshot( - archive: &mut Archive, + archive: Archive, ledger_dir: &Path, account_paths: &[PathBuf], - parallel_selector: Option, sender: &crossbeam_channel::Sender, ) -> Result<()> { unpack_snapshot_with_processors( archive, ledger_dir, account_paths, - parallel_selector, |_, _| {}, |entry_path_buf| { if entry_path_buf.is_file() { @@ -363,10 +568,9 @@ pub fn streaming_unpack_snapshot( } fn unpack_snapshot_with_processors( - archive: &mut Archive, + archive: Archive, ledger_dir: &Path, account_paths: &[PathBuf], - parallel_selector: Option, mut accounts_path_processor: F, entry_processor: G, ) -> Result<()> @@ -376,7 +580,6 @@ where G: Fn(PathBuf), { assert!(!account_paths.is_empty()); - let mut i = 0; unpack_archive( archive, @@ -385,12 +588,6 @@ where MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT, |parts, kind| { if is_valid_snapshot_archive_entry(parts, kind) { - i += 1; - if let Some(parallel_selector) = ¶llel_selector { - if !parallel_selector.select_index(i - 1) { - return UnpackPath::Ignore; - } - 
}; if let ["accounts", file] = parts { // Randomly distribute the accounts files about the available `account_paths`, let path_index = thread_rng().gen_range(0..account_paths.len()); @@ -506,12 +703,8 @@ pub fn unpack_genesis_archive( fs::create_dir_all(destination_dir)?; let tar_bz2 = File::open(archive_filename)?; let tar = BzDecoder::new(BufReader::new(tar_bz2)); - let mut archive = Archive::new(tar); - unpack_genesis( - &mut archive, - destination_dir, - max_genesis_archive_unpacked_size, - )?; + let archive = Archive::new(tar); + unpack_genesis(archive, destination_dir, max_genesis_archive_unpacked_size)?; info!( "Extracted {:?} in {:?}", archive_filename, @@ -521,7 +714,7 @@ pub fn unpack_genesis_archive( } fn unpack_genesis( - archive: &mut Archive, + archive: Archive, unpack_dir: &Path, max_genesis_archive_unpacked_size: u64, ) -> Result<()> { @@ -789,14 +982,14 @@ mod tests { fn with_finalize_and_unpack(archive: tar::Builder>, checker: C) -> Result<()> where - C: Fn(&mut Archive>, &Path) -> Result<()>, + C: Fn(Archive>, &Path) -> Result<()>, { let data = archive.into_inner().unwrap(); let reader = BufReader::new(&data[..]); - let mut archive: Archive> = Archive::new(reader); + let archive: Archive> = Archive::new(reader); let temp_dir = tempfile::TempDir::new().unwrap(); - checker(&mut archive, temp_dir.path())?; + checker(archive, temp_dir.path())?; // Check that there is no bad permissions preventing deletion. 
let result = temp_dir.close(); assert_matches!(result, Ok(())); @@ -805,7 +998,7 @@ mod tests { fn finalize_and_unpack_snapshot(archive: tar::Builder>) -> Result<()> { with_finalize_and_unpack(archive, |a, b| { - unpack_snapshot_with_processors(a, b, &[PathBuf::new()], None, |_, _| {}, |_| {}) + unpack_snapshot_with_processors(a, b, &[PathBuf::new()], |_, _| {}, |_| {}).map(|_| ()) }) } @@ -947,7 +1140,7 @@ mod tests { let mut archive = Builder::new(Vec::new()); archive.append(&header, data).unwrap(); - with_finalize_and_unpack(archive, |unpacking_archive, path| { + with_finalize_and_unpack(archive, |mut unpacking_archive, path| { for entry in unpacking_archive.entries()? { if !entry?.unpack_in(path)? { return Err(UnpackError::Archive("failed!".to_string())); @@ -1060,7 +1253,6 @@ mod tests { ar, tmp, &[tmp.join("accounts_dest")], - None, |_, _| {}, |path| assert_eq!(path, tmp.join("accounts_dest/123.456")), ) diff --git a/accounts-db/src/io_uring/memory.rs b/accounts-db/src/io_uring/memory.rs new file mode 100644 index 00000000000000..f7377b689ea9c9 --- /dev/null +++ b/accounts-db/src/io_uring/memory.rs @@ -0,0 +1,168 @@ +use std::{ + ops::{Deref, DerefMut}, + ptr::{self, NonNull}, + slice, +}; + +pub enum LargeBuffer { + Vec(Vec), + HugeTable(PageAlignedMemory), +} + +impl Deref for LargeBuffer { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + match self { + Self::Vec(buf) => buf.as_slice(), + Self::HugeTable(mem) => mem.deref(), + } + } +} + +impl DerefMut for LargeBuffer { + fn deref_mut(&mut self) -> &mut Self::Target { + match self { + Self::Vec(buf) => buf.as_mut_slice(), + Self::HugeTable(ref mut mem) => mem.deref_mut(), + } + } +} + +impl AsMut<[u8]> for LargeBuffer { + fn as_mut(&mut self) -> &mut [u8] { + match self { + Self::Vec(vec) => vec.as_mut_slice(), + LargeBuffer::HugeTable(ref mut mem) => mem, + } + } +} + +impl LargeBuffer { + /// Allocare memory buffer optimized for io_uring operations, i.e. 
+ /// using HugeTable when it is available on the host. + pub fn new(size: usize) -> Self { + if size > PageAlignedMemory::page_size() { + if let Ok(alloc) = PageAlignedMemory::alloc_huge_table(size) { + log::info!("obtained hugetable io_uring buffer (len={size})"); + return Self::HugeTable(alloc); + } + } + Self::Vec(vec![0; size]) + } +} + +#[derive(Debug)] +struct AllocError; + +pub struct PageAlignedMemory { + ptr: NonNull, + len: usize, +} + +impl PageAlignedMemory { + fn alloc_huge_table(memory_size: usize) -> Result { + let page_size = Self::page_size(); + debug_assert!(memory_size.is_power_of_two()); + debug_assert!(page_size.is_power_of_two()); + let aligned_size = memory_size.next_multiple_of(page_size); + + // Safety: + // doing an ANONYMOUS alloc. addr=NULL is ok, fd is not used. + let ptr = unsafe { + libc::mmap( + ptr::null_mut(), + aligned_size, + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | libc::MAP_HUGETLB, + -1, + 0, + ) + }; + + if ptr == libc::MAP_FAILED { + return Err(AllocError); + } + + Ok(Self { + ptr: NonNull::new(ptr as *mut u8).ok_or(AllocError)?, + len: aligned_size, + }) + } + + fn page_size() -> usize { + // Safety: just a libc wrapper + unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize } + } +} + +impl Drop for PageAlignedMemory { + fn drop(&mut self) { + // Safety: + // ptr is a valid pointer returned by mmap + unsafe { + libc::munmap(self.ptr.as_ptr() as *mut libc::c_void, self.len); + } + } +} + +impl Deref for PageAlignedMemory { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } + } +} + +impl DerefMut for PageAlignedMemory { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } + } +} + +/// Fixed mutable view into externally allocated bytes buffer +/// +/// It is an unsafe (no lifetime tracking) equivalent of `&mut [u8]` +pub struct BorrowedBytesMut { + 
ptr: *mut u8, + size: usize, +} + +impl BorrowedBytesMut { + pub const fn empty() -> Self { + Self { + ptr: std::ptr::null_mut(), + size: 0, + } + } + + pub fn from_mut_slice(buf: &mut [u8]) -> Self { + Self { + ptr: buf.as_mut_ptr(), + size: buf.len(), + } + } + + pub fn as_mut_ptr(&self) -> *mut u8 { + self.ptr + } + + pub fn len(&self) -> usize { + self.size + } + + /// Return a clone of `self` reduced to specified `size` + pub fn sub_buf_to(&self, size: usize) -> Self { + assert!(size <= self.size); + Self { + ptr: self.ptr, + size, + } + } +} + +impl AsRef<[u8]> for BorrowedBytesMut { + fn as_ref(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.ptr, self.size) } + } +} diff --git a/accounts-db/src/io_uring/mod.rs b/accounts-db/src/io_uring/mod.rs index 91e8c4fe9673fb..d42d5cf7c67686 100644 --- a/accounts-db/src/io_uring/mod.rs +++ b/accounts-db/src/io_uring/mod.rs @@ -1,3 +1,5 @@ #![cfg(target_os = "linux")] pub mod dir_remover; +pub mod memory; +pub mod sequential_file_reader; diff --git a/accounts-db/src/io_uring/sequential_file_reader.rs b/accounts-db/src/io_uring/sequential_file_reader.rs new file mode 100644 index 00000000000000..e5d828a74e095c --- /dev/null +++ b/accounts-db/src/io_uring/sequential_file_reader.rs @@ -0,0 +1,507 @@ +use { + crate::io_uring::memory::{BorrowedBytesMut, LargeBuffer}, + agave_io_uring::{Completion, Ring, RingOp}, + io_uring::{opcode, squeue, types, IoUring}, + std::{ + fs::File, + io::{self, BufRead, Cursor, Read}, + mem, + os::fd::{AsRawFd as _, RawFd}, + path::Path, + }, +}; + +const DEFAULT_READ_SIZE: usize = 1024 * 1024; +#[allow(dead_code)] +const DEFAULT_BUFFER_SIZE: usize = 64 * DEFAULT_READ_SIZE; +const SQPOLL_IDLE_TIMEOUT: u32 = 50; +const MAX_IOWQ_WORKERS: u32 = 4; + +// Based on Linux +const IO_PRIO_CLASS_SHIFT: u16 = 13; +const IO_PRIO_CLASS_BE: u16 = 2; +const IO_PRIO_LEVEL_HIGHEST: u16 = 0; +const IO_PRIO_BE_HIGHEST: u16 = IO_PRIO_CLASS_BE << IO_PRIO_CLASS_SHIFT | IO_PRIO_LEVEL_HIGHEST; + +// We register 
fixed buffers in chunks of up to 1GB as this is faster than registering many +// `read_capacity` buffers. Registering fixed buffers saves the kernel some work in +// checking/mapping/unmapping buffers for each read operation. +const FIXED_BUFFER_LEN: usize = 1024 * 1024 * 1024; + +/// Reader for non-seekable files. +/// +/// Implements read-ahead using io_uring. +pub struct SequentialFileReader { + // Note: state is tied to `backing_buffer` and contains unsafe pointer references to it + inner: Ring, + /// Owned buffer used across lifespan of `inner` (should get dropped last) + #[allow(dead_code)] + backing_buffer: B, +} + +impl SequentialFileReader { + /// Create a new `SequentialFileReader` for the given `path` using internally allocated + /// large buffer and default read size. + /// + /// See [SequentialFileReader::with_buffer] for more information. + #[allow(dead_code)] + pub fn new(path: impl AsRef) -> io::Result { + Self::with_capacity(DEFAULT_BUFFER_SIZE, path) + } + + /// Create a new `SequentialFileReader` for the given `path` using internally allocated + /// buffer of specified `buf_size` and default read size. + pub fn with_capacity(buf_size: usize, path: impl AsRef) -> io::Result { + Self::with_buffer(path, LargeBuffer::new(buf_size), DEFAULT_READ_SIZE) + } +} + +/// Holds the state of the reader. +struct SequentialFileReaderState { + file: File, + read_capacity: usize, + offset: usize, + eof_buf_index: Option, + buffers: Vec, + current_buf: usize, +} + +impl> SequentialFileReader { + /// Create a new `SequentialFileReader` for the given file using provided backing `buffer`. + /// + /// `buffer` is the internal buffer used for reading. It must be at least `read_capacity` long. + /// The reader will execute multiple `read_capacity` sized reads in parallel to fill the buffer. 
+ pub fn with_buffer( + path: impl AsRef, + mut buffer: B, + read_capacity: usize, + ) -> io::Result { + let buf_len = buffer.as_mut().len(); + + // Let submission queue hold half of buffers before we explicitly syscall + // to submit them for reading. + let ring_qsize = (buf_len / read_capacity / 2).max(1) as u32; + let ring = IoUring::builder() + .setup_sqpoll(SQPOLL_IDLE_TIMEOUT) + .build(ring_qsize)?; + ring.submitter() + .register_iowq_max_workers(&mut [MAX_IOWQ_WORKERS, 0])?; + Self::with_buffer_and_ring(buffer, ring, path, read_capacity) + } + + /// Create a new `SequentialFileReader` for the given file, using a custom + /// ring instance. + fn with_buffer_and_ring( + mut backing_buffer: B, + ring: IoUring, + path: impl AsRef, + read_capacity: usize, + ) -> io::Result { + let buffer = backing_buffer.as_mut(); + assert!(buffer.len() >= read_capacity, "buffer too small"); + assert!( + buffer.len() % read_capacity == 0, + "buffer size must be a multiple of read_capacity" + ); + + // Split the buffer into `read_capacity` sized chunks. + let buf_start = buffer.as_ptr() as usize; + let buffers = buffer + .chunks_exact_mut(read_capacity) + .map(|buf| { + let io_buf_index = (buf.as_ptr() as usize - buf_start) / FIXED_BUFFER_LEN; + ReadBufState::Uninit { + io_buf_index, + buf: BorrowedBytesMut::from_mut_slice(buf), + } + }) + .collect::>(); + + let file = std::os::unix::fs::OpenOptionsExt::custom_flags( + std::fs::OpenOptions::new().read(true), + libc::O_NOATIME, + ) + .open(path)?; + + let ring = Ring::new( + ring, + SequentialFileReaderState { + file, + read_capacity, + buffers, + offset: 0, + eof_buf_index: None, + current_buf: 0, + }, + ); + let iovecs = buffer + .chunks(FIXED_BUFFER_LEN) + .map(|buf| libc::iovec { + iov_base: buf.as_ptr() as _, + iov_len: buf.len(), + }) + .collect::>(); + // Safety: + // The iovecs point to a buffer which is guaranteed to be valid for the + // lifetime of the reader + unsafe { ring.register_buffers(&iovecs)? 
}; + + let mut reader = Self { + inner: ring, + backing_buffer, + }; + + // Start reading all buffers. + for i in 0..reader.inner.context().buffers.len() { + reader.start_reading_buf(i)?; + } + // Make sure work is started in case submission queue is large and we + // never submitted work when adding buffers. + reader.inner.submit()?; + + Ok(reader) + } + + /// Start reading into the buffer at `index`. + /// + /// This is called at start and as soon as a buffer is fully consumed by BufRead::fill_buf(). + /// + /// Reads [state.offset, state.offset + state.read_capacity) from the file into + /// state.buffers[index]. Once a read is complete, ReadOp::complete(state) is called to update + /// the state. + fn start_reading_buf(&mut self, index: usize) -> io::Result<()> { + let SequentialFileReaderState { + buffers, + current_buf: _, + file, + offset, + read_capacity, + eof_buf_index: _, + } = &mut self.inner.context_mut(); + let read_buf = mem::replace(&mut buffers[index], ReadBufState::Reading); + match read_buf { + ReadBufState::Uninit { buf, io_buf_index } => { + let op = ReadOp { + fd: file.as_raw_fd(), + buf, + buf_off: 0, + io_buf_index, + file_off: *offset, + read_len: *read_capacity, + reader_buf_index: index, + }; + + // We always advance by `read_capacity`. If we get a short read, we submit a new + // read for the remaining data. See ReadOp::complete(). + *offset += *read_capacity; + + // Safety: + // The op points to a buffer which is guaranteed to be valid for + // the lifetime of the operation + self.inner.push(op)? + } + _ => unreachable!("called start_reading_buf on a non-empty buffer"), + } + Ok(()) + } +} + +// BufRead requires Read, but we never really use the Read interface. +impl> Read for SequentialFileReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let available = self.fill_buf()?; + if available.is_empty() { + return Ok(0); // EOF. 
+ } + + let bytes_to_read = available.len().min(buf.len()); + buf[..bytes_to_read].copy_from_slice(&available[..bytes_to_read]); + self.consume(bytes_to_read); + Ok(bytes_to_read) + } +} + +impl> BufRead for SequentialFileReader { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + let _have_data = loop { + let state = self.inner.context_mut(); + let num_buffers = state.buffers.len(); + let read_buf = &mut state.buffers[state.current_buf]; + match read_buf { + ReadBufState::Full { + ref mut cursor, + io_buf_index, + } => { + if !cursor.fill_buf()?.is_empty() { + // we have some data available + break true; + } + let index = state.current_buf; + + if let Some(eof_index) = state.eof_buf_index { + if eof_index == index { + // This is the last filled buf for the whole file + return Ok(&[]); + } + // Some other buffer encountered EOF: move on, but don't issue new read. + state.current_buf = (state.current_buf + 1) % num_buffers; + } else { + // we have finished consuming this buffer, queue the next read + let cursor = mem::replace(cursor, Cursor::new(BorrowedBytesMut::empty())); + let buf = cursor.into_inner(); + + // The very last read when we hit EOF could return less than `read_capacity`, in + // which case what's in the cursor is shorter than `read_capacity` and for + // strict correctness we should reset the length. + // + // Note though that once we hit EOF we don't queue any more reads, so even if we + // didn't reset the length it wouldn't matter. + debug_assert!(buf.len() == state.read_capacity); + + state.buffers[index] = ReadBufState::Uninit { + buf, + io_buf_index: *io_buf_index, + }; + state.current_buf = (state.current_buf + 1) % num_buffers; + + self.start_reading_buf(index)?; + } + + // move to the next buffer and check again whether we have data + continue; + } + ReadBufState::Uninit { .. 
} => unreachable!("should be initialized"), + _ => break false, + } + }; + + loop { + self.inner.process_completions()?; + let state = self.inner.context(); + + match &state.buffers[state.current_buf] { + ReadBufState::Full { .. } => break, + ReadBufState::Uninit { .. } => unreachable!("should be initialized"), + // Still no data, wait for more completions. + ReadBufState::Reading => (), + } + } + + // At this point we must have data or be at EOF. + let state = self.inner.context_mut(); + match &mut state.buffers[state.current_buf] { + ReadBufState::Full { cursor, .. } => Ok(cursor.fill_buf()?), + // after the loop above we either have some data or we must be at EOF + _ => unreachable!(), + } + } + + fn consume(&mut self, amt: usize) { + let state = self.inner.context_mut(); + match &mut state.buffers[state.current_buf] { + ReadBufState::Full { cursor, .. } => cursor.consume(amt), + _ => assert_eq!(amt, 0), + } + } +} + +enum ReadBufState { + /// The buffer is pending submission to read queue (on initialization and + /// in transition from `Full` to `Reading`). + Uninit { + buf: BorrowedBytesMut, + io_buf_index: usize, + }, + /// The buffer is currently being read and there's a corresponding ReadOp in + /// the ring. + Reading, + /// The buffer is filled and ready to be consumed. + Full { + cursor: Cursor, + io_buf_index: usize, + }, +} + +impl std::fmt::Debug for ReadBufState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Uninit { + buf: _, + io_buf_index, + } => f + .debug_struct("Uninit") + .field("io_buf_index", io_buf_index) + .finish(), + Self::Reading => write!(f, "Reading"), + Self::Full { + cursor: _, + io_buf_index, + } => f + .debug_struct("Full") + .field("io_buf_index", io_buf_index) + .finish(), + } + } +} + +struct ReadOp { + fd: RawFd, + buf: BorrowedBytesMut, + /// This is the offset inside the buffer. 
It's typically 0, but can be non-zero if a previous + /// read returned less data than requested (because of EINTR or whatever) and we submitted a new + /// read for the remaining data. + buf_off: usize, + /// The index of the fixed buffer in the ring. See register_buffers(). + io_buf_index: usize, + /// The offset in the file. + file_off: usize, + /// The length of the read. This is typically `read_capacity` but can be less if a previous read + /// returned less data than requested. + read_len: usize, + /// This is the index of the buffer in the reader's state. It's used to update the state once the + /// read completes. + reader_buf_index: usize, +} + +impl std::fmt::Debug for ReadOp { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ReadOp") + .field("fd", &self.fd) + .field("buf_off", &self.buf_off) + .field("io_buf_index", &self.io_buf_index) + .field("file_off", &self.file_off) + .field("read_len", &self.read_len) + .field("reader_buf_index", &self.reader_buf_index) + .finish() + } +} + +impl RingOp for ReadOp { + fn entry(&mut self) -> squeue::Entry { + let ReadOp { + fd, + buf, + buf_off, + io_buf_index, + file_off, + read_len, + reader_buf_index: _, + } = self; + debug_assert!(*buf_off + *read_len <= buf.len()); + opcode::ReadFixed::new( + types::Fd(*fd), + // Safety: we assert that the buffer is large enough to hold the read. + unsafe { buf.as_mut_ptr().byte_add(*buf_off) }, + *read_len as u32, + *io_buf_index as u16, + ) + .offset(*file_off as u64) + .ioprio(IO_PRIO_BE_HIGHEST) + .build() + } + + fn complete( + &mut self, + completion: &mut Completion, + res: io::Result, + ) -> io::Result<()> { + let ReadOp { + fd, + buf, + buf_off, + io_buf_index, + file_off, + read_len, + reader_buf_index, + } = self; + let reader_state = completion.context_mut(); + + let last_read_len = res? 
as usize; + if last_read_len == 0 { + reader_state.eof_buf_index = Some(*reader_buf_index); + } + + let total_read_len = *buf_off + last_read_len; + + if last_read_len > 0 && last_read_len < *read_len { + // Partial read, retry the op with updated offsets + let op: ReadOp = ReadOp { + fd: *fd, + buf: buf.sub_buf_to(buf.len()), // Still use the full buf + buf_off: total_read_len, + io_buf_index: *io_buf_index, + file_off: *file_off + last_read_len, + read_len: *read_len - last_read_len, + reader_buf_index: *reader_buf_index, + }; + // Safety: + // The op points to a buffer which is guaranteed to be valid for the + // lifetime of the operation + completion.push(op); + } else { + reader_state.buffers[*reader_buf_index] = ReadBufState::Full { + cursor: Cursor::new(buf.sub_buf_to(total_read_len)), + io_buf_index: *io_buf_index, + }; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use {super::*, tempfile::NamedTempFile}; + + fn check_reading_file(file_size: usize, backing_buffer_size: usize, read_capacity: usize) { + let pattern: Vec = (0..251).collect(); + + // Create a temp file and write the pattern to it repeatedly + let mut temp_file = NamedTempFile::new().unwrap(); + for _ in 0..file_size / pattern.len() { + io::Write::write_all(&mut temp_file, &pattern).unwrap(); + } + io::Write::write_all(&mut temp_file, &pattern[..file_size % pattern.len()]).unwrap(); + + let buf = vec![0; backing_buffer_size]; + let mut reader = + SequentialFileReader::with_buffer(temp_file.path(), buf, read_capacity).unwrap(); + + // Read contents from the reader and verify length + let mut all_read_data = Vec::new(); + reader.read_to_end(&mut all_read_data).unwrap(); + assert_eq!(all_read_data.len(), file_size); + + // Verify the contents + for (i, byte) in all_read_data.iter().enumerate() { + assert_eq!(*byte, pattern[i % pattern.len()], "Mismatch - pos {}", i); + } + } + + /// Test with buffer larger than the whole file + #[test] + fn test_reading_small_file() { + 
check_reading_file(2500, 4096, 1024); + check_reading_file(2500, 4096, 2048); + check_reading_file(2500, 4096, 4096); + } + + /// Test with buffer smaller than the whole file + #[test] + fn test_reading_file_in_chunks() { + check_reading_file(25_000, 16384, 1024); + check_reading_file(25_000, 4096, 1024); + check_reading_file(25_000, 4096, 2048); + check_reading_file(25_000, 4096, 4096); + } + + /// Test with buffer much smaller than the whole file + #[test] + fn test_reading_large_file() { + check_reading_file(250_000, 32768, 1024); + check_reading_file(250_000, 16384, 1024); + check_reading_file(250_000, 4096, 1024); + check_reading_file(250_000, 4096, 2048); + check_reading_file(250_000, 4096, 4096); + } +} diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 2bbfe897c9018d..d74acd3b447ff4 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -41,7 +41,6 @@ pub mod read_only_accounts_cache; #[cfg(not(feature = "dev-context-only-utils"))] mod read_only_accounts_cache; mod rolling_bit_field; -pub mod shared_buffer_reader; pub mod sorted_storages; pub mod stake_rewards; pub mod storable_accounts; @@ -50,6 +49,7 @@ pub mod utils; mod verify_accounts_hash_in_background; pub mod waitable_condvar; +pub use buffered_reader::large_file_buf_reader; // the accounts-hash-cache-tool needs access to these types pub use { accounts_hash::CalculateHashIntermediate as CacheHashDataFileEntry, diff --git a/accounts-db/src/shared_buffer_reader.rs b/accounts-db/src/shared_buffer_reader.rs deleted file mode 100644 index 5542c189819d7c..00000000000000 --- a/accounts-db/src/shared_buffer_reader.rs +++ /dev/null @@ -1,913 +0,0 @@ -//! SharedBuffer is given a Reader and SharedBufferReader implements the Reader trait. -//! SharedBuffer reads ahead in the underlying file and saves the data. -//! SharedBufferReaders can be created for the buffer and independently keep track of each reader's read location. -//! 
The background reader keeps track of the progress of each client. After data has been read by all readers, -//! the buffer is recycled and reading ahead continues. -//! A primary use case is the underlying reader being decompressing a file, which can be computationally expensive. -//! The clients of SharedBufferReaders could be parallel instances which need access to the decompressed data. -use { - crate::waitable_condvar::WaitableCondvar, - log::*, - solana_measure::measure::Measure, - std::{ - io::*, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, Mutex, RwLock, - }, - thread::{Builder, JoinHandle}, - time::Duration, - }, -}; - -// tunable parameters: -// # bytes allocated and populated by reading ahead -const TOTAL_BUFFER_BUDGET_DEFAULT: usize = 2_000_000_000; -// data is read-ahead and saved in chunks of this many bytes -const CHUNK_SIZE_DEFAULT: usize = 100_000_000; - -type OneSharedBuffer = Arc>; - -struct SharedBufferInternal { - bg_reader_data: Arc, - - bg_reader_join_handle: Mutex>>, - - // Keep track of the next read location per outstanding client. - // index is client's my_client_index. - // Value at index is index into buffers where that client is currently reading. - // Any buffer at index < min(clients) can be recycled or destroyed. - clients: RwLock>, - - // unpacking callers read from 'data'. newly_read_data is transferred to 'data when 'data' is exhausted. - // This minimizes lock contention since bg file reader has to have almost constant write access. 
- data: RwLock>, - - // it is convenient to have one of these around - empty_buffer: OneSharedBuffer, -} - -pub struct SharedBuffer { - instance: Arc, -} - -impl SharedBuffer { - pub fn new(reader: T) -> Self { - Self::new_with_sizes(TOTAL_BUFFER_BUDGET_DEFAULT, CHUNK_SIZE_DEFAULT, reader) - } - fn new_with_sizes( - total_buffer_budget: usize, - chunk_size: usize, - reader: T, - ) -> Self { - assert!(total_buffer_budget > 0); - assert!(chunk_size > 0); - let instance = SharedBufferInternal { - bg_reader_data: Arc::new(SharedBufferBgReader::new()), - data: RwLock::new(vec![OneSharedBuffer::default()]), // initialize with 1 vector of empty data at data[0] - - // default values - bg_reader_join_handle: Mutex::default(), - clients: RwLock::default(), - empty_buffer: OneSharedBuffer::default(), - }; - let instance = Arc::new(instance); - let bg_reader_data = instance.bg_reader_data.clone(); - - let handle = Builder::new() - .name("solCompFileRead".to_string()) - .spawn(move || { - // importantly, this thread does NOT hold a refcount on the arc of 'instance' - bg_reader_data.read_entire_file_in_bg(reader, total_buffer_budget, chunk_size); - }); - *instance.bg_reader_join_handle.lock().unwrap() = Some(handle.unwrap()); - Self { instance } - } -} - -pub struct SharedBufferReader { - instance: Arc, - my_client_index: usize, - // index in 'instance' of the current buffer this reader is reading from. - // The current buffer is referenced from 'current_data'. - // Until we exhaust this buffer, we don't need to get a lock to read from this. 
- current_buffer_index: usize, - // the index within current_data where we will next read - index_in_current_data: usize, - current_data: OneSharedBuffer, - - // convenient to have access to - empty_buffer: OneSharedBuffer, -} - -impl Drop for SharedBufferInternal { - fn drop(&mut self) { - if let Some(handle) = self.bg_reader_join_handle.lock().unwrap().take() { - self.bg_reader_data.stop.store(true, Ordering::Relaxed); - handle.join().unwrap(); - } - } -} - -impl Drop for SharedBufferReader { - fn drop(&mut self) { - self.client_done_reading(); - } -} - -#[derive(Debug)] -struct SharedBufferBgReader { - stop: AtomicBool, - // error encountered during read - error: RwLock>, - // bg thread reads to 'newly_read_data' and signals - newly_read_data: RwLock>, - // set when newly_read_data gets new data written to it and can be transferred - newly_read_data_signal: WaitableCondvar, - - // currently available set of buffers for bg to read into - // during operation, this is exhausted as the bg reads ahead - // As all clients are done with an earlier buffer, it is recycled by being put back into this vec for the bg thread to pull out. - buffers: RwLock>, - // signaled when a new buffer is added to buffers. This throttles the bg reading. 
- new_buffer_signal: WaitableCondvar, - - bg_eof_reached: AtomicBool, -} - -impl SharedBufferBgReader { - fn new() -> Self { - SharedBufferBgReader { - buffers: RwLock::new(vec![]), - error: RwLock::new(Ok(0)), - - // easy defaults - stop: AtomicBool::new(false), - newly_read_data: RwLock::default(), - newly_read_data_signal: WaitableCondvar::default(), - new_buffer_signal: WaitableCondvar::default(), - bg_eof_reached: AtomicBool::default(), - } - } - - fn default_wait_timeout() -> Duration { - Duration::from_millis(100) // short enough to be unnoticable in case of trouble, long enough for efficient waiting - } - fn wait_for_new_buffer(&self) -> bool { - self.new_buffer_signal - .wait_timeout(Self::default_wait_timeout()) - } - fn num_buffers(total_buffer_budget: usize, chunk_size: usize) -> usize { - std::cmp::max(1, total_buffer_budget / chunk_size) // at least 1 buffer - } - fn set_error(&self, error: std::io::Error) { - *self.error.write().unwrap() = Err(error); - self.newly_read_data_signal.notify_all(); // any client waiting for new data needs to wake up and check for errors - } - - // read ahead the entire file. - // This is governed by the supply of buffers. - // Buffers are likely limited to cap memory usage. - // A buffer is recycled after the last client finishes reading from it. - // When a buffer is available (initially or recycled), this code wakes up and reads into that buffer. - fn read_entire_file_in_bg( - &self, - mut reader: T, - total_buffer_budget: usize, - chunk_size: usize, - ) { - let now = std::time::Instant::now(); - let mut read_us = 0; - - let mut max_bytes_read = 0; - let mut wait_us = 0; - let mut total_bytes = 0; - let mut error = SharedBufferReader::default_error(); - let mut remaining_buffers_to_allocate = Self::num_buffers(total_buffer_budget, chunk_size); - loop { - if self.stop.load(Ordering::Relaxed) { - // unsure what error is most appropriate here. - // bg reader was told to stop. 
All clients need to see that as an error if they try to read. - self.set_error(std::io::Error::from(std::io::ErrorKind::TimedOut)); - break; - } - let mut buffers = self.buffers.write().unwrap(); - let buffer = buffers.pop(); - drop(buffers); - let mut dest_data = if let Some(dest_data) = buffer { - // assert that this should not result in a vector copy - // These are internal buffers and should not be held by anyone else. - assert_eq!(Arc::strong_count(&dest_data), 1); - dest_data - } else if remaining_buffers_to_allocate > 0 { - // we still haven't allocated all the buffers we are allowed to allocate - remaining_buffers_to_allocate -= 1; - Arc::new(vec![0; chunk_size]) - } else { - // nowhere to write, so wait for a buffer to become available - let mut wait_for_new_buffer = Measure::start("wait_for_new_buffer"); - self.wait_for_new_buffer(); - wait_for_new_buffer.stop(); - wait_us += wait_for_new_buffer.as_us(); - continue; // check stop, try to get a buffer again - }; - let target = Arc::make_mut(&mut dest_data); - let dest_size = target.len(); - - let mut bytes_read = 0; - let mut eof = false; - let mut error_received = false; - - while bytes_read < dest_size { - let mut time_read = Measure::start("read"); - // Read from underlying reader into the remaining range in dest_data - // Note that this read takes less time (up to 2x) if we read into the same static buffer location each call. - // But, we have to copy the data out later, so we choose to pay the price at read time to put the data where it is useful. - let result = reader.read(&mut target[bytes_read..]); - time_read.stop(); - read_us += time_read.as_us(); - match result { - Ok(size) => { - if size == 0 { - eof = true; - break; - } - total_bytes += size; - max_bytes_read = std::cmp::max(max_bytes_read, size); - bytes_read += size; - // loop to read some more. Underlying reader does not usually read all we ask for. 
- } - Err(err) => { - error_received = true; - error = err; - break; - } - } - } - - if bytes_read > 0 { - // store this buffer in the bg data list - target.truncate(bytes_read); - let mut data = self.newly_read_data.write().unwrap(); - data.push(dest_data); - drop(data); - self.newly_read_data_signal.notify_all(); - } - - if eof { - self.bg_eof_reached.store(true, Ordering::Relaxed); - self.newly_read_data_signal.notify_all(); // anyone waiting for new data needs to know that we reached eof - break; - } - - if error_received { - // do not ask for more data from 'reader'. We got an error and saved all the data we got before the error. - // but, wait to set error until we have added our buffer to newly_read_data - self.set_error(error); - break; - } - } - - info!( - "reading entire decompressed file took: {} us, bytes: {}, read_us: {}, waiting_for_buffer_us: {}, largest fetch: {}, error: {:?}", - now.elapsed().as_micros(), - total_bytes, - read_us, - wait_us, - max_bytes_read, - self.error.read().unwrap() - ); - } -} - -impl SharedBufferInternal { - fn wait_for_newly_read_data(&self) -> bool { - self.bg_reader_data - .newly_read_data_signal - .wait_timeout(SharedBufferBgReader::default_wait_timeout()) - } - // bg reader uses write lock on 'newly_read_data' each time a buffer is read or recycled - // client readers read from 'data' using read locks - // when all of 'data' has been exhausted by clients, 1 client needs to transfer from 'newly_read_data' to 'data' one time. - // returns true if any data was added to 'data' - fn transfer_data_from_bg(&self) -> bool { - let mut from_lock = self.bg_reader_data.newly_read_data.write().unwrap(); - if from_lock.is_empty() { - // no data available from bg - return false; - } - // grab all data from bg - let mut newly_read_data: Vec = std::mem::take(&mut *from_lock); - // append all data to fg - let mut to_lock = self.data.write().unwrap(); - // from_lock has to be held until we have the to_lock lock. 
Otherwise, we can race with another reader and append to to_lock out of order. - drop(from_lock); - to_lock.append(&mut newly_read_data); - true // data was transferred - } - fn has_reached_eof(&self) -> bool { - self.bg_reader_data.bg_eof_reached.load(Ordering::Relaxed) - } -} - -// only public methods are new and from trait Read -impl SharedBufferReader { - pub fn new(original_instance: &SharedBuffer) -> Self { - let original_instance = &original_instance.instance; - let current_buffer_index = 0; - let mut list = original_instance.clients.write().unwrap(); - let my_client_index = list.len(); - if my_client_index > 0 { - let current_min = list.iter().min().unwrap(); - if current_min > &0 { - drop(list); - panic!("SharedBufferReaders must all be created before the first one reads"); - } - } - list.push(current_buffer_index); - drop(list); - - Self { - instance: Arc::clone(original_instance), - my_client_index, - current_buffer_index, - index_in_current_data: 0, - // startup condition for our local reference to the buffer we want to read from. - // data[0] will always exist. It will be empty, But that is ok. Corresponds to current_buffer_index initial value of 0. - current_data: original_instance.data.read().unwrap()[0].clone(), - empty_buffer: original_instance.empty_buffer.clone(), - } - } - fn default_error() -> std::io::Error { - // AN error - std::io::Error::from(std::io::ErrorKind::TimedOut) - } - fn client_done_reading(&mut self) { - // has the effect of causing nobody to ever again wait on this reader's progress - self.update_client_index(usize::MAX); - } - - // this client will now be reading from current_buffer_index - // We may be able to recycle the buffer(s) this client may have been previously potentially using. 
- fn update_client_index(&mut self, new_buffer_index: usize) { - let previous_buffer_index = self.current_buffer_index; - self.current_buffer_index = new_buffer_index; - let client_index = self.my_client_index; - let mut indexes = self.instance.clients.write().unwrap(); - indexes[client_index] = new_buffer_index; - drop(indexes); - let mut new_min = *self.instance.clients.read().unwrap().iter().min().unwrap(); - // if new_min == usize::MAX, then every caller is done reading. We could shut down the bg reader and effectively drop everything. - new_min = std::cmp::min(new_min, self.instance.data.read().unwrap().len()); - - // if any buffer indexes are now no longer used by any readers, then this reader was the last reader holding onto some indexes. - if new_min > previous_buffer_index { - // if bg reader reached eof, there is no need to recycle any buffers and they can all be dropped - let eof = self.instance.has_reached_eof(); - - for recycle in previous_buffer_index..new_min { - let remove = { - let mut data = self.instance.data.write().unwrap(); - std::mem::replace(&mut data[recycle], self.empty_buffer.clone()) - }; - if remove.is_empty() { - continue; // another thread beat us swapping out this buffer, so nothing to recycle here - } - - if !eof { - // if !eof, recycle this buffer and notify waiting reader(s) - // if eof, just drop buffer this buffer since it isn't needed for reading anymore - self.instance - .bg_reader_data - .buffers - .write() - .unwrap() - .push(remove); - self.instance.bg_reader_data.new_buffer_signal.notify_all(); - // new buffer available for bg reader - } - } - } - } -} - -impl Read for SharedBufferReader { - // called many times by client to read small buffer lengths - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - let dest_len = buf.len(); - let mut offset_in_dest = 0; - - let mut eof_seen = false; - 'outer: while offset_in_dest < dest_len { - // this code is optimized for the common case where we can satisfy this entire read 
request from current_data without locks - let source = &*self.current_data; - - let remaining_source_len = source.len() - self.index_in_current_data; - let bytes_to_transfer = std::cmp::min(dest_len - offset_in_dest, remaining_source_len); - // copy what we can - buf[offset_in_dest..(offset_in_dest + bytes_to_transfer)].copy_from_slice( - &source - [self.index_in_current_data..(self.index_in_current_data + bytes_to_transfer)], - ); - self.index_in_current_data += bytes_to_transfer; - offset_in_dest += bytes_to_transfer; - - if offset_in_dest >= dest_len { - break; - } - - // we exhausted the current buffer - // increment current_buffer_index get the next buffer to continue reading - self.current_data = self.empty_buffer.clone(); // unref it so it can be recycled without copy - self.index_in_current_data = 0; - self.update_client_index(self.current_buffer_index + 1); - - let instance = &*self.instance; - let mut lock; - // hang out in this loop until the buffer we need is available - loop { - lock = instance.data.read().unwrap(); - if self.current_buffer_index < lock.len() { - break; - } - drop(lock); - - if self.instance.transfer_data_from_bg() { - continue; - } - - // another thread may have transferred data, so check again to see if we have data now - lock = instance.data.read().unwrap(); - if self.current_buffer_index < lock.len() { - break; - } - drop(lock); - - if eof_seen { - // eof detected on previous iteration, we have had a chance to read all data that was buffered, and there is not enough for us - break 'outer; - } - - // no data, we could not transfer, and still no data, so check for eof. - // If we got an eof, then we have to check again for data to make sure there isn't data now that we may be able to transfer or read. Our reading can lag behind the bg read ahead. 
- if instance.has_reached_eof() { - eof_seen = true; - continue; - } - - { - // Since the bg reader could not satisfy our read, now is a good time to check to see if the bg reader encountered an error. - // Note this is a write lock because we want to get the actual error detected and return it here and avoid races with other readers if we tried a read and then subsequent write lock. - // This would be simpler if I could clone an io error. - let mut error = instance.bg_reader_data.error.write().unwrap(); - if error.is_err() { - // replace the current error (with AN error instead of ok) - // return the original error - return std::mem::replace(&mut *error, Err(Self::default_error())); - } - } - - // no data to transfer, and file not finished, but no error, so wait for bg reader to read some more data - instance.wait_for_newly_read_data(); - } - - // refresh current_data inside the lock - self.current_data = Arc::clone(&lock[self.current_buffer_index]); - } - Ok(offset_in_dest) - } -} - -#[cfg(test)] -pub mod tests { - use { - super::*, - crossbeam_channel::{unbounded, Receiver}, - rayon::prelude::*, - }; - - type SimpleReaderReceiverType = Receiver<(Vec, Option)>; - struct SimpleReader { - pub receiver: SimpleReaderReceiverType, - pub data: Vec, - pub done: bool, - pub err: Option, - } - impl SimpleReader { - fn new(receiver: SimpleReaderReceiverType) -> Self { - Self { - receiver, - data: Vec::default(), - done: false, - err: None, - } - } - } - - impl Read for SimpleReader { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - if !self.done && self.data.is_empty() { - let (mut data, err) = self.receiver.recv().unwrap(); - if err.is_some() { - self.err = err; - } - if data.is_empty() { - self.done = true; - } else { - self.data.append(&mut data); - } - } - if self.err.is_some() { - return Err(self.err.take().unwrap()); - } - let len_request = buf.len(); - let len_data = self.data.len(); - let to_read = std::cmp::min(len_request, len_data); - 
buf[0..to_read].copy_from_slice(&self.data[0..to_read]); - self.data.drain(0..to_read); - Ok(to_read) - } - } - - #[test] - #[should_panic(expected = "total_buffer_budget > 0")] - fn test_shared_buffer_buffers_invalid() { - solana_logger::setup(); - let (_sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - SharedBuffer::new_with_sizes(0, 1, file); - } - - #[test] - #[should_panic(expected = "chunk_size > 0")] - fn test_shared_buffer_buffers_invalid2() { - solana_logger::setup(); - let (_sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - SharedBuffer::new_with_sizes(1, 0, file); - } - - #[test] - #[should_panic(expected = "SharedBufferReaders must all be created before the first one reads")] - fn test_shared_buffer_start_too_late() { - solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let mut data = Vec::new(); - let done_signal = vec![]; - - let sent = vec![1, 2, 3]; - let _ = sender.send((sent, None)); - let _ = sender.send((done_signal, None)); - assert!(reader.read_to_end(&mut data).is_ok()); - SharedBufferReader::new(&shared_buffer); // created after reader already read - } - - #[test] - fn test_shared_buffer_simple_read_to_end() { - solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let mut data = Vec::new(); - let done_signal = vec![]; - - let sent = vec![1, 2, 3]; - let _ = sender.send((sent.clone(), None)); - let _ = sender.send((done_signal, None)); - assert!(reader.read_to_end(&mut data).is_ok()); - assert_eq!(sent, data); - } - - fn get_error() -> std::io::Error { - std::io::Error::from(std::io::ErrorKind::WriteZero) - } - - #[test] - fn test_shared_buffer_simple_read() { - 
solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let done_signal = vec![]; - - let sent = vec![1, 2, 3]; - let mut data = vec![0; sent.len()]; - let _ = sender.send((sent.clone(), None)); - let _ = sender.send((done_signal, None)); - assert_eq!(reader.read(&mut data[..]).unwrap(), sent.len()); - assert_eq!(sent, data); - } - - #[test] - fn test_shared_buffer_error() { - solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let mut data = Vec::new(); - let done_signal = vec![]; - - let _ = sender.send((done_signal, Some(get_error()))); - assert_eq!( - reader.read_to_end(&mut data).unwrap_err().kind(), - get_error().kind() - ); - } - - #[test] - fn test_shared_buffer_2_errors() { - solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let mut reader2 = SharedBufferReader::new(&shared_buffer); - let mut data = Vec::new(); - let done_signal = vec![]; - - let _ = sender.send((done_signal, Some(get_error()))); - assert_eq!( - reader.read_to_end(&mut data).unwrap_err().kind(), - get_error().kind() - ); - // #2 will read 2nd, so should get default error, but still an error - assert_eq!( - reader2.read_to_end(&mut data).unwrap_err().kind(), - SharedBufferReader::default_error().kind() - ); - } - - #[test] - fn test_shared_buffer_2_errors_after_read() { - solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let mut reader2 = 
SharedBufferReader::new(&shared_buffer); - let mut data = Vec::new(); - let done_signal = vec![]; - - // send some data - let sent = vec![1, 2, 3]; - let _ = sender.send((sent.clone(), None)); - // send an error - let _ = sender.send((done_signal, Some(get_error()))); - assert_eq!( - reader.read_to_end(&mut data).unwrap_err().kind(), - get_error().kind() - ); - // #2 will read valid bytes first and succeed, then get error - let mut data = vec![0; sent.len()]; - // this read should succeed because it was prior to error being received by bg reader - assert_eq!(reader2.read(&mut data[..]).unwrap(), sent.len(),); - assert_eq!(sent, data); - assert_eq!( - reader2.read_to_end(&mut data).unwrap_err().kind(), - SharedBufferReader::default_error().kind() - ); - } - - #[test] - fn test_shared_buffer_2_errors_after_read2() { - solana_logger::setup(); - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = SharedBuffer::new(file); - let mut reader = SharedBufferReader::new(&shared_buffer); - let mut reader2 = SharedBufferReader::new(&shared_buffer); - let mut data = Vec::new(); - let done_signal = vec![]; - - // send some data - let sent = vec![1, 2, 3]; - let _ = sender.send((sent.clone(), None)); - // send an error - let _ = sender.send((done_signal, Some(get_error()))); - assert_eq!( - reader.read_to_end(&mut data).unwrap_err().kind(), - get_error().kind() - ); - // #2 will read valid bytes first and succeed, then get error - let mut data = vec![0; sent.len()]; - // this read should succeed because it is reading data prior to error being received by bg reader - let expected_len = 1; - for i in 0..sent.len() { - let len = reader2.read(&mut data[i..=i]); - assert!(len.is_ok(), "{len:?}, progress: {i}"); - assert_eq!(len.unwrap(), expected_len, "progress: {i}"); - } - assert_eq!(sent, data); - assert_eq!( - reader2.read(&mut data[0..=0]).unwrap_err().kind(), - SharedBufferReader::default_error().kind() - ); - } - - // read either 
all or in specified block sizes - fn test_read_all( - reader: &mut SharedBufferReader, - individual_read_size: Option, - ) -> Vec { - let mut data = Vec::new(); - match individual_read_size { - Some(size) => { - loop { - let mut buffer = vec![0; size]; - let result = reader.read(&mut buffer[..]); - assert!(result.is_ok()); - let len = result.unwrap(); - if len == 0 { - break; // done reading - } - buffer.truncate(len); - data.append(&mut buffer); - } - } - None => { - let result = reader.read_to_end(&mut data); - assert!(result.is_ok()); - assert_eq!(result.unwrap(), data.len()); - } - } - data - } - - #[test] - fn test_shared_buffer_drop_reader2() { - let done_signal = vec![]; - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let budget_sz = 100; - let chunk_sz = 10; - let shared_buffer = SharedBuffer::new_with_sizes(budget_sz, chunk_sz, file); - let size = budget_sz * 2; - let mut reader = SharedBufferReader::new(&shared_buffer); - // with the Read trait, we don't know we are eof until we get Ok(0) from the underlying reader. - // This can't happen until we have enough space to store another chunk, thus we try to read another chunk and see the Ok(0) returned. 
- // Thus, we have to use size < budget_sz here instead of <= - let reader2 = SharedBufferReader::new(&shared_buffer); - - let sent = (0..size) - .map(|i| ((i + size) % 256) as u8) - .collect::>(); - - let _ = sender.send((sent.clone(), None)); - let _ = sender.send((done_signal, None)); - - // can't read all data because it is 2x the buffer budget - let mut data = vec![0; budget_sz]; - assert!(reader.read(&mut data[0..budget_sz]).is_ok()); - drop(reader2); - let mut rest = test_read_all(&mut reader, None); - data.append(&mut rest); - assert_eq!(sent, data); - } - - fn adjusted_buffer_size(total_buffer_budget: usize, chunk_size: usize) -> usize { - let num_buffers = SharedBufferBgReader::num_buffers(total_buffer_budget, chunk_size); - num_buffers * chunk_size - } - - #[test] - fn test_shared_buffer_sweep() { - solana_logger::setup(); - // try the inflection points with 1 to 3 readers, including a parallel reader - // a few different chunk sizes - for chunk_sz in [1, 2, 10] { - // same # of buffers as default - let equivalent_buffer_sz = - chunk_sz * (TOTAL_BUFFER_BUDGET_DEFAULT / CHUNK_SIZE_DEFAULT); - // 1 buffer, 2 buffers, - for budget_sz in [ - 1, - chunk_sz, - chunk_sz * 2, - equivalent_buffer_sz - 1, - equivalent_buffer_sz, - equivalent_buffer_sz * 2, - ] { - for read_sz in [0, 1, chunk_sz - 1, chunk_sz, chunk_sz + 1] { - let read_sz = if read_sz > 0 { Some(read_sz) } else { None }; - for reader_ct in 1..=3 { - for data_size in [ - 0, - 1, - chunk_sz - 1, - chunk_sz, - chunk_sz + 1, - chunk_sz * 2 - 1, - chunk_sz * 2, - chunk_sz * 2 + 1, - budget_sz - 1, - budget_sz, - budget_sz + 1, - budget_sz * 2, - budget_sz * 2 - 1, - budget_sz * 2 + 1, - ] { - let adjusted_budget_sz = adjusted_buffer_size(budget_sz, chunk_sz); - let done_signal = vec![]; - let (sender, receiver) = unbounded(); - let file = SimpleReader::new(receiver); - let shared_buffer = - SharedBuffer::new_with_sizes(budget_sz, chunk_sz, file); - let mut reader = 
SharedBufferReader::new(&shared_buffer); - // with the Read trait, we don't know we are eof until we get Ok(0) from the underlying reader. - // This can't happen until we have enough space to store another chunk, thus we try to read another chunk and see the Ok(0) returned. - // Thus, we have to use data_size < adjusted_budget_sz here instead of <= - let second_reader = reader_ct > 1 - && data_size < adjusted_budget_sz - && read_sz - .as_ref() - .map(|sz| sz < &adjusted_budget_sz) - .unwrap_or(true); - let reader2 = if second_reader { - Some(SharedBufferReader::new(&shared_buffer)) - } else { - None - }; - let sent = (0..data_size) - .map(|i| ((i + data_size) % 256) as u8) - .collect::>(); - - let parallel_reader = reader_ct > 2; - let handle = if parallel_reader { - // Avoid to create more than the number of threads available in the - // current rayon threadpool. Deadlock could happen otherwise. - let threads = std::cmp::min(8, rayon::current_num_threads()); - Some({ - let parallel = (0..threads) - .map(|_| { - // create before any reading starts - let reader_ = SharedBufferReader::new(&shared_buffer); - let sent_ = sent.clone(); - (reader_, sent_) - }) - .collect::>(); - - Builder::new() - .spawn(move || { - parallel.into_par_iter().for_each( - |(mut reader, sent)| { - let data = test_read_all(&mut reader, read_sz); - assert_eq!( - sent, - data, - "{:?}", - ( - chunk_sz, - budget_sz, - read_sz, - reader_ct, - data_size, - adjusted_budget_sz - ) - ); - }, - ) - }) - .unwrap() - }) - } else { - None - }; - drop(shared_buffer); // readers should work fine even if shared buffer is dropped - let _ = sender.send((sent.clone(), None)); - let _ = sender.send((done_signal, None)); - let data = test_read_all(&mut reader, read_sz); - assert_eq!( - sent, - data, - "{:?}", - ( - chunk_sz, - budget_sz, - read_sz, - reader_ct, - data_size, - adjusted_budget_sz - ) - ); - // a 2nd reader would stall us if we exceed the total buffer size - if second_reader { - // #2 will read 
valid bytes first and succeed, then get error - let data = test_read_all(&mut reader2.unwrap(), read_sz); - assert_eq!(sent, data); - } - if parallel_reader { - assert!(handle.unwrap().join().is_ok()); - } - } - } - } - } - } - } -} diff --git a/ci/docker-run.sh b/ci/docker-run.sh index 427ee7b319e1c9..503f8f51790505 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -86,6 +86,11 @@ if [[ -n $CI ]]; then ) fi fi + + # Disable seccomp to allow io_uring operations (https://github.com/moby/moby/pull/46762) + ARGS+=(--security-opt seccomp=unconfined) + # Adjust memlock limit to let io_uring register buffers + ARGS+=(--ulimit memlock=-1:-1) fi fi diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 727f10533d6691..3deeaf8e41669a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -643,7 +643,7 @@ checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", + "bitflags 1.2.1", "bytes", "futures-util", "http 0.2.12", @@ -795,9 +795,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.3.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bitflags" @@ -1152,7 +1152,7 @@ checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term", "atty", - "bitflags 1.3.2", + "bitflags 1.2.1", "strsim 0.8.0", "textwrap", "unicode-width 0.1.8", @@ -3575,7 +3575,7 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.10.0", + "security-framework 2.3.1", "security-framework-sys", "tempfile", ] @@ -4552,7 +4552,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" 
dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", ] [[package]] @@ -4561,7 +4561,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", ] [[package]] @@ -4998,11 +4998,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", "core-foundation 0.9.3", "core-foundation-sys", "libc", @@ -5434,6 +5434,7 @@ dependencies = [ "indexmap 2.9.0", "io-uring", "itertools 0.12.1", + "libc", "log", "lz4", "memmap2 0.9.5", @@ -5460,6 +5461,7 @@ dependencies = [ "solana-message", "solana-metrics", "solana-nohash-hasher", + "solana-perf", "solana-pubkey", "solana-rayon-threadlimit", "solana-rent-collector", @@ -10452,7 +10454,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225e483f02d0ad107168dc57381a8a40c3aeea6abe47f37506931f861643cfa8" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", "byteorder 1.5.0", "libc", "thiserror 1.0.69", @@ -10465,7 +10467,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", "core-foundation 0.9.3", "system-configuration-sys", ] diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 44a7084e9de82a..2505056d89bf1b 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -28,8 +28,7 @@ use { accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, accounts_hash::{AccountsDeltaHash, 
AccountsHash}, epoch_accounts_hash::EpochAccountsHash, - hardened_unpack::{self, ParallelSelector, UnpackError}, - shared_buffer_reader::{SharedBuffer, SharedBufferReader}, + hardened_unpack::{self, ArchiveChunker, BytesChannelReader, MultiBytes, UnpackError}, utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, }, solana_clock::{Epoch, Slot}, @@ -1598,7 +1597,7 @@ pub fn verify_and_unarchive_snapshots( incremental_snapshot_archive_info, )?; - let parallel_divisions = (num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT); + let num_worker_threads = (num_cpus::get() / 4).clamp(1, PARALLEL_UNTAR_READERS_DEFAULT); let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0)); let UnarchivedSnapshot { @@ -1615,7 +1614,7 @@ pub fn verify_and_unarchive_snapshots( "snapshot untar", account_paths, full_snapshot_archive_info.archive_format(), - parallel_divisions, + num_worker_threads, next_append_vec_id.clone(), storage_access, )?; @@ -1642,7 +1641,7 @@ pub fn verify_and_unarchive_snapshots( "incremental snapshot untar", account_paths, incremental_snapshot_archive_info.archive_format(), - parallel_divisions, + num_worker_threads, next_append_vec_id.clone(), storage_access, )?; @@ -1684,24 +1683,22 @@ pub fn verify_and_unarchive_snapshots( /// Spawns a thread for unpacking a snapshot fn spawn_unpack_snapshot_thread( + chunks_receiver: crossbeam_channel::Receiver, file_sender: Sender, account_paths: Arc>, ledger_dir: Arc, - mut archive: Archive, - parallel_selector: Option, thread_index: usize, -) -> JoinHandle<()> { +) -> JoinHandle> { Builder::new() .name(format!("solUnpkSnpsht{thread_index:02}")) .spawn(move || { hardened_unpack::streaming_unpack_snapshot( - &mut archive, + Archive::new(BytesChannelReader::new(chunks_receiver)), ledger_dir.as_path(), &account_paths, - parallel_selector, &file_sender, - ) - .unwrap(); + )?; + Ok(()) }) .unwrap() } @@ -1714,38 +1711,62 @@ fn streaming_unarchive_snapshot( snapshot_archive_path: PathBuf, 
archive_format: ArchiveFormat, num_threads: usize, -) -> Vec> { +) -> Vec>> { let account_paths = Arc::new(account_paths); let ledger_dir = Arc::new(ledger_dir); - let shared_buffer = untar_snapshot_create_shared_buffer(&snapshot_archive_path, archive_format); - // All shared buffer readers need to be created before the threads are spawned - let archives: Vec<_> = (0..num_threads) - .map(|_| { - let reader = SharedBufferReader::new(&shared_buffer); - Archive::new(reader) - }) - .collect(); + let mut handles = vec![]; - archives - .into_iter() - .enumerate() - .map(|(thread_index, archive)| { - let parallel_selector = Some(ParallelSelector { - index: thread_index, - divisions: num_threads, - }); + let (chunk_sender, chunk_receiver) = crossbeam_channel::bounded(num_threads * 2); + handles.push(spawn_archive_chunker_thread( + snapshot_archive_path, + archive_format, + chunk_sender, + )); - spawn_unpack_snapshot_thread( - file_sender.clone(), - account_paths.clone(), - ledger_dir.clone(), - archive, - parallel_selector, - thread_index, - ) + for thread_index in 0..num_threads { + handles.push(spawn_unpack_snapshot_thread( + chunk_receiver.clone(), + file_sender.clone(), + account_paths.clone(), + ledger_dir.clone(), + thread_index, + )) + } + + handles +} + +fn archive_chunker_from_path( + archive_path: &Path, + archive_format: ArchiveFormat, +) -> IoResult>>> { + const INPUT_READER_BUF_SIZE: usize = 128 * 1024 * 1024; + let buf_reader = solana_accounts_db::large_file_buf_reader(archive_path, INPUT_READER_BUF_SIZE) + .map_err(|err| { + IoError::other(format!( + "failed to open snapshot archive '{}': {err}", + archive_path.display(), + )) + })?; + let decompressor = ArchiveFormatDecompressor::new(archive_format, buf_reader)?; + Ok(ArchiveChunker::new(decompressor)) +} + +fn spawn_archive_chunker_thread( + archive_path: impl AsRef, + archive_format: ArchiveFormat, + chunk_sender: Sender, +) -> JoinHandle> { + let archive_path = archive_path.as_ref().to_path_buf(); + 
Builder::new() + .name("solTarDecompr".to_string()) + .spawn(move || { + let chunker = archive_chunker_from_path(&archive_path, archive_format)?; + chunker.decode_and_send_chunks(chunk_sender)?; + Ok(()) }) - .collect() + .unwrap() } /// Used to determine if a filename is structured like a version file, bank file, or storage file @@ -1779,7 +1800,7 @@ fn get_snapshot_file_kind(filename: &str) -> Option { /// This function will push append_vec files into a buffer until we receive the snapshot file fn get_version_and_snapshot_files( file_receiver: &Receiver, -) -> (PathBuf, PathBuf, Vec) { +) -> Result<(PathBuf, PathBuf, Vec)> { let mut append_vec_files = Vec::with_capacity(1024); let mut snapshot_version_path = None; let mut snapshot_file_path = None; @@ -1810,13 +1831,15 @@ fn get_version_and_snapshot_files( None => {} // do nothing for other kinds of files } } else { - panic!("did not receive snapshot file from unpacking threads"); + return Err(SnapshotError::RebuildStorages( + "did not receive snapshot file from unpacking threads".to_string(), + )); } } let snapshot_version_path = snapshot_version_path.unwrap(); let snapshot_file_path = snapshot_file_path.unwrap(); - (snapshot_version_path, snapshot_file_path, append_vec_files) + Ok((snapshot_version_path, snapshot_file_path, append_vec_files)) } /// Fields and information parsed from the snapshot. @@ -1831,7 +1854,7 @@ struct SnapshotFieldsBundle { /// `file_receiver`. 
fn snapshot_fields_from_files(file_receiver: &Receiver) -> Result { let (snapshot_version_path, snapshot_file_path, append_vec_files) = - get_version_and_snapshot_files(file_receiver); + get_version_and_snapshot_files(file_receiver)?; let snapshot_version_str = snapshot_version_from_file(snapshot_version_path)?; let snapshot_version = snapshot_version_str.parse().map_err(|err| { IoError::other(format!( @@ -1895,7 +1918,7 @@ fn unarchive_snapshot( measure_name: &'static str, account_paths: &[PathBuf], archive_format: ArchiveFormat, - parallel_divisions: usize, + num_untar_threads: usize, next_append_vec_id: Arc, storage_access: StorageAccess, ) -> Result { @@ -1905,52 +1928,58 @@ fn unarchive_snapshot( let unpacked_snapshots_dir = unpack_dir.path().join("snapshots"); let (file_sender, file_receiver) = crossbeam_channel::unbounded(); - streaming_unarchive_snapshot( + let unarchive_handles = streaming_unarchive_snapshot( file_sender, account_paths.to_vec(), unpack_dir.path().to_path_buf(), snapshot_archive_path.as_ref().to_path_buf(), archive_format, - parallel_divisions, + num_untar_threads, ); let num_rebuilder_threads = num_cpus::get_physical() - .saturating_sub(parallel_divisions) + .saturating_sub(num_untar_threads) .max(1); - let SnapshotFieldsBundle { - snapshot_version, - bank_fields, - accounts_db_fields, - append_vec_files, - .. - } = snapshot_fields_from_files(&file_receiver)?; - let (storage, measure_untar) = measure_time!( - SnapshotStorageRebuilder::rebuild_storage( - &accounts_db_fields, - append_vec_files, - file_receiver, - num_rebuilder_threads, - next_append_vec_id, - SnapshotFrom::Archive, - storage_access, - )?, - measure_name - ); - info!("{}", measure_untar); - - create_snapshot_meta_files_for_unarchived_snapshot(&unpack_dir)?; + let snapshot_result = snapshot_fields_from_files(&file_receiver).and_then( + |SnapshotFieldsBundle { + snapshot_version, + bank_fields, + accounts_db_fields, + append_vec_files, + .. 
+ }| { + let (storage, measure_untar) = measure_time!( + SnapshotStorageRebuilder::rebuild_storage( + &accounts_db_fields, + append_vec_files, + file_receiver, + num_rebuilder_threads, + next_append_vec_id, + SnapshotFrom::Archive, + storage_access, + )?, + measure_name + ); + info!("{}", measure_untar); + create_snapshot_meta_files_for_unarchived_snapshot(&unpack_dir)?; - Ok(UnarchivedSnapshot { - unpack_dir, - storage, - bank_fields, - accounts_db_fields, - unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion { - unpacked_snapshots_dir, - snapshot_version, + Ok(UnarchivedSnapshot { + unpack_dir, + storage, + bank_fields, + accounts_db_fields, + unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion { + unpacked_snapshots_dir, + snapshot_version, + }, + measure_untar, + }) }, - measure_untar, - }) + ); + for handle in unarchive_handles { + handle.join().unwrap()?; + } + snapshot_result } /// Streams snapshot dir files across channel @@ -2446,36 +2475,26 @@ pub fn purge_old_snapshot_archives( #[cfg(feature = "dev-context-only-utils")] fn unpack_snapshot_local( - shared_buffer: SharedBuffer, + snapshot_path: impl AsRef, + archive_format: ArchiveFormat, ledger_dir: &Path, account_paths: &[PathBuf], - parallel_divisions: usize, + num_threads: usize, ) -> Result { - assert!(parallel_divisions > 0); + assert!(num_threads > 0); - // allocate all readers before any readers start reading - let readers = (0..parallel_divisions) - .map(|_| SharedBufferReader::new(&shared_buffer)) - .collect::>(); + let (chunk_sender, chunk_receiver) = crossbeam_channel::bounded(num_threads); + let handle = spawn_archive_chunker_thread(snapshot_path, archive_format, chunk_sender); - // create 'parallel_divisions' # of parallel workers, each responsible for 1/parallel_divisions of all the files to extract. - let all_unpacked_append_vec_map = readers + // create 'num_threads' # of parallel workers, each receiving chunks of archive to extract. 
+ let all_unpacked_append_vec_map = (0..num_threads) .into_par_iter() - .enumerate() - .map(|(index, reader)| { - let parallel_selector = Some(ParallelSelector { - index, - divisions: parallel_divisions, - }); - let mut archive = Archive::new(reader); - hardened_unpack::unpack_snapshot( - &mut archive, - ledger_dir, - account_paths, - parallel_selector, - ) + .map(|_| { + let archive_subset = Archive::new(BytesChannelReader::new(chunk_receiver.clone())); + hardened_unpack::unpack_snapshot(archive_subset, ledger_dir, account_paths) }) .collect::>(); + handle.join().unwrap()?; let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); for h in all_unpacked_append_vec_map { @@ -2485,41 +2504,6 @@ fn unpack_snapshot_local( Ok(unpacked_append_vec_map) } -fn untar_snapshot_create_shared_buffer( - snapshot_tar: &Path, - archive_format: ArchiveFormat, -) -> SharedBuffer { - let open_file = || { - fs::File::open(snapshot_tar) - .map_err(|err| { - IoError::other(format!( - "failed to open snapshot archive '{}': {err}", - snapshot_tar.display(), - )) - }) - .unwrap() - }; - // Apply buffered reader for decoders that do not buffer internally. - match archive_format { - ArchiveFormat::TarZstd { .. 
} => { - SharedBuffer::new(zstd::stream::read::Decoder::new(open_file()).unwrap()) - } - ArchiveFormat::TarLz4 => SharedBuffer::new(lz4::Decoder::new(open_file()).unwrap()), - } -} - -#[cfg(feature = "dev-context-only-utils")] -fn untar_snapshot_in( - snapshot_tar: impl AsRef, - unpack_dir: &Path, - account_paths: &[PathBuf], - archive_format: ArchiveFormat, - parallel_divisions: usize, -) -> Result { - let shared_buffer = untar_snapshot_create_shared_buffer(snapshot_tar.as_ref(), archive_format); - unpack_snapshot_local(shared_buffer, unpack_dir, account_paths, parallel_divisions) -} - pub fn verify_unpacked_snapshots_dir_and_version( unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, ) -> Result<(SnapshotVersion, BankSnapshotInfo)> { @@ -2582,11 +2566,11 @@ pub fn verify_snapshot_archive( let temp_dir = tempfile::TempDir::new().unwrap(); let unpack_dir = temp_dir.path(); let unpack_account_dir = create_accounts_run_and_snapshot_dirs(unpack_dir).unwrap().0; - untar_snapshot_in( + unpack_snapshot_local( snapshot_archive, + archive_format, unpack_dir, &[unpack_account_dir.clone()], - archive_format, 1, ) .unwrap(); diff --git a/runtime/src/snapshot_utils/archive_format.rs b/runtime/src/snapshot_utils/archive_format.rs index c0d4703034edbe..a5b6a19c45b0b3 100644 --- a/runtime/src/snapshot_utils/archive_format.rs +++ b/runtime/src/snapshot_utils/archive_format.rs @@ -62,6 +62,33 @@ impl FromStr for ArchiveFormat { } } +pub enum ArchiveFormatDecompressor { + Zstd(zstd::stream::read::Decoder<'static, R>), + Lz4(lz4::Decoder), +} + +impl ArchiveFormatDecompressor { + pub fn new(format: ArchiveFormat, input: R) -> std::io::Result { + Ok(match format { + ArchiveFormat::TarZstd { .. } => { + Self::Zstd(zstd::stream::read::Decoder::with_buffer(input)?) + } + ArchiveFormat::TarLz4 => { + Self::Lz4(lz4::Decoder::new(input).map_err(std::io::Error::other)?) 
+ } + }) + } +} + +impl std::io::Read for ArchiveFormatDecompressor { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + match self { + Self::Zstd(decoder) => decoder.read(buf), + Self::Lz4(decoder) => decoder.read(buf), + } + } +} + #[derive(Debug, Clone, Eq, PartialEq)] pub enum ParseError { InvalidExtension(String), diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 17691ef9bee661..307c786a4eb10a 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -549,7 +549,7 @@ checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", + "bitflags 1.2.1", "bytes", "futures-util", "http 0.2.12", @@ -701,9 +701,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.3.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bitflags" @@ -1052,7 +1052,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags 1.3.2", + "bitflags 1.2.1", "strsim 0.8.0", "textwrap", "unicode-width 0.1.14", @@ -4405,7 +4405,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", ] [[package]] @@ -5281,6 +5281,7 @@ dependencies = [ "indexmap 2.9.0", "io-uring", "itertools 0.12.1", + "libc", "log", "lz4", "memmap2 0.9.5", @@ -5307,6 +5308,7 @@ dependencies = [ "solana-message", "solana-metrics", "solana-nohash-hasher", + "solana-perf", "solana-pubkey", "solana-rayon-threadlimit", "solana-rent-collector", @@ -9549,7 +9551,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"225e483f02d0ad107168dc57381a8a40c3aeea6abe47f37506931f861643cfa8" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", "byteorder", "libc", "thiserror 1.0.69", @@ -9562,7 +9564,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.2.1", "core-foundation 0.9.4", "system-configuration-sys", ] From c4f5c583942188cbb92bfa34884eb8e4191c7981 Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Fri, 20 Jun 2025 19:32:14 +0300 Subject: [PATCH 049/124] Update Solana warm-up period link to current documentation (#6653) --- docs/src/operations/guides/validator-stake.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/operations/guides/validator-stake.md b/docs/src/operations/guides/validator-stake.md index da43c3071d4fb7..e1ad04cc611d74 100644 --- a/docs/src/operations/guides/validator-stake.md +++ b/docs/src/operations/guides/validator-stake.md @@ -62,7 +62,7 @@ solana delegate-stake ~/validator-stake-keypair.json ~/some-other-vote-account-k ## Validator Stake Warm-up To combat various attacks on consensus, new stake delegations are subject to a -[warm-up](https://solana.com/docs/economics/staking/stake-accounts#delegation-warmup-and-cooldown) period. +[warm-up](https://solana.com/docs/references/staking/stake-accounts#delegation-warmup-and-cooldown) period. 
Monitor a validator's stake during warmup by: From 125cfa813ce2924509d69d7b04ccec437e63fd52 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Fri, 20 Jun 2025 15:28:10 -0300 Subject: [PATCH 050/124] Add v4 option to `cargo-build-sbf` (#6656) Add v4 option to arch --- platform-tools-sdk/cargo-build-sbf/src/main.rs | 2 +- platform-tools-sdk/cargo-build-sbf/tests/crates.rs | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/platform-tools-sdk/cargo-build-sbf/src/main.rs b/platform-tools-sdk/cargo-build-sbf/src/main.rs index ea833760f4ad3c..6925a261999391 100644 --- a/platform-tools-sdk/cargo-build-sbf/src/main.rs +++ b/platform-tools-sdk/cargo-build-sbf/src/main.rs @@ -876,7 +876,7 @@ fn main() { .arg( Arg::new("arch") .long("arch") - .possible_values(["v0", "v1", "v2", "v3"]) + .possible_values(["v0", "v1", "v2", "v3", "v4"]) .default_value("v0") .help("Build for the given target architecture"), ) diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates.rs b/platform-tools-sdk/cargo-build-sbf/tests/crates.rs index 7ccf3f4fec0f8f..2c0ae686ac4fed 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates.rs +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates.rs @@ -218,6 +218,18 @@ fn test_sbpfv3() { clean_target("noop"); } +#[test] +#[serial] +fn test_sbpfv4() { + let assert_v1 = build_noop_and_readelf("v4"); + assert_v1 + .stdout(predicate::str::contains( + "Flags: 0x4", + )) + .success(); + clean_target("noop"); +} + #[test] #[serial] fn test_package_metadata_tools_version() { From 6c3dde4d6b858c8950d80591865bef4ca35c1c2e Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Sat, 21 Jun 2025 03:26:43 +0800 Subject: [PATCH 051/124] `BankingStage::new_num_threads`: rm useless match (#6640) --- core/src/banking_stage.rs | 45 +++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs 
index 68e73a9ab5ba97..a177abd4ea3ed7 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -413,31 +413,26 @@ impl BankingStage { bank_forks: Arc>, prioritization_fee_cache: &Arc, ) -> Self { - match block_production_method { - BlockProductionMethod::CentralScheduler - | BlockProductionMethod::CentralSchedulerGreedy => { - let use_greedy_scheduler = matches!( - block_production_method, - BlockProductionMethod::CentralSchedulerGreedy - ); - Self::new_central_scheduler( - transaction_struct, - use_greedy_scheduler, - cluster_info, - poh_recorder, - transaction_recorder, - non_vote_receiver, - tpu_vote_receiver, - gossip_vote_receiver, - num_threads, - transaction_status_sender, - replay_vote_sender, - log_messages_bytes_limit, - bank_forks, - prioritization_fee_cache, - ) - } - } + let use_greedy_scheduler = matches!( + block_production_method, + BlockProductionMethod::CentralSchedulerGreedy + ); + Self::new_central_scheduler( + transaction_struct, + use_greedy_scheduler, + cluster_info, + poh_recorder, + transaction_recorder, + non_vote_receiver, + tpu_vote_receiver, + gossip_vote_receiver, + num_threads, + transaction_status_sender, + replay_vote_sender, + log_messages_bytes_limit, + bank_forks, + prioritization_fee_cache, + ) } #[allow(clippy::too_many_arguments)] From 19f4f235fe1b54871c10238e932ccf3f129d3bee Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 20 Jun 2025 16:16:08 -0400 Subject: [PATCH 052/124] Uses PackedOffsetAndFlags directly in AccountInfo (#6678) --- accounts-db/src/account_info.rs | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs index 5f950dc072997b..9cf8e771edd9e5 100644 --- a/accounts-db/src/account_info.rs +++ b/accounts-db/src/account_info.rs @@ -86,30 +86,20 @@ pub struct AccountInfo { /// index identifying the append storage store_id: AccountsFileId, - account_offset_and_flags: AccountOffsetAndFlags, -} - 
-#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub struct AccountOffsetAndFlags { /// offset = 'packed_offset_and_flags.offset_reduced()' * ALIGN_BOUNDARY_OFFSET into the storage /// Note this is a smaller type than 'Offset' - packed_offset_and_flags: PackedOffsetAndFlags, + account_offset_and_flags: PackedOffsetAndFlags, } impl IsZeroLamport for AccountInfo { fn is_zero_lamport(&self) -> bool { - self.account_offset_and_flags - .packed_offset_and_flags - .is_zero_lamport() + self.account_offset_and_flags.is_zero_lamport() } } impl IsCached for AccountInfo { fn is_cached(&self) -> bool { - self.account_offset_and_flags - .packed_offset_and_flags - .offset_reduced() - == CACHED_OFFSET + self.account_offset_and_flags.offset_reduced() == CACHED_OFFSET } } @@ -146,12 +136,9 @@ impl AccountInfo { } }; packed_offset_and_flags.set_is_zero_lamport(is_zero_lamport); - let account_offset_and_flags = AccountOffsetAndFlags { - packed_offset_and_flags, - }; Self { store_id, - account_offset_and_flags, + account_offset_and_flags: packed_offset_and_flags, } } @@ -166,11 +153,7 @@ impl AccountInfo { } pub fn offset(&self) -> Offset { - Self::reduced_offset_to_offset( - self.account_offset_and_flags - .packed_offset_and_flags - .offset_reduced(), - ) + Self::reduced_offset_to_offset(self.account_offset_and_flags.offset_reduced()) } pub fn reduced_offset_to_offset(reduced_offset: OffsetReduced) -> Offset { From af532bbac8c83495e206d33bc326f88ac4d2a5f4 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 20 Jun 2025 15:42:57 -0500 Subject: [PATCH 053/124] clean up `skip_rent_rewrites` feature (#6655) * clean up skip_rent_rewrites feature * update bank hash test --- accounts-db/benches/bench_accounts_file.rs | 4 +- accounts-db/benches/bench_hashing.rs | 4 +- accounts-db/src/accounts_db.rs | 28 +- accounts-db/src/accounts_db/stats.rs | 1 - ledger-tool/src/args.rs | 1 - runtime/src/bank.rs | 200 +---------- runtime/src/bank/tests.rs | 372 +++------------------ 
runtime/src/snapshot_minimizer.rs | 2 +- validator/src/commands/run/execute.rs | 1 - 9 files changed, 75 insertions(+), 538 deletions(-) diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 5243d226a7e8dd..ddcf34a8294cfe 100644 --- a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -21,8 +21,8 @@ mod utils; const ACCOUNTS_COUNTS: [usize; 4] = [ 1, // the smallest count; will bench overhead - 100, // number of accounts written per slot on mnb (with *no* rent rewrites) - 1_000, // number of accounts written slot on mnb (with rent rewrites) + 100, // lower range of accounts written per slot on mnb + 1_000, // higher range of accounts written per slot on mnb 10_000, // reasonable largest number of accounts written per slot ]; diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs index 960bc578ec20bb..c78af72506e2a7 100644 --- a/accounts-db/benches/bench_hashing.rs +++ b/accounts-db/benches/bench_hashing.rs @@ -51,8 +51,8 @@ fn bench_hash_account(c: &mut Criterion) { fn bench_accounts_delta_hash(c: &mut Criterion) { const ACCOUNTS_COUNTS: [usize; 4] = [ 1, // the smallest count; will bench overhead - 100, // number of accounts written per slot on mnb (with *no* rent rewrites) - 1_000, // number of accounts written slot on mnb (with rent rewrites) + 100, // lower range of accounts written per slot on mnb + 1_000, // higher range of accounts written per slot on mnb 10_000, // reasonable largest number of accounts written per slot ]; diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 82678688265a67..b3401691d1d924 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -362,7 +362,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { skip_initial_hash_calc: false, exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: 
DEFAULT_PARTITIONED_EPOCH_REWARDS_CONFIG, - test_skip_rewrites_but_include_in_bank_hash: false, storage_access: StorageAccess::File, scan_filter_for_shrinking: ScanFilter::OnlyAbnormalTest, enable_experimental_accumulator_hash: false, @@ -389,7 +388,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig skip_initial_hash_calc: false, exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: DEFAULT_PARTITIONED_EPOCH_REWARDS_CONFIG, - test_skip_rewrites_but_include_in_bank_hash: false, storage_access: StorageAccess::File, scan_filter_for_shrinking: ScanFilter::OnlyAbnormal, enable_experimental_accumulator_hash: false, @@ -517,7 +515,6 @@ pub struct AccountsDbConfig { pub ancient_storage_ideal_size: Option, pub max_ancient_storages: Option, pub hash_calculation_pubkey_bins: Option, - pub test_skip_rewrites_but_include_in_bank_hash: bool, pub skip_initial_hash_calc: bool, pub exhaustively_verify_refcounts: bool, pub partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig, @@ -1371,9 +1368,6 @@ pub struct AccountsDb { pub storage: AccountStorage, - /// true if this client should skip rewrites but still include those rewrites in the bank hash as if rewrites had occurred. 
- pub test_skip_rewrites_but_include_in_bank_hash: bool, - pub accounts_cache: AccountsCache, write_cache_limit_bytes: Option, @@ -1919,8 +1913,6 @@ impl AccountsDb { write_cache_limit_bytes: accounts_db_config.write_cache_limit_bytes, partitioned_epoch_rewards_config: accounts_db_config.partitioned_epoch_rewards_config, exhaustively_verify_refcounts: accounts_db_config.exhaustively_verify_refcounts, - test_skip_rewrites_but_include_in_bank_hash: accounts_db_config - .test_skip_rewrites_but_include_in_bank_hash, storage_access: accounts_db_config.storage_access, scan_filter_for_shrinking: accounts_db_config.scan_filter_for_shrinking, is_experimental_accumulator_hash_enabled: accounts_db_config @@ -6117,11 +6109,6 @@ impl AccountsDb { .swap(0, Ordering::Relaxed), i64 ), - ( - "skipped_rewrites_num", - self.stats.skipped_rewrites_num.swap(0, Ordering::Relaxed), - i64 - ), ); } @@ -7041,19 +7028,9 @@ impl AccountsDb { &self, slot: Slot, ignore: Option, - mut skipped_rewrites: HashMap, ) -> AccountsDeltaHash { let (mut hashes, scan_us, mut accumulate) = self.get_pubkey_hash_for_slot(slot); - hashes.iter().for_each(|(k, _h)| { - skipped_rewrites.remove(k); - }); - - let num_skipped_rewrites = skipped_rewrites.len(); - hashes.extend(skipped_rewrites); - - info!("skipped rewrite hashes {} {}", slot, num_skipped_rewrites); - if let Some(ignore) = ignore { hashes.retain(|k| k.0 != ignore); } @@ -7072,9 +7049,6 @@ impl AccountsDb { .delta_hash_accumulate_time_total_us .fetch_add(accumulate.as_us(), Ordering::Relaxed); self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed); - self.stats - .skipped_rewrites_num - .fetch_add(num_skipped_rewrites, Ordering::Relaxed); accounts_delta_hash } @@ -8734,7 +8708,7 @@ impl AccountsDb { /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.) 
pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { - self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default()) + self.calculate_accounts_delta_hash_internal(slot, None) } pub fn load_without_fixed_root( diff --git a/accounts-db/src/accounts_db/stats.rs b/accounts-db/src/accounts_db/stats.rs index d1637729f7d8af..9dc99c51363f02 100644 --- a/accounts-db/src/accounts_db/stats.rs +++ b/accounts-db/src/accounts_db/stats.rs @@ -12,7 +12,6 @@ pub struct AccountsStats { pub delta_hash_scan_time_total_us: AtomicU64, pub delta_hash_accumulate_time_total_us: AtomicU64, pub delta_hash_num: AtomicU64, - pub skipped_rewrites_num: AtomicUsize, pub last_store_report: AtomicInterval, pub store_hash_accounts: AtomicU64, diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 84ffc682b3d5f7..60a8646fb6794e 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -359,7 +359,6 @@ pub fn get_accounts_db_config( .ok(), exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"), skip_initial_hash_calc: arg_matches.is_present("accounts_db_skip_initial_hash_calculation"), - test_skip_rewrites_but_include_in_bank_hash: false, storage_access, scan_filter_for_shrinking, enable_experimental_accumulator_hash: !arg_matches diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index becd3e3b53921a..8ccc5073523e9b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -82,7 +82,7 @@ use { DuplicatesLtHash, PubkeyHashAccount, VerifyAccountsHashAndLamportsConfig, }, accounts_hash::{ - AccountHash, AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats, + AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, MerkleOrLatticeAccountsHash, }, accounts_index::{IndexKey, ScanConfig, ScanResult}, @@ -512,7 +512,6 @@ impl PartialEq for Bank { // Suppress rustfmt until https://github.com/rust-lang/rustfmt/issues/5920 is fixed ... 
#[rustfmt::skip] let Self { - skipped_rewrites: _, rc: _, status_cache: _, blockhash_queue, @@ -881,10 +880,6 @@ pub struct Bank { /// The change to accounts data size in this Bank, due to off-chain events (i.e. rent collection) accounts_data_size_delta_off_chain: AtomicI64, - /// until the skipped rewrites feature is activated, it is possible to skip rewrites and still include - /// the account hash of the accounts that would have been rewritten as bank hash expects. - skipped_rewrites: Mutex>, - epoch_reward_status: EpochRewardStatus, transaction_processor: TransactionBatchProcessor, @@ -1068,7 +1063,6 @@ impl AtomicBankHashStats { impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { - skipped_rewrites: Mutex::default(), rc: BankRc::new(accounts), status_cache: Arc::>::default(), blockhash_queue: RwLock::::default(), @@ -1305,7 +1299,6 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Self { - skipped_rewrites: Mutex::default(), rc, status_cache, slot, @@ -1799,7 +1792,6 @@ impl Bank { info!("Loading Stakes took: {stakes_time}"); let stakes_accounts_load_duration = now.elapsed(); let mut bank = Self { - skipped_rewrites: Mutex::default(), rc: bank_rc, status_cache: Arc::>::default(), blockhash_queue: RwLock::new(fields.blockhash_queue), @@ -1894,7 +1886,6 @@ impl Bank { ); bank.transaction_processor .fill_missing_sysvar_cache_entries(&bank); - bank.rebuild_skipped_rewrites(); let mut calculate_accounts_lt_hash_duration = None; if let Some(accounts_lt_hash) = fields.accounts_lt_hash { @@ -3914,79 +3905,6 @@ impl Bank { }); } - /// After deserialize, populate skipped rewrites with accounts that would normally - /// have had their data rewritten in this slot due to rent collection (but didn't). - /// - /// This is required when starting up from a snapshot to verify the bank hash. - /// - /// A second usage is from the `bank_to_xxx_snapshot_archive()` functions. 
These fns call - /// `Bank::rehash()` to handle if the user manually modified any accounts and thus requires - /// calculating the bank hash again. Since calculating the bank hash *takes* the skipped - /// rewrites, this second time will not have any skipped rewrites, and thus the hash would be - /// updated to the wrong value. So, rebuild the skipped rewrites before rehashing. - fn rebuild_skipped_rewrites(&self) { - // If the feature gate to *not* add rent collection rewrites to the bank hash is enabled, - // then do *not* add anything to our skipped_rewrites. - if self.bank_hash_skips_rent_rewrites() { - return; - } - - let (skipped_rewrites, measure_skipped_rewrites) = - measure_time!(self.calculate_skipped_rewrites()); - info!( - "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}", - skipped_rewrites.len() - ); - - *self.skipped_rewrites.lock().unwrap() = skipped_rewrites; - } - - /// Calculates (and returns) skipped rewrites for this bank - /// - /// Refer to `rebuild_skipped_rewrites()` for more documentation. - /// This implementation is purposely separate to facilitate testing. - /// - /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the - /// specific account is *not* already in the accounts delta hash. If an account is not in - /// the accounts delta hash, then it means the account was not modified. Since (basically) - /// all accounts are rent exempt, this means (basically) all accounts are unmodified by rent - /// collection. So we just need to load the accounts that would've been checked for rent - /// collection, hash them, and add them to Bank::skipped_rewrites. - /// - /// As of this writing, there are ~350 million acounts on mainnet-beta. - /// Rent collection almost always collects a single slot at a time. - /// So 1 slot of 432,000, of 350 million accounts, is ~800 accounts per slot. 
- /// Since we haven't started processing anything yet, it should be fast enough to simply - /// load the accounts directly. - /// Empirically, this takes about 3-4 milliseconds. - fn calculate_skipped_rewrites(&self) -> HashMap { - // The returned skipped rewrites may include accounts that were actually *not* skipped! - // (This is safe, as per the fn's documentation above.) - self.get_accounts_for_skipped_rewrites() - .map(|(pubkey, account_hash, _account)| (pubkey, account_hash)) - .collect() - } - - /// Loads accounts that were selected for rent collection this slot. - /// After loading the accounts, also calculate and return the account hashes. - /// This is used when dealing with skipped rewrites. - fn get_accounts_for_skipped_rewrites( - &self, - ) -> impl Iterator + '_ { - self.rent_collection_partitions() - .into_iter() - .map(accounts_partition::pubkey_range_from_partition) - .flat_map(|pubkey_range| { - self.rc - .accounts - .load_to_collect_rent_eagerly(&self.ancestors, pubkey_range) - }) - .map(|(pubkey, account, _slot)| { - let account_hash = AccountsDb::hash_account(&account, &pubkey); - (pubkey, account_hash, account) - }) - } - /// Returns the accounts, sorted by pubkey, that were part of accounts delta hash calculation /// This is used when writing a bank hash details file. pub(crate) fn get_accounts_for_bank_hash_details(&self) -> Vec { @@ -3995,28 +3913,6 @@ impl Bank { let mut accounts_written_this_slot = accounts_db.get_pubkey_hash_account_for_slot(self.slot()); - // If we are skipping rewrites but also include them in the accounts delta hash, then we - // need to go load those accounts and add them to the list of accounts written this slot. 
- if !self.bank_hash_skips_rent_rewrites() - && accounts_db.test_skip_rewrites_but_include_in_bank_hash - { - let pubkeys_written_this_slot: HashSet<_> = accounts_written_this_slot - .iter() - .map(|pubkey_hash_account| pubkey_hash_account.pubkey) - .collect(); - - let rent_collection_accounts = self.get_accounts_for_skipped_rewrites(); - for (pubkey, hash, account) in rent_collection_accounts { - if !pubkeys_written_this_slot.contains(&pubkey) { - accounts_written_this_slot.push(PubkeyHashAccount { - pubkey, - hash, - account, - }); - } - } - } - // Sort the accounts by pubkey to match the order of the accounts delta hash. // This also makes comparison of files from different nodes deterministic. accounts_written_this_slot.sort_unstable_by_key(|account| account.pubkey); @@ -4124,15 +4020,6 @@ impl Bank { } } - /// true if rent collection does NOT rewrite accounts whose pubkey indicates - /// it is time for rent collection, but the account is rent exempt. - /// false if rent collection DOES rewrite accounts if the account is rent exempt - /// This is the default behavior historically. - fn bank_hash_skips_rent_rewrites(&self) -> bool { - self.feature_set - .is_active(&feature_set::skip_rent_rewrites::id()) - } - /// Update rent exempt status for `accounts` /// /// This fn is called inside a parallel loop from `collect_rent_in_partition()`. 
Avoid adding @@ -4150,13 +4037,6 @@ impl Bank { Vec::<(&Pubkey, &AccountSharedData)>::with_capacity(accounts.len()); let mut time_collecting_rent_us = 0; let mut time_storing_accounts_us = 0; - let can_skip_rewrites = self.bank_hash_skips_rent_rewrites(); - let test_skip_rewrites_but_include_in_bank_hash = self - .rc - .accounts - .accounts_db - .test_skip_rewrites_but_include_in_bank_hash; - let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let rent_epoch_pre = account.rent_epoch(); let ((), collect_rent_us) = measure_us!(update_rent_exempt_status_for_account( @@ -4169,36 +4049,16 @@ impl Bank { // did the account change in any way due to rent collection? let account_changed = rent_epoch_post != rent_epoch_pre; - // always store the account, regardless if it changed or not - let always_store_accounts = - !can_skip_rewrites && !test_skip_rewrites_but_include_in_bank_hash; - - // only store accounts where we collected rent - // but get the hash for all these accounts even if collected rent is 0 (= not updated). - // Also, there's another subtle side-effect from rewrites: this - // ensures we verify the whole on-chain state (= all accounts) - // via the bank delta hash slowly once per an epoch. 
- if account_changed || always_store_accounts { - if account_changed { - datapoint_info!( - "bank-rent_collection_updated_only_rent_epoch", - ("slot", self.slot(), i64), - ("pubkey", pubkey.to_string(), String), - ("rent_epoch_pre", rent_epoch_pre, i64), - ("rent_epoch_post", rent_epoch_post, i64), - ); - } + // only store accounts where we updated rent epoch + if account_changed { + datapoint_info!( + "bank-rent_collection_updated_only_rent_epoch", + ("slot", self.slot(), i64), + ("pubkey", pubkey.to_string(), String), + ("rent_epoch_pre", rent_epoch_pre, i64), + ("rent_epoch_post", rent_epoch_post, i64), + ); accounts_to_store.push((pubkey, account)); - } else if !account_changed - && !can_skip_rewrites - && test_skip_rewrites_but_include_in_bank_hash - { - // include rewrites that we skipped in the accounts delta hash. - // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites. - // This code path exists to allow us to test the long term effects on validators when the skipped rewrites - // feature is enabled. - let hash = AccountsDb::hash_account(account, pubkey); - skipped_rewrites.push((*pubkey, hash)); } } @@ -4211,7 +4071,6 @@ impl Bank { } CollectRentFromAccountsInfo { - skipped_rewrites, time_collecting_rent_us, time_storing_accounts_us, num_accounts: accounts.len(), @@ -4225,9 +4084,7 @@ impl Bank { } /// load accounts with pubkeys in 'subrange_full', update - /// 'account.rent_epoch' as necessary, and store accounts, whether rent was - /// collected or not (depending on whether we skipping rewrites is enabled) - /// update bank's rewrites set for all rewrites that were skipped + /// 'account.rent_epoch' as necessary, fn update_rent_exempt_status_in_range( &self, subrange_full: RangeInclusive, @@ -4285,11 +4142,6 @@ impl Bank { CollectRentInPartitionInfo::reduce, ); - self.skipped_rewrites - .lock() - .unwrap() - .extend(results.skipped_rewrites); - // We cannot assert here that we collected from all expected keys. 
// Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports. @@ -5189,11 +5041,7 @@ impl Bank { self.rc .accounts .accounts_db - .calculate_accounts_delta_hash_internal( - slot, - None, - self.skipped_rewrites.lock().unwrap().clone(), - ) + .calculate_accounts_delta_hash_internal(slot, None) }) }); @@ -6300,26 +6148,10 @@ impl Bank { } pub(crate) fn shrink_ancient_slots(&self) { - // Invoke ancient slot shrinking only when the validator is - // explicitly configured to do so. This condition may be - // removed when the skip rewrites feature is enabled. - if self.are_ancient_storages_enabled() { - self.rc - .accounts - .accounts_db - .shrink_ancient_slots(self.epoch_schedule()) - } - } - - /// Returns if ancient storages are enabled or not - pub fn are_ancient_storages_enabled(&self) -> bool { - let can_skip_rewrites = self.bank_hash_skips_rent_rewrites(); - let test_skip_rewrites_but_include_in_bank_hash = self - .rc + self.rc .accounts .accounts_db - .test_skip_rewrites_but_include_in_bank_hash; - can_skip_rewrites || test_skip_rewrites_but_include_in_bank_hash + .shrink_ancient_slots(self.epoch_schedule()) } pub fn read_cost_tracker(&self) -> LockResult> { @@ -7177,7 +7009,6 @@ enum ApplyFeatureActivationsCaller { /// process later. #[derive(Debug, Default)] struct CollectRentFromAccountsInfo { - skipped_rewrites: Vec<(Pubkey, AccountHash)>, time_collecting_rent_us: u64, time_storing_accounts_us: u64, num_accounts: usize, @@ -7187,7 +7018,6 @@ struct CollectRentFromAccountsInfo { /// `collect_rent_in_partition()`—and then perform a reduce on all of them. 
#[derive(Debug, Default)] struct CollectRentInPartitionInfo { - skipped_rewrites: Vec<(Pubkey, AccountHash)>, time_loading_accounts_us: u64, time_collecting_rent_us: u64, time_storing_accounts_us: u64, @@ -7200,7 +7030,6 @@ impl CollectRentInPartitionInfo { #[must_use] fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self { Self { - skipped_rewrites: info.skipped_rewrites, time_loading_accounts_us: time_loading_accounts.as_micros() as u64, time_collecting_rent_us: info.time_collecting_rent_us, time_storing_accounts_us: info.time_storing_accounts_us, @@ -7215,7 +7044,6 @@ impl CollectRentInPartitionInfo { #[must_use] fn reduce(lhs: Self, rhs: Self) -> Self { Self { - skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(), time_loading_accounts_us: lhs .time_loading_accounts_us .saturating_add(rhs.time_loading_accounts_us), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 0bbbf289e6ee43..15874e9318729f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13,9 +13,6 @@ use { create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, genesis_sysvar_and_builtin_program_lamports, GenesisConfigInfo, ValidatorVoteKeypairs, }, - snapshot_bank_utils, - snapshot_config::SnapshotConfig, - snapshot_utils, stake_history::StakeHistory, stakes::InvalidCacheEntryReason, status_cache::MAX_CACHE_ENTRIES, @@ -39,11 +36,9 @@ use { solana_accounts_db::{ accounts::AccountAddressFilter, accounts_db::DEFAULT_ACCOUNTS_SHRINK_RATIO, - accounts_hash::{AccountsDeltaHash, AccountsHasher}, accounts_index::{ AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ITER_BATCH_SIZE, }, - accounts_partition, ancestors::Ancestors, }, solana_client_traits::SyncClient, @@ -141,7 +136,6 @@ use { thread::Builder, time::{Duration, Instant}, }, - tempfile::TempDir, test_case::test_case, }; @@ -924,10 +918,6 @@ fn test_rent_eager_collect_rent_in_partition() { solana_logger::setup(); 
let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); activate_all_features(&mut genesis_config); - genesis_config - .accounts - .remove(&feature_set::skip_rent_rewrites::id()) - .unwrap(); genesis_config .accounts .remove(&feature_set::disable_partitioned_rent_collection::id()) @@ -983,10 +973,7 @@ fn test_rent_eager_collect_rent_in_partition() { bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch(), RENT_EXEMPT_RENT_EPOCH ); - assert_eq!( - bank.slots_by_pubkey(&rent_due_pubkey), - vec![genesis_slot, some_slot] - ); + assert_eq!(bank.slots_by_pubkey(&rent_due_pubkey), vec![genesis_slot]); assert_eq!( bank.slots_by_pubkey(&rent_exempt_pubkey), vec![genesis_slot, some_slot] @@ -1012,73 +999,63 @@ pub(in crate::bank) fn new_from_parent_next_epoch( new_bank_from_parent_with_bank_forks(bank_forks, parent, &Pubkey::default(), slot) } +/// test that only rent exempt accounts without rent epoch set to u64::MAX +/// are updated to rent exempt status #[test] -/// tests that an account which has already had rent collected IN this slot does not skip rewrites -fn test_collect_rent_from_accounts() { - solana_logger::setup(); +fn test_update_rent_exempt_status_for_accounts() { + let address1 = Pubkey::new_unique(); + let address2 = Pubkey::new_unique(); + let address3 = Pubkey::new_unique(); - for skip_rewrites in [false, true] { - let address1 = Pubkey::new_unique(); - let address2 = Pubkey::new_unique(); - let address3 = Pubkey::new_unique(); + let (genesis_bank, bank_forks) = create_simple_test_arc_bank(100000); + let first_bank = new_from_parent(genesis_bank.clone()); + let first_bank = bank_forks + .write() + .unwrap() + .insert(first_bank) + .clone_without_scheduler(); - let (genesis_bank, bank_forks) = create_simple_test_arc_bank(100000); - let mut first_bank = new_from_parent(genesis_bank.clone()); - if skip_rewrites { - first_bank.activate_feature(&feature_set::skip_rent_rewrites::id()); - } - let first_bank = bank_forks - .write() - 
.unwrap() - .insert(first_bank) - .clone_without_scheduler(); - - let first_slot = 1; - assert_eq!(first_slot, first_bank.slot()); - let epoch_delta = 4; - let later_bank = new_from_parent_next_epoch(first_bank, bank_forks.as_ref(), epoch_delta); // a bank a few epochs in the future - let later_slot = later_bank.slot(); - assert!(later_bank.epoch() == genesis_bank.epoch() + epoch_delta); - - let data_size = 0; // make sure we're rent exempt - let lamports = later_bank.get_minimum_balance_for_rent_exemption(data_size); // cannot be 0 or we zero out rent_epoch in rent collection and we need to be rent exempt - let mut account1 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); - let mut account2 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); - let mut account3 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); - account1.set_rent_epoch(later_bank.epoch() - 1); // non-zero, but less than later_bank's epoch - account2.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); // already marked as rent exempt - account3.set_rent_epoch(0); // stake accounts in genesis have a rent epoch of 0 - - // loaded from previous slot, so we skip rent collection on it - let _result = later_bank.update_rent_exempt_status_for_accounts(vec![ - (address1, account1, later_slot - 1), - (address2, account2, later_slot - 1), - (address3, account3, later_slot - 1), - ]); - - let deltas = later_bank - .rc - .accounts - .accounts_db - .get_pubkey_hash_for_slot(later_slot) - .0; + let first_slot = 1; + assert_eq!(first_slot, first_bank.slot()); + let epoch_delta = 4; + let later_bank = new_from_parent_next_epoch(first_bank, bank_forks.as_ref(), epoch_delta); // a bank a few epochs in the future + let later_slot = later_bank.slot(); + assert!(later_bank.epoch() == genesis_bank.epoch() + epoch_delta); + + let data_size = 0; // make sure we're rent exempt + let lamports = later_bank.get_minimum_balance_for_rent_exemption(data_size); // cannot be 0 or we zero out 
rent_epoch in rent collection and we need to be rent exempt + let mut account1 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); + let mut account2 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); + let mut account3 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); + account1.set_rent_epoch(later_bank.epoch() - 1); // non-zero, but less than later_bank's epoch + account2.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); // already marked as rent exempt + account3.set_rent_epoch(0); // stake accounts in genesis have a rent epoch of 0 + + // loaded from previous slot, so we skip rent collection on it + let _result = later_bank.update_rent_exempt_status_for_accounts(vec![ + (address1, account1, later_slot - 1), + (address2, account2, later_slot - 1), + (address3, account3, later_slot - 1), + ]); + + let deltas = later_bank + .rc + .accounts + .accounts_db + .get_pubkey_hash_for_slot(later_slot) + .0; - // ensure account1 *is* stored because the account *did* change - // (its rent epoch must be updated to RENT_EXEMPT_RENT_EPOCH) - assert!(deltas.iter().map(|(pubkey, _)| pubkey).contains(&address1)); + // ensure account1 *is* stored because the account *did* change + // (its rent epoch must be updated to RENT_EXEMPT_RENT_EPOCH) + assert!(deltas.iter().map(|(pubkey, _)| pubkey).contains(&address1)); - // if doing rewrites, ensure account2 *is* stored - // if skipping rewrites, ensure account2 is *not* stored - // (because the account did *not* change) - assert_eq!( - deltas.iter().map(|(pubkey, _)| pubkey).contains(&address2), - !skip_rewrites, - ); + // ensure account2 is *not* stored + // (because the account did *not* change) + assert!(!deltas.iter().map(|(pubkey, _)| pubkey).contains(&address2),); - // ensure account3 *is* stored because the account *did* change - // (same as account1 above) - assert!(deltas.iter().map(|(pubkey, _)| pubkey).contains(&address3)); - } + // ensure account3 *is* stored because the account *did* 
change + // (same as account1 above) + assert!(deltas.iter().map(|(pubkey, _)| pubkey).contains(&address3)); } #[test] @@ -5798,19 +5775,19 @@ fn test_bank_hash_consistency() { if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "CK1siD9yP37R4ErCECKg1rofsEAk9fdGpsfpMQnSvHBL" + "4qjTvZJd4resaoy6XYNgbTBbvPha5oyjBXMC4MuZ5Msn" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "5h8yw8oU78G4JeVB28U9ZjZpV5fCgm9gA8LfVJF8YD8W" + "5M1CbUrWq8hBfUGtQse6RtECwjeCqzZWb3GcSiqhXU1c" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "87cnbyVPkbfpQkjuQ5sCKXNYhvUbpzHNac6GJv1BnqDM" + "4xSvqtyQXB7qiMcSTokZTKZSqXZH8a9c8W9VJpQPHq3N" ); break; } @@ -11960,245 +11937,6 @@ fn test_last_restart_slot() { assert_eq!(get_last_restart_slot(&bank7), Some(6)); } -/// Test that rehashing works with skipped rewrites -/// -/// Since `bank_to_xxx_snapshot_archive()` calls `Bank::rehash()`, we must ensure that rehashing -/// works properly when also using `test_skip_rewrites_but_include_in_bank_hash`. -#[test] -fn test_rehash_with_skipped_rewrites() { - let accounts_db_config = AccountsDbConfig { - test_skip_rewrites_but_include_in_bank_hash: true, - ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }; - let bank = Arc::new(Bank::new_with_paths( - &GenesisConfig::default(), - Arc::new(RuntimeConfig::default()), - Vec::default(), - None, - None, - false, - Some(accounts_db_config), - None, - Some(Pubkey::new_unique()), - Arc::new(AtomicBool::new(false)), - None, - None, - )); - // This test is only meaningful while the bank hash contains rewrites. - // Once this feature is enabled, it may be possible to remove this test entirely. 
- assert!(!bank.bank_hash_skips_rent_rewrites()); - - // Store an account *in this bank* that will be checked for rent collection *in the next bank* - let pubkey = { - let rent_collection_partition = bank - .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) - .last() - .copied() - .unwrap(); - let pubkey_range = - accounts_partition::pubkey_range_from_partition(rent_collection_partition); - *pubkey_range.end() - }; - let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); - // The account's rent epoch must be set to EXEMPT - // in order for its rewrite to be skipped by rent collection. - account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - bank.store_account_and_update_capitalization(&pubkey, &account); - - // Create a new bank that will do rent collection on the account stored in the previous slot - let bank = Arc::new(Bank::new_from_parent( - bank.clone(), - &Pubkey::new_unique(), - bank.slot() + 1, - )); - - // Freeze the bank to trigger rent collection and hash calculation - bank.freeze(); - - // Ensure the bank hash is the same before and after rehashing - let bank_hash = bank.hash(); - bank.rehash(); - let bank_rehash = bank.hash(); - assert_eq!(bank_rehash, bank_hash); -} - -/// Test that skipped_rewrites are properly rebuilt when booting from a snapshot -/// that was generated by a node skipping rewrites. -#[test] -fn test_rebuild_skipped_rewrites() { - let genesis_config = GenesisConfig::default(); - let accounts_db_config = AccountsDbConfig { - test_skip_rewrites_but_include_in_bank_hash: true, - ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }; - let bank = Arc::new(Bank::new_with_paths( - &genesis_config, - Arc::new(RuntimeConfig::default()), - Vec::default(), - None, - None, - false, - Some(accounts_db_config.clone()), - None, - Some(Pubkey::new_unique()), - Arc::new(AtomicBool::new(false)), - None, - None, - )); - // This test is only meaningful while the bank hash contains rewrites. 
- // Once this feature is enabled, it may be possible to remove this test entirely. - assert!(!bank.bank_hash_skips_rent_rewrites()); - - // Store an account *in this bank* that will be checked for rent collection *in the next bank* - let pubkey = { - let rent_collection_partition = bank - .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) - .last() - .copied() - .unwrap(); - let pubkey_range = - accounts_partition::pubkey_range_from_partition(rent_collection_partition); - *pubkey_range.end() - }; - let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); - // The account's rent epoch must be set to EXEMPT - // in order for its rewrite to be skipped by rent collection. - account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - bank.store_account_and_update_capitalization(&pubkey, &account); - - // Create a new bank that will do rent collection on the account stored in the previous slot - let bank = Arc::new(Bank::new_from_parent( - bank.clone(), - &Pubkey::new_unique(), - bank.slot() + 1, - )); - - // This fn is called within freeze(), but freeze() *consumes* Self::skipped_rewrites! - // For testing, we want to know what's in the skipped rewrites, so we perform - // rent collection manually. - bank.run_partitioned_rent_exempt_status_updates(); - let actual_skipped_rewrites = bank.skipped_rewrites.lock().unwrap().clone(); - // Ensure skipped rewrites now includes the account we stored above - assert!(actual_skipped_rewrites.contains_key(&pubkey)); - // Ensure the calculated skipped rewrites match the actual ones - let calculated_skipped_rewrites = bank.calculate_skipped_rewrites(); - assert_eq!(calculated_skipped_rewrites, actual_skipped_rewrites); - - // required in order to snapshot the bank - bank.fill_bank_with_ticks_for_tests(); - - // Now take a snapshot! 
- let (_tmp_dir, accounts_dir) = snapshot_utils::create_tmp_accounts_dir_for_tests(); - let bank_snapshots_dir = TempDir::new().unwrap(); - let full_snapshot_archives_dir = TempDir::new().unwrap(); - let incremental_snapshot_archives_dir = TempDir::new().unwrap(); - let full_snapshot_archive = snapshot_bank_utils::bank_to_full_snapshot_archive( - bank_snapshots_dir.path(), - &bank, - None, - full_snapshot_archives_dir.path(), - incremental_snapshot_archives_dir.path(), - SnapshotConfig::default().archive_format, - ) - .unwrap(); - - // Rebuild the bank and ensure it passes verification - let (snapshot_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( - &[accounts_dir], - bank_snapshots_dir.path(), - &full_snapshot_archive, - None, - &genesis_config, - &RuntimeConfig::default(), - None, - None, - None, - false, - false, - false, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::new(AtomicBool::new(false)), - ) - .unwrap(); - snapshot_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); - assert_eq!(bank.as_ref(), &snapshot_bank); - - // Ensure the snapshot bank's skipped rewrites match the original bank's - let snapshot_skipped_rewrites = snapshot_bank.calculate_skipped_rewrites(); - assert_eq!(snapshot_skipped_rewrites, actual_skipped_rewrites); -} - -/// Test that getting accounts for BankHashDetails works with skipped rewrites -#[test_case(true; "skip rewrites")] -#[test_case(false; "do rewrites")] -fn test_get_accounts_for_bank_hash_details(skip_rewrites: bool) { - let genesis_config = GenesisConfig::default(); - let accounts_db_config = AccountsDbConfig { - test_skip_rewrites_but_include_in_bank_hash: skip_rewrites, - ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }; - let bank = Arc::new(Bank::new_with_paths( - &genesis_config, - Arc::new(RuntimeConfig::default()), - Vec::default(), - None, - None, - false, - Some(accounts_db_config.clone()), - None, - Some(Pubkey::new_unique()), - Arc::new(AtomicBool::new(false)), - None, - 
None, - )); - // This test is only meaningful while the bank hash contains rewrites. - // Once this feature is enabled, it may be possible to remove this test entirely. - assert!(!bank.bank_hash_skips_rent_rewrites()); - - // Store an account *in this bank* that will be checked for rent collection *in the next bank* - let pubkey = { - let rent_collection_partition = bank - .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) - .last() - .copied() - .unwrap(); - let pubkey_range = - accounts_partition::pubkey_range_from_partition(rent_collection_partition); - *pubkey_range.end() - }; - let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); - // The account's rent epoch must be set to EXEMPT - // in order for its rewrite to be skipped by rent collection. - account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - bank.store_account_and_update_capitalization(&pubkey, &account); - - // Create a new bank that will do rent collection on the account stored in the previous slot - let bank = Bank::new_from_parent(bank.clone(), &Pubkey::new_unique(), bank.slot() + 1); - - // Freeze the bank to do rent collection and calculate the accounts delta hash - bank.freeze(); - - // Ensure that the accounts returned by `get_accounts_for_bank_hash_details()` produces the - // same AccountsDeltaHash as the actual value stored in the Bank. 
- let calculated_accounts_delta_hash = { - let accounts = bank.get_accounts_for_bank_hash_details(); - let hashes = accounts - .into_iter() - .map(|account| (account.pubkey, account.hash)) - .collect(); - AccountsDeltaHash(AccountsHasher::accumulate_account_hashes(hashes)) - }; - let actual_accounts_delta_hash = bank - .rc - .accounts - .accounts_db - .get_accounts_delta_hash(bank.slot()) - .unwrap(); - assert_eq!(calculated_accounts_delta_hash, actual_accounts_delta_hash); -} - /// Test that simulations report the compute units of failed transactions #[test] fn test_failed_simulation_compute_units() { diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 2a199a3826e3f0..bb39eb97a70349 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -128,7 +128,7 @@ impl<'a> SnapshotMinimizer<'a> { } /// Used to get rent collection accounts in `minimize` - /// Add all pubkeys we would collect rent from or rewrite to `minimized_account_set`. + /// Add all pubkeys we would collect rent from to `minimized_account_set`. 
/// related to Bank::rent_collection_partitions fn get_rent_collection_accounts(&self) { let partitions = if !self.bank.use_fixed_collection_cycle() { diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index f8fcdd823dd34d..77ee068c057945 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -488,7 +488,6 @@ pub fn execute( ) .ok(), exhaustively_verify_refcounts: matches.is_present("accounts_db_verify_refcounts"), - test_skip_rewrites_but_include_in_bank_hash: false, storage_access, scan_filter_for_shrinking, enable_experimental_accumulator_hash: !matches From adb44ebc1d0c999e68291862e2aef0a4896b8da2 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 20 Jun 2025 16:29:32 -0500 Subject: [PATCH 054/124] streamer: Remove deprecated MAX_*_CONNECTIONS constants (#6657) --- streamer/src/quic.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 61dee5db493a4d..ce89d08edb3799 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -33,20 +33,8 @@ use { // allow multiple connections for NAT and any open/close overlap pub const DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER: usize = 8; -#[deprecated( - since = "2.2.0", - note = "Use solana_streamer::quic::DEFAULT_MAX_STAKED_CONNECTIONS" -)] -pub const MAX_STAKED_CONNECTIONS: usize = 2000; - pub const DEFAULT_MAX_STAKED_CONNECTIONS: usize = 2000; -#[deprecated( - since = "2.2.0", - note = "Use solana_streamer::quic::DEFAULT_MAX_UNSTAKED_CONNECTIONS" -)] -pub const MAX_UNSTAKED_CONNECTIONS: usize = 500; - pub const DEFAULT_MAX_UNSTAKED_CONNECTIONS: usize = 500; /// Limit to 250K PPS From a27e7c4a5532983fd10166359b64b2cda9404b85 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sun, 22 Jun 2025 19:51:41 -0500 Subject: [PATCH 055/124] make `test_bank_hash_consistency` easier to maintain (#6687) make test_bank_hash_consistency easier to maintain --- runtime/src/bank/tests.rs | 53 
+++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 15874e9318729f..74cf5d60d6376f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -5744,50 +5744,59 @@ fn test_fuzz_instructions() { info!("results: {:?}", results); } +// DEVELOPERS: This test is intended to ensure that the bank hash remains +// consistent across all changes, including feature set changes. If you add a +// new feature that affects the bank hash, you should update this test to use a +// test matrix that tests the bank hash calculation with and without your +// added feature. #[test] fn test_bank_hash_consistency() { - solana_logger::setup(); - let account = AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()); - assert_eq!(account.rent_epoch(), 0); let mut genesis_config = GenesisConfig::new(&[(Pubkey::from([42; 32]), account)], &[]); + // Override the creation time to ensure bank hash consistency genesis_config.creation_time = 0; genesis_config.cluster_type = ClusterType::MainnetBeta; - genesis_config.rent.burn_percent = 100; - activate_feature( - &mut genesis_config, - agave_feature_set::set_exempt_rent_epoch_max::id(), - ); - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); - // Check a few slots, cross an epoch boundary - assert_eq!(bank.get_slots_in_epoch(0), 32); - loop { - goto_end_of_slot(bank.clone()); + // Set the feature set to all enabled so that we detect any inconsistencies + // in the hash computation that may arise from feature set changes + let feature_set = FeatureSet::all_enabled(); + let mut bank = Arc::new(Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + vec![], + None, + None, + false, + Some(BankTestConfig::default().accounts_db_config), + None, + Some(Pubkey::from([42; 32])), + Arc::default(), + None, + Some(feature_set), + )); + loop { + goto_end_of_slot(Arc::clone(&bank)); if 
bank.slot == 0 { + assert_eq!(bank.epoch(), 0); assert_eq!( bank.hash().to_string(), - "CTg8Vq5RjXhfp332YC9DHQjAfFueLPimszv9i6xBFgPW", + "AyXhbqmPsC46x7MHAuW89pQcNZVrUZnAND6ABWJ24svx", ); } if bank.slot == 32 { + assert_eq!(bank.epoch(), 1); assert_eq!( bank.hash().to_string(), - "4qjTvZJd4resaoy6XYNgbTBbvPha5oyjBXMC4MuZ5Msn" - ); - } - if bank.slot == 64 { - assert_eq!( - bank.hash().to_string(), - "5M1CbUrWq8hBfUGtQse6RtECwjeCqzZWb3GcSiqhXU1c" + "ApbSYzbXgNBobjzp8ytimvVsMBUxtuJR9nFieePdpwj3" ); } if bank.slot == 128 { + assert_eq!(bank.epoch(), 2); assert_eq!( bank.hash().to_string(), - "4xSvqtyQXB7qiMcSTokZTKZSqXZH8a9c8W9VJpQPHq3N" + "FxaFn1Dj7fetY1SXWWi6DyEYidoiDLZexe3hM1tNvkwJ" ); break; } From 464dcdf35eccd44b846a375a0f7be4a50f71a3b6 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sun, 22 Jun 2025 22:10:38 -0500 Subject: [PATCH 056/124] add iter impl to rollback accounts (#6530) * refactor: simplify rollback accounts * feedback --- runtime/src/account_saver.rs | 54 +++------------ svm/src/account_loader.rs | 44 ++---------- svm/src/nonce_info.rs | 4 +- svm/src/rollback_accounts.rs | 114 +++++++++++++++++++++---------- svm/src/transaction_processor.rs | 3 +- svm/tests/integration_test.rs | 39 ++--------- 6 files changed, 104 insertions(+), 154 deletions(-) diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index e921a7c828636e..011e21d4cc6836 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -83,7 +83,6 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( collect_accounts_for_failed_tx( &mut accounts, &mut transactions, - transaction, transaction_ref, &executed_tx.loaded_transaction.rollback_accounts, ); @@ -93,7 +92,6 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( collect_accounts_for_failed_tx( &mut accounts, &mut transactions, - transaction, transaction_ref, &fees_only_tx.rollback_accounts, ); @@ -131,44 +129,17 @@ fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( } } -fn 
collect_accounts_for_failed_tx<'a, T: SVMMessage>( +fn collect_accounts_for_failed_tx<'a>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, collected_account_transactions: &mut Option>, - transaction: &'a T, transaction_ref: Option<&'a SanitizedTransaction>, rollback_accounts: &'a RollbackAccounts, ) { - let fee_payer_address = transaction.fee_payer(); - match rollback_accounts { - RollbackAccounts::FeePayerOnly { fee_payer_account } => { - collected_accounts.push((fee_payer_address, fee_payer_account)); - if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions - .push(transaction_ref.expect("transaction ref must exist if collecting")); - } - } - RollbackAccounts::SameNonceAndFeePayer { nonce } => { - collected_accounts.push((nonce.address(), nonce.account())); - if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions - .push(transaction_ref.expect("transaction ref must exist if collecting")); - } - } - RollbackAccounts::SeparateNonceAndFeePayer { - nonce, - fee_payer_account, - } => { - collected_accounts.push((fee_payer_address, fee_payer_account)); - if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions - .push(transaction_ref.expect("transaction ref must exist if collecting")); - } - - collected_accounts.push((nonce.address(), nonce.account())); - if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions - .push(transaction_ref.expect("transaction ref must exist if collecting")); - } + for (address, account) in rollback_accounts { + collected_accounts.push((address, account)); + if let Some(collected_account_transactions) = collected_account_transactions { + collected_account_transactions + .push(transaction_ref.expect("transaction ref must exist if collecting")); } } } @@ -193,7 +164,6 @@ mod tests { 
solana_signer::{signers::Signers, Signer}, solana_svm::{ account_loader::{FeesOnlyTransaction, LoadedTransaction}, - nonce_info::NonceInfo, transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, }, solana_system_interface::{instruction as system_instruction, program as system_program}, @@ -346,7 +316,7 @@ mod tests { program_indices: vec![], fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::FeePayerOnly { - fee_payer_account: from_account_pre.clone(), + fee_payer: (from_address, from_account_pre.clone()), }, compute_budget: SVMTransactionExecutionBudget::default(), loaded_accounts_data_size: 0, @@ -432,14 +402,13 @@ mod tests { AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); - let nonce = NonceInfo::new(nonce_address, nonce_account_pre.clone()); let loaded = LoadedTransaction { accounts: transaction_accounts, program_indices: vec![], fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::SeparateNonceAndFeePayer { - nonce: nonce.clone(), - fee_payer_account: from_account_pre.clone(), + nonce: (nonce_address, nonce_account_pre.clone()), + fee_payer: (from_address, from_account_pre.clone()), }, compute_budget: SVMTransactionExecutionBudget::default(), loaded_accounts_data_size: 0, @@ -539,13 +508,12 @@ mod tests { let nonce_account_pre = AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); - let nonce = NonceInfo::new(nonce_address, nonce_account_pre.clone()); let loaded = LoadedTransaction { accounts: transaction_accounts, program_indices: vec![], fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::SameNonceAndFeePayer { - nonce: nonce.clone(), + nonce: (nonce_address, nonce_account_pre.clone()), }, compute_budget: SVMTransactionExecutionBudget::default(), loaded_accounts_data_size: 0, @@ -611,7 +579,7 @@ mod tests { load_error: 
TransactionError::InvalidProgramForExecution, fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::FeePayerOnly { - fee_payer_account: from_account_pre.clone(), + fee_payer: (from_address, from_account_pre.clone()), }, }, )))]; diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 804f384f0d0ee7..8d96ccd5ecabab 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -268,36 +268,15 @@ impl<'a, CB: TransactionProcessingCallback> AccountLoader<'a, CB> { ); } else { self.update_accounts_for_failed_tx( - message, &executed_transaction.loaded_transaction.rollback_accounts, ); } } - pub(crate) fn update_accounts_for_failed_tx( - &mut self, - message: &impl SVMMessage, - rollback_accounts: &RollbackAccounts, - ) { - let fee_payer_address = message.fee_payer(); - match rollback_accounts { - RollbackAccounts::FeePayerOnly { fee_payer_account } => { - self.loaded_accounts - .insert(*fee_payer_address, fee_payer_account.clone()); - } - RollbackAccounts::SameNonceAndFeePayer { nonce } => { - self.loaded_accounts - .insert(*nonce.address(), nonce.account().clone()); - } - RollbackAccounts::SeparateNonceAndFeePayer { - nonce, - fee_payer_account, - } => { - self.loaded_accounts - .insert(*nonce.address(), nonce.account().clone()); - self.loaded_accounts - .insert(*fee_payer_address, fee_payer_account.clone()); - } + pub(crate) fn update_accounts_for_failed_tx(&mut self, rollback_accounts: &RollbackAccounts) { + for (account_address, account) in rollback_accounts { + self.loaded_accounts + .insert(*account_address, account.clone()); } } @@ -2776,14 +2755,6 @@ mod tests { #[test] fn test_account_loader_wrappers() { let fee_payer = Pubkey::new_unique(); - let message = Message { - account_keys: vec![fee_payer], - header: MessageHeader::default(), - instructions: vec![], - recent_blockhash: Hash::default(), - }; - let sanitized_message = new_unchecked_sanitized_message(message); - let mut fee_payer_account = 
AccountSharedData::default(); fee_payer_account.set_rent_epoch(u64::MAX); fee_payer_account.set_lamports(5000); @@ -2853,10 +2824,9 @@ mod tests { // drop the account and ensure all deliver the updated state fee_payer_account.set_lamports(0); - account_loader.update_accounts_for_failed_tx( - &sanitized_message, - &RollbackAccounts::FeePayerOnly { fee_payer_account }, - ); + account_loader.update_accounts_for_failed_tx(&RollbackAccounts::FeePayerOnly { + fee_payer: (fee_payer, fee_payer_account), + }); assert_eq!( account_loader.load_transaction_account(&fee_payer, false), diff --git a/svm/src/nonce_info.rs b/svm/src/nonce_info.rs index 1b053f22331c4f..6653e30587c82d 100644 --- a/svm/src/nonce_info.rs +++ b/svm/src/nonce_info.rs @@ -13,8 +13,8 @@ use {solana_account::AccountSharedData, solana_pubkey::Pubkey}; /// Holds limited nonce info available during transaction checks #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct NonceInfo { - address: Pubkey, - account: AccountSharedData, + pub address: Pubkey, + pub account: AccountSharedData, } #[derive(Error, Debug, PartialEq)] diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs index 850d0beb1c997d..2fb2ca3837de15 100644 --- a/svm/src/rollback_accounts.rs +++ b/svm/src/rollback_accounts.rs @@ -3,6 +3,7 @@ use { solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, solana_clock::Epoch, solana_pubkey::Pubkey, + solana_transaction_context::TransactionAccount, }; /// Captured account state used to rollback account state for nonce and fee @@ -10,14 +11,14 @@ use { #[derive(PartialEq, Eq, Debug, Clone)] pub enum RollbackAccounts { FeePayerOnly { - fee_payer_account: AccountSharedData, + fee_payer: TransactionAccount, }, SameNonceAndFeePayer { - nonce: NonceInfo, + nonce: TransactionAccount, }, SeparateNonceAndFeePayer { - nonce: NonceInfo, - fee_payer_account: AccountSharedData, + nonce: TransactionAccount, + fee_payer: TransactionAccount, }, } @@ -25,11 +26,41 @@ pub enum 
RollbackAccounts { impl Default for RollbackAccounts { fn default() -> Self { Self::FeePayerOnly { - fee_payer_account: AccountSharedData::default(), + fee_payer: TransactionAccount::default(), } } } +/// Rollback accounts iterator. +/// This struct is created by the `RollbackAccounts::iter`. +pub struct RollbackAccountsIter<'a> { + fee_payer: Option<&'a TransactionAccount>, + nonce: Option<&'a TransactionAccount>, +} + +impl<'a> Iterator for RollbackAccountsIter<'a> { + type Item = &'a TransactionAccount; + + fn next(&mut self) -> Option { + if let Some(fee_payer) = self.fee_payer.take() { + return Some(fee_payer); + } + if let Some(nonce) = self.nonce.take() { + return Some(nonce); + } + None + } +} + +impl<'a> IntoIterator for &'a RollbackAccounts { + type Item = &'a TransactionAccount; + type IntoIter = RollbackAccountsIter<'a>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + impl RollbackAccounts { pub(crate) fn new( nonce: Option, @@ -46,12 +77,12 @@ impl RollbackAccounts { fee_payer_account.set_data_from_slice(nonce.account().data()); RollbackAccounts::SameNonceAndFeePayer { - nonce: NonceInfo::new(fee_payer_address, fee_payer_account), + nonce: (fee_payer_address, fee_payer_account), } } else { RollbackAccounts::SeparateNonceAndFeePayer { - nonce, - fee_payer_account, + nonce: (nonce.address, nonce.account), + fee_payer: (fee_payer_address, fee_payer_account), } } } else { @@ -62,7 +93,9 @@ impl RollbackAccounts { // alter this behavior such that rent epoch updates are handled the // same for both nonce and non-nonce failed transactions. fee_payer_account.set_rent_epoch(fee_payer_loaded_rent_epoch); - RollbackAccounts::FeePayerOnly { fee_payer_account } + RollbackAccounts::FeePayerOnly { + fee_payer: (fee_payer_address, fee_payer_account), + } } } @@ -74,20 +107,32 @@ impl RollbackAccounts { } } + /// Iterator over accounts tracked for rollback. 
+ pub fn iter(&self) -> RollbackAccountsIter<'_> { + match self { + Self::FeePayerOnly { fee_payer } => RollbackAccountsIter { + fee_payer: Some(fee_payer), + nonce: None, + }, + Self::SameNonceAndFeePayer { nonce } => RollbackAccountsIter { + fee_payer: None, + nonce: Some(nonce), + }, + Self::SeparateNonceAndFeePayer { nonce, fee_payer } => RollbackAccountsIter { + fee_payer: Some(fee_payer), + nonce: Some(nonce), + }, + } + } + /// Size of accounts tracked for rollback, used when calculating the actual /// cost of transaction processing in the cost model. pub fn data_size(&self) -> usize { - match self { - Self::FeePayerOnly { fee_payer_account } => fee_payer_account.data().len(), - Self::SameNonceAndFeePayer { nonce } => nonce.account().data().len(), - Self::SeparateNonceAndFeePayer { - nonce, - fee_payer_account, - } => fee_payer_account - .data() - .len() - .saturating_add(nonce.account().data().len()), + let mut total_size: usize = 0; + for (_, account) in self.iter() { + total_size = total_size.saturating_add(account.data().len()); } + total_size } } @@ -124,10 +169,10 @@ mod tests { fee_payer_rent_epoch, ); - let expected_fee_payer_account = fee_payer_account; + let expected_fee_payer = (fee_payer_address, fee_payer_account); match rollback_accounts { - RollbackAccounts::FeePayerOnly { fee_payer_account } => { - assert_eq!(expected_fee_payer_account, fee_payer_account); + RollbackAccounts::FeePayerOnly { fee_payer } => { + assert_eq!(expected_fee_payer, fee_payer); } _ => panic!("Expected FeePayerOnly variant"), } @@ -163,13 +208,11 @@ mod tests { u64::MAX, // ignored ); - match rollback_accounts { - RollbackAccounts::SameNonceAndFeePayer { nonce } => { - assert_eq!(nonce.address(), &nonce_address); - assert_eq!(nonce.account(), &nonce_account); - } - _ => panic!("Expected SameNonceAndFeePayer variant"), - } + let expected_rollback_accounts = RollbackAccounts::SameNonceAndFeePayer { + nonce: (nonce_address, nonce_account), + }; + + 
assert_eq!(expected_rollback_accounts, rollback_accounts); } #[test] @@ -205,15 +248,12 @@ mod tests { u64::MAX, // ignored ); - let expected_fee_payer_account = fee_payer_account; + let expected_nonce = (nonce_address, nonce_account); + let expected_fee_payer = (fee_payer_address, fee_payer_account); match rollback_accounts { - RollbackAccounts::SeparateNonceAndFeePayer { - nonce, - fee_payer_account, - } => { - assert_eq!(nonce.address(), &nonce_address); - assert_eq!(nonce.account(), &nonce_account); - assert_eq!(expected_fee_payer_account, fee_payer_account); + RollbackAccounts::SeparateNonceAndFeePayer { nonce, fee_payer } => { + assert_eq!(expected_nonce, nonce); + assert_eq!(expected_fee_payer, fee_payer); } _ => panic!("Expected SeparateNonceAndFeePayer variant"), } diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 4257f9c76e9799..75c6661ec4a9f9 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -439,8 +439,7 @@ impl TransactionBatchProcessor { TransactionLoadResult::NotLoaded(err) => Err(err), TransactionLoadResult::FeesOnly(fees_only_tx) => { // Update loaded accounts cache with nonce and fee-payer - account_loader - .update_accounts_for_failed_tx(tx, &fees_only_tx.rollback_accounts); + account_loader.update_accounts_for_failed_tx(&fees_only_tx.rollback_accounts); Ok(ProcessedTransaction::FeesOnly(Box::new(fees_only_tx))) } diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 80768774e7624f..c22e3ab5bcd4bd 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -27,7 +27,6 @@ use { solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, nonce_info::NonceInfo, - rollback_accounts::RollbackAccounts, transaction_execution_result::TransactionExecutionDetails, transaction_processing_result::{ProcessedTransaction, TransactionProcessingResult}, transaction_processor::{ @@ -175,38 +174,12 @@ impl 
SvmTestEnvironment<'_> { } } Ok(ProcessedTransaction::FeesOnly(fees_only_transaction)) => { - let fee_payer = sanitized_transaction.fee_payer(); - - match fees_only_transaction.rollback_accounts.clone() { - RollbackAccounts::FeePayerOnly { fee_payer_account } => { - update_or_dealloc_account( - &mut final_accounts_actual, - *fee_payer, - fee_payer_account, - ); - } - RollbackAccounts::SameNonceAndFeePayer { nonce } => { - update_or_dealloc_account( - &mut final_accounts_actual, - *nonce.address(), - nonce.account().clone(), - ); - } - RollbackAccounts::SeparateNonceAndFeePayer { - nonce, - fee_payer_account, - } => { - update_or_dealloc_account( - &mut final_accounts_actual, - *fee_payer, - fee_payer_account, - ); - update_or_dealloc_account( - &mut final_accounts_actual, - *nonce.address(), - nonce.account().clone(), - ); - } + for (pubkey, account_data) in &fees_only_transaction.rollback_accounts { + update_or_dealloc_account( + &mut final_accounts_actual, + *pubkey, + account_data.clone(), + ); } } Err(_) => {} From 0b9bdfe6383bb3be4d8d5b28df1f81970fda32c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jun 2025 20:14:23 +0800 Subject: [PATCH 057/124] build(deps): bump num_enum from 0.7.3 to 0.7.4 (#6694) * build(deps): bump num_enum from 0.7.3 to 0.7.4 Bumps [num_enum](https://github.com/illicitonion/num_enum) from 0.7.3 to 0.7.4. - [Commits](https://github.com/illicitonion/num_enum/compare/0.7.3...0.7.4) --- updated-dependencies: - dependency-name: num_enum dependency-version: 0.7.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 9 +++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 9 +++++---- svm/examples/Cargo.lock | 9 +++++---- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e3c5707df204e..66e73e7f017b54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4628,18 +4628,19 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index a402737570731e..bf1f9b0ce6f8f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -314,7 +314,7 @@ num-bigint = "0.4.6" num-derive = "0.4" num-traits = "0.2" num_cpus = "1.17.0" -num_enum = "0.7.3" +num_enum = "0.7.4" openssl = "0.10" parking_lot = "0.12" pbkdf2 = { version = "0.11.0", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3deeaf8e41669a..553520d3417e31 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3747,18 +3747,19 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = 
"a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 307c786a4eb10a..2e3838da906cdd 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -3628,18 +3628,19 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", From 4cc94fc70e51d597876687ddc568a246ecddfaf1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jun 2025 20:15:05 +0800 Subject: [PATCH 058/124] build(deps): bump syn from 2.0.103 to 2.0.104 (#6693) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.103 to 2.0.104. 
- [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.103...2.0.104) --- updated-dependencies: - dependency-name: syn dependency-version: 2.0.104 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 92 +++++++++++++++++++++++++++--------------------------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66e73e7f017b54..6588125be1fe63 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -680,7 +680,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -963,7 +963,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1202,7 +1202,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1352,7 +1352,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1483,7 +1483,7 @@ checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1646,7 +1646,7 @@ checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1796,7 +1796,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2193,7 +2193,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2217,7 +2217,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 
2.0.103", + "syn 2.0.104", ] [[package]] @@ -2228,7 +2228,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2302,7 +2302,7 @@ checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2313,7 +2313,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2347,7 +2347,7 @@ dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "unicode-xid", ] @@ -2459,7 +2459,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2571,7 +2571,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2910,7 +2910,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3673,7 +3673,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -4572,7 +4572,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -4645,7 +4645,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -4740,7 +4740,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 
2.0.104", ] [[package]] @@ -5323,7 +5323,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6213,7 +6213,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6279,7 +6279,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6329,7 +6329,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8379,7 +8379,7 @@ checksum = "b83f88a126213cbcb57672c5e70ddb9791eff9b480e9f39fe9285fd2abca66fa" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -10246,7 +10246,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11979,7 +11979,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11991,7 +11991,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.9", - "syn 2.0.103", + "syn 2.0.104", "thiserror 1.0.69", ] @@ -12101,7 +12101,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.9", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12412,9 +12412,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.103" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -12456,7 +12456,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12616,7 +12616,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12628,7 +12628,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "test-case-core", ] @@ -12673,7 +12673,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12684,7 +12684,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12844,7 +12844,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13132,7 +13132,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13492,7 +13492,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -13526,7 +13526,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -14031,7 +14031,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "synstructure 0.13.1", ] @@ -14061,7 +14061,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -14072,7 +14072,7 @@ checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.103", + "syn 2.0.104", ] [[package]] @@ -14092,7 +14092,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "synstructure 0.13.1", ] @@ -14113,7 +14113,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -14135,7 +14135,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] From eb03185b9dcb4b16e71b231ead6d10648fa30c28 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Mon, 23 Jun 2025 08:16:37 -0500 Subject: [PATCH 059/124] replay-stage: track vote transactions by message hash (#6609) --- core/src/replay_stage.rs | 88 ++++++++++++++++++++++++---------------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 654c4f5ddb725e..f3009dad91a589 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -70,7 +70,6 @@ use { snapshot_controller::SnapshotController, vote_sender_types::ReplayVoteSender, }, - solana_signature::Signature, solana_signer::Signer, solana_time_utils::timestamp, solana_timings::ExecuteTimings, @@ -181,6 +180,11 @@ struct LastVoteRefreshTime { last_print_time: Instant, } +pub struct TrackedVoteTransaction { + message_hash: Hash, + transaction_blockhash: Hash, +} + #[derive(Default)] struct SkippedSlotsInfo { last_retransmit_slot: u64, @@ -664,7 +668,7 @@ impl ReplayStage { UnfrozenGossipVerifiedVoteHashes::default(); let mut latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks = LatestValidatorVotesForFrozenBanks::default(); - let mut voted_signatures = Vec::new(); + let mut tracked_vote_transactions: Vec = Vec::new(); let mut has_new_vote_been_rooted = !wait_for_vote_to_start_leader; let mut 
last_vote_refresh_time = LastVoteRefreshTime { last_refresh_time: Instant::now(), @@ -954,7 +958,7 @@ impl ReplayStage { &vote_account, &identity_keypair, &authorized_voter_keypairs.read().unwrap(), - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut last_vote_refresh_time, &voting_sender, @@ -1010,7 +1014,7 @@ impl ReplayStage { &mut duplicate_slots_tracker, &mut duplicate_confirmed_slots, &mut unfrozen_gossip_verified_vote_hashes, - &mut voted_signatures, + &mut tracked_vote_transactions, &mut has_new_vote_been_rooted, &mut replay_timing, &voting_sender, @@ -2405,7 +2409,7 @@ impl ReplayStage { duplicate_slots_tracker: &mut DuplicateSlotsTracker, duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, - vote_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: &mut bool, replay_timing: &mut ReplayLoopTiming, voting_sender: &Sender, @@ -2436,7 +2440,7 @@ impl ReplayStage { duplicate_confirmed_slots, unfrozen_gossip_verified_vote_hashes, has_new_vote_been_rooted, - vote_signatures, + tracked_vote_transactions, epoch_slots_frozen_slots, drop_bank_sender, )?; @@ -2477,7 +2481,7 @@ impl ReplayStage { authorized_voter_keypairs, tower, switch_fork_decision, - vote_signatures, + tracked_vote_transactions, *has_new_vote_been_rooted, replay_timing, voting_sender, @@ -2493,7 +2497,7 @@ impl ReplayStage { authorized_voter_keypairs: &[Arc], vote: VoteTransaction, switch_fork_decision: &SwitchForkDecision, - vote_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: bool, wait_to_vote_slot: Option, ) -> GenerateVoteTxResult { @@ -2577,12 +2581,17 @@ impl ReplayStage { vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash); if !has_new_vote_been_rooted { - vote_signatures.push(vote_tx.signatures[0]); - if vote_signatures.len() > MAX_VOTE_SIGNATURES { - vote_signatures.remove(0); + 
let message_hash = vote_tx.message.hash(); + let recent_blockhash = vote_tx.message.recent_blockhash; + tracked_vote_transactions.push(TrackedVoteTransaction { + message_hash, + transaction_blockhash: recent_blockhash, + }); + if tracked_vote_transactions.len() > MAX_VOTE_SIGNATURES { + tracked_vote_transactions.remove(0); } } else { - vote_signatures.clear(); + tracked_vote_transactions.clear(); } GenerateVoteTxResult::Tx(vote_tx) @@ -2610,7 +2619,7 @@ impl ReplayStage { vote_account_pubkey: &Pubkey, identity_keypair: &Keypair, authorized_voter_keypairs: &[Arc], - vote_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: bool, last_vote_refresh_time: &mut LastVoteRefreshTime, voting_sender: &Sender, @@ -2703,7 +2712,7 @@ impl ReplayStage { vote_account_pubkey, identity_keypair, authorized_voter_keypairs, - vote_signatures, + tracked_vote_transactions, has_new_vote_been_rooted, last_vote_refresh_time, voting_sender, @@ -2719,7 +2728,7 @@ impl ReplayStage { vote_account_pubkey: &Pubkey, identity_keypair: &Keypair, authorized_voter_keypairs: &[Arc], - vote_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: bool, last_vote_refresh_time: &mut LastVoteRefreshTime, voting_sender: &Sender, @@ -2735,7 +2744,7 @@ impl ReplayStage { authorized_voter_keypairs, tower.last_vote(), &SwitchForkDecision::SameFork, - vote_signatures, + tracked_vote_transactions, has_new_vote_been_rooted, wait_to_vote_slot, ); @@ -2779,7 +2788,7 @@ impl ReplayStage { authorized_voter_keypairs: &[Arc], tower: &mut Tower, switch_fork_decision: &SwitchForkDecision, - vote_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: bool, replay_timing: &mut ReplayLoopTiming, voting_sender: &Sender, @@ -2793,7 +2802,7 @@ impl ReplayStage { authorized_voter_keypairs, tower.last_vote(), switch_fork_decision, - vote_signatures, + tracked_vote_transactions, has_new_vote_been_rooted, wait_to_vote_slot, ); @@ 
-4015,7 +4024,7 @@ impl ReplayStage { duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, has_new_vote_been_rooted: &mut bool, - voted_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, drop_bank_sender: &Sender>, ) -> Result<(), SetRootError> { @@ -4064,7 +4073,7 @@ impl ReplayStage { duplicate_confirmed_slots, unfrozen_gossip_verified_vote_hashes, has_new_vote_been_rooted, - voted_signatures, + tracked_vote_transactions, epoch_slots_frozen_slots, drop_bank_sender, )?; @@ -4099,7 +4108,7 @@ impl ReplayStage { duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, has_new_vote_been_rooted: &mut bool, - voted_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, drop_bank_sender: &Sender>, ) -> Result<(), SetRootError> { @@ -4119,14 +4128,21 @@ impl ReplayStage { let r_bank_forks = bank_forks.read().unwrap(); let new_root_bank = &r_bank_forks[new_root]; if !*has_new_vote_been_rooted { - for signature in voted_signatures.iter() { - if new_root_bank.get_signature_status(signature).is_some() { + for TrackedVoteTransaction { + message_hash, + transaction_blockhash, + } in tracked_vote_transactions.iter() + { + if new_root_bank + .get_committed_transaction_status_and_slot(message_hash, transaction_blockhash) + .is_some() + { *has_new_vote_been_rooted = true; break; } } if *has_new_vote_been_rooted { - std::mem::take(voted_signatures); + std::mem::take(tracked_vote_transactions); } } progress.handle_new_root(&r_bank_forks); @@ -7593,7 +7609,7 @@ pub(crate) mod tests { last_print_time: Instant::now(), }; let has_new_vote_been_rooted = false; - let mut voted_signatures = vec![]; + let mut tracked_vote_transactions = vec![]; let identity_keypair = cluster_info.keypair().clone(); let 
my_vote_keypair = vec![Arc::new( @@ -7644,7 +7660,7 @@ pub(crate) mod tests { &my_vote_keypair, &mut tower, &SwitchForkDecision::SameFork, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut ReplayLoopTiming::default(), &voting_sender, @@ -7721,7 +7737,7 @@ pub(crate) mod tests { &my_vote_pubkey, &identity_keypair, &my_vote_keypair, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut last_vote_refresh_time, &voting_sender, @@ -7749,7 +7765,7 @@ pub(crate) mod tests { &my_vote_keypair, &mut tower, &SwitchForkDecision::SameFork, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut ReplayLoopTiming::default(), &voting_sender, @@ -7810,7 +7826,7 @@ pub(crate) mod tests { &my_vote_pubkey, &identity_keypair, &my_vote_keypair, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut last_vote_refresh_time, &voting_sender, @@ -7878,7 +7894,7 @@ pub(crate) mod tests { &my_vote_pubkey, &identity_keypair, &my_vote_keypair, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut last_vote_refresh_time, &voting_sender, @@ -7972,7 +7988,7 @@ pub(crate) mod tests { &my_vote_pubkey, &identity_keypair, &my_vote_keypair, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &mut last_vote_refresh_time, &voting_sender, @@ -7999,7 +8015,7 @@ pub(crate) mod tests { my_vote_keypair: &[Arc], tower: &mut Tower, identity_keypair: &Keypair, - voted_signatures: &mut Vec, + tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: bool, voting_sender: &Sender, voting_receiver: &Receiver, @@ -8020,7 +8036,7 @@ pub(crate) mod tests { my_vote_keypair, tower, &SwitchForkDecision::SameFork, - voted_signatures, + tracked_vote_transactions, has_new_vote_been_rooted, &mut ReplayLoopTiming::default(), voting_sender, @@ -8108,7 +8124,7 @@ pub(crate) mod tests { } = vote_simulator; let 
has_new_vote_been_rooted = false; - let mut voted_signatures = vec![]; + let mut tracked_vote_transactions = vec![]; let identity_keypair = cluster_info.keypair().clone(); let my_vote_keypair = vec![Arc::new( @@ -8150,7 +8166,7 @@ pub(crate) mod tests { &my_vote_keypair, &mut tower, &identity_keypair, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &voting_sender, &voting_receiver, @@ -8168,7 +8184,7 @@ pub(crate) mod tests { &my_vote_keypair, &mut tower, &identity_keypair, - &mut voted_signatures, + &mut tracked_vote_transactions, has_new_vote_been_rooted, &voting_sender, &voting_receiver, From 8ec80424526465c8181a1263282bc05682dc92d1 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Mon, 23 Jun 2025 08:16:55 -0500 Subject: [PATCH 060/124] bank-bench: lookup by message hash (#6605) --- runtime/benches/bank.rs | 51 +++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index b87617f634e170..226b9fc8003f68 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -8,6 +8,7 @@ use { solana_client_traits::{AsyncClient, SyncClient}, solana_clock::MAX_RECENT_BLOCKHASHES, solana_genesis_config::create_genesis_config, + solana_hash::Hash, solana_keypair::Keypair, solana_message::Message, solana_program_runtime::declare_process_instruction, @@ -37,7 +38,7 @@ const NOOP_PROGRAM_ID: [u8; 32] = [ pub fn create_builtin_transactions( bank_client: &BankClient, mint_keypair: &Keypair, -) -> Vec { +) -> Vec<(Transaction, Hash)> { let program_id = Pubkey::from(BUILTIN_PROGRAM_ID); (0..4096) @@ -51,7 +52,9 @@ pub fn create_builtin_transactions( let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); let blockhash = bank_client.get_latest_blockhash().unwrap(); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); - Transaction::new(&[&rando0], message, blockhash) + let message_hash = 
message.hash(); + let tx = Transaction::new(&[&rando0], message, blockhash); + (tx, message_hash) }) .collect() } @@ -59,7 +62,7 @@ pub fn create_builtin_transactions( pub fn create_native_loader_transactions( bank_client: &BankClient, mint_keypair: &Keypair, -) -> Vec { +) -> Vec<(Transaction, Hash)> { let program_id = Pubkey::from(NOOP_PROGRAM_ID); (0..4096) @@ -73,48 +76,42 @@ pub fn create_native_loader_transactions( let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); let blockhash = bank_client.get_latest_blockhash().unwrap(); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); - Transaction::new(&[&rando0], message, blockhash) + let message_hash = message.hash(); + let tx = Transaction::new(&[&rando0], message, blockhash); + (tx, message_hash) }) .collect() } -fn sync_bencher(bank: &Bank, _bank_client: &BankClient, transactions: &[Transaction]) { - let results = bank.process_transactions(transactions.iter()); +fn sync_bencher(bank: &Bank, _bank_client: &BankClient, transactions: &[(Transaction, Hash)]) { + let results = bank.process_transactions(transactions.iter().map(|(tx, _)| tx)); assert!(results.iter().all(Result::is_ok)); } -fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[Transaction]) { - for transaction in transactions.iter().cloned() { +fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[(Transaction, Hash)]) { + for (transaction, _hash) in transactions.iter().cloned() { bank_client.async_send_transaction(transaction).unwrap(); } + let (last_transaction, last_tx_hash) = transactions.last().unwrap(); for _ in 0..1_000_000_000_u64 { - if bank - .get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) - .is_some() - { + if let Some((_slot, status)) = bank.get_committed_transaction_status_and_slot( + last_tx_hash, + &last_transaction.message.recent_blockhash, + ) { + if !status { + panic!("transaction failed"); + } break; } 
sleep(Duration::from_nanos(1)); } - if bank - .get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) - .unwrap() - .is_err() - { - error!( - "transaction failed: {:?}", - bank.get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) - .unwrap() - ); - panic!(); - } } #[allow(clippy::type_complexity)] fn do_bench_transactions( bencher: &mut Bencher, - bench_work: &dyn Fn(&Bank, &BankClient, &[Transaction]), - create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec, + bench_work: &dyn Fn(&Bank, &BankClient, &[(Transaction, Hash)]), + create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec<(Transaction, Hash)>, ) { solana_logger::setup(); let ns_per_s = 1_000_000_000; @@ -138,7 +135,7 @@ fn do_bench_transactions( let transactions = create_transactions(&bank_client, &mint_keypair); // Do once to fund accounts, load modules, etc... - let results = bank.process_transactions(transactions.iter()); + let results = bank.process_transactions(transactions.iter().map(|(tx, _)| tx)); assert!(results.iter().all(Result::is_ok)); bencher.iter(|| { From 6ae32e4e45a53b4ea384aaa77b20b02047c5fb26 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 23 Jun 2025 08:56:23 -0500 Subject: [PATCH 061/124] Refactor `NodeConfig` to improve clarity (#6553) make nodeconfig more clear --- gossip/src/cluster_info.rs | 38 +++++++++++++++++---------- validator/src/commands/run/execute.rs | 4 +-- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 26802ebaf9c444..76407dac969b27 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2332,8 +2332,12 @@ pub struct Sockets { } pub struct NodeConfig { - pub gossip_addr: SocketAddr, + /// The IP address advertised to the cluster in gossip + pub advertised_ip: IpAddr, + /// The gossip port advertised to the cluster + pub gossip_port: u16, pub port_range: PortRange, + /// The IP address the node binds to pub 
bind_ip_addr: IpAddr, pub public_tpu_addr: Option, pub public_tpu_forwards_addr: Option, @@ -2660,7 +2664,8 @@ impl Node { pub fn new_with_external_ip(pubkey: &Pubkey, config: NodeConfig) -> Node { let NodeConfig { - gossip_addr, + advertised_ip, + gossip_port, port_range, bind_ip_addr, public_tpu_addr, @@ -2670,6 +2675,7 @@ impl Node { num_quic_endpoints, } = config; + let gossip_addr = SocketAddr::new(advertised_ip, gossip_port); let (gossip_port, (gossip, ip_echo)) = bind_gossip_port_in_range(&gossip_addr, port_range, bind_ip_addr); @@ -2771,22 +2777,24 @@ impl Node { timestamp(), // wallclock 0u16, // shred_version ); - let addr = gossip_addr.ip(); use contact_info::Protocol::{QUIC, UDP}; - info.set_gossip((addr, gossip_port)).unwrap(); - info.set_tvu(UDP, (addr, tvu_port)).unwrap(); - info.set_tvu(QUIC, (addr, tvu_quic_port)).unwrap(); - info.set_tpu(public_tpu_addr.unwrap_or_else(|| SocketAddr::new(addr, tpu_port))) + info.set_gossip((advertised_ip, gossip_port)).unwrap(); + info.set_tvu(UDP, (advertised_ip, tvu_port)).unwrap(); + info.set_tvu(QUIC, (advertised_ip, tvu_quic_port)).unwrap(); + info.set_tpu(public_tpu_addr.unwrap_or_else(|| SocketAddr::new(advertised_ip, tpu_port))) .unwrap(); info.set_tpu_forwards( - public_tpu_forwards_addr.unwrap_or_else(|| SocketAddr::new(addr, tpu_forwards_port)), + public_tpu_forwards_addr + .unwrap_or_else(|| SocketAddr::new(advertised_ip, tpu_forwards_port)), ) .unwrap(); - info.set_tpu_vote(UDP, (addr, tpu_vote_port)).unwrap(); - info.set_tpu_vote(QUIC, (addr, tpu_vote_quic_port)).unwrap(); - info.set_serve_repair(UDP, (addr, serve_repair_port)) + info.set_tpu_vote(UDP, (advertised_ip, tpu_vote_port)) + .unwrap(); + info.set_tpu_vote(QUIC, (advertised_ip, tpu_vote_quic_port)) + .unwrap(); + info.set_serve_repair(UDP, (advertised_ip, serve_repair_port)) .unwrap(); - info.set_serve_repair(QUIC, (addr, serve_repair_quic_port)) + info.set_serve_repair(QUIC, (advertised_ip, serve_repair_quic_port)) .unwrap(); trace!("new 
ContactInfo: {:?}", info); @@ -3283,7 +3291,8 @@ mod tests { let ip = Ipv4Addr::LOCALHOST; let port_range = localhost_port_range_for_tests(); let config = NodeConfig { - gossip_addr: socketaddr!(ip, 0), + advertised_ip: IpAddr::V4(ip), + gossip_port: 0, port_range, bind_ip_addr: IpAddr::V4(ip), public_tpu_addr: None, @@ -3306,7 +3315,8 @@ mod tests { let ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let port = port_range.0; let config = NodeConfig { - gossip_addr: socketaddr!(Ipv4Addr::LOCALHOST, port), + advertised_ip: ip, + gossip_port: port, port_range, bind_ip_addr: ip, public_tpu_addr: None, diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 77ee068c057945..1920db86ac6eca 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -1105,7 +1105,6 @@ pub fn execute( solana_net_utils::find_available_port_in_range(bind_address, (0, 1)) .map_err(|err| format!("unable to find an available gossip port: {err}")) })?; - let gossip_addr = SocketAddr::new(advertised_ip, gossip_port); let public_tpu_addr = matches .value_of("public_tpu_addr") @@ -1141,7 +1140,8 @@ pub fn execute( let max_streams_per_ms = value_t_or_exit!(matches, "tpu_max_streams_per_ms", u64); let node_config = NodeConfig { - gossip_addr, + advertised_ip, + gossip_port, port_range: dynamic_port_range, bind_ip_addr: bind_address, public_tpu_addr, From 0e97629536b3f47856860e5fc7cb1b2ad622359d Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Mon, 23 Jun 2025 09:27:12 -0500 Subject: [PATCH 062/124] Run rustfmt on all files (#6688) --- core/src/banking_stage/decision_maker.rs | 4 +-- core/src/banking_stage/packet_deserializer.rs | 18 +++++------ .../scheduler_controller.rs | 32 +++++++++++-------- .../scheduler_metrics.rs | 12 ++++--- .../tests/crates/package-metadata/src/lib.rs | 4 +-- .../crates/workspace-metadata/src/lib.rs | 4 +-- .../example-programs/clock-sysvar/src/lib.rs | 10 +++--- 
.../example-programs/hello-solana/src/lib.rs | 7 ++-- .../simple-transfer/src/lib.rs | 9 +++--- .../transfer-from-account/src/lib.rs | 9 +++--- .../write-to-account/src/lib.rs | 6 ++-- 11 files changed, 57 insertions(+), 58 deletions(-) diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index 7a88fbb44399ff..19e0a674a848cf 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -1,9 +1,9 @@ use { - solana_poh::poh_recorder::{BankStart, PohRecorder}, solana_clock::{ DEFAULT_TICKS_PER_SLOT, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, HOLD_TRANSACTIONS_SLOT_OFFSET, }, + solana_poh::poh_recorder::{BankStart, PohRecorder}, solana_pubkey::Pubkey, solana_unified_scheduler_pool::{BankingStageMonitor, BankingStageStatus}, std::{ @@ -154,10 +154,10 @@ mod tests { use { super::*, core::panic, + solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS, solana_ledger::{blockstore::Blockstore, genesis_utils::create_genesis_config}, solana_poh::poh_recorder::create_test_recorder, solana_runtime::bank::Bank, - solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS, std::{ env::temp_dir, sync::{atomic::Ordering, Arc}, diff --git a/core/src/banking_stage/packet_deserializer.rs b/core/src/banking_stage/packet_deserializer.rs index 716abffeb31e33..b843a28065d34e 100644 --- a/core/src/banking_stage/packet_deserializer.rs +++ b/core/src/banking_stage/packet_deserializer.rs @@ -8,7 +8,10 @@ use { agave_banking_stage_ingress_types::{BankingPacketBatch, BankingPacketReceiver}, crossbeam_channel::RecvTimeoutError, solana_perf::packet::PacketBatch, - std::{num::Saturating, time::{Duration, Instant}}, + std::{ + num::Saturating, + time::{Duration, Instant}, + }, }; /// Results from deserializing packet batches. 
@@ -125,7 +128,8 @@ impl PacketDeserializer { }) .collect(); let Saturating(errors) = errors; - packet_stats.passed_sigverify_count += errors.saturating_add(deserialized_packets.len()) as u64; + packet_stats.passed_sigverify_count += + errors.saturating_add(deserialized_packets.len()) as u64; packet_stats.failed_sigverify_count += packet_count .saturating_sub(deserialized_packets.len()) .saturating_sub(errors) as u64; @@ -184,13 +188,9 @@ impl PacketDeserializer { #[cfg(test)] mod tests { use { - super::*, - solana_perf::packet::to_packet_batches, - solana_hash::Hash, - solana_pubkey::Pubkey, - solana_keypair::Keypair, - solana_system_transaction as system_transaction, - solana_transaction::Transaction, + super::*, solana_hash::Hash, solana_keypair::Keypair, + solana_perf::packet::to_packet_batches, solana_pubkey::Pubkey, + solana_system_transaction as system_transaction, solana_transaction::Transaction, }; fn random_transfer() -> Transaction { diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index dd66d7ffeb4b0a..443db68ae8ebfe 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -18,11 +18,14 @@ use { transaction_scheduler::transaction_state_container::StateContainer, TOTAL_BUFFERED_PACKETS, }, + solana_clock::MAX_PROCESSING_AGE, solana_measure::measure_us, solana_runtime::{bank::Bank, bank_forks::BankForks}, - solana_clock::MAX_PROCESSING_AGE, solana_svm::transaction_error_metrics::TransactionErrorMetrics, - std::{num::Saturating, sync::{Arc, RwLock}}, + std::{ + num::Saturating, + sync::{Arc, RwLock}, + }, }; /// Controls packet and transaction flow into scheduler, and scheduling execution. 
@@ -152,14 +155,15 @@ where self.count_metrics.update(|count_metrics| { count_metrics.num_scheduled += scheduling_summary.num_scheduled; - count_metrics.num_unschedulable_conflicts += scheduling_summary.num_unschedulable_conflicts; - count_metrics.num_unschedulable_threads += scheduling_summary.num_unschedulable_threads; + count_metrics.num_unschedulable_conflicts += + scheduling_summary.num_unschedulable_conflicts; + count_metrics.num_unschedulable_threads += + scheduling_summary.num_unschedulable_threads; count_metrics.num_schedule_filtered_out += scheduling_summary.num_filtered_out; }); self.timing_metrics.update(|timing_metrics| { - timing_metrics.schedule_filter_time_us += - scheduling_summary.filter_time_us; + timing_metrics.schedule_filter_time_us += scheduling_summary.filter_time_us; timing_metrics.schedule_time_us += schedule_time_us; }); self.scheduling_details.update(&scheduling_summary); @@ -233,7 +237,7 @@ where while transaction_ids.len() < MAX_TRANSACTION_CHECKS { let Some(id) = self.container.pop() else { - break + break; }; transaction_ids.push(id); } @@ -332,21 +336,21 @@ mod tests { agave_banking_stage_ingress_types::{BankingPacketBatch, BankingPacketReceiver}, crossbeam_channel::{unbounded, Receiver, Sender}, itertools::Itertools, + solana_compute_budget_interface::ComputeBudgetInstruction, + solana_fee_calculator::FeeRateGovernor, + solana_hash::Hash, + solana_keypair::Keypair, solana_ledger::{ blockstore::Blockstore, genesis_utils::GenesisConfigInfo, get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, + solana_message::Message, solana_perf::packet::{to_packet_batches, PacketBatch, NUM_PACKETS}, solana_poh::poh_recorder::PohRecorder, - solana_runtime::bank::Bank, - solana_runtime_transaction::transaction_meta::StaticMeta, - solana_compute_budget_interface::ComputeBudgetInstruction, - solana_fee_calculator::FeeRateGovernor, - solana_hash::Hash, - solana_message::Message, solana_poh_config::PohConfig, 
solana_pubkey::Pubkey, - solana_keypair::Keypair, + solana_runtime::bank::Bank, + solana_runtime_transaction::transaction_meta::StaticMeta, solana_signer::Signer, solana_system_interface::instruction as system_instruction, solana_transaction::Transaction, diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index efcbf2919a6087..7b7e664155a624 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -1,10 +1,13 @@ use { super::scheduler::SchedulingSummary, itertools::MinMaxResult, - solana_poh::poh_recorder::BankStart, solana_clock::Slot, + solana_poh::poh_recorder::BankStart, solana_time_utils::AtomicInterval, - std::{num::Saturating, time::{Duration, Instant}}, + std::{ + num::Saturating, + time::{Duration, Instant}, + }, }; #[derive(Default)] @@ -124,9 +127,8 @@ impl SchedulerCountMetricsInner { num_dropped_on_receive: Saturating(num_dropped_on_receive), num_dropped_on_sanitization: Saturating(num_dropped_on_sanitization), num_dropped_on_validate_locks: Saturating(num_dropped_on_validate_locks), - num_dropped_on_receive_transaction_checks: Saturating( - num_dropped_on_receive_transaction_checks, - ), + num_dropped_on_receive_transaction_checks: + Saturating(num_dropped_on_receive_transaction_checks), num_dropped_on_clear: Saturating(num_dropped_on_clear), num_dropped_on_age_and_status: Saturating(num_dropped_on_age_and_status), num_dropped_on_capacity: Saturating(num_dropped_on_capacity), diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs index 688545c3002954..45b392c031aae2 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs @@ -1,9 +1,7 @@ 
//! Example Rust-based SBF noop program use { - solana_account_info::AccountInfo, - solana_program_error::ProgramResult, - solana_pubkey::Pubkey + solana_account_info::AccountInfo, solana_program_error::ProgramResult, solana_pubkey::Pubkey, }; solana_package_metadata::declare_id_with_package_metadata!("solana.program-id"); diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs index 378527daac51dd..341cbbf37c9902 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs @@ -1,9 +1,7 @@ //! Example Rust-based SBF noop program use { - solana_account_info::AccountInfo, - solana_program_error::ProgramResult, - solana_pubkey::Pubkey + solana_account_info::AccountInfo, solana_program_error::ProgramResult, solana_pubkey::Pubkey, }; solana_program_entrypoint::entrypoint!(process_instruction); diff --git a/svm/tests/example-programs/clock-sysvar/src/lib.rs b/svm/tests/example-programs/clock-sysvar/src/lib.rs index 16c161c3990a5d..b35d142be15986 100644 --- a/svm/tests/example-programs/clock-sysvar/src/lib.rs +++ b/svm/tests/example-programs/clock-sysvar/src/lib.rs @@ -1,7 +1,10 @@ use { - solana_account_info::AccountInfo, solana_program_entrypoint::entrypoint, - solana_program_error::ProgramResult, solana_pubkey::Pubkey, - solana_sysvar::{clock::Clock, Sysvar}, solana_program::program::set_return_data + solana_account_info::AccountInfo, + solana_program::program::set_return_data, + solana_program_entrypoint::entrypoint, + solana_program_error::ProgramResult, + solana_pubkey::Pubkey, + solana_sysvar::{clock::Clock, Sysvar}, }; entrypoint!(process_instruction); @@ -11,7 +14,6 @@ fn process_instruction( _accounts: &[AccountInfo], _instruction_data: &[u8], ) -> ProgramResult { - let time_now = Clock::get().unwrap().unix_timestamp; let return_data = 
time_now.to_be_bytes(); set_return_data(&return_data); diff --git a/svm/tests/example-programs/hello-solana/src/lib.rs b/svm/tests/example-programs/hello-solana/src/lib.rs index e20aa92e545eee..3f6799c27e72b5 100644 --- a/svm/tests/example-programs/hello-solana/src/lib.rs +++ b/svm/tests/example-programs/hello-solana/src/lib.rs @@ -1,9 +1,6 @@ use { - solana_account_info::AccountInfo, - solana_program_entrypoint::entrypoint, - solana_program_error::ProgramResult, - solana_msg::msg, - solana_pubkey::Pubkey, + solana_account_info::AccountInfo, solana_msg::msg, solana_program_entrypoint::entrypoint, + solana_program_error::ProgramResult, solana_pubkey::Pubkey, }; entrypoint!(process_instruction); diff --git a/svm/tests/example-programs/simple-transfer/src/lib.rs b/svm/tests/example-programs/simple-transfer/src/lib.rs index e9f624c7f2a3f5..1e922c2f2edb39 100644 --- a/svm/tests/example-programs/simple-transfer/src/lib.rs +++ b/svm/tests/example-programs/simple-transfer/src/lib.rs @@ -1,19 +1,18 @@ use { - solana_account_info::{AccountInfo, next_account_info}, + solana_account_info::{next_account_info, AccountInfo}, + solana_program::program::invoke, solana_program_entrypoint::entrypoint, solana_program_error::ProgramResult, solana_pubkey::Pubkey, - solana_program::program::invoke, solana_system_interface::instruction as system_instruction, }; entrypoint!(process_instruction); - fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], - data: &[u8] + data: &[u8], ) -> ProgramResult { let amount = u64::from_be_bytes(data[0..8].try_into().unwrap()); let accounts_iter = &mut accounts.iter(); @@ -27,4 +26,4 @@ fn process_instruction( )?; Ok(()) -} \ No newline at end of file +} diff --git a/svm/tests/example-programs/transfer-from-account/src/lib.rs b/svm/tests/example-programs/transfer-from-account/src/lib.rs index 4cf5eb99494987..4460873f55e4c5 100644 --- a/svm/tests/example-programs/transfer-from-account/src/lib.rs +++ 
b/svm/tests/example-programs/transfer-from-account/src/lib.rs @@ -1,19 +1,18 @@ use { - solana_account_info::{AccountInfo, next_account_info}, + solana_account_info::{next_account_info, AccountInfo}, + solana_program::program::invoke, solana_program_entrypoint::entrypoint, solana_program_error::ProgramResult, solana_pubkey::Pubkey, - solana_program::program::invoke, solana_system_interface::instruction as system_instruction, }; entrypoint!(process_instruction); - fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], - _data: &[u8] + _data: &[u8], ) -> ProgramResult { let accounts_iter = &mut accounts.iter(); let payer = next_account_info(accounts_iter)?; @@ -29,4 +28,4 @@ fn process_instruction( )?; Ok(()) -} \ No newline at end of file +} diff --git a/svm/tests/example-programs/write-to-account/src/lib.rs b/svm/tests/example-programs/write-to-account/src/lib.rs index bcaa7a45411a4c..0fde9fac494045 100644 --- a/svm/tests/example-programs/write-to-account/src/lib.rs +++ b/svm/tests/example-programs/write-to-account/src/lib.rs @@ -1,10 +1,10 @@ use { solana_account_info::{next_account_info, AccountInfo}, + solana_msg::msg, solana_program_entrypoint::entrypoint, - solana_program_error::ProgramResult, - solana_sdk_ids::incinerator, solana_msg::msg, - solana_program_error::ProgramError, + solana_program_error::{ProgramError, ProgramResult}, solana_pubkey::Pubkey, + solana_sdk_ids::incinerator, }; entrypoint!(process_instruction); From 021237abab50412fdbc17f699466843d14ef1312 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Mon, 23 Jun 2025 18:31:38 +0300 Subject: [PATCH 063/124] Reduce CLUSTER_NODES_CACHE_NUM_EPOCH_CAP to something reasonable (#6696) --- turbine/src/sigverify_shreds.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/turbine/src/sigverify_shreds.rs b/turbine/src/sigverify_shreds.rs index dc08c13a4434f5..69a6688f858de2 100644 --- a/turbine/src/sigverify_shreds.rs +++ b/turbine/src/sigverify_shreds.rs @@ 
-17,13 +17,9 @@ use { }, solana_perf::{self, deduper::Deduper, packet::PacketBatch, recycler_cache::RecyclerCache}, solana_pubkey::Pubkey, - solana_runtime::{ - bank::{Bank, MAX_LEADER_SCHEDULE_STAKES}, - bank_forks::BankForks, - }, + solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_signer::Signer, solana_streamer::{evicting_sender::EvictingSender, streamer::ChannelSend}, - static_assertions::const_assert_eq, std::{ collections::HashMap, num::NonZeroUsize, @@ -45,10 +41,8 @@ const DEDUPER_RESET_CYCLE: Duration = Duration::from_secs(5 * 60); // Num epochs capacity should be at least 2 because near the epoch boundary we // may receive shreds from the other side of the epoch boundary. Because of the -// TTL based eviction it does not make sense to cache more than -// MAX_LEADER_SCHEDULE_STAKES epochs. -const_assert_eq!(CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, 5); -const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = MAX_LEADER_SCHEDULE_STAKES as usize; +// TTL based eviction it is extremely unlikely that we will ever store > 2 epochs anyway +const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 2; // Because for ClusterNodes::get_retransmit_parent only pubkeys of staked nodes // are needed, we can use longer durations for cache TTL. 
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(30); From a82a860d20fbb5059eb3fa843f2af7276a272fce Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 23 Jun 2025 13:24:58 -0400 Subject: [PATCH 064/124] Removes per-bucket in-mem accounts index stats (#6697) --- accounts-db/src/accounts_db.rs | 25 ------------ .../accounts_index/in_mem_accounts_index.rs | 20 ++++------ accounts-db/src/bucket_map_holder_stats.rs | 40 +++---------------- 3 files changed, 14 insertions(+), 71 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b3401691d1d924..0eb74124004131 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -606,9 +606,6 @@ struct GenerateIndexTimings { pub index_time: u64, pub scan_time: u64, pub insertion_time_us: u64, - pub min_bin_size_in_mem: usize, - pub max_bin_size_in_mem: usize, - pub total_items_in_mem: usize, pub storage_size_storages_us: u64, pub index_flush_us: u64, pub rent_paying: AtomicUsize, @@ -645,8 +642,6 @@ impl GenerateIndexTimings { ("total_us", self.index_time, i64), ("scan_stores_us", self.scan_time, i64), ("insertion_time_us", self.insertion_time_us, i64), - ("min_bin_size_in_mem", self.min_bin_size_in_mem, i64), - ("max_bin_size_in_mem", self.max_bin_size_in_mem, i64), ( "storage_size_storages_us", self.storage_size_storages_us, @@ -668,7 +663,6 @@ impl GenerateIndexTimings { self.total_including_duplicates, i64 ), - ("total_items_in_mem", self.total_items_in_mem, i64), ( "accounts_data_len_dedup_time_us", self.accounts_data_len_dedup_time_us, @@ -8163,9 +8157,6 @@ impl AccountsDb { let mut index_flush_us = 0; let total_duplicate_slot_keys = AtomicU64::default(); let mut populate_duplicate_keys_us = 0; - let mut total_items_in_mem = 0; - let mut min_bin_size_in_mem = 0; - let mut max_bin_size_in_mem = 0; let total_num_unique_duplicate_keys = AtomicU64::default(); // outer vec is accounts index bin (determined by pubkey value) @@ -8204,19 +8195,6 @@ impl 
AccountsDb { }); }) .1; - - (total_items_in_mem, min_bin_size_in_mem, max_bin_size_in_mem) = self - .accounts_index - .account_maps - .iter() - .map(|map_bin| map_bin.len_for_stats()) - .fold((0, usize::MAX, usize::MIN), |acc, len| { - ( - acc.0 + len, - std::cmp::min(acc.1, len), - std::cmp::max(acc.2, len), - ) - }); } let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap(); @@ -8225,9 +8203,6 @@ impl AccountsDb { scan_time, index_time: index_time.as_us(), insertion_time_us: insertion_time_us.load(Ordering::Relaxed), - min_bin_size_in_mem, - max_bin_size_in_mem, - total_items_in_mem, rent_paying, amount_to_top_off_rent, total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed), diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index a6717710a32552..6694fe29313d47 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -100,7 +100,7 @@ pub struct InMemAccountsIndex + Into< // backing store map_internal: RwLock>, ahash::RandomState>>, storage: Arc>, - bin: usize, + _bin: usize, pub(crate) lowest_pubkey: Pubkey, pub(crate) highest_pubkey: Pubkey, @@ -185,7 +185,7 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex usize { - self.stats().count_in_bucket(self.bin) - } - /// Queue up these insertions for when the flush thread is dealing with this bin. /// This is very fast and requires no lookups or disk access. 
pub fn startup_insert_only(&self, items: impl Iterator) { @@ -836,7 +832,7 @@ impl + Into> InMemAccountsIndex { // not in cache, look on disk let disk_entry = self.load_account_entry_from_disk(vacant.key()); - self.stats().inc_mem_count(self.bin); + self.stats().inc_mem_count(); if let Some(disk_entry) = disk_entry { let (slot, account_info) = new_entry.into(); InMemAccountsIndex::::lock_and_update_slot_list( @@ -1024,7 +1020,7 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex, pub flush_entries_updated_on_disk: AtomicU64, pub flush_entries_evicted_from_mem: AtomicU64, pub active_threads: AtomicU64, @@ -69,7 +68,6 @@ impl BucketMapHolderStats { pub fn new(bins: usize) -> BucketMapHolderStats { BucketMapHolderStats { bins: bins as u64, - per_bucket_count: (0..bins).map(|_| AtomicUsize::default()).collect(), ..BucketMapHolderStats::default() } } @@ -88,24 +86,20 @@ impl BucketMapHolderStats { self.count.fetch_sub(1, Ordering::Relaxed); } - pub fn inc_mem_count(&self, bin: usize) { - self.add_mem_count(bin, 1); + pub fn inc_mem_count(&self) { + self.add_mem_count(1); } - pub fn dec_mem_count(&self, bin: usize) { - self.sub_mem_count(bin, 1); + pub fn dec_mem_count(&self) { + self.sub_mem_count(1); } - pub fn add_mem_count(&self, bin: usize, count: usize) { - let per_bucket = self.per_bucket_count.get(bin); + pub fn add_mem_count(&self, count: usize) { self.count_in_mem.fetch_add(count, Ordering::Relaxed); - per_bucket.map(|stat| stat.fetch_add(count, Ordering::Relaxed)); } - pub fn sub_mem_count(&self, bin: usize, count: usize) { - let per_bucket = self.per_bucket_count.get(bin); + pub fn sub_mem_count(&self, count: usize) { self.count_in_mem.fetch_sub(count, Ordering::Relaxed); - per_bucket.map(|stat| stat.fetch_sub(count, Ordering::Relaxed)); } fn ms_per_age + Into>( @@ -167,14 +161,6 @@ impl BucketMapHolderStats { self.count.load(Ordering::Relaxed) } - pub fn count_in_bucket(&self, bucket: usize) -> usize { - if bucket < self.per_bucket_count.len() { - 
self.per_bucket_count[bucket].load(Ordering::Relaxed) - } else { - 0 - } - } - /// This is an estimate of the # of items in mem that are awaiting flushing to disk. /// returns (# items in mem) - (# items we intend to hold in mem for performance heuristics) /// The result is also an estimate because 'held_in_mem' is based on a stat that is swapped out when stats are reported. @@ -201,11 +187,6 @@ impl BucketMapHolderStats { let ms_per_age = self.ms_per_age(storage, elapsed_ms); - let in_mem_per_bucket_counts = self - .per_bucket_count - .iter() - .map(|count| count.load(Ordering::Relaxed)) - .collect::>(); let disk = storage.disk.as_ref(); let disk_per_bucket_counts = disk .map(|disk| { @@ -214,7 +195,6 @@ impl BucketMapHolderStats { .collect::>() }) .unwrap_or_default(); - let in_mem_stats = Self::get_stats(in_mem_per_bucket_counts); let disk_stats = Self::get_stats(disk_per_bucket_counts); const US_PER_MS: u64 = 1_000; @@ -305,10 +285,6 @@ impl BucketMapHolderStats { self.held_in_mem.slot_list_cached.swap(0, Ordering::Relaxed), i64 ), - ("min_in_bin_mem", in_mem_stats.0, i64), - ("max_in_bin_mem", in_mem_stats.1, i64), - ("count_from_bins_mem", in_mem_stats.2, i64), - ("median_from_bins_mem", in_mem_stats.3, i64), ("min_in_bin_disk", disk_stats.0, i64), ("max_in_bin_disk", disk_stats.1, i64), ("count_from_bins_disk", disk_stats.2, i64), @@ -588,10 +564,6 @@ impl BucketMapHolderStats { ), f64 ), - ("min_in_bin_mem", in_mem_stats.0, i64), - ("max_in_bin_mem", in_mem_stats.1, i64), - ("count_from_bins_mem", in_mem_stats.2, i64), - ("median_from_bins_mem", in_mem_stats.3, i64), ( "gets_from_mem", self.gets_from_mem.swap(0, Ordering::Relaxed), From 55b5c085b090aa9c5fe4d0ea857039246e96c75a Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 23 Jun 2025 15:34:13 -0500 Subject: [PATCH 065/124] Add test for udp socket reachability (#6676) * added test for udp reachability * use localhost_port_range_for_tests * reduce iterations --- net-utils/src/lib.rs | 43 
+++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 34fe9cc6b42b81..2cdade4d961f22 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -977,4 +977,47 @@ mod tests { "Expected an error when reuseport is not set to true" ); } + + #[test] + fn test_verify_udp_multiple_ips_reachable() { + solana_logger::setup(); + let config = SocketConfig::default(); + let ip_a = IpAddr::V4(Ipv4Addr::LOCALHOST); + let ip_b = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); + + let server_ports = sockets::localhost_port_range_for_tests(); + let (_srv_udp_port, (srv_udp_sock, srv_tcp_listener)) = + bind_common_in_range_with_config(ip_a, server_ports, config).unwrap(); + + let ip_echo_server_addr = srv_udp_sock.local_addr().unwrap(); + let _runtime = ip_echo_server( + srv_tcp_listener, + DEFAULT_IP_ECHO_SERVER_THREADS, + /*shred_version=*/ Some(42), + ); + + let mut udp_sockets = Vec::new(); + let (_p1, (sock_a, _tl_a)) = bind_common_in_range_with_config( + ip_a, + sockets::localhost_port_range_for_tests(), + config, + ) + .unwrap(); + let (_p2, (sock_b, _tl_b)) = bind_common_in_range_with_config( + ip_b, + sockets::localhost_port_range_for_tests(), + config, + ) + .unwrap(); + + udp_sockets.push(sock_a); + udp_sockets.push(sock_b); + + let socket_refs: Vec<&UdpSocket> = udp_sockets.iter().collect(); + + assert!( + verify_all_reachable_udp(&ip_echo_server_addr, &socket_refs), + "all UDP ports on both 127.0.0.1 and 127.0.0.2 should be reachable" + ); + } } From 0cdebbcff8dadcddacfe49241e865e497cc8f103 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Mon, 23 Jun 2025 23:41:14 +0300 Subject: [PATCH 066/124] net-utils: Deprecate explicit reuseport (#6639) * deprecate reuseport flag * fix tests and common usage of reuseport functions * clean up external users of .reuseport() --- bench-streamer/src/main.rs | 9 +- bench-vote/src/main.rs | 9 +- connection-cache/src/connection_cache.rs | 8 +- 
docs/src/validator/tvu.md | 4 +- gossip/src/cluster_info.rs | 78 ++-- net-utils/src/lib.rs | 259 ++++++------ net-utils/src/sockets.rs | 388 +++++++++++++++++- quic-client/src/nonblocking/quic_client.rs | 7 +- streamer/src/nonblocking/recvmmsg.rs | 6 +- streamer/src/nonblocking/sendmmsg.rs | 2 +- streamer/src/nonblocking/testing_utilities.rs | 9 +- streamer/src/recvmmsg.rs | 5 +- udp-client/src/lib.rs | 6 +- udp-client/src/nonblocking/udp_client.rs | 14 +- vortexor/src/main.rs | 4 +- vortexor/src/vortexor.rs | 6 +- 16 files changed, 588 insertions(+), 226 deletions(-) diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index bfe07fad98c061..6ee3449bdc348c 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -3,7 +3,10 @@ use { clap::{crate_description, crate_name, value_t_or_exit, Arg, Command}, crossbeam_channel::unbounded, - solana_net_utils::{bind_to_unspecified, SocketConfig}, + solana_net_utils::{ + bind_to_unspecified, + sockets::{multi_bind_in_range_with_config, SocketConfiguration}, + }, solana_streamer::{ packet::{Packet, PacketBatchRecycler, PinnedPacketBatch, PACKET_DATA_SIZE}, sendmmsg::batch_send, @@ -104,10 +107,10 @@ fn main() -> Result<()> { let port = 0; let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let (_port, read_sockets) = solana_net_utils::multi_bind_in_range_with_config( + let (_port, read_sockets) = multi_bind_in_range_with_config( ip_addr, (port, port + num_sockets as u16), - SocketConfig::default().reuseport(true), + SocketConfiguration::default(), num_sockets, ) .unwrap(); diff --git a/bench-vote/src/main.rs b/bench-vote/src/main.rs index 9a7f991daabcd8..92b50fc14109c8 100644 --- a/bench-vote/src/main.rs +++ b/bench-vote/src/main.rs @@ -9,7 +9,10 @@ use { solana_hash::Hash, solana_keypair::Keypair, solana_message::Message, - solana_net_utils::{bind_to_unspecified, SocketConfig}, + solana_net_utils::{ + bind_to_unspecified, + sockets::{multi_bind_in_range_with_config, SocketConfiguration as 
SocketConfig}, + }, solana_pubkey::Pubkey, solana_signer::Signer, solana_streamer::{ @@ -187,8 +190,8 @@ fn main() -> Result<()> { let mut read_channels = Vec::new(); let mut read_threads = Vec::new(); let recycler = PacketBatchRecycler::default(); - let config = SocketConfig::default().reuseport(true); - let (port, read_sockets) = solana_net_utils::multi_bind_in_range_with_config( + let config = SocketConfig::default(); + let (port, read_sockets) = multi_bind_in_range_with_config( ip_addr, (port, port + num_sockets as u16), config, diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index c030c638183343..fca357e3bf3c29 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -514,7 +514,9 @@ mod tests { async_trait::async_trait, rand::{Rng, SeedableRng}, rand_chacha::ChaChaRng, - solana_net_utils::SocketConfig, + solana_net_utils::sockets::{ + bind_with_any_port_with_config, SocketConfiguration as SocketConfig, + }, solana_transaction_error::TransportResult, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -572,7 +574,7 @@ mod tests { fn default() -> Self { Self { udp_socket: Arc::new( - solana_net_utils::bind_with_any_port_with_config( + bind_with_any_port_with_config( IpAddr::V4(Ipv4Addr::UNSPECIFIED), SocketConfig::default(), ) @@ -586,7 +588,7 @@ mod tests { fn new() -> Result { Ok(Self { udp_socket: Arc::new( - solana_net_utils::bind_with_any_port_with_config( + bind_with_any_port_with_config( IpAddr::V4(Ipv4Addr::UNSPECIFIED), SocketConfig::default(), ) diff --git a/docs/src/validator/tvu.md b/docs/src/validator/tvu.md index ed4076dc81042a..946ae38b558cda 100644 --- a/docs/src/validator/tvu.md +++ b/docs/src/validator/tvu.md @@ -23,13 +23,13 @@ Internally, TVU is actually bound with multiple sockets to improve kernel's hand > **NOTE:** TPU sockets use similar logic -A node advertises one external ip/port for TVU while binding multiple sockets to that same 
port using SO_REUSEPORT: +A node advertises one external ip/port for TVU while binding multiple sockets to that same port: ```rust let (tvu_port, tvu_sockets) = multi_bind_in_range_with_config( bind_ip_addr, port_range, - socket_config_reuseport, + socket_config, num_tvu_sockets.get(), ) .expect("tvu multi_bind"); diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 76407dac969b27..bb90975da55879 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -52,12 +52,14 @@ use { solana_keypair::{signable::Signable, Keypair}, solana_ledger::shred::Shred, solana_net_utils::{ - bind_common_in_range_with_config, bind_in_range, bind_in_range_with_config, - bind_more_with_config, bind_to_localhost, bind_to_unspecified, bind_to_with_config, - bind_two_in_range_with_offset_and_config, find_available_ports_in_range, - multi_bind_in_range_with_config, - sockets::{bind_gossip_port_in_range, localhost_port_range_for_tests}, - PortRange, SocketConfig, VALIDATOR_PORT_RANGE, + bind_in_range, bind_to_localhost, bind_to_unspecified, find_available_ports_in_range, + sockets::{ + bind_common_in_range_with_config, bind_gossip_port_in_range, bind_in_range_with_config, + bind_more_with_config, bind_to_with_config, bind_two_in_range_with_offset_and_config, + localhost_port_range_for_tests, multi_bind_in_range_with_config, + SocketConfiguration as SocketConfig, + }, + PortRange, VALIDATOR_PORT_RANGE, }, solana_perf::{ data_budget::DataBudget, @@ -2372,7 +2374,7 @@ impl Node { let localhost_ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); let port_range = localhost_port_range_for_tests(); let udp_config = SocketConfig::default(); - let quic_config = SocketConfig::default().reuseport(true); + let quic_config = SocketConfig::default(); let ((_tpu_port, tpu), (_tpu_quic_port, tpu_quic)) = bind_two_in_range_with_offset_and_config( localhost_ip_addr, @@ -2524,7 +2526,6 @@ impl Node { bind_gossip_port_in_range(gossip_addr, port_range, bind_ip_addr); let 
socket_config = SocketConfig::default(); - let socket_config_reuseport = SocketConfig::default().reuseport(true); let (tvu_port, tvu) = Self::bind_with_config(bind_ip_addr, port_range, socket_config); let (tvu_quic_port, tvu_quic) = Self::bind_with_config(bind_ip_addr, port_range, socket_config); @@ -2534,12 +2535,11 @@ impl Node { port_range, QUIC_PORT_OFFSET, socket_config, - socket_config_reuseport, + socket_config, ) .unwrap(); let tpu_quic: Vec = - bind_more_with_config(tpu_quic, DEFAULT_QUIC_ENDPOINTS, socket_config_reuseport) - .unwrap(); + bind_more_with_config(tpu_quic, DEFAULT_QUIC_ENDPOINTS, socket_config).unwrap(); let ((tpu_forwards_port, tpu_forwards), (_tpu_forwards_quic_port, tpu_forwards_quic)) = bind_two_in_range_with_offset_and_config( @@ -2547,26 +2547,19 @@ impl Node { port_range, QUIC_PORT_OFFSET, socket_config, - socket_config_reuseport, + socket_config, ) .unwrap(); - let tpu_forwards_quic = bind_more_with_config( - tpu_forwards_quic, - DEFAULT_QUIC_ENDPOINTS, - socket_config_reuseport, - ) - .unwrap(); + let tpu_forwards_quic = + bind_more_with_config(tpu_forwards_quic, DEFAULT_QUIC_ENDPOINTS, socket_config) + .unwrap(); let (tpu_vote_port, tpu_vote) = Self::bind_with_config(bind_ip_addr, port_range, socket_config); let (tpu_vote_quic_port, tpu_vote_quic) = Self::bind_with_config(bind_ip_addr, port_range, socket_config); - let tpu_vote_quic: Vec = bind_more_with_config( - tpu_vote_quic, - DEFAULT_QUIC_ENDPOINTS, - socket_config_reuseport, - ) - .unwrap(); + let tpu_vote_quic: Vec = + bind_more_with_config(tpu_vote_quic, DEFAULT_QUIC_ENDPOINTS, socket_config).unwrap(); let (_, retransmit_socket) = Self::bind_with_config(bind_ip_addr, port_range, socket_config); @@ -2680,12 +2673,11 @@ impl Node { bind_gossip_port_in_range(&gossip_addr, port_range, bind_ip_addr); let socket_config = SocketConfig::default(); - let socket_config_reuseport = SocketConfig::default().reuseport(true); let (tvu_port, tvu_sockets) = multi_bind_in_range_with_config( 
bind_ip_addr, port_range, - socket_config_reuseport, + socket_config, num_tvu_receive_sockets.get(), ) .expect("tvu multi_bind"); @@ -2694,20 +2686,19 @@ impl Node { Self::bind_with_config(bind_ip_addr, port_range, socket_config); let (tpu_port, tpu_sockets) = - multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config_reuseport, 32) + multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config, 32) .expect("tpu multi_bind"); let (_tpu_port_quic, tpu_quic) = Self::bind_with_config( bind_ip_addr, (tpu_port + QUIC_PORT_OFFSET, tpu_port + QUIC_PORT_OFFSET + 1), - socket_config_reuseport, + socket_config, ); let tpu_quic = - bind_more_with_config(tpu_quic, num_quic_endpoints.get(), socket_config_reuseport) - .unwrap(); + bind_more_with_config(tpu_quic, num_quic_endpoints.get(), socket_config).unwrap(); let (tpu_forwards_port, tpu_forwards_sockets) = - multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config_reuseport, 8) + multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config, 8) .expect("tpu_forwards multi_bind"); let (_tpu_forwards_port_quic, tpu_forwards_quic) = Self::bind_with_config( @@ -2716,33 +2707,26 @@ impl Node { tpu_forwards_port + QUIC_PORT_OFFSET, tpu_forwards_port + QUIC_PORT_OFFSET + 1, ), - socket_config_reuseport, + socket_config, ); - let tpu_forwards_quic = bind_more_with_config( - tpu_forwards_quic, - num_quic_endpoints.get(), - socket_config_reuseport, - ) - .unwrap(); + let tpu_forwards_quic = + bind_more_with_config(tpu_forwards_quic, num_quic_endpoints.get(), socket_config) + .unwrap(); let (tpu_vote_port, tpu_vote_sockets) = - multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config_reuseport, 1) + multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config, 1) .expect("tpu_vote multi_bind"); let (tpu_vote_quic_port, tpu_vote_quic) = Self::bind_with_config(bind_ip_addr, port_range, socket_config); - let tpu_vote_quic = bind_more_with_config( - tpu_vote_quic, 
- num_quic_endpoints.get(), - socket_config_reuseport, - ) - .unwrap(); + let tpu_vote_quic = + bind_more_with_config(tpu_vote_quic, num_quic_endpoints.get(), socket_config).unwrap(); let (_, retransmit_sockets) = multi_bind_in_range_with_config( bind_ip_addr, port_range, - socket_config_reuseport, + socket_config, num_tvu_retransmit_sockets.get(), ) .expect("retransmit multi_bind"); @@ -2756,7 +2740,7 @@ impl Node { Self::bind_with_config(bind_ip_addr, port_range, socket_config); let (_, broadcast) = - multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config_reuseport, 4) + multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config, 4) .expect("broadcast multi_bind"); let (_, ancestor_hashes_requests) = diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 2cdade4d961f22..622d6b3cd0268e 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -10,14 +10,13 @@ pub use ip_echo_server::{ ip_echo_server, IpEchoServer, DEFAULT_IP_ECHO_SERVER_THREADS, MAX_PORT_COUNT_PER_MESSAGE, MINIMUM_IP_ECHO_SERVER_THREADS, }; -#[cfg(feature = "dev-context-only-utils")] -use tokio::net::UdpSocket as TokioUdpSocket; use { + crate::sockets::{udp_socket_with_config, PLATFORM_SUPPORTS_SOCKET_CONFIGS}, ip_echo_client::{ip_echo_server_request, ip_echo_server_request_with_binding}, ip_echo_server::IpEchoServerMessage, log::*, rand::{thread_rng, Rng}, - socket2::{Domain, SockAddr, Socket, Type}, + socket2::SockAddr, std::{ io::{self}, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, ToSocketAddrs, UdpSocket}, @@ -225,6 +224,10 @@ pub fn is_host_port(string: String) -> Result<(), String> { parse_host_port(&string).map(|_| ()) } +#[deprecated( + since = "2.3.2", + note = "Please use the equivalent struct from solana-net-utils::sockets" +)] #[derive(Clone, Copy, Debug, Default)] pub struct SocketConfig { reuseport: bool, @@ -232,6 +235,7 @@ pub struct SocketConfig { send_buffer_size: Option, } +#[allow(deprecated)] impl SocketConfig { pub fn 
reuseport(mut self, reuseport: bool) -> Self { self.reuseport = reuseport; @@ -261,40 +265,12 @@ impl SocketConfig { } } -#[cfg(any(windows, target_os = "ios"))] -fn udp_socket_with_config(_config: SocketConfig) -> io::Result { - let sock = Socket::new(Domain::IPV4, Type::DGRAM, None)?; - Ok(sock) -} - -#[cfg(not(any(windows, target_os = "ios")))] -fn udp_socket_with_config(config: SocketConfig) -> io::Result { - use nix::sys::socket::{setsockopt, sockopt::ReusePort}; - let SocketConfig { - reuseport, - recv_buffer_size, - send_buffer_size, - } = config; - - let sock = Socket::new(Domain::IPV4, Type::DGRAM, None)?; - - // Set buffer sizes - if let Some(recv_buffer_size) = recv_buffer_size { - sock.set_recv_buffer_size(recv_buffer_size)?; - } - - if let Some(send_buffer_size) = send_buffer_size { - sock.set_send_buffer_size(send_buffer_size)?; - } - - if reuseport { - setsockopt(&sock, ReusePort, &true).ok(); - } - - Ok(sock) -} - -// Find a port in the given range with a socket config that is available for both TCP and UDP +#[deprecated( + since = "2.3.2", + note = "Please use the equivalent from solana-net-utils::sockets" +)] +#[allow(deprecated)] +/// Find a port in the given range with a socket config that is available for both TCP and UDP pub fn bind_common_in_range_with_config( ip_addr: IpAddr, range: PortRange, @@ -312,23 +288,28 @@ pub fn bind_common_in_range_with_config( } pub fn bind_in_range(ip_addr: IpAddr, range: PortRange) -> io::Result<(u16, UdpSocket)> { - let config = SocketConfig::default(); - bind_in_range_with_config(ip_addr, range, config) + let config = sockets::SocketConfiguration::default(); + sockets::bind_in_range_with_config(ip_addr, range, config) } +#[deprecated( + since = "2.3.2", + note = "Please use the equivalent from solana-net-utils::sockets" +)] +#[allow(deprecated)] pub fn bind_in_range_with_config( ip_addr: IpAddr, range: PortRange, config: SocketConfig, ) -> io::Result<(u16, UdpSocket)> { - let sock = 
udp_socket_with_config(config)?; + let socket = udp_socket_with_config(config.into())?; for port in range.0..range.1 { let addr = SocketAddr::new(ip_addr, port); - if sock.bind(&SockAddr::from(addr)).is_ok() { - let sock: UdpSocket = sock.into(); - return Result::Ok((sock.local_addr().unwrap().port(), sock)); + if socket.bind(&SockAddr::from(addr)).is_ok() { + let udp_socket: UdpSocket = socket.into(); + return Result::Ok((udp_socket.local_addr().unwrap().port(), udp_socket)); } } @@ -337,11 +318,16 @@ pub fn bind_in_range_with_config( ))) } +#[deprecated( + since = "2.3.2", + note = "Please use the equivalent from solana-net-utils::sockets" +)] +#[allow(deprecated)] pub fn bind_with_any_port_with_config( ip_addr: IpAddr, config: SocketConfig, ) -> io::Result { - let sock = udp_socket_with_config(config)?; + let sock = udp_socket_with_config(config.into())?; let addr = SocketAddr::new(ip_addr, 0); match sock.bind(&SockAddr::from(addr)) { Ok(_) => Result::Ok(sock.into()), @@ -349,6 +335,11 @@ pub fn bind_with_any_port_with_config( } } +#[deprecated( + since = "2.3.2", + note = "Please use the equivalent from solana-net-utils::sockets" +)] +#[allow(deprecated)] /// binds num sockets to the same port in a range with config pub fn multi_bind_in_range_with_config( ip_addr: IpAddr, @@ -356,122 +347,68 @@ pub fn multi_bind_in_range_with_config( config: SocketConfig, mut num: usize, ) -> io::Result<(u16, Vec)> { - if !config.reuseport { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "SocketConfig.reuseport must be true for multi_bind_in_range_with_config", - )); - } - if cfg!(windows) && num != 1 { + if !PLATFORM_SUPPORTS_SOCKET_CONFIGS && num != 1 { // See https://github.com/solana-labs/solana/issues/4607 warn!( - "multi_bind_in_range_with_config() only supports 1 socket in windows ({} requested)", + "multi_bind_in_range_with_config() only supports 1 socket on this platform ({} requested)", num ); num = 1; } - let mut sockets = Vec::with_capacity(num); - - 
const NUM_TRIES: usize = 100; - let mut port = 0; - let mut error = None; - for _ in 0..NUM_TRIES { - port = { - let (port, _) = bind_in_range(ip_addr, range)?; - port - }; // drop the probe, port should be available... briefly. - - for _ in 0..num { - let sock = bind_to_with_config(ip_addr, port, config); - if let Ok(sock) = sock { - sockets.push(sock); - } else { - error = Some(sock); - break; - } - } - if sockets.len() == num { - break; - } else { - sockets.clear(); - } - } - if sockets.len() != num { - error.unwrap()?; - } + let (port, socket) = bind_in_range_with_config(ip_addr, range, config)?; + let sockets = bind_more_with_config(socket, num, config)?; Ok((port, sockets)) } +#[deprecated( + since = "2.3.2", + note = "Please use the equivalent from solana-net-utils::sockets" +)] +#[allow(deprecated)] pub fn bind_to(ip_addr: IpAddr, port: u16, reuseport: bool) -> io::Result { - let config = SocketConfig::default().reuseport(reuseport); + let config = SocketConfig { + reuseport, + ..Default::default() + }; bind_to_with_config(ip_addr, port, config) } -#[cfg(feature = "dev-context-only-utils")] -pub async fn bind_to_async( - ip_addr: IpAddr, - port: u16, - reuseport: bool, -) -> io::Result { - let config = SocketConfig::default().reuseport(reuseport); - let socket = bind_to_with_config_non_blocking(ip_addr, port, config)?; - TokioUdpSocket::from_std(socket) -} - pub fn bind_to_localhost() -> io::Result { - bind_to( - IpAddr::V4(Ipv4Addr::LOCALHOST), - /*port:*/ 0, - /*reuseport:*/ false, - ) -} - -#[cfg(feature = "dev-context-only-utils")] -pub async fn bind_to_localhost_async() -> io::Result { - bind_to_async( - IpAddr::V4(Ipv4Addr::LOCALHOST), - /*port:*/ 0, - /*reuseport:*/ false, - ) - .await + let config = sockets::SocketConfiguration::default(); + sockets::bind_to_with_config(IpAddr::V4(Ipv4Addr::LOCALHOST), 0, config) } pub fn bind_to_unspecified() -> io::Result { - bind_to( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - /*port:*/ 0, - /*reuseport:*/ false, - )
-} - -#[cfg(feature = "dev-context-only-utils")] -pub async fn bind_to_unspecified_async() -> io::Result { - bind_to_async( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - /*port:*/ 0, - /*reuseport:*/ false, - ) - .await + let config = sockets::SocketConfiguration::default(); + sockets::bind_to_with_config(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0, config) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function in favor of sockets::bind_to_with_config" +)] +#[allow(deprecated)] pub fn bind_to_with_config( ip_addr: IpAddr, port: u16, config: SocketConfig, ) -> io::Result { - let sock = udp_socket_with_config(config)?; - + let sock = udp_socket_with_config(config.into())?; let addr = SocketAddr::new(ip_addr, port); - sock.bind(&SockAddr::from(addr)).map(|_| sock.into()) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function, it is easy to misuse" +)] +#[allow(deprecated)] pub fn bind_to_with_config_non_blocking( ip_addr: IpAddr, port: u16, config: SocketConfig, ) -> io::Result { - let sock = udp_socket_with_config(config)?; + let sock = udp_socket_with_config(config.into())?; let addr = SocketAddr::new(ip_addr, port); @@ -480,19 +417,28 @@ pub fn bind_to_with_config_non_blocking( Ok(sock.into()) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function in favor of sockets::bind_common_with_config" +)] /// binds both a UdpSocket and a TcpListener pub fn bind_common(ip_addr: IpAddr, port: u16) -> io::Result<(UdpSocket, TcpListener)> { - let config = SocketConfig::default(); - bind_common_with_config(ip_addr, port, config) + let config = sockets::SocketConfiguration::default(); + sockets::bind_common_with_config(ip_addr, port, config) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function in favor of sockets::bind_common_with_config" +)] +#[allow(deprecated)] /// binds both a UdpSocket and a TcpListener on the same port pub fn bind_common_with_config( ip_addr: IpAddr, port: u16, config: SocketConfig, ) -> 
io::Result<(UdpSocket, TcpListener)> { - let sock = udp_socket_with_config(config)?; + let sock = udp_socket_with_config(config.into())?; let addr = SocketAddr::new(ip_addr, port); let sock_addr = SockAddr::from(addr); @@ -500,16 +446,31 @@ pub fn bind_common_with_config( .and_then(|_| TcpListener::bind(addr).map(|listener| (sock.into(), listener))) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function, in favor of sockets::bind_two_in_range_with_offset_and_config" +)] +#[allow(deprecated)] pub fn bind_two_in_range_with_offset( ip_addr: IpAddr, range: PortRange, offset: u16, ) -> io::Result<((u16, UdpSocket), (u16, UdpSocket))> { - let sock1_config = SocketConfig::default(); - let sock2_config = SocketConfig::default(); - bind_two_in_range_with_offset_and_config(ip_addr, range, offset, sock1_config, sock2_config) + let sock_config = sockets::SocketConfiguration::default(); + sockets::bind_two_in_range_with_offset_and_config( + ip_addr, + range, + offset, + sock_config, + sock_config, + ) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function, in favor of sockets::bind_two_in_range_with_offset_and_config" +)] +#[allow(deprecated)] pub fn bind_two_in_range_with_offset_and_config( ip_addr: IpAddr, range: PortRange, @@ -522,6 +483,7 @@ pub fn bind_two_in_range_with_offset_and_config( "range too small to find two ports with the correct offset".to_string(), )); } + for port in range.0..range.1 { if let Ok(first_bind) = bind_to_with_config(ip_addr, port, sock1_config) { if range.1.saturating_sub(port) >= offset { @@ -571,9 +533,10 @@ pub fn find_available_ports_in_range( .take(range.len()) // never take the same value twice .peekable(); let mut num = 0; + let config = sockets::SocketConfiguration::default(); while num < N { let port_to_try = next_port_to_try.next().unwrap(); // this unwrap never fails since we exit earlier - match bind_common(ip_addr, port_to_try) { + match sockets::bind_common_with_config(ip_addr, port_to_try, config) 
{ Ok(_) => { result[num] = port_to_try; num = num.saturating_add(1); @@ -588,20 +551,36 @@ pub fn find_available_ports_in_range( Ok(result) } +#[deprecated( + since = "2.3.2", + note = "Please avoid this function, in favor of sockets::bind_more_with_config" +)] +#[allow(deprecated)] pub fn bind_more_with_config( socket: UdpSocket, num: usize, config: SocketConfig, ) -> io::Result> { - let addr = socket.local_addr().unwrap(); - let ip = addr.ip(); - let port = addr.port(); - std::iter::once(Ok(socket)) - .chain((1..num).map(|_| bind_to_with_config(ip, port, config))) - .collect() + if !PLATFORM_SUPPORTS_SOCKET_CONFIGS { + if num > 1 { + warn!( + "bind_more_with_config() only supports 1 socket on this platform ({} requested)", + num + ); + } + Ok(vec![socket]) + } else { + let addr = socket.local_addr().unwrap(); + let ip = addr.ip(); + let port = addr.port(); + std::iter::once(Ok(socket)) + .chain((1..num).map(|_| bind_to_with_config(ip, port, config))) + .collect() + } } #[cfg(test)] +#[allow(deprecated)] mod tests { use { super::*, diff --git a/net-utils/src/sockets.rs b/net-utils/src/sockets.rs index dd44c96ca08a91..fe88d19367e13c 100644 --- a/net-utils/src/sockets.rs +++ b/net-utils/src/sockets.rs @@ -1,10 +1,15 @@ use { - crate::{bind_common_in_range_with_config, bind_common_with_config, PortRange, SocketConfig}, + crate::PortRange, + log::warn, + socket2::{Domain, SockAddr, Socket, Type}, std::{ + io, net::{IpAddr, SocketAddr, TcpListener, UdpSocket}, sync::atomic::{AtomicU16, Ordering}, }, }; +#[cfg(feature = "dev-context-only-utils")] +use {std::net::Ipv4Addr, tokio::net::UdpSocket as TokioUdpSocket}; // base port for deconflicted allocations const BASE_PORT: u16 = 5000; // how much to allocate per individual process. 
@@ -44,7 +49,7 @@ pub fn bind_gossip_port_in_range( port_range: PortRange, bind_ip_addr: IpAddr, ) -> (u16, (UdpSocket, TcpListener)) { - let config = SocketConfig::default(); + let config = SocketConfiguration::default(); if gossip_addr.port() != 0 { ( gossip_addr.port(), @@ -56,3 +61,382 @@ pub fn bind_gossip_port_in_range( bind_common_in_range_with_config(bind_ip_addr, port_range, config).expect("Failed to bind") } } + +/// True on platforms that support advanced socket configuration +pub(crate) const PLATFORM_SUPPORTS_SOCKET_CONFIGS: bool = + cfg!(not(any(windows, target_os = "ios"))); + +#[derive(Clone, Copy, Debug, Default)] +pub struct SocketConfiguration { + reuseport: bool, // controls SO_REUSEPORT, this is not intended to be set explicitly + recv_buffer_size: Option, + send_buffer_size: Option, + non_blocking: bool, +} + +impl SocketConfiguration { + /// Sets the receive buffer size for the socket (no effect on windows/ios). + /// + /// **Note:** On Linux the kernel will double the value you specify. + /// For example, if you specify `16MB`, the kernel will configure the + /// socket to use `32MB`. + /// See: https://man7.org/linux/man-pages/man7/socket.7.html: SO_RCVBUF + pub fn recv_buffer_size(mut self, size: usize) -> Self { + self.recv_buffer_size = Some(size); + self + } + + /// Sets the send buffer size for the socket (no effect on windows/ios) + /// + /// **Note:** On Linux the kernel will double the value you specify. + /// For example, if you specify `16MB`, the kernel will configure the + /// socket to use `32MB`. 
+ /// See: https://man7.org/linux/man-pages/man7/socket.7.html: SO_SNDBUF + pub fn send_buffer_size(mut self, size: usize) -> Self { + self.send_buffer_size = Some(size); + self + } + + /// Configure the socket for non-blocking IO + pub fn set_non_blocking(mut self, non_blocking: bool) -> Self { + self.non_blocking = non_blocking; + self + } +} + +#[allow(deprecated)] +impl From for SocketConfiguration { + fn from(value: crate::SocketConfig) -> Self { + Self { + reuseport: value.reuseport, + recv_buffer_size: value.recv_buffer_size, + send_buffer_size: value.send_buffer_size, + non_blocking: false, + } + } +} + +#[cfg(any(windows, target_os = "ios"))] +fn set_reuse_port(_socket: &T) -> io::Result<()> { + Ok(()) +} + +/// Sets SO_REUSEPORT on platforms that support it. +#[cfg(not(any(windows, target_os = "ios")))] +fn set_reuse_port(socket: &T) -> io::Result<()> +where + T: std::os::fd::AsFd, +{ + use nix::sys::socket::{setsockopt, sockopt::ReusePort}; + setsockopt(socket, ReusePort, &true).map_err(io::Error::from) +} + +pub(crate) fn udp_socket_with_config(config: SocketConfiguration) -> io::Result { + let SocketConfiguration { + reuseport, + recv_buffer_size, + send_buffer_size, + non_blocking, + } = config; + let sock = Socket::new(Domain::IPV4, Type::DGRAM, None)?; + if PLATFORM_SUPPORTS_SOCKET_CONFIGS { + // Set buffer sizes + if let Some(recv_buffer_size) = recv_buffer_size { + sock.set_recv_buffer_size(recv_buffer_size)?; + } + if let Some(send_buffer_size) = send_buffer_size { + sock.set_send_buffer_size(send_buffer_size)?; + } + + if reuseport { + set_reuse_port(&sock)?; + } + } + sock.set_nonblocking(non_blocking)?; + Ok(sock) +} + +/// Find a port in the given range with a socket config that is available for both TCP and UDP +pub fn bind_common_in_range_with_config( + ip_addr: IpAddr, + range: PortRange, + config: SocketConfiguration, +) -> io::Result<(u16, (UdpSocket, TcpListener))> { + for port in range.0..range.1 { + if let Ok((sock, listener)) = 
bind_common_with_config(ip_addr, port, config) { + return Result::Ok((sock.local_addr().unwrap().port(), (sock, listener))); + } + } + + Err(io::Error::other(format!( + "No available TCP/UDP ports in {range:?}" + ))) +} + +pub fn bind_in_range_with_config( + ip_addr: IpAddr, + range: PortRange, + config: SocketConfiguration, +) -> io::Result<(u16, UdpSocket)> { + let socket = udp_socket_with_config(config)?; + + for port in range.0..range.1 { + let addr = SocketAddr::new(ip_addr, port); + + if socket.bind(&SockAddr::from(addr)).is_ok() { + let udp_socket: UdpSocket = socket.into(); + return Result::Ok((udp_socket.local_addr().unwrap().port(), udp_socket)); + } + } + + Err(io::Error::other(format!( + "No available UDP ports in {range:?}" + ))) +} + +pub fn bind_with_any_port_with_config( + ip_addr: IpAddr, + config: SocketConfiguration, +) -> io::Result { + let sock = udp_socket_with_config(config)?; + let addr = SocketAddr::new(ip_addr, 0); + match sock.bind(&SockAddr::from(addr)) { + Ok(_) => Result::Ok(sock.into()), + Err(err) => Err(io::Error::other(format!("No available UDP port: {err}"))), + } +} + +/// binds num sockets to the same port in a range with config +pub fn multi_bind_in_range_with_config( + ip_addr: IpAddr, + range: PortRange, + config: SocketConfiguration, + mut num: usize, +) -> io::Result<(u16, Vec)> { + if !PLATFORM_SUPPORTS_SOCKET_CONFIGS && num != 1 { + // See https://github.com/solana-labs/solana/issues/4607 + warn!( + "multi_bind_in_range_with_config() only supports 1 socket on this platform ({} requested)", + num + ); + num = 1; + } + let (port, socket) = bind_in_range_with_config(ip_addr, range, config)?; + let sockets = bind_more_with_config(socket, num, config)?; + Ok((port, sockets)) +} + +pub fn bind_to(ip_addr: IpAddr, port: u16) -> io::Result { + let config = SocketConfiguration { + ..Default::default() + }; + bind_to_with_config(ip_addr, port, config) +} + +#[cfg(feature = "dev-context-only-utils")] +pub async fn 
bind_to_async(ip_addr: IpAddr, port: u16) -> io::Result { + let config = SocketConfiguration { + non_blocking: true, + ..Default::default() + }; + let socket = bind_to_with_config(ip_addr, port, config)?; + TokioUdpSocket::from_std(socket) +} + +#[cfg(feature = "dev-context-only-utils")] +pub async fn bind_to_localhost_async() -> io::Result { + bind_to_async(IpAddr::V4(Ipv4Addr::LOCALHOST), 0).await +} + +#[cfg(feature = "dev-context-only-utils")] +pub async fn bind_to_unspecified_async() -> io::Result { + bind_to_async(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0).await +} + +pub fn bind_to_with_config( + ip_addr: IpAddr, + port: u16, + config: SocketConfiguration, +) -> io::Result { + let sock = udp_socket_with_config(config)?; + + let addr = SocketAddr::new(ip_addr, port); + + sock.bind(&SockAddr::from(addr)).map(|_| sock.into()) +} + +/// binds both a UdpSocket and a TcpListener on the same port +pub fn bind_common_with_config( + ip_addr: IpAddr, + port: u16, + config: SocketConfiguration, +) -> io::Result<(UdpSocket, TcpListener)> { + let sock = udp_socket_with_config(config)?; + + let addr = SocketAddr::new(ip_addr, port); + let sock_addr = SockAddr::from(addr); + sock.bind(&sock_addr) + .and_then(|_| TcpListener::bind(addr).map(|listener| (sock.into(), listener))) +} + +pub fn bind_two_in_range_with_offset_and_config( + ip_addr: IpAddr, + range: PortRange, + offset: u16, + sock1_config: SocketConfiguration, + sock2_config: SocketConfiguration, +) -> io::Result<((u16, UdpSocket), (u16, UdpSocket))> { + if range.1.saturating_sub(range.0) < offset { + return Err(io::Error::other( + "range too small to find two ports with the correct offset".to_string(), + )); + } + + for port in range.0..range.1 { + if let Ok(first_bind) = bind_to_with_config(ip_addr, port, sock1_config) { + if range.1.saturating_sub(port) >= offset { + if let Ok(second_bind) = + bind_to_with_config(ip_addr, port.saturating_add(offset), sock2_config) + { + return Ok(( + 
(first_bind.local_addr().unwrap().port(), first_bind), + (second_bind.local_addr().unwrap().port(), second_bind), + )); + } + } else { + break; + } + } + } + Err(io::Error::other( + "couldn't find two ports with the correct offset in range".to_string(), + )) +} + +pub fn bind_more_with_config( + socket: UdpSocket, + num: usize, + mut config: SocketConfiguration, +) -> io::Result> { + if !PLATFORM_SUPPORTS_SOCKET_CONFIGS { + if num > 1 { + warn!( + "bind_more_with_config() only supports 1 socket on this platform ({} requested)", + num + ); + } + Ok(vec![socket]) + } else { + set_reuse_port(&socket)?; + config.reuseport = true; + let addr = socket.local_addr().unwrap(); + let ip = addr.ip(); + let port = addr.port(); + std::iter::once(Ok(socket)) + .chain((1..num).map(|_| bind_to_with_config(ip, port, config))) + .collect() + } +} + +#[cfg(test)] +#[allow(deprecated)] +mod tests { + use { + super::*, + crate::{bind_in_range, sockets::localhost_port_range_for_tests}, + std::net::Ipv4Addr, + }; + + #[test] + fn test_bind() { + let (pr_s, pr_e) = localhost_port_range_for_tests(); + let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let config = SocketConfiguration::default(); + let s = bind_in_range(ip_addr, (pr_s, pr_e)).unwrap(); + assert_eq!(s.0, pr_s, "bind_in_range should use first available port"); + let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let x = bind_to_with_config(ip_addr, pr_s + 1, config).unwrap(); + let y = bind_more_with_config(x, 2, config).unwrap(); + assert_eq!( + y[0].local_addr().unwrap().port(), + y[1].local_addr().unwrap().port() + ); + bind_to_with_config(ip_addr, pr_s, SocketConfiguration::default()).unwrap_err(); + bind_in_range(ip_addr, (pr_s, pr_s + 2)).unwrap_err(); + + let (port, v) = + multi_bind_in_range_with_config(ip_addr, (pr_s + 5, pr_e), config, 10).unwrap(); + for sock in &v { + assert_eq!(port, sock.local_addr().unwrap().port()); + } + } + + #[test] + fn test_bind_with_any_port() { + let ip_addr = 
IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let config = SocketConfiguration::default(); + let x = bind_with_any_port_with_config(ip_addr, config).unwrap(); + let y = bind_with_any_port_with_config(ip_addr, config).unwrap(); + assert_ne!( + x.local_addr().unwrap().port(), + y.local_addr().unwrap().port() + ); + } + + #[test] + fn test_bind_in_range_nil() { + let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + bind_in_range(ip_addr, (2000, 2000)).unwrap_err(); + bind_in_range(ip_addr, (2000, 1999)).unwrap_err(); + } + + #[test] + fn test_bind_on_top() { + let config = SocketConfiguration::default(); + let localhost = IpAddr::V4(Ipv4Addr::LOCALHOST); + let port_range = localhost_port_range_for_tests(); + let (_p, s) = bind_in_range_with_config(localhost, port_range, config).unwrap(); + let _socks = bind_more_with_config(s, 8, config).unwrap(); + + let _socks2 = multi_bind_in_range_with_config(localhost, port_range, config, 8).unwrap(); + } + + #[test] + fn test_bind_common_in_range() { + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let (pr_s, pr_e) = localhost_port_range_for_tests(); + let config = SocketConfiguration::default(); + let (port, _sockets) = + bind_common_in_range_with_config(ip_addr, (pr_s, pr_e), config).unwrap(); + assert!((pr_s..pr_e).contains(&port)); + + bind_common_in_range_with_config(ip_addr, (port, port + 1), config).unwrap_err(); + } + + #[test] + fn test_bind_two_in_range_with_offset() { + solana_logger::setup(); + let config = SocketConfiguration::default(); + let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let offset = 6; + if let Ok(((port1, _), (port2, _))) = + bind_two_in_range_with_offset_and_config(ip_addr, (1024, 65535), offset, config, config) + { + assert!(port2 == port1 + offset); + } + let offset = 42; + if let Ok(((port1, _), (port2, _))) = + bind_two_in_range_with_offset_and_config(ip_addr, (1024, 65535), offset, config, config) + { + assert!(port2 == port1 + offset); + } + assert!(bind_two_in_range_with_offset_and_config( + 
ip_addr, + (1024, 1044), + offset, + config, + config + ) + .is_err()); + } +} diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 606db52e520ee1..7275b98398c82f 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -17,7 +17,10 @@ use { }, solana_keypair::Keypair, solana_measure::measure::Measure, - solana_net_utils::{SocketConfig, VALIDATOR_PORT_RANGE}, + solana_net_utils::{ + sockets::{bind_in_range_with_config, SocketConfiguration as SocketConfig}, + VALIDATOR_PORT_RANGE, + }, solana_quic_definitions::{ QUIC_CONNECTION_HANDSHAKE_TIMEOUT, QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT, QUIC_SEND_FAIRNESS, }, @@ -78,7 +81,7 @@ impl QuicLazyInitializedEndpoint { endpoint.clone() } else { let config = SocketConfig::default(); - let client_socket = solana_net_utils::bind_in_range_with_config( + let client_socket = bind_in_range_with_config( IpAddr::V4(Ipv4Addr::UNSPECIFIED), VALIDATOR_PORT_RANGE, config, diff --git a/streamer/src/nonblocking/recvmmsg.rs b/streamer/src/nonblocking/recvmmsg.rs index 5c00d38691bf1c..87b537718d590f 100644 --- a/streamer/src/nonblocking/recvmmsg.rs +++ b/streamer/src/nonblocking/recvmmsg.rs @@ -57,7 +57,7 @@ pub async fn recv_mmsg_exact( mod tests { use { crate::{nonblocking::recvmmsg::*, packet::PACKET_DATA_SIZE}, - solana_net_utils::{bind_to_async, bind_to_localhost_async}, + solana_net_utils::sockets::{bind_to_async, bind_to_localhost_async}, std::{net::SocketAddr, time::Instant}, tokio::net::UdpSocket, }; @@ -68,9 +68,9 @@ mod tests { let sock_addr: SocketAddr = ip_str .parse() .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?; - let reader = bind_to_async(sock_addr.ip(), sock_addr.port(), /*reuseport:*/ false).await?; + let reader = bind_to_async(sock_addr.ip(), sock_addr.port()).await?; let addr = reader.local_addr()?; - let sender = bind_to_async(sock_addr.ip(), sock_addr.port(), /*reuseport:*/ false).await?; + let sender = 
bind_to_async(sock_addr.ip(), sock_addr.port()).await?; let saddr = sender.local_addr()?; Ok((reader, addr, sender, saddr)) } diff --git a/streamer/src/nonblocking/sendmmsg.rs b/streamer/src/nonblocking/sendmmsg.rs index ed1740b579d557..68f43c85d3bb52 100644 --- a/streamer/src/nonblocking/sendmmsg.rs +++ b/streamer/src/nonblocking/sendmmsg.rs @@ -61,7 +61,7 @@ mod tests { sendmmsg::SendPktsError, }, assert_matches::assert_matches, - solana_net_utils::{bind_to_localhost_async, bind_to_unspecified_async}, + solana_net_utils::sockets::{bind_to_localhost_async, bind_to_unspecified_async}, solana_packet::PACKET_DATA_SIZE, std::{ io::ErrorKind, diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index 1e82687f1d9104..5db38ecf20d22e 100644 --- a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -12,8 +12,11 @@ use { }, solana_keypair::Keypair, solana_net_utils::{ - bind_to_localhost, multi_bind_in_range_with_config, - sockets::localhost_port_range_for_tests, SocketConfig, + bind_to_localhost, + sockets::{ + localhost_port_range_for_tests, multi_bind_in_range_with_config, + SocketConfiguration as SocketConfig, + }, }, solana_perf::packet::PacketBatch, solana_quic_definitions::{QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT, QUIC_SEND_FAIRNESS}, @@ -66,7 +69,7 @@ pub fn create_quic_server_sockets() -> Vec { multi_bind_in_range_with_config( IpAddr::V4(Ipv4Addr::LOCALHOST), port_range, - SocketConfig::default().reuseport(true), + SocketConfig::default(), num, ) .expect("bind operation for quic server sockets should succeed") diff --git a/streamer/src/recvmmsg.rs b/streamer/src/recvmmsg.rs index 238e57583eb7a0..78f0976acf1863 100644 --- a/streamer/src/recvmmsg.rs +++ b/streamer/src/recvmmsg.rs @@ -182,8 +182,9 @@ pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result Result { - let socket = solana_net_utils::bind_with_any_port_with_config( + let socket = 
bind_with_any_port_with_config( IpAddr::V4(Ipv4Addr::UNSPECIFIED), SocketConfig::default(), ) diff --git a/udp-client/src/nonblocking/udp_client.rs b/udp-client/src/nonblocking/udp_client.rs index 62d55c1c486dee..166cd79a50be5f 100644 --- a/udp-client/src/nonblocking/udp_client.rs +++ b/udp-client/src/nonblocking/udp_client.rs @@ -46,7 +46,9 @@ impl ClientConnection for UdpClientConnection { mod tests { use { super::*, - solana_net_utils::{bind_to_async, SocketConfig}, + solana_net_utils::sockets::{ + bind_to_async, bind_with_any_port_with_config, SocketConfiguration as SocketConfig, + }, solana_packet::{Packet, PACKET_DATA_SIZE}, solana_streamer::nonblocking::recvmmsg::recv_mmsg, std::net::{IpAddr, Ipv4Addr}, @@ -73,19 +75,13 @@ mod tests { async fn test_send_from_addr() { let addr_str = "0.0.0.0:50100"; let addr = addr_str.parse().unwrap(); - let socket = solana_net_utils::bind_with_any_port_with_config( + let socket = bind_with_any_port_with_config( IpAddr::V4(Ipv4Addr::UNSPECIFIED), SocketConfig::default(), ) .unwrap(); let connection = UdpClientConnection::new_from_addr(socket, addr); - let reader = bind_to_async( - addr.ip(), - /*port*/ addr.port(), - /*reuseport:*/ false, - ) - .await - .expect("bind"); + let reader = bind_to_async(addr.ip(), addr.port()).await.expect("bind"); check_send_one(&connection, &reader).await; check_send_batch(&connection, &reader).await; } diff --git a/vortexor/src/main.rs b/vortexor/src/main.rs index 86c177d5bf8b62..525ee749c3f94d 100644 --- a/vortexor/src/main.rs +++ b/vortexor/src/main.rs @@ -5,7 +5,7 @@ use { solana_core::banking_trace::BankingTracer, solana_keypair::read_keypair_file, solana_logger::redirect_stderr_to_file, - solana_net_utils::{bind_in_range_with_config, SocketConfig}, + solana_net_utils::sockets::{bind_in_range_with_config, SocketConfiguration as SocketConfig}, solana_quic_definitions::QUIC_PORT_OFFSET, solana_signer::Signer, solana_streamer::streamer::StakedNodes, @@ -100,7 +100,7 @@ pub fn main() { ) 
.unwrap(); - let config = SocketConfig::default().reuseport(false); + let config = SocketConfig::default(); let sender_socket = bind_in_range_with_config(*bind_address, dynamic_port_range, config).unwrap(); diff --git a/vortexor/src/vortexor.rs b/vortexor/src/vortexor.rs index e30363361585d0..8a1669e5f726b6 100644 --- a/vortexor/src/vortexor.rs +++ b/vortexor/src/vortexor.rs @@ -5,7 +5,9 @@ use { sigverify_stage::SigVerifyStage, }, solana_keypair::Keypair, - solana_net_utils::{multi_bind_in_range_with_config, SocketConfig}, + solana_net_utils::sockets::{ + multi_bind_in_range_with_config, SocketConfiguration as SocketConfig, + }, solana_perf::packet::PacketBatch, solana_quic_definitions::NotifyKeyUpdate, solana_streamer::{ @@ -61,7 +63,7 @@ impl Vortexor { tpu_forward_address: Option, num_quic_endpoints: usize, ) -> TpuSockets { - let quic_config = SocketConfig::default().reuseport(true); + let quic_config = SocketConfig::default(); let tpu_quic = bind_sockets( bind_address, From 90221b68c58776b226e6103ccfa70aabb09b3d5a Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 23 Jun 2025 16:04:49 -0500 Subject: [PATCH 067/124] validator: Add --wait-for-exit flag to exit subcommand (#6233) agave-validator exit currently returns immediately after the AdminRpc call returns. However, the running validator has not exited at this point and may continue to tear itself down for multiple seconds. The exit subcommand now has an optional flag, --wait-for-exit, that queries the PID from the validator and loops until that PID has fully terminated.
Use of this flag means that a caller can be sure the running validator is dead when agave-validator exit returns --- validator/Cargo.toml | 1 + validator/src/admin_rpc_service.rs | 19 +++-- validator/src/commands/exit/mod.rs | 108 ++++++++++++++++++++++++++--- validator/src/commands/mod.rs | 3 + 4 files changed, 116 insertions(+), 15 deletions(-) diff --git a/validator/Cargo.toml b/validator/Cargo.toml index c7d01f78545072..a79bc818587614 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -27,6 +27,7 @@ jsonrpc-core = { workspace = true } jsonrpc-core-client = { workspace = true, features = ["ipc"] } jsonrpc-derive = { workspace = true } jsonrpc-ipc-server = { workspace = true } +libc = { workspace = true } libloading = { workspace = true } log = { workspace = true } num_cpus = { workspace = true } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index bbf4166fdf44d1..a5506013c028c5 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -152,7 +152,8 @@ pub trait AdminRpc { type Metadata; #[rpc(meta, name = "exit")] - fn exit(&self, meta: Self::Metadata) -> Result<()>; + /// Initiates validator exit and returns the PID + fn exit(&self, meta: Self::Metadata) -> Result; #[rpc(meta, name = "reloadPlugin")] fn reload_plugin( @@ -256,7 +257,7 @@ pub struct AdminRpcImpl; impl AdminRpc for AdminRpcImpl { type Metadata = AdminRpcRequestMetadata; - fn exit(&self, meta: Self::Metadata) -> Result<()> { + fn exit(&self, meta: Self::Metadata) -> Result { debug!("exit admin rpc request received"); thread::Builder::new() @@ -266,7 +267,7 @@ impl AdminRpc for AdminRpcImpl { // receive a confusing error as the validator shuts down before a response is sent back. 
thread::sleep(Duration::from_millis(100)); - warn!("validator exit requested"); + info!("validator exit requested"); meta.validator_exit.write().unwrap().exit(); if !meta.validator_exit_backpressure.is_empty() { @@ -308,7 +309,7 @@ impl AdminRpc for AdminRpcImpl { }) .unwrap(); - Ok(()) + Ok(std::process::id()) } fn reload_plugin( @@ -1510,9 +1511,13 @@ mod tests { expected_validator_id.pubkey().to_string() ); - let contact_info_request = - r#"{"jsonrpc":"2.0","id":1,"method":"exit","params":[]}"#.to_string(); - let exit_response = test_validator.handle_request(&contact_info_request); + let expected_parsed_response: Value = serde_json::from_str(&format!( + r#"{{"id": 1, "jsonrpc": "2.0", "result": {} }}"#, + std::process::id() + )) + .unwrap(); + let exit_request = r#"{"jsonrpc":"2.0","id":1,"method":"exit","params":[]}"#.to_string(); + let exit_response = test_validator.handle_request(&exit_request); let actual_parsed_response: Value = serde_json::from_str(&exit_response.expect("actual response")) .expect("actual response deserialization"); diff --git a/validator/src/commands/exit/mod.rs b/validator/src/commands/exit/mod.rs index bea065c5078fbb..bc896ca31f91ad 100644 --- a/validator/src/commands/exit/mod.rs +++ b/validator/src/commands/exit/mod.rs @@ -1,3 +1,5 @@ +#[cfg(target_os = "linux")] +use {crate::commands::Error, std::io, std::thread, std::time::Duration}; use { crate::{ admin_rpc_service, @@ -13,10 +15,18 @@ const COMMAND: &str = "exit"; const DEFAULT_MIN_IDLE_TIME: &str = "10"; const DEFAULT_MAX_DELINQUENT_STAKE: &str = "5"; +#[derive(Debug, PartialEq)] +pub enum PostExitAction { + // Run the agave-validator monitor command indefinitely + Monitor, + // Block until the exiting validator process has terminated + Wait, +} + #[derive(Debug, PartialEq)] pub struct ExitArgs { pub force: bool, - pub monitor: bool, + pub post_exit_action: Option, pub min_idle_time: usize, pub max_delinquent_stake: u8, pub skip_new_snapshot_check: bool, @@ -25,9 +35,17 @@ pub 
struct ExitArgs { impl FromClapArgMatches for ExitArgs { fn from_clap_arg_match(matches: &ArgMatches) -> Result { + let post_exit_action = if matches.is_present("monitor") { + Some(PostExitAction::Monitor) + } else if matches.is_present("wait_for_exit") { + Some(PostExitAction::Wait) + } else { + None + }; + Ok(ExitArgs { force: matches.is_present("force"), - monitor: matches.is_present("monitor"), + post_exit_action, min_idle_time: value_t_or_exit!(matches, "min_idle_time", usize), max_delinquent_stake: value_t_or_exit!(matches, "max_delinquent_stake", u8), skip_new_snapshot_check: matches.is_present("skip_new_snapshot_check"), @@ -55,6 +73,12 @@ pub fn command<'a>() -> App<'a, 'a> { .takes_value(false) .help("Monitor the validator after sending the exit request"), ) + .arg( + Arg::with_name("wait_for_exit") + .long("wait-for-exit") + .conflicts_with("monitor") + .help("Wait for the validator to terminate after sending the exit request"), + ) .arg( Arg::with_name("min_idle_time") .long("min-idle-time") @@ -102,16 +126,75 @@ pub fn execute(matches: &ArgMatches, ledger_path: &Path) -> Result<()> { } let admin_client = admin_rpc_service::connect(ledger_path); - admin_rpc_service::runtime().block_on(async move { admin_client.await?.exit().await })?; + let validator_pid = + admin_rpc_service::runtime().block_on(async move { admin_client.await?.exit().await })?; + println!("Exit request sent"); - if exit_args.monitor { - monitor::execute(matches, ledger_path)?; + match exit_args.post_exit_action { + None => Ok(()), + Some(PostExitAction::Monitor) => monitor::execute(matches, ledger_path), + Some(PostExitAction::Wait) => poll_until_pid_terminates(validator_pid), + }?; + + Ok(()) +} + +#[cfg(target_os = "linux")] +fn poll_until_pid_terminates(pid: u32) -> Result<()> { + let pid = i32::try_from(pid)?; + + println!("Waiting for agave-validator process {pid} to terminate"); + loop { + // From man kill(2) + // + // If sig is 0, then no signal is sent, but existence and 
permission + // checks are still performed; this can be used to check for the + // existence of a process ID or process group ID that the caller is + // permitted to signal. + let result = unsafe { + libc::kill(pid, /*sig:*/ 0) + }; + if result >= 0 { + // Give the process some time to exit before checking again + thread::sleep(Duration::from_millis(500)); + } else { + let errno = io::Error::last_os_error() + .raw_os_error() + .ok_or(Error::Dynamic("unable to read raw os error".into()))?; + match errno { + libc::ESRCH => { + println!("Done, agave-validator process {pid} has terminated"); + break; + } + libc::EINVAL => { + // An invalid signal was specified, we only pass sig=0 so + // this should not be possible + Err(Error::Dynamic( + format!("unexpected invalid signal error for kill({pid}, 0)").into(), + ))?; + } + libc::EPERM => { + Err(io::Error::from(io::ErrorKind::PermissionDenied))?; + } + unknown => { + Err(Error::Dynamic( + format!("unexpected errno for kill({pid}, 0): {unknown}").into(), + ))?; + } + } + } } Ok(()) } +#[cfg(not(target_os = "linux"))] +fn poll_until_pid_terminates(pid: u32) -> Result<()> { + println!("Unable to monitor agave-validator process {pid} on this platform"); + Ok(()) +} + #[cfg(test)] mod tests { use {super::*, crate::commands::tests::verify_args_struct_by_command}; @@ -126,7 +209,7 @@ mod tests { .parse() .expect("invalid DEFAULT_MAX_DELINQUENT_STAKE"), force: false, - monitor: false, + post_exit_action: None, skip_new_snapshot_check: false, skip_health_check: false, } @@ -151,12 +234,21 @@ mod tests { } #[test] - fn verify_args_struct_by_command_exit_with_monitor() { + fn verify_args_struct_by_command_exit_with_post_exit_action() { verify_args_struct_by_command( command(), vec![COMMAND, "--monitor"], ExitArgs { - monitor: true, + post_exit_action: Some(PostExitAction::Monitor), + ..ExitArgs::default() + }, + ); + + verify_args_struct_by_command( + command(), + vec![COMMAND, "--wait-for-exit"], + ExitArgs { + post_exit_action: 
Some(PostExitAction::Wait), ..ExitArgs::default() }, ); diff --git a/validator/src/commands/mod.rs b/validator/src/commands/mod.rs index 43cae3731e1ef5..815e44fad11243 100644 --- a/validator/src/commands/mod.rs +++ b/validator/src/commands/mod.rs @@ -27,6 +27,9 @@ pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), + + #[error(transparent)] + TryFromInt(#[from] std::num::TryFromIntError), } pub type Result = std::result::Result; From bbea86ad8865fb75189bd77742c3a0df5b3f1e52 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 23 Jun 2025 16:16:42 -0500 Subject: [PATCH 068/124] validator: Reduce the admin rpc client side threadpool size (#6683) This pool is used to issue admin RPC commands by the various agave-validator commands. Most commands make a single call only, and all results are blocked on. Thus, there isn't much parallelism happening here. So, reduce the thread count from num_cpus to 2 --- validator/src/admin_rpc_service.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index a5506013c028c5..41f3e25ad5501b 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -853,10 +853,14 @@ pub async fn connect(ledger_path: &Path) -> std::result::Result Runtime { tokio::runtime::Builder::new_multi_thread() .thread_name("solAdminRpcRt") .enable_all() + // The agave-validator subcommands make few admin RPC calls and block + // on the results so two workers is plenty + .worker_threads(2) .build() .expect("new tokio runtime") } From f668f8b70cd3bf24e1b43eaa074fceba27046f38 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 23 Jun 2025 18:37:25 -0400 Subject: [PATCH 069/124] Batches stats updates for accounts index generation (#6703) --- accounts-db/src/accounts_db.rs | 47 +++++++++++++++++++ accounts-db/src/accounts_index.rs | 41 ++++++++++++++-- .../accounts_index/in_mem_accounts_index.rs | 43 +++++++++++++---- 3 files changed, 118 insertions(+), 
13 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 0eb74124004131..1016b4222d47f5 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -583,6 +583,12 @@ struct SlotIndexGenerationInfo { rent_paying_accounts_by_partition: Vec, zero_lamport_pubkeys: Vec, all_accounts_are_zero_lamports: bool, + /// Number of accounts in this slot that didn't already exist in the index + num_did_not_exist: u64, + /// Number of accounts in this slot that already existed, and were in-mem + num_existed_in_mem: u64, + /// Number of accounts in this slot that already existed, and were on-disk + num_existed_on_disk: u64, } /// The lt hash of old/duplicate accounts @@ -7982,6 +7988,9 @@ impl AccountsDb { rent_paying_accounts_by_partition, zero_lamport_pubkeys, all_accounts_are_zero_lamports, + num_did_not_exist: generate_index_results.num_did_not_exist, + num_existed_in_mem: generate_index_results.num_existed_in_mem, + num_existed_on_disk: generate_index_results.num_existed_on_disk, } } @@ -8048,6 +8057,9 @@ impl AccountsDb { let mut insert_time_sum = 0; let mut total_including_duplicates_sum = 0; let mut accounts_data_len_sum = 0; + let mut local_num_did_not_exist = 0; + let mut local_num_existed_in_mem = 0; + let mut local_num_existed_on_disk = 0; for (index, slot) in slots.iter().enumerate() { let mut scan_time = Measure::start("scan"); log_status.report(index as u64); @@ -8073,6 +8085,9 @@ impl AccountsDb { rent_paying_accounts_by_partition_this_slot, zero_lamport_pubkeys: zero_pubkeys_this_slot, all_accounts_are_zero_lamports, + num_did_not_exist, + num_existed_in_mem, + num_existed_on_disk, } = self.generate_index_for_slot( &storage, *slot, @@ -8081,6 +8096,10 @@ impl AccountsDb { &storage_info, ); + local_num_did_not_exist += num_did_not_exist; + local_num_existed_in_mem += num_existed_in_mem; + local_num_existed_on_disk += num_existed_on_disk; + if rent_paying_this_slot > 0 { // We don't have any rent 
paying accounts on mainnet, so this code should never be hit. rent_paying.fetch_add(rent_paying_this_slot, Ordering::Relaxed); @@ -8135,6 +8154,34 @@ impl AccountsDb { }; insert_time_sum += insert_us; } + + if pass == 0 { + // This thread has finished processing its chunk of slots. + // Update the index stats now. + let index_stats = self.accounts_index.bucket_map_holder_stats(); + + // stats for inserted entries that previously did *not* exist + index_stats.inc_insert_count(local_num_did_not_exist); + index_stats.add_mem_count(local_num_did_not_exist as usize); + + // stats for inserted entries that previous did exist *in-mem* + index_stats + .entries_from_mem + .fetch_add(local_num_existed_in_mem, Ordering::Relaxed); + index_stats + .updates_in_mem + .fetch_add(local_num_existed_in_mem, Ordering::Relaxed); + + // stats for inserted entries that previously did exist *on-disk* + index_stats.add_mem_count(local_num_existed_on_disk as usize); + index_stats + .entries_missing + .fetch_add(local_num_existed_on_disk, Ordering::Relaxed); + index_stats + .updates_in_mem + .fetch_add(local_num_existed_on_disk, Ordering::Relaxed); + } + all_accounts_are_zero_lamports_slots.fetch_add( all_accounts_are_zero_lamports_slots_inner, Ordering::Relaxed, diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 3814ca9d264ea6..41810da01d6eb7 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -9,13 +9,16 @@ use { accounts_partition::RentPayingAccountsByPartition, ancestors::Ancestors, bucket_map_holder::Age, + bucket_map_holder_stats::BucketMapHolderStats, contains::Contains, is_zero_lamport::IsZeroLamport, pubkey_bins::PubkeyBinCalculator24, rolling_bit_field::RollingBitField, }, account_map_entry::{AccountMapEntry, PreAllocatedAccountMapEntry}, - in_mem_accounts_index::{InMemAccountsIndex, InsertNewEntryResults, StartupStats}, + in_mem_accounts_index::{ + ExistedLocation, InMemAccountsIndex, 
InsertNewEntryResults, StartupStats, + }, iter::{AccountsIndexIterator, AccountsIndexIteratorReturnsItems}, log::*, rand::{thread_rng, Rng}, @@ -81,6 +84,12 @@ pub(crate) struct GenerateIndexResult { pub count: usize, /// pubkeys which were present multiple times in the insertion request. pub duplicates: Option>, + /// Number of accounts added to the index that didn't already exist in the index + pub num_did_not_exist: u64, + /// Number of accounts added to the index that already existed, and were in-mem + pub num_existed_in_mem: u64, + /// Number of accounts added to the index that already existed, and were on-disk + pub num_existed_on_disk: u64, } #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] @@ -1073,6 +1082,10 @@ impl + Into> AccountsIndex { }); } + pub(crate) fn bucket_map_holder_stats(&self) -> &BucketMapHolderStats { + &self.storage.storage.stats + } + /// get stats related to startup pub(crate) fn get_startup_stats(&self) -> &StartupStats { &self.storage.storage.startup_stats @@ -1434,6 +1447,11 @@ impl + Into> AccountsIndex { let insertion_time = AtomicU64::new(0); + // accumulated stats after inserting pubkeys into the index + let mut num_did_not_exist = 0; + let mut num_existed_in_mem = 0; + let mut num_existed_on_disk = 0; + // offset bin processing in the 'binned' array by a random amount. // This results in calls to insert_new_entry_if_missing_with_lock from different threads starting at different bins to avoid // lock contention. 
@@ -1473,12 +1491,26 @@ impl + Into> AccountsIndex { match r_account_maps .insert_new_entry_if_missing_with_lock(pubkey, new_entry) { - InsertNewEntryResults::DidNotExist => {} - InsertNewEntryResults::Existed(other_slot) => { + InsertNewEntryResults::DidNotExist => { + num_did_not_exist += 1; + } + InsertNewEntryResults::Existed { + other_slot, + location, + } => { if let Some(other_slot) = other_slot { duplicates_from_in_memory.push((other_slot, pubkey)); } duplicates_from_in_memory.push((slot, pubkey)); + + match location { + ExistedLocation::InMem => { + num_existed_in_mem += 1; + } + ExistedLocation::OnDisk => { + num_existed_on_disk += 1; + } + } } } }); @@ -1496,6 +1528,9 @@ impl + Into> AccountsIndex { GenerateIndexResult { count, duplicates: (!duplicates.is_empty()).then_some(duplicates), + num_did_not_exist, + num_existed_in_mem, + num_existed_on_disk, }, ) } diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 6694fe29313d47..6c7c4e03450656 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -140,9 +140,21 @@ impl + Into> Debug for InMemAccoun } } +/// An entry was inserted into the index; did it already exist in the index? +#[derive(Debug)] pub enum InsertNewEntryResults { DidNotExist, - Existed(Option), + Existed { + other_slot: Option, + location: ExistedLocation, + }, +} + +/// An entry was inserted into the index that previously existed; where did it previously exist? 
+#[derive(Debug)] +pub enum ExistedLocation { + InMem, + OnDisk, } #[derive(Default, Debug)] @@ -777,15 +789,19 @@ impl + Into> InMemAccountsIndex, ) -> InsertNewEntryResults { - let mut m = Measure::start("entry"); let mut map = self.map_internal.write().unwrap(); let entry = map.entry(pubkey); - m.stop(); let mut other_slot = None; let (found_in_mem, already_existed) = match entry { Entry::Occupied(occupied) => { @@ -832,7 +848,6 @@ impl + Into> InMemAccountsIndex { // not in cache, look on disk let disk_entry = self.load_account_entry_from_disk(vacant.key()); - self.stats().inc_mem_count(); if let Some(disk_entry) = disk_entry { let (slot, account_info) = new_entry.into(); InMemAccountsIndex::::lock_and_update_slot_list( @@ -854,18 +869,26 @@ impl + Into> InMemAccountsIndex Date: Mon, 23 Jun 2025 23:07:51 -0500 Subject: [PATCH 070/124] validator: Adjust --wait-for-exit message on non-linux (#6708) The validator exit command can have --monitor OR --wait-for-exit specified. The functionality for --wait-for-exit is only supported on linux, and the error message for the non-linux codepath uses the term "monitor" Given that the other option is called monitor, this is potentially confusing. 
So, adjust the message to use "wait" instead of "monitor" --- validator/src/commands/exit/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator/src/commands/exit/mod.rs b/validator/src/commands/exit/mod.rs index bc896ca31f91ad..5db7088e20f675 100644 --- a/validator/src/commands/exit/mod.rs +++ b/validator/src/commands/exit/mod.rs @@ -190,8 +190,8 @@ fn poll_until_pid_terminates(pid: u32) -> Result<()> { } #[cfg(not(target_os = "linux"))] -fn poll_until_pid_terminates(pid: u32) -> Result<()> { - println!("Unable to monitor agave-validator process {pid} on this platform"); +fn poll_until_pid_terminates(_pid: u32) -> Result<()> { + println!("Unable to wait for agave-validator process termination on this platform"); Ok(()) } From b0a649c86661afc007b6265940ac776df71fa6b7 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Tue, 24 Jun 2025 10:19:57 +0300 Subject: [PATCH 071/124] Chore: streamer: remove deprecated declarations (#6684) axe deprecated declarations --- streamer/src/nonblocking/quic.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 7de3640303edb8..c79384711a9f3a 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -85,21 +85,6 @@ const CONNECTION_CLOSE_REASON_TOO_MANY: &[u8] = b"too_many"; const CONNECTION_CLOSE_CODE_INVALID_STREAM: u32 = 5; const CONNECTION_CLOSE_REASON_INVALID_STREAM: &[u8] = b"invalid_stream"; -/// The new connections per minute from a particular IP address. -/// Heuristically set to the default maximum concurrent connections -/// per IP address. Might be adjusted later. 
-#[deprecated( - since = "2.2.0", - note = "Use solana_streamer::quic::DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE" -)] -pub use crate::quic::DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE; -/// Limit to 250K PPS -#[deprecated( - since = "2.2.0", - note = "Use solana_streamer::quic::DEFAULT_MAX_STREAMS_PER_MS" -)] -pub use crate::quic::DEFAULT_MAX_STREAMS_PER_MS; - /// Total new connection counts per second. Heuristically taken from /// the default staked and unstaked connection limits. Might be adjusted /// later. From 5e53ebc2e0dfc2352095a464446722fa8a0c60c5 Mon Sep 17 00:00:00 2001 From: Kamil Skalski Date: Tue, 24 Jun 2025 11:26:06 +0200 Subject: [PATCH 072/124] Changelog: add mention of memlock requirement for io_uring (#6702) * Changelog: add mention of memlock requirement for io_uring * Add memlock to docs and install-docker.sh --- CHANGELOG.md | 3 +++ docs/src/operations/guides/validator-start.md | 5 +++++ docs/src/operations/setup-a-validator.md | 4 ++++ net/scripts/install-docker.sh | 1 + 4 files changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f14b18312c58e..116ad2b1d96900 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,9 @@ Release channels have their own copy of this changelog: * Deprecated snapshot archive formats have been removed and are no longer loadable. * Using `--snapshot-interval-slots 0` to disable generating snapshots has been removed. Use `--no-snapshots` instead. +#### Changes +* Reading snapshot archives requires increased `memlock` limits - recommended setting is `LimitMEMLOCK=2000000000` in systemd service configuration. Lack of sufficient limit will result slower startup times. 
+ ## 2.3.0 ### Validator diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md index 6a4f8ef36528ff..613286975e9148 100644 --- a/docs/src/operations/guides/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -70,6 +70,7 @@ Add ``` LimitNOFILE=1000000 +LimitMEMLOCK=2000000000 ``` to the `[Service]` section of your systemd service file, if you use one, @@ -77,6 +78,7 @@ otherwise add ``` DefaultLimitNOFILE=1000000 +DefaultLimitMEMLOCK=2000000000 ``` to the `[Manager]` section of `/etc/systemd/system.conf`. @@ -89,6 +91,8 @@ sudo systemctl daemon-reload sudo bash -c "cat >/etc/security/limits.d/90-solana-nofiles.conf </etc/security/limits.d/90-solana-nofiles.conf < Date: Tue, 24 Jun 2025 09:08:02 -0500 Subject: [PATCH 073/124] deprecate `get_public_ip_addr` in favor of `get_public_ip_addr_with_binding` (#6707) deprecate get_public_ip_addr in favor of get_public_ip_addr_with_binding --- net-utils/src/lib.rs | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 622d6b3cd0268e..54302fdac21ebd 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -41,6 +41,10 @@ pub(crate) const IP_ECHO_SERVER_RESPONSE_LENGTH: usize = HEADER_LENGTH + 23; /// Determine the public IP address of this machine by asking an ip_echo_server at the given /// address. 
+#[deprecated( + since = "3.0.0", + note = "Use `get_public_ip_addr_with_binding` instead" +)] pub fn get_public_ip_addr(ip_echo_server_addr: &SocketAddr) -> Result { let fut = ip_echo_server_request(*ip_echo_server_addr, IpEchoServerMessage::default()); let rt = tokio::runtime::Builder::new_current_thread() @@ -791,7 +795,11 @@ mod tests { let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); assert_eq!( - get_public_ip_addr(&server_ip_echo_addr).unwrap(), + get_public_ip_addr_with_binding( + &server_ip_echo_addr, + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + ) + .unwrap(), parse_host("127.0.0.1").unwrap(), ); assert_eq!(get_cluster_shred_version(&server_ip_echo_addr).unwrap(), 42); @@ -818,7 +826,11 @@ mod tests { let ip_echo_server_addr = server_udp_socket.local_addr().unwrap(); assert_eq!( - get_public_ip_addr(&ip_echo_server_addr).unwrap(), + get_public_ip_addr_with_binding( + &ip_echo_server_addr, + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + ) + .unwrap(), parse_host("127.0.0.1").unwrap(), ); assert_eq!( @@ -913,7 +925,11 @@ mod tests { ); assert_eq!( - get_public_ip_addr(&ip_echo_server_addr).unwrap(), + get_public_ip_addr_with_binding( + &ip_echo_server_addr, + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + ) + .unwrap(), parse_host("127.0.0.1").unwrap(), ); From 13799bcde94e0bbb7cc53a1dca5e5dbf1bda7507 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 24 Jun 2025 11:14:44 -0400 Subject: [PATCH 074/124] Adds logs when building the accounts index begins and ends (#6714) --- runtime/src/serde_snapshot.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index fa3ac5cf5858a6..e4688683666a73 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -49,6 +49,7 @@ use { Arc, }, thread::Builder, + time::Instant, }, storage::SerializableStorage, types::SerdeAccountsLtHash, @@ -1246,6 +1247,8 @@ where // This means, either when the cli arg is set, or when the snapshot has an accounts lt 
hash. let is_accounts_lt_hash_enabled = accounts_db.is_experimental_accumulator_hash_enabled() || has_accounts_lt_hash; + info!("Building accounts index..."); + let start = Instant::now(); let IndexGenerationInfo { accounts_data_len, rent_paying_accounts_by_partition, @@ -1256,6 +1259,7 @@ where genesis_config, is_accounts_lt_hash_enabled, ); + info!("Building accounts index... Done in {:?}", start.elapsed()); accounts_db .accounts_index .rent_paying_accounts_by_partition From 0b17e575f2a6fc7f6e32ed916affdc5714b64d3c Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Tue, 24 Jun 2025 10:39:54 -0500 Subject: [PATCH 075/124] clean up rent info/stats in index generation (#6700) * clean up rent info in index generation * fix tests --- accounts-db/src/accounts_db.rs | 103 -------------------------- accounts-db/src/accounts_db/tests.rs | 6 +- accounts-db/src/append_vec.rs | 7 -- accounts-db/src/tiered_storage/hot.rs | 2 - 4 files changed, 3 insertions(+), 115 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1016b4222d47f5..c8becd6f0f004a 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -577,10 +577,7 @@ pub struct IndexGenerationInfo { struct SlotIndexGenerationInfo { insert_time_us: u64, num_accounts: u64, - num_accounts_rent_paying: usize, accounts_data_len: u64, - amount_to_top_off_rent: u64, - rent_paying_accounts_by_partition: Vec, zero_lamport_pubkeys: Vec, all_accounts_are_zero_lamports: bool, /// Number of accounts in this slot that didn't already exist in the index @@ -614,8 +611,6 @@ struct GenerateIndexTimings { pub insertion_time_us: u64, pub storage_size_storages_us: u64, pub index_flush_us: u64, - pub rent_paying: AtomicUsize, - pub amount_to_top_off_rent: AtomicU64, pub total_including_duplicates: u64, pub accounts_data_len_dedup_time_us: u64, pub total_duplicate_slot_keys: u64, @@ -654,16 +649,6 @@ impl GenerateIndexTimings { i64 ), 
("index_flush_us", self.index_flush_us, i64), - ( - "total_rent_paying", - self.rent_paying.load(Ordering::Relaxed), - i64 - ), - ( - "amount_to_top_off_rent", - self.amount_to_top_off_rent.load(Ordering::Relaxed), - i64 - ), ( "total_items_including_duplicates", self.total_including_duplicates, @@ -7850,35 +7835,11 @@ impl AccountsDb { *self.latest_full_snapshot_slot.lock_write() = Some(slot); } - /// return Some(lamports_to_top_off) if 'account' would collect rent - fn stats_for_rent_payers( - pubkey: &Pubkey, - lamports: u64, - account_data_len: usize, - account_rent_epoch: Epoch, - executable: bool, - rent_collector: &RentCollector, - ) -> Option { - if lamports == 0 { - return None; - } - (rent_collector.should_collect_rent(pubkey, executable) - && !rent_collector - .get_rent_due(lamports, account_data_len, account_rent_epoch) - .is_exempt()) - .then(|| { - let min_balance = rent_collector.rent.minimum_balance(account_data_len); - // return lamports required to top off this account to make it rent exempt - min_balance.saturating_sub(lamports) - }) - } - fn generate_index_for_slot( &self, storage: &AccountStorageEntry, slot: Slot, store_id: AccountsFileId, - rent_collector: &RentCollector, storage_info: &StorageSizeAndCountMap, ) -> SlotIndexGenerationInfo { if storage.accounts.get_account_data_lens(&[0]).is_empty() { @@ -7886,10 +7847,7 @@ impl AccountsDb { } let secondary = !self.account_indexes.is_empty(); - let mut rent_paying_accounts_by_partition = Vec::default(); let mut accounts_data_len = 0; - let mut num_accounts_rent_paying = 0; - let mut amount_to_top_off_rent = 0; let mut stored_size_alive = 0; let mut zero_lamport_pubkeys = vec![]; let mut all_accounts_are_zero_lamports = true; @@ -7910,20 +7868,6 @@ impl AccountsDb { let items_len = items_local.len(); let items = items_local.into_iter().map(|info| { - if let Some(amount_to_top_off_rent_this_account) = Self::stats_for_rent_payers( - &info.pubkey, - info.lamports, - info.data_len as usize, - 
info.rent_epoch, - info.executable, - rent_collector, - ) { - amount_to_top_off_rent += amount_to_top_off_rent_this_account; - num_accounts_rent_paying += 1; - // remember this rent-paying account pubkey - rent_paying_accounts_by_partition.push(info.pubkey); - } - ( info.pubkey, AccountInfo::new( @@ -7982,10 +7926,7 @@ impl AccountsDb { SlotIndexGenerationInfo { insert_time_us, num_accounts: generate_index_results.count as u64, - num_accounts_rent_paying, accounts_data_len, - amount_to_top_off_rent, - rent_paying_accounts_by_partition, zero_lamport_pubkeys, all_accounts_are_zero_lamports, num_did_not_exist: generate_index_results.num_did_not_exist, @@ -8038,8 +7979,6 @@ impl AccountsDb { let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot let mut index_time = Measure::start("index"); let insertion_time_us = AtomicU64::new(0); - let rent_paying = AtomicUsize::new(0); - let amount_to_top_off_rent = AtomicU64::new(0); let total_including_duplicates = AtomicU64::new(0); let all_accounts_are_zero_lamports_slots = AtomicU64::new(0); let mut all_zeros_slots = Mutex::new(Vec::<(Slot, Arc)>::new()); @@ -8078,11 +8017,7 @@ impl AccountsDb { let SlotIndexGenerationInfo { insert_time_us: insert_us, num_accounts: total_this_slot, - num_accounts_rent_paying: rent_paying_this_slot, accounts_data_len: accounts_data_len_this_slot, - amount_to_top_off_rent: amount_to_top_off_rent_this_slot, - rent_paying_accounts_by_partition: - rent_paying_accounts_by_partition_this_slot, zero_lamport_pubkeys: zero_pubkeys_this_slot, all_accounts_are_zero_lamports, num_did_not_exist, @@ -8092,27 +8027,12 @@ impl AccountsDb { &storage, *slot, store_id, - &rent_collector, &storage_info, ); local_num_did_not_exist += num_did_not_exist; local_num_existed_in_mem += num_existed_in_mem; local_num_existed_on_disk += num_existed_on_disk; - - if rent_paying_this_slot > 0 { - // We don't have any rent paying accounts on mainnet, so 
this code should never be hit. - rent_paying.fetch_add(rent_paying_this_slot, Ordering::Relaxed); - amount_to_top_off_rent - .fetch_add(amount_to_top_off_rent_this_slot, Ordering::Relaxed); - let mut rent_paying_accounts_by_partition = - rent_paying_accounts_by_partition.lock().unwrap(); - rent_paying_accounts_by_partition_this_slot - .iter() - .for_each(|k| { - rent_paying_accounts_by_partition.add_account(k); - }); - } total_including_duplicates_sum += total_this_slot; accounts_data_len_sum += accounts_data_len_this_slot; if all_accounts_are_zero_lamports { @@ -8250,8 +8170,6 @@ impl AccountsDb { scan_time, index_time: index_time.as_us(), insertion_time_us: insertion_time_us.load(Ordering::Relaxed), - rent_paying, - amount_to_top_off_rent, total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed), total_num_unique_duplicate_keys: total_num_unique_duplicate_keys .load(Ordering::Relaxed), @@ -8332,7 +8250,6 @@ impl AccountsDb { duplicates_lt_hash, ) = self.visit_duplicate_pubkeys_during_startup( pubkeys, - &rent_collector, &timings, should_calculate_duplicates_lt_hash, ); @@ -8480,7 +8397,6 @@ impl AccountsDb { fn visit_duplicate_pubkeys_during_startup( &self, pubkeys: &[Pubkey], - rent_collector: &RentCollector, timings: &GenerateIndexTimings, should_calculate_duplicates_lt_hash: bool, ) -> (u64, u64, Option>) { @@ -8488,8 +8404,6 @@ impl AccountsDb { let mut num_duplicate_accounts = 0_u64; let mut duplicates_lt_hash = should_calculate_duplicates_lt_hash.then(|| Box::new(DuplicatesLtHash::default())); - let mut removed_rent_paying = 0; - let mut removed_top_off = 0; let mut lt_hash_time = Duration::default(); self.accounts_index.scan( pubkeys.iter(), @@ -8519,17 +8433,6 @@ impl AccountsDb { accounts_data_len_from_duplicates += data_len; } num_duplicate_accounts += 1; - if let Some(lamports_to_top_off) = Self::stats_for_rent_payers( - pubkey, - loaded_account.lamports(), - data_len, - loaded_account.rent_epoch(), - loaded_account.executable(), - 
rent_collector, - ) { - removed_rent_paying += 1; - removed_top_off += lamports_to_top_off; - } if let Some(duplicates_lt_hash) = duplicates_lt_hash.as_mut() { let (_, duration) = meas_dur!({ let account_lt_hash = @@ -8548,12 +8451,6 @@ impl AccountsDb { false, ScanFilter::All, ); - timings - .rent_paying - .fetch_sub(removed_rent_paying, Ordering::Relaxed); - timings - .amount_to_top_off_rent - .fetch_sub(removed_top_off, Ordering::Relaxed); timings .par_duplicates_lt_hash_us .fetch_add(lt_hash_time.as_micros() as u64, Ordering::Relaxed); diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 70cc08d57bf03d..a5a55544639629 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -5133,7 +5133,7 @@ define_accounts_db_test!(test_calculate_storage_count_and_alive_bytes, |accounts let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap(); let storage_info = StorageSizeAndCountMap::default(); - accounts.generate_index_for_slot(&storage, slot0, 0, &RentCollector::default(), &storage_info); + accounts.generate_index_for_slot(&storage, slot0, 0, &storage_info); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { let expected_stored_size = @@ -5156,7 +5156,7 @@ define_accounts_db_test!( // empty store let storage = accounts.create_and_insert_store(0, 1, "test"); let storage_info = StorageSizeAndCountMap::default(); - accounts.generate_index_for_slot(&storage, 0, 0, &RentCollector::default(), &storage_info); + accounts.generate_index_for_slot(&storage, 0, 0, &storage_info); assert!(storage_info.is_empty()); } ); @@ -5192,7 +5192,7 @@ define_accounts_db_test!( ); let storage_info = StorageSizeAndCountMap::default(); - accounts.generate_index_for_slot(&storage, 0, 0, &RentCollector::default(), &storage_info); + accounts.generate_index_for_slot(&storage, 0, 0, &storage_info); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { let expected_stored_size 
= diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index a5962318b6c849..1aad92819eac19 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -32,7 +32,6 @@ use { memmap2::MmapMut, meta::StoredAccountNoData, solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, - solana_clock::Epoch, solana_hash::Hash, solana_pubkey::Pubkey, solana_system_interface::MAX_PERMITTED_DATA_LENGTH, @@ -142,8 +141,6 @@ pub(crate) struct IndexInfoInner { pub offset: usize, pub pubkey: Pubkey, pub lamports: u64, - pub rent_epoch: Epoch, - pub executable: bool, pub data_len: u64, } @@ -985,8 +982,6 @@ impl AppendVec { lamports: account.lamports(), offset: account.offset(), data_len: account.data_len(), - executable: account.executable(), - rent_epoch: account.rent_epoch(), }, }); }); @@ -2287,8 +2282,6 @@ pub mod tests { assert_eq!(index_info.index_info.offset, *offset); assert_eq!(index_info.index_info.pubkey, *pubkey); assert_eq!(index_info.index_info.lamports, account.lamports()); - assert_eq!(index_info.index_info.rent_epoch, account.rent_epoch()); - assert_eq!(index_info.index_info.executable, account.executable()); assert_eq!(index_info.index_info.data_len, account.data().len() as u64); i += 1; diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index f67e35f22b6bef..fe5a0d213ae4d3 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -690,8 +690,6 @@ impl HotStorageReader { lamports, offset: AccountInfo::reduced_offset_to_offset(i), data_len: data_len as u64, - executable: meta.flags().executable(), - rent_epoch: meta.final_rent_epoch(account_block), } }, stored_size_aligned: stored_size(data_len), From a2fc295d279871e118387956eb9c9c4d2a5a31b0 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 24 Jun 2025 14:02:44 -0400 Subject: [PATCH 076/124] Removes RentPayingAccountsByPartition (#6715) --- accounts-db/src/accounts_db.rs | 7 --- 
accounts-db/src/accounts_index.rs | 7 +-- accounts-db/src/accounts_partition.rs | 68 +-------------------------- runtime/src/serde_snapshot.rs | 6 --- 4 files changed, 2 insertions(+), 86 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index c8becd6f0f004a..dff859af1e2cd9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -57,7 +57,6 @@ use { UpsertReclaim, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, accounts_index_storage::Startup, - accounts_partition::RentPayingAccountsByPartition, accounts_update_notifier_interface::AccountsUpdateNotifier, active_stats::{ActiveStatItem, ActiveStats}, ancestors::Ancestors, @@ -567,7 +566,6 @@ pub enum ScanStorageResult { #[derive(Debug, Default)] pub struct IndexGenerationInfo { pub accounts_data_len: u64, - pub rent_paying_accounts_by_partition: RentPayingAccountsByPartition, /// The lt hash of the old/duplicate accounts identified during index generation. /// Will be used when verifying the accounts lt hash, after rebuilding a Bank. 
pub duplicates_lt_hash: Option>, @@ -7958,8 +7956,6 @@ impl AccountsDb { ); let accounts_data_len = AtomicU64::new(0); - let rent_paying_accounts_by_partition = - Mutex::new(RentPayingAccountsByPartition::new(schedule)); let zero_lamport_pubkeys = Mutex::new(HashSet::new()); let mut outer_duplicates_lt_hash = None; @@ -8321,9 +8317,6 @@ impl AccountsDb { IndexGenerationInfo { accounts_data_len: accounts_data_len.load(Ordering::Relaxed), - rent_paying_accounts_by_partition: rent_paying_accounts_by_partition - .into_inner() - .unwrap(), duplicates_lt_hash: outer_duplicates_lt_hash, } } diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 41810da01d6eb7..f14dde5b2e1139 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -6,7 +6,6 @@ mod secondary; use { crate::{ accounts_index_storage::{AccountsIndexStorage, Startup}, - accounts_partition::RentPayingAccountsByPartition, ancestors::Ancestors, bucket_map_holder::Age, bucket_map_holder_stats::BucketMapHolderStats, @@ -40,7 +39,7 @@ use { path::PathBuf, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, Mutex, OnceLock, RwLock, + Arc, Mutex, RwLock, }, }, thiserror::Error, @@ -315,9 +314,6 @@ pub struct AccountsIndex + Into> { pub active_scans: AtomicUsize, /// # of slots between latest max and latest scan pub max_distance_to_min_scan_slot: AtomicU64, - - /// populated at generate_index time - accounts that could possibly be rent paying - pub rent_paying_accounts_by_partition: OnceLock, } impl + Into> AccountsIndex { @@ -350,7 +346,6 @@ impl + Into> AccountsIndex { roots_removed: AtomicUsize::default(), active_scans: AtomicUsize::default(), max_distance_to_min_scan_slot: AtomicU64::default(), - rent_paying_accounts_by_partition: OnceLock::default(), } } diff --git a/accounts-db/src/accounts_partition.rs b/accounts-db/src/accounts_partition.rs index 731e5409496e86..06faf88963415c 100644 --- a/accounts-db/src/accounts_partition.rs 
+++ b/accounts-db/src/accounts_partition.rs @@ -3,9 +3,8 @@ use { itertools::Itertools, log::trace, solana_clock::{Epoch, Slot, SlotCount, SlotIndex}, - solana_epoch_schedule::EpochSchedule, solana_pubkey::Pubkey, - std::{collections::HashSet, mem, ops::RangeInclusive}, + std::{mem, ops::RangeInclusive}, }; // Eager rent collection repeats in cyclic manner. @@ -342,57 +341,6 @@ pub fn partition_from_pubkey( result } -static EMPTY_HASHSET: std::sync::LazyLock> = - std::sync::LazyLock::new(HashSet::default); - -/// populated at startup with the accounts that were found that are rent paying. -/// These are the 'possible' rent paying accounts. -/// This set can never grow during runtime since it is not possible to create rent paying accounts now. -/// It can shrink during execution if a rent paying account is dropped to lamports=0 or is topped off. -/// The next time the validator restarts, it will remove the account from this list. -#[derive(Debug, Default)] -pub struct RentPayingAccountsByPartition { - /// 1st index is partition end index, 0..=432_000 - /// 2nd dimension is list of pubkeys which were identified at startup to be rent paying - /// At the moment, we use this data structure to verify all rent paying accounts are expected. - /// When we stop iterating the accounts index to FIND rent paying accounts, we will no longer need this to be a hashset. - /// It can just be a vec. - pub accounts: Vec>, - partition_count: PartitionsPerCycle, -} - -impl RentPayingAccountsByPartition { - /// create new struct. Need slots per epoch from 'epoch_schedule' - pub fn new(epoch_schedule: &EpochSchedule) -> Self { - let partition_count = epoch_schedule.slots_per_epoch; - Self { - partition_count, - accounts: (0..=partition_count) - .map(|_| HashSet::::default()) - .collect(), - } - } - /// Remember that 'pubkey' can possibly be rent paying. 
- pub fn add_account(&mut self, pubkey: &Pubkey) { - let partition_end_index = partition_from_pubkey(pubkey, self.partition_count); - let list = &mut self.accounts[partition_end_index as usize]; - - list.insert(*pubkey); - } - /// return all pubkeys that can possibly be rent paying with this partition end_index - pub fn get_pubkeys_in_partition_index( - &self, - partition_end_index: PartitionIndex, - ) -> &HashSet { - self.accounts - .get(partition_end_index as usize) - .unwrap_or(&EMPTY_HASHSET) - } - pub fn is_initialized(&self) -> bool { - self.partition_count != 0 - } -} - #[cfg(test)] pub(crate) mod tests { use {super::*, std::str::FromStr}; @@ -685,18 +633,4 @@ pub(crate) mod tests { ); let _ = test_map.range(range); } - - #[test] - fn test_add() { - let mut test = RentPayingAccountsByPartition::new(&EpochSchedule::custom(32, 0, false)); - let pk = Pubkey::from([1; 32]); - test.add_account(&pk); - // make sure duplicate adds only result in a single item - test.add_account(&pk); - assert_eq!(test.get_pubkeys_in_partition_index(0).len(), 1); - assert!(test.get_pubkeys_in_partition_index(1).is_empty()); - assert!(test.is_initialized()); - let test = RentPayingAccountsByPartition::default(); - assert!(!test.is_initialized()); - } } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index e4688683666a73..2f9423db479a66 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -1251,7 +1251,6 @@ where let start = Instant::now(); let IndexGenerationInfo { accounts_data_len, - rent_paying_accounts_by_partition, duplicates_lt_hash, } = accounts_db.generate_index( limit_load_slot_count_from_snapshot, @@ -1260,11 +1259,6 @@ where is_accounts_lt_hash_enabled, ); info!("Building accounts index... 
Done in {:?}", start.elapsed()); - accounts_db - .accounts_index - .rent_paying_accounts_by_partition - .set(rent_paying_accounts_by_partition) - .unwrap(); handle.join().unwrap(); measure_notify.stop(); From e5fda75b9bb8d4fbd39abbc2f524526f075ebf7b Mon Sep 17 00:00:00 2001 From: Rory Harris Date: Tue, 24 Jun 2025 11:45:29 -0700 Subject: [PATCH 077/124] Removing store_custom and splitting functionality into store_frozen and store_unfrozen (#6679) * Removing store_custom and splitting functionality into store_frozen and store_unfrozen * Removing calc_stored_meta and collapse flush_cache into store_accounts_frozen --- accounts-db/src/accounts_db.rs | 148 +++++++++------------------ accounts-db/src/accounts_db/stats.rs | 1 - 2 files changed, 50 insertions(+), 99 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index dff859af1e2cd9..e9d3111809a150 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -146,20 +146,6 @@ const SHRINK_COLLECT_CHUNK_SIZE: usize = 50; /// candidates for shrinking. 
const SHRINK_INSERT_ANCIENT_THRESHOLD: usize = 10; -#[derive(Debug)] -enum StoreTo<'a> { - /// write to cache - Cache, - /// write to storage - Storage(&'a Arc), -} - -impl StoreTo<'_> { - fn is_cached(&self) -> bool { - matches!(self, StoreTo::Cache) - } -} - #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ScanAccountStorageData { /// callback for accounts in storage will not include `data` @@ -6009,36 +5995,6 @@ impl AccountsDb { account_infos } - fn store_accounts_to<'a: 'c, 'b, 'c>( - &self, - accounts: &'c impl StorableAccounts<'b>, - store_to: &StoreTo, - transactions: Option<&'a [&'a SanitizedTransaction]>, - ) -> Vec { - let mut calc_stored_meta_time = Measure::start("calc_stored_meta"); - let slot = accounts.target_slot(); - if self - .read_only_accounts_cache - .can_slot_be_in_cache(accounts.target_slot()) - { - (0..accounts.len()).for_each(|index| { - // based on the patterns of how a validator writes accounts, it is almost always the case that there is no read only cache entry - // for this pubkey and slot. So, we can give that hint to the `remove` for performance. 
- self.read_only_accounts_cache - .remove_assume_not_present(*accounts.pubkey(index)); - }); - } - calc_stored_meta_time.stop(); - self.stats - .calc_stored_meta - .fetch_add(calc_stored_meta_time.as_us(), Ordering::Relaxed); - - match store_to { - StoreTo::Cache => self.write_accounts_to_cache(slot, accounts, transactions), - StoreTo::Storage(storage) => self.write_accounts_to_storage(slot, storage, accounts), - } - } - fn report_store_stats(&self) { let mut total_count = 0; let mut newest_slot = 0; @@ -7598,11 +7554,6 @@ impl AccountsDb { read_cache_stats.evictor_wakeup_count_productive, i64 ), - ( - "calc_stored_meta_us", - self.stats.calc_stored_meta.swap(0, Ordering::Relaxed), - i64 - ), ( "handle_dead_keys_us", self.stats.handle_dead_keys_us.swap(0, Ordering::Relaxed), @@ -7646,71 +7597,73 @@ impl AccountsDb { } } + /// Stores accounts in the write cache and updates the index. + /// This should only be used for accounts that are unrooted (unfrozen) fn store_accounts_unfrozen<'a>( &self, accounts: impl StorableAccounts<'a>, transactions: Option<&'a [&'a SanitizedTransaction]>, update_index_thread_selection: UpdateIndexThreadSelection, ) { - // We are storing accounts unfrozen accounts which - // will always be stored in the cache - let store_to = StoreTo::Cache; - // If the store is stored to the cache, reclaims are not needed - // Default behavior for cache stores is to ignore reclaims - let reclaim = StoreReclaims::Default; - - self.store_accounts_custom( - accounts, - &store_to, - transactions, - reclaim, + let slot = accounts.target_slot(); + + // Store the accounts in the write cache + let mut store_accounts_time = Measure::start("store_accounts"); + let infos = self.write_accounts_to_cache(slot, &accounts, transactions); + store_accounts_time.stop(); + self.stats + .store_accounts + .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed); + + // Update the index + let mut update_index_time = Measure::start("update_index"); + + self.update_index( + 
infos, + &accounts, + UpsertReclaim::PreviousSlotEntryWasCached, update_index_thread_selection, &self.thread_pool, ); + + update_index_time.stop(); + self.stats + .store_update_index + .fetch_add(update_index_time.as_us(), Ordering::Relaxed); + self.stats + .store_num_accounts + .fetch_add(accounts.len() as u64, Ordering::Relaxed); } + /// Stores accounts in the storage and updates the index. + /// This should only be used on accounts that are rooted (frozen) pub fn store_accounts_frozen<'a>( &self, accounts: impl StorableAccounts<'a>, storage: &Arc, ) -> StoreAccountsTiming { - self.store_accounts_custom( - accounts, - &StoreTo::Storage(storage), - None, - StoreReclaims::Ignore, - UpdateIndexThreadSelection::PoolWithThreshold, - &self.thread_pool_clean, - ) - } - - fn store_accounts_custom<'a>( - &self, - accounts: impl StorableAccounts<'a>, - store_to: &StoreTo, - transactions: Option<&'a [&'a SanitizedTransaction]>, - reclaim: StoreReclaims, - update_index_thread_selection: UpdateIndexThreadSelection, - thread_pool: &ThreadPool, - ) -> StoreAccountsTiming { - self.stats - .store_num_accounts - .fetch_add(accounts.len() as u64, Ordering::Relaxed); + let slot = accounts.target_slot(); let mut store_accounts_time = Measure::start("store_accounts"); - let infos = self.store_accounts_to(&accounts, store_to, transactions); + + // Flush the read cache if neccessary. This will occur during shrink or clean + if self.read_only_accounts_cache.can_slot_be_in_cache(slot) { + (0..accounts.len()).for_each(|index| { + // based on the patterns of how a validator writes accounts, it is almost always the case that there is no read only cache entry + // for this pubkey and slot. So, we can give that hint to the `remove` for performance. 
+ self.read_only_accounts_cache + .remove_assume_not_present(*accounts.pubkey(index)); + }); + } + + // Write the accounts to storage + let infos = self.write_accounts_to_storage(slot, storage, &accounts); store_accounts_time.stop(); self.stats .store_accounts .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed); let mut update_index_time = Measure::start("update_index"); - let reclaim = if matches!(reclaim, StoreReclaims::Ignore) { - UpsertReclaim::IgnoreReclaims - } else if store_to.is_cached() { - UpsertReclaim::PreviousSlotEntryWasCached - } else { - UpsertReclaim::PopulateReclaims - }; + let reclaim = UpsertReclaim::IgnoreReclaims; // if we are squashing a single slot, then we can expect a single dead slot let expected_single_dead_slot = @@ -7723,9 +7676,9 @@ impl AccountsDb { let mut reclaims = self.update_index( infos, &accounts, - reclaim, - update_index_thread_selection, - thread_pool, + UpsertReclaim::IgnoreReclaims, + UpdateIndexThreadSelection::PoolWithThreshold, + &self.thread_pool_clean, ); // For each updated account, `reclaims` should only have at most one @@ -7735,14 +7688,13 @@ impl AccountsDb { // entries reclaims.retain(|(_, r)| !r.is_cached()); - if store_to.is_cached() { - assert!(reclaims.is_empty()); - } - update_index_time.stop(); self.stats .store_update_index .fetch_add(update_index_time.as_us(), Ordering::Relaxed); + self.stats + .store_num_accounts + .fetch_add(accounts.len() as u64, Ordering::Relaxed); // A store for a single slot should: // 1) Only make "reclaims" for the same slot diff --git a/accounts-db/src/accounts_db/stats.rs b/accounts-db/src/accounts_db/stats.rs index 9dc99c51363f02..8d8a7ea1c1d564 100644 --- a/accounts-db/src/accounts_db/stats.rs +++ b/accounts-db/src/accounts_db/stats.rs @@ -15,7 +15,6 @@ pub struct AccountsStats { pub last_store_report: AtomicInterval, pub store_hash_accounts: AtomicU64, - pub calc_stored_meta: AtomicU64, pub store_accounts: AtomicU64, pub store_update_index: AtomicU64, pub 
store_handle_reclaims: AtomicU64, From 07f08b029848100c77a42ee9b4e34d9b7233f7e7 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 24 Jun 2025 13:52:51 -0500 Subject: [PATCH 078/124] PoH: track_transaction_indexes static after startup (#6701) --- banking-bench/src/main.rs | 1 - core/src/banking_simulation.rs | 1 - core/src/banking_stage.rs | 6 +----- core/src/replay_stage.rs | 14 +------------- core/src/validator.rs | 5 ++++- core/tests/unified_scheduler.rs | 2 +- poh/benches/poh.rs | 6 ++++-- poh/benches/transaction_recorder.rs | 13 +++++-------- poh/src/poh_recorder.rs | 28 +++++++++++++++------------- unified-scheduler-pool/src/lib.rs | 4 ++-- 10 files changed, 33 insertions(+), 47 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 0869dd7e54a1be..9afbb6cfd16117 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -559,7 +559,6 @@ fn main() { &bank_forks, &poh_recorder, new_bank, - false, ); bank = bank_forks.read().unwrap().working_bank_with_scheduler(); assert_matches!(poh_recorder.read().unwrap().bank(), Some(_)); diff --git a/core/src/banking_simulation.rs b/core/src/banking_simulation.rs index 6a5adf7c21f6cf..00e673627b5923 100644 --- a/core/src/banking_simulation.rs +++ b/core/src/banking_simulation.rs @@ -501,7 +501,6 @@ impl SimulatorLoop { &self.bank_forks, &self.poh_recorder, new_bank, - false, ); (bank, bank_created) = ( self.bank_forks diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index a177abd4ea3ed7..45f32b39c78c3e 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -674,13 +674,9 @@ pub(crate) fn update_bank_forks_and_poh_recorder_for_new_tpu_bank( bank_forks: &RwLock, poh_recorder: &RwLock, tpu_bank: Bank, - track_transaction_indexes: bool, ) { let tpu_bank = bank_forks.write().unwrap().insert(tpu_bank); - poh_recorder - .write() - .unwrap() - .set_bank(tpu_bank, track_transaction_indexes); + 
poh_recorder.write().unwrap().set_bank(tpu_bank); } #[cfg(test)] diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index f3009dad91a589..fb8b4575fc6ed5 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1172,7 +1172,6 @@ impl ReplayStage { &mut skipped_slots_info, &banking_tracer, has_new_vote_been_rooted, - transaction_status_sender.is_some(), ); let poh_bank = poh_recorder.read().unwrap().bank(); @@ -2095,7 +2094,6 @@ impl ReplayStage { skipped_slots_info: &mut SkippedSlotsInfo, banking_tracer: &Arc, has_new_vote_been_rooted: bool, - track_transaction_indexes: bool, ) -> bool { // all the individual calls to poh_recorder.read() are designed to // increase granularity, decrease contention @@ -2226,12 +2224,7 @@ impl ReplayStage { // new()-ing of its child bank banking_tracer.hash_event(parent.slot(), &parent.last_blockhash(), &parent.hash()); - update_bank_forks_and_poh_recorder_for_new_tpu_bank( - bank_forks, - poh_recorder, - tpu_bank, - track_transaction_indexes, - ); + update_bank_forks_and_poh_recorder_for_new_tpu_bank(bank_forks, poh_recorder, tpu_bank); true } else { error!("{} No next leader found", my_pubkey); @@ -8667,7 +8660,6 @@ pub(crate) mod tests { // A vote has not technically been rooted, but it doesn't matter for // this test to use true to avoid skipping the leader slot let has_new_vote_been_rooted = true; - let track_transaction_indexes = false; assert!(!ReplayStage::maybe_start_leader( my_pubkey, @@ -8681,7 +8673,6 @@ pub(crate) mod tests { &mut SkippedSlotsInfo::default(), &banking_tracer, has_new_vote_been_rooted, - track_transaction_indexes, )); } @@ -9322,7 +9313,6 @@ pub(crate) mod tests { // A vote has not technically been rooted, but it doesn't matter for // this test to use true to avoid skipping the leader slot let has_new_vote_been_rooted = true; - let track_transaction_indexes = false; // We should not attempt to start leader for the dummy_slot assert_matches!( @@ -9341,7 +9331,6 @@ pub(crate) 
mod tests { &mut SkippedSlotsInfo::default(), &banking_tracer, has_new_vote_been_rooted, - track_transaction_indexes, )); // Register another slots worth of ticks with PoH recorder @@ -9368,7 +9357,6 @@ pub(crate) mod tests { &mut SkippedSlotsInfo::default(), &banking_tracer, has_new_vote_been_rooted, - track_transaction_indexes, )); // Get the new working bank, which is also the new leader bank/slot let working_bank = bank_forks.read().unwrap().working_bank(); diff --git a/core/src/validator.rs b/core/src/validator.rs index 9b8bb27023fb3f..d3063df6479d56 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -934,7 +934,7 @@ impl Validator { let leader_schedule_cache = Arc::new(leader_schedule_cache); let startup_verification_complete; - let (poh_recorder, entry_receiver) = { + let (mut poh_recorder, entry_receiver) = { let bank = &bank_forks.read().unwrap().working_bank(); startup_verification_complete = Arc::clone(bank.get_startup_verification_complete()); PohRecorder::new_with_clear_signal( @@ -951,6 +951,9 @@ impl Validator { exit.clone(), ) }; + if transaction_status_sender.is_some() { + poh_recorder.track_transaction_indexes(); + } let (record_sender, record_receiver) = unbounded(); let transaction_recorder = TransactionRecorder::new(record_sender, poh_recorder.is_exited.clone()); diff --git a/core/tests/unified_scheduler.rs b/core/tests/unified_scheduler.rs index 4b21fa123c0042..a5c21542dd7675 100644 --- a/core/tests/unified_scheduler.rs +++ b/core/tests/unified_scheduler.rs @@ -281,7 +281,7 @@ fn test_scheduler_producing_blocks() { poh_recorder .write() .unwrap() - .set_bank(tpu_bank.clone_with_scheduler(), false); + .set_bank(tpu_bank.clone_with_scheduler()); tpu_bank.unpause_new_block_production_scheduler(); let tpu_bank = bank_forks.read().unwrap().working_bank_with_scheduler(); assert_eq!(tpu_bank.transaction_count(), 0); diff --git a/poh/benches/poh.rs b/poh/benches/poh.rs index b12ac32ae8ab3b..c26a343d63730d 100644 --- a/poh/benches/poh.rs 
+++ b/poh/benches/poh.rs @@ -97,9 +97,10 @@ fn bench_poh_recorder_record_transaction_index(bencher: &mut Bencher) { &PohConfig::default(), Arc::new(AtomicBool::default()), ); + poh_recorder.track_transaction_indexes(); let h1 = hash(b"hello Agave, hello Anza!"); - poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); + poh_recorder.set_bank_for_test(bank.clone()); poh_recorder.tick(); let txs: [SanitizedTransaction; 7] = [ SanitizedTransaction::from_transaction_for_tests(test_tx()), @@ -145,8 +146,9 @@ fn bench_poh_recorder_set_bank(bencher: &mut Bencher) { &PohConfig::default(), Arc::new(AtomicBool::default()), ); + poh_recorder.track_transaction_indexes(); bencher.iter(|| { - poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); + poh_recorder.set_bank_for_test(bank.clone()); poh_recorder.tick(); poh_recorder.clear_bank_for_test(); }); diff --git a/poh/benches/transaction_recorder.rs b/poh/benches/transaction_recorder.rs index 0b7d8e3489d8dc..33e3ff876978b3 100644 --- a/poh/benches/transaction_recorder.rs +++ b/poh/benches/transaction_recorder.rs @@ -57,10 +57,7 @@ fn bench_record_transactions(c: &mut Criterion) { &genesis_config_info.genesis_config.poh_config, exit.clone(), ); - poh_recorder.set_bank( - BankWithScheduler::new_without_scheduler(bank.clone()), - false, - ); + poh_recorder.set_bank(BankWithScheduler::new_without_scheduler(bank.clone())); let (record_sender, record_receiver) = crossbeam_channel::unbounded(); let transaction_recorder = TransactionRecorder::new(record_sender, exit.clone()); @@ -103,10 +100,10 @@ fn bench_record_transactions(c: &mut Criterion) { &Pubkey::default(), bank.slot().wrapping_add(1), )); - poh_recorder.write().unwrap().set_bank( - BankWithScheduler::new_without_scheduler(bank.clone()), - false, - ); + poh_recorder + .write() + .unwrap() + .set_bank(BankWithScheduler::new_without_scheduler(bank.clone())); let start = Instant::now(); for txs in tx_batches { diff --git a/poh/src/poh_recorder.rs 
b/poh/src/poh_recorder.rs index cb9f3da5817291..64641b3d6564e7 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -194,6 +194,7 @@ pub struct PohRecorder { // Allocation to hold PohEntrys recorded into PoHStream. entries: Vec, + track_transaction_indexes: bool, } impl PohRecorder { @@ -279,11 +280,16 @@ impl PohRecorder { last_reported_slot_for_pending_fork: Arc::default(), is_exited, entries: Vec::with_capacity(64), + track_transaction_indexes: false, }, working_bank_receiver, ) } + pub fn track_transaction_indexes(&mut self) { + self.track_transaction_indexes = true; + } + // synchronize PoH with a bank pub fn reset(&mut self, reset_bank: Arc, next_leader_slot: Option<(Slot, Slot)>) { self.clear_bank(); @@ -426,7 +432,7 @@ impl PohRecorder { } } - pub fn set_bank(&mut self, bank: BankWithScheduler, track_transaction_indexes: bool) { + pub fn set_bank(&mut self, bank: BankWithScheduler) { assert!(self.working_bank.is_none()); self.leader_bank_notifier.set_in_progress(&bank); let working_bank = WorkingBank { @@ -434,7 +440,7 @@ impl PohRecorder { max_tick_height: bank.max_tick_height(), bank, start: Arc::new(Instant::now()), - transaction_index: track_transaction_indexes.then_some(0), + transaction_index: self.track_transaction_indexes.then_some(0), }; trace!("new working bank"); assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot()); @@ -868,12 +874,7 @@ impl PohRecorder { #[cfg(feature = "dev-context-only-utils")] pub fn set_bank_for_test(&mut self, bank: Arc) { - self.set_bank(BankWithScheduler::new_without_scheduler(bank), false) - } - - #[cfg(feature = "dev-context-only-utils")] - pub fn set_bank_with_transaction_index_for_test(&mut self, bank: Arc) { - self.set_bank(BankWithScheduler::new_without_scheduler(bank), true) + self.set_bank(BankWithScheduler::new_without_scheduler(bank)) } #[cfg(feature = "dev-context-only-utils")] @@ -912,12 +913,12 @@ fn do_create_test_recorder( &poh_config, exit.clone(), ); + if 
track_transaction_indexes { + poh_recorder.track_transaction_indexes(); + } let ticks_per_slot = bank.ticks_per_slot(); - poh_recorder.set_bank( - BankWithScheduler::new_without_scheduler(bank), - track_transaction_indexes, - ); + poh_recorder.set_bank(BankWithScheduler::new_without_scheduler(bank)); let (record_sender, record_receiver) = unbounded(); let transaction_recorder = TransactionRecorder::new(record_sender, exit.clone()); @@ -1378,8 +1379,9 @@ mod tests { &PohConfig::default(), Arc::new(AtomicBool::default()), ); + poh_recorder.track_transaction_indexes(); - poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); + poh_recorder.set_bank_for_test(bank.clone()); poh_recorder.tick(); assert_eq!( poh_recorder diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 408a1e11bbef0e..00e20a443b2db4 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -3686,7 +3686,7 @@ mod tests { poh_recorder .write() .unwrap() - .set_bank(bank.clone_with_scheduler(), true); + .set_bank(bank.clone_with_scheduler()); bank.schedule_transaction_executions([(tx, ORIGINAL_TRANSACTION_INDEX)].into_iter()) .unwrap(); bank.unpause_new_block_production_scheduler(); @@ -3722,7 +3722,7 @@ mod tests { poh_recorder .write() .unwrap() - .set_bank(bank.clone_with_scheduler(), true); + .set_bank(bank.clone_with_scheduler()); bank.unpause_new_block_production_scheduler(); // Calling wait_for_completed_scheduler() for block production scheduler causes it to be From f1d74cac649340416c117a9f166103b865802c9d Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 24 Jun 2025 15:58:34 -0400 Subject: [PATCH 079/124] Removes cluster-type from snapshot integration tests (#6716) --- .config/nextest.toml | 14 +---------- core/tests/snapshots.rs | 52 ++++++++--------------------------------- 2 files changed, 11 insertions(+), 55 deletions(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index 
54ea8a384b4d0a..381428729ddeff 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -98,19 +98,7 @@ filter = 'package(solana-core) & test(/^banking_stage::consumer::tests::test_ban slow-timeout = { period = "60s", terminate-after = 8 } [[profile.ci.overrides]] -filter = 'package(solana-core) & test(/^test_slots_to_snapshot::v1_2_0_development_expects/)' -slow-timeout = { period = "60s", terminate-after = 2 } - -[[profile.ci.overrides]] -filter = 'package(solana-core) & test(/^test_slots_to_snapshot::v1_2_0_testnet_expects/)' -slow-timeout = { period = "60s", terminate-after = 2 } - -[[profile.ci.overrides]] -filter = 'package(solana-core) & test(/^test_slots_to_snapshot::v1_2_0_devnet_expects/)' -slow-timeout = { period = "60s", terminate-after = 2 } - -[[profile.ci.overrides]] -filter = 'package(solana-core) & test(/^test_slots_to_snapshot::v1_2_0_mainnetbeta_expects/)' +filter = 'package(solana-core) & test(/^test_slots_to_snapshot/)' slow-timeout = { period = "60s", terminate-after = 2 } [[profile.ci.overrides]] diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 842a243c72e8e1..6e4bc1a93eb245 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -14,10 +14,7 @@ use { accounts_hash_verifier::AccountsHashVerifier, snapshot_packager_service::{PendingSnapshotPackages, SnapshotPackagerService}, }, - solana_genesis_config::{ - ClusterType::{self, Development, Devnet, MainnetBeta, Testnet}, - GenesisConfig, - }, + solana_genesis_config::GenesisConfig, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_hash::Hash, solana_keypair::Keypair, @@ -75,7 +72,6 @@ struct SnapshotTestConfig { impl SnapshotTestConfig { fn new( snapshot_version: SnapshotVersion, - cluster_type: ClusterType, full_snapshot_archive_interval: SnapshotInterval, incremental_snapshot_archive_interval: SnapshotInterval, ) -> SnapshotTestConfig { @@ -87,12 +83,11 @@ impl SnapshotTestConfig { // account will not be stored in accounts-db 
but still cached in // bank stakes which results in mismatch when banks are loaded from // snapshots. - let mut genesis_config_info = create_genesis_config_with_leader( + let genesis_config_info = create_genesis_config_with_leader( 10_000, // mint_lamports &solana_pubkey::new_rand(), // validator_pubkey 1, // validator_stake_lamports ); - genesis_config_info.genesis_config.cluster_type = cluster_type; let bank0 = Bank::new_with_paths_for_tests( &genesis_config_info.genesis_config, Arc::::default(), @@ -178,7 +173,6 @@ fn restore_from_snapshot( // `last_slot` bank fn run_bank_forks_snapshot_n( snapshot_version: SnapshotVersion, - cluster_type: ClusterType, last_slot: Slot, f: F, set_root_interval: u64, @@ -189,7 +183,6 @@ fn run_bank_forks_snapshot_n( // Set up snapshotting config let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, - cluster_type, SnapshotInterval::Slots(NonZeroU64::new(set_root_interval).unwrap()), SnapshotInterval::Disabled, ); @@ -261,16 +254,12 @@ fn run_bank_forks_snapshot_n( ); } -#[test_case(V1_2_0, Development)] -#[test_case(V1_2_0, Devnet)] -#[test_case(V1_2_0, Testnet)] -#[test_case(V1_2_0, MainnetBeta)] -fn test_bank_forks_snapshot(snapshot_version: SnapshotVersion, cluster_type: ClusterType) { +#[test_case(V1_2_0)] +fn test_bank_forks_snapshot(snapshot_version: SnapshotVersion) { // create banks up to slot 4 and create 1 new account in each bank. 
test that bank 4 snapshots // and restores correctly run_bank_forks_snapshot_n( snapshot_version, - cluster_type, 4, |bank, mint_keypair| { let key1 = Keypair::new().pubkey(); @@ -297,11 +286,8 @@ fn goto_end_of_slot(bank: &Bank) { } } -#[test_case(V1_2_0, Development)] -#[test_case(V1_2_0, Devnet)] -#[test_case(V1_2_0, Testnet)] -#[test_case(V1_2_0, MainnetBeta)] -fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: ClusterType) { +#[test_case(V1_2_0)] +fn test_slots_to_snapshot(snapshot_version: SnapshotVersion) { solana_logger::setup(); let num_set_roots = MAX_CACHE_ENTRIES * 2; @@ -310,7 +296,6 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust // Make sure this test never clears bank.slots_since_snapshot let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, - cluster_type, SnapshotInterval::Slots( NonZeroU64::new((*add_root_interval * num_set_roots * 2) as Slot).unwrap(), ), @@ -379,14 +364,8 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust } } -#[test_case(V1_2_0, Development)] -#[test_case(V1_2_0, Devnet)] -#[test_case(V1_2_0, Testnet)] -#[test_case(V1_2_0, MainnetBeta)] -fn test_bank_forks_status_cache_snapshot( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, -) { +#[test_case(V1_2_0)] +fn test_bank_forks_status_cache_snapshot(snapshot_version: SnapshotVersion) { // create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time // this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves // ahead. Also tests the status_cache purge and status cache snapshotting. 
@@ -396,7 +375,6 @@ fn test_bank_forks_status_cache_snapshot( for set_root_interval in &[1, 4] { run_bank_forks_snapshot_n( snapshot_version, - cluster_type, (MAX_CACHE_ENTRIES * 2) as u64, |bank, mint_keypair| { let tx = system_transaction::transfer( @@ -420,14 +398,8 @@ fn test_bank_forks_status_cache_snapshot( } } -#[test_case(V1_2_0, Development)] -#[test_case(V1_2_0, Devnet)] -#[test_case(V1_2_0, Testnet)] -#[test_case(V1_2_0, MainnetBeta)] -fn test_bank_forks_incremental_snapshot( - snapshot_version: SnapshotVersion, - cluster_type: ClusterType, -) { +#[test_case(V1_2_0)] +fn test_bank_forks_incremental_snapshot(snapshot_version: SnapshotVersion) { solana_logger::setup(); const SET_ROOT_INTERVAL: Slot = 2; @@ -447,7 +419,6 @@ fn test_bank_forks_incremental_snapshot( let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, - cluster_type, SnapshotInterval::Slots(NonZeroU64::new(FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap()), SnapshotInterval::Slots( NonZeroU64::new(INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap(), @@ -645,13 +616,11 @@ enum VerifySnapshotHashKind { /// Spin up the background services fully then test taking & verifying snapshots #[test_matrix( V1_2_0, - [Development, Devnet, Testnet, MainnetBeta], [VerifyAccountsKind::Merkle, VerifyAccountsKind::Lattice], [VerifySnapshotHashKind::Merkle, VerifySnapshotHashKind::Lattice] )] fn test_snapshots_with_background_services( snapshot_version: SnapshotVersion, - cluster_type: ClusterType, verify_accounts_kind: VerifyAccountsKind, verify_snapshot_hash_kind: VerifySnapshotHashKind, ) { @@ -686,7 +655,6 @@ fn test_snapshots_with_background_services( let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, - cluster_type, SnapshotInterval::Slots(NonZeroU64::new(FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap()), SnapshotInterval::Slots( NonZeroU64::new(INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap(), From b65f9b010e87a3d4dfc0e721265129b453b06e5e Mon Sep 17 00:00:00 2001 
From: Brooks Date: Tue, 24 Jun 2025 17:25:25 -0400 Subject: [PATCH 080/124] Updates ReadOnlyAccountsHash::remove_assume_not_present() to take pubkey by reference (#6718) --- accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/read_only_accounts_cache.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e9d3111809a150..ab5feb37312f88 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7651,7 +7651,7 @@ impl AccountsDb { // based on the patterns of how a validator writes accounts, it is almost always the case that there is no read only cache entry // for this pubkey and slot. So, we can give that hint to the `remove` for performance. self.read_only_accounts_cache - .remove_assume_not_present(*accounts.pubkey(index)); + .remove_assume_not_present(accounts.pubkey(index)); }); } diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs index 2ec2eda495233f..be56a0058c9f35 100644 --- a/accounts-db/src/read_only_accounts_cache.rs +++ b/accounts-db/src/read_only_accounts_cache.rs @@ -211,15 +211,17 @@ impl ReadOnlyAccountsCache { /// remove entry if it exists. /// Assume the entry does not exist for performance. 
- pub(crate) fn remove_assume_not_present(&self, pubkey: Pubkey) -> Option { + pub(crate) fn remove_assume_not_present(&self, pubkey: &Pubkey) -> Option { // get read lock first to see if the entry exists - _ = self.cache.get(&pubkey)?; - self.remove(pubkey) + self.cache + .contains_key(pubkey) + .then(|| self.remove(pubkey)) + .flatten() } #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] - pub(crate) fn remove(&self, pubkey: Pubkey) -> Option { - Self::do_remove(&pubkey, &self.cache, &self.data_size).map(|entry| entry.account) + pub(crate) fn remove(&self, pubkey: &Pubkey) -> Option { + Self::do_remove(pubkey, &self.cache, &self.data_size).map(|entry| entry.account) } /// Removes `key` from the cache, if present, and returns the account entry. From cc3cafd5e5a30d22a2bac0cccdba22fdfbab54ad Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Tue, 24 Jun 2025 16:27:47 -0500 Subject: [PATCH 081/124] typo (#6719) --- accounts-db/src/accounts_file.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 67c1a591c59150..387f7b53911297 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -366,7 +366,7 @@ impl AccountsFile { Self::AppendVec(av) => av.calculate_stored_size(data_len), Self::TieredStorage(ts) => ts .reader() - .expect("Reader must be initalized as stored size is specific to format") + .expect("Reader must be initialized as stored size is specific to format") .calculate_stored_size(data_len), } } From 6a77548fd44615ad48c5dfb2b60b704698283ed0 Mon Sep 17 00:00:00 2001 From: Rory Harris Date: Tue, 24 Jun 2025 14:47:15 -0700 Subject: [PATCH 082/124] Add length of obsolete account vector to storage hash (#6673) * Add length of obsolete account vector to storage hash * Resolving issue where hash cache hit could result in hash mismatch --- accounts-db/src/accounts_db.rs | 20 ++++++++++++-- 
.../src/accounts_db/scan_account_storage.rs | 3 ++- accounts-db/src/accounts_db/tests.rs | 27 ++++++++++++++++--- 3 files changed, 43 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ab5feb37312f88..e462f138e54f8e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -6286,15 +6286,31 @@ impl AccountsDb { } } - /// hash info about 'storage' into 'hasher' - /// return true iff storage is valid for loading from cache + /// Hash information about `storage` into `hasher`. + /// + /// # Parameters + /// - `storage`: The storage to hash. + /// - `slot`: The slot of the storage. + /// - `hash_slot`: The slot at which the storage is being hashed. + /// Obsolete account data is only relevant for the hash if `hash_slot` is greater than the slot that marked the account obsolete. + /// + /// # Returns + /// `true` if the storage is valid for loading from the cache. fn hash_storage_info( hasher: &mut impl StdHasher, storage: &AccountStorageEntry, slot: Slot, + hash_slot: Slot, ) -> bool { // hash info about this storage storage.written_bytes().hash(hasher); + + // Obsolete accounts change the hash as they may cause accounts to no longer be included in the hash + storage + .get_obsolete_accounts(Some(hash_slot)) + .len() + .hash(hasher); + slot.hash(hasher); let storage_file = storage.accounts.path(); storage_file.hash(hasher); diff --git a/accounts-db/src/accounts_db/scan_account_storage.rs b/accounts-db/src/accounts_db/scan_account_storage.rs index 6f9afd61fde6c4..388d22d8906af0 100644 --- a/accounts-db/src/accounts_db/scan_account_storage.rs +++ b/accounts-db/src/accounts_db/scan_account_storage.rs @@ -232,6 +232,7 @@ impl AccountsDb { .range() .end .saturating_sub(slots_per_epoch); + let max_slot = snapshot_storages.max_slot_inclusive(); stats.scan_chunks = splitter.chunk_count; @@ -254,7 +255,7 @@ impl AccountsDb { self.update_old_slot_stats(stats, storage); } if let Some(storage) 
= storage { - let ok = Self::hash_storage_info(&mut hasher, storage, slot); + let ok = Self::hash_storage_info(&mut hasher, storage, slot, max_slot); if !ok { load_from_cache = false; break; diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index a5a55544639629..e45d13af2e5b76 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -6131,29 +6131,48 @@ fn test_hash_storage_info() { let mark_alive = false; let storage = sample_storage_with_entries(&tf, slot, &pubkey1, mark_alive); - let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot); let hash = hasher.finish(); // can't assert hash here - it is a function of mod date assert!(load); let slot = 2; // changed this let mut hasher = DefaultHasher::new(); - let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot); let hash2 = hasher.finish(); assert_ne!(hash, hash2); // slot changed, these should be different // can't assert hash here - it is a function of mod date assert!(load); let mut hasher = DefaultHasher::new(); append_sample_data_to_storage(&storage, &solana_pubkey::new_rand(), false, None); - let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot); let hash3 = hasher.finish(); assert_ne!(hash2, hash3); // moddate and written size changed // can't assert hash here - it is a function of mod date assert!(load); let mut hasher = DefaultHasher::new(); - let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot); let hash4 = hasher.finish(); assert_eq!(hash4, hash3); // same // can't assert hash here - it is a function of mod date + + assert!(load); + let mut hasher = 
DefaultHasher::new(); + storage.mark_accounts_obsolete(vec![(0, 136)].into_iter(), slot + 1); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot); + let hash5 = hasher.finish(); + assert_eq!(hash5, hash4); // Obsolete accounts hasn't changed, as the obsolete account is newer than the slot being hashed + assert!(load); + + let mut hasher = DefaultHasher::new(); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot + 1); + let hash6 = hasher.finish(); + assert_ne!(hash6, hash5); // Obsolete accounts has changed, as the obsolete account is now included in the hash + assert!(load); + + let mut hasher = DefaultHasher::new(); + let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot, slot + 2); + let hash7 = hasher.finish(); + assert_eq!(hash7, hash6); // Nothing has changed even though the slot that the hash is being performed at has changed. assert!(load); } } From e6824630602a76342246bfbc943a452d9661935b Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 24 Jun 2025 17:47:41 -0400 Subject: [PATCH 083/124] Removes snapshot-version from snapshot integration tests (#6717) --- core/tests/snapshots.rs | 43 +++++++++++++---------------------------- 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 6e4bc1a93eb245..e8bda483c416e8 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -32,10 +32,7 @@ use { snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_controller::SnapshotController, - snapshot_utils::{ - self, SnapshotInterval, - SnapshotVersion::{self, V1_2_0}, - }, + snapshot_utils::{self, SnapshotInterval}, status_cache::MAX_CACHE_ENTRIES, }, solana_sha256_hasher::hashv, @@ -53,7 +50,7 @@ use { time::{Duration, Instant}, }, tempfile::TempDir, - test_case::{test_case, test_matrix}, + test_case::test_matrix, }; struct SnapshotTestConfig { @@ -71,7 +68,6 @@ struct SnapshotTestConfig { impl SnapshotTestConfig { fn new( 
- snapshot_version: SnapshotVersion, full_snapshot_archive_interval: SnapshotInterval, incremental_snapshot_archive_interval: SnapshotInterval, ) -> SnapshotTestConfig { @@ -106,7 +102,6 @@ impl SnapshotTestConfig { .path() .to_path_buf(), bank_snapshots_dir: bank_snapshots_dir.path().to_path_buf(), - snapshot_version, ..SnapshotConfig::default() }; SnapshotTestConfig { @@ -171,18 +166,13 @@ fn restore_from_snapshot( // also marks each bank as root and generates snapshots // finally tries to restore from the last bank's snapshot and compares the restored bank to the // `last_slot` bank -fn run_bank_forks_snapshot_n( - snapshot_version: SnapshotVersion, - last_slot: Slot, - f: F, - set_root_interval: u64, -) where +fn run_bank_forks_snapshot_n(last_slot: Slot, f: F, set_root_interval: u64) +where F: Fn(&Bank, &Keypair), { solana_logger::setup(); // Set up snapshotting config let snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, SnapshotInterval::Slots(NonZeroU64::new(set_root_interval).unwrap()), SnapshotInterval::Disabled, ); @@ -234,7 +224,7 @@ fn run_bank_forks_snapshot_n( snapshot_bank_utils::bank_to_full_snapshot_archive( &snapshot_config.bank_snapshots_dir, &last_bank, - Some(snapshot_version), + Some(snapshot_config.snapshot_version), &snapshot_config.full_snapshot_archives_dir, &snapshot_config.incremental_snapshot_archives_dir, snapshot_config.archive_format, @@ -254,12 +244,11 @@ fn run_bank_forks_snapshot_n( ); } -#[test_case(V1_2_0)] -fn test_bank_forks_snapshot(snapshot_version: SnapshotVersion) { +#[test] +fn test_bank_forks_snapshot() { // create banks up to slot 4 and create 1 new account in each bank. 
test that bank 4 snapshots // and restores correctly run_bank_forks_snapshot_n( - snapshot_version, 4, |bank, mint_keypair| { let key1 = Keypair::new().pubkey(); @@ -286,8 +275,8 @@ fn goto_end_of_slot(bank: &Bank) { } } -#[test_case(V1_2_0)] -fn test_slots_to_snapshot(snapshot_version: SnapshotVersion) { +#[test] +fn test_slots_to_snapshot() { solana_logger::setup(); let num_set_roots = MAX_CACHE_ENTRIES * 2; @@ -295,7 +284,6 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion) { let (snapshot_sender, _snapshot_receiver) = unbounded(); // Make sure this test never clears bank.slots_since_snapshot let snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, SnapshotInterval::Slots( NonZeroU64::new((*add_root_interval * num_set_roots * 2) as Slot).unwrap(), ), @@ -364,8 +352,8 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion) { } } -#[test_case(V1_2_0)] -fn test_bank_forks_status_cache_snapshot(snapshot_version: SnapshotVersion) { +#[test] +fn test_bank_forks_status_cache_snapshot() { // create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time // this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves // ahead. Also tests the status_cache purge and status cache snapshotting. 
@@ -374,7 +362,6 @@ fn test_bank_forks_status_cache_snapshot(snapshot_version: SnapshotVersion) { let key2 = Keypair::new().pubkey(); for set_root_interval in &[1, 4] { run_bank_forks_snapshot_n( - snapshot_version, (MAX_CACHE_ENTRIES * 2) as u64, |bank, mint_keypair| { let tx = system_transaction::transfer( @@ -398,8 +385,8 @@ fn test_bank_forks_status_cache_snapshot(snapshot_version: SnapshotVersion) { } } -#[test_case(V1_2_0)] -fn test_bank_forks_incremental_snapshot(snapshot_version: SnapshotVersion) { +#[test] +fn test_bank_forks_incremental_snapshot() { solana_logger::setup(); const SET_ROOT_INTERVAL: Slot = 2; @@ -418,7 +405,6 @@ fn test_bank_forks_incremental_snapshot(snapshot_version: SnapshotVersion) { ); let snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, SnapshotInterval::Slots(NonZeroU64::new(FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap()), SnapshotInterval::Slots( NonZeroU64::new(INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap(), @@ -615,12 +601,10 @@ enum VerifySnapshotHashKind { /// Spin up the background services fully then test taking & verifying snapshots #[test_matrix( - V1_2_0, [VerifyAccountsKind::Merkle, VerifyAccountsKind::Lattice], [VerifySnapshotHashKind::Merkle, VerifySnapshotHashKind::Lattice] )] fn test_snapshots_with_background_services( - snapshot_version: SnapshotVersion, verify_accounts_kind: VerifyAccountsKind, verify_snapshot_hash_kind: VerifySnapshotHashKind, ) { @@ -654,7 +638,6 @@ fn test_snapshots_with_background_services( ); let snapshot_test_config = SnapshotTestConfig::new( - snapshot_version, SnapshotInterval::Slots(NonZeroU64::new(FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap()), SnapshotInterval::Slots( NonZeroU64::new(INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS).unwrap(), From 18238bdfcb0c668f800c98e27f80df523c43e833 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Tue, 24 Jun 2025 20:26:58 -0700 Subject: [PATCH 084/124] Restore vortexor receiver -- 
renamed to agave-verified-packet-receiver (#6542) * Revert "Revert "Support receiving verified transactions from the vortexor (#5321)" (#6525)" This reverts commit 63cf093fbe350204b47adb128560bc5a37ce1e85. * publish vortexor-receiver as it is used by core * Rename solana-vortexor-receiver to agave-verified-packet-receiver * Missed Cargo.lock update --- Cargo.lock | 10 ++ Cargo.toml | 2 + core/Cargo.toml | 1 + core/src/lib.rs | 1 + core/src/tpu.rs | 137 ++++++++++++++++------- core/src/validator.rs | 1 + core/src/vortexor_receiver_adapter.rs | 131 ++++++++++++++++++++++ gossip/src/cluster_info.rs | 26 +++++ programs/sbf/Cargo.lock | 9 ++ svm/examples/Cargo.lock | 9 ++ validator/src/commands/run/args.rs | 9 ++ validator/src/commands/run/execute.rs | 14 +++ verified-packet-receiver/Cargo.toml | 26 +++++ verified-packet-receiver/Readme.md | 60 ++++++++++ verified-packet-receiver/src/lib.rs | 1 + verified-packet-receiver/src/receiver.rs | 58 ++++++++++ vortexor/src/sender.rs | 4 +- 17 files changed, 455 insertions(+), 44 deletions(-) create mode 100644 core/src/vortexor_receiver_adapter.rs create mode 100644 verified-packet-receiver/Cargo.toml create mode 100644 verified-packet-receiver/Readme.md create mode 100644 verified-packet-receiver/src/lib.rs create mode 100644 verified-packet-receiver/src/receiver.rs diff --git a/Cargo.lock b/Cargo.lock index 6588125be1fe63..78ab52bacd6ce7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -488,6 +488,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "agave-verified-packet-receiver" +version = "3.0.0" +dependencies = [ + "assert_matches", + "solana-perf", + "solana-streamer", +] + [[package]] name = "agave-watchtower" version = "3.0.0" @@ -7783,6 +7792,7 @@ dependencies = [ "agave-feature-set", "agave-reserved-account-keys", "agave-transaction-view", + "agave-verified-packet-receiver", "ahash 0.8.11", "anyhow", "arrayvec", diff --git a/Cargo.toml b/Cargo.toml index bf1f9b0ce6f8f8..6699f9b296812e 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -133,6 +133,7 @@ members = [ "unified-scheduler-pool", "upload-perf", "validator", + "verified-packet-receiver", "version", "vortexor", "vote", @@ -185,6 +186,7 @@ agave-precompiles = { path = "precompiles", version = "=3.0.0" } agave-reserved-account-keys = { path = "reserved-account-keys", version = "=3.0.0" } agave-thread-manager = { path = "thread-manager", version = "=3.0.0" } agave-transaction-view = { path = "transaction-view", version = "=3.0.0" } +agave-verified-packet-receiver = { path = "verified-packet-receiver", version = "=3.0.0" } agave-xdp = { path = "xdp", version = "=3.0.0" } ahash = "0.8.11" anyhow = "1.0.98" diff --git a/core/Cargo.toml b/core/Cargo.toml index 7622e8a6b93cfe..3697d6fe9c707a 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -44,6 +44,7 @@ frozen-abi = [ agave-banking-stage-ingress-types = { workspace = true } agave-feature-set = { workspace = true } agave-transaction-view = { workspace = true } +agave-verified-packet-receiver = { workspace = true } ahash = { workspace = true } anyhow = { workspace = true } arrayvec = { workspace = true } diff --git a/core/src/lib.rs b/core/src/lib.rs index 5c6b6bafdbb001..4272ee04e3b43e 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -41,6 +41,7 @@ mod tpu_entry_notifier; pub mod tvu; pub mod unfrozen_gossip_verified_vote_hashes; pub mod validator; +mod vortexor_receiver_adapter; pub mod vote_simulator; pub mod voting_service; pub mod warm_quic_cache_service; diff --git a/core/src/tpu.rs b/core/src/tpu.rs index a6d58452bb6026..4dcc7b2ec7e6cb 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -28,6 +28,7 @@ use { staked_nodes_updater_service::StakedNodesUpdaterService, tpu_entry_notifier::TpuEntryNotifier, validator::{BlockProductionMethod, GeneratorConfig, TransactionStructure}, + vortexor_receiver_adapter::VortexorReceiverAdapter, }, bytes::Bytes, crossbeam_channel::{bounded, unbounded, Receiver}, @@ -79,18 +80,34 @@ pub struct TpuSockets { pub vote_quic: Vec, /// 
Client-side socket for the forwarding votes. pub vote_forwarding_client: UdpSocket, + pub vortexor_receivers: Option>, +} + +/// The `SigVerifier` enum is used to determine whether to use a local or remote signature verifier. +enum SigVerifier { + Local(SigVerifyStage), + Remote(VortexorReceiverAdapter), +} + +impl SigVerifier { + fn join(self) -> thread::Result<()> { + match self { + SigVerifier::Local(sig_verify_stage) => sig_verify_stage.join(), + SigVerifier::Remote(vortexor_receiver_adapter) => vortexor_receiver_adapter.join(), + } + } } pub struct Tpu { fetch_stage: FetchStage, - sigverify_stage: SigVerifyStage, + sig_verifier: SigVerifier, vote_sigverify_stage: SigVerifyStage, banking_stage: BankingStage, forwarding_stage: JoinHandle<()>, cluster_info_vote_listener: ClusterInfoVoteListener, broadcast_stage: BroadcastStage, - tpu_quic_t: thread::JoinHandle<()>, - tpu_forwards_quic_t: thread::JoinHandle<()>, + tpu_quic_t: Option>, + tpu_forwards_quic_t: Option>, tpu_entry_notifier: Option, staked_nodes_updater_service: StakedNodesUpdaterService, tracer_thread_hdl: TracerThread, @@ -150,6 +167,7 @@ impl Tpu { transactions_forwards_quic: transactions_forwards_quic_sockets, vote_quic: tpu_vote_quic_sockets, vote_forwarding_client: vote_forwarding_client_socket, + vortexor_receivers, } = sockets; let (packet_sender, packet_receiver) = unbounded(); @@ -203,47 +221,75 @@ impl Tpu { ) .unwrap(); - // Streamer for TPU - let SpawnServerResult { - endpoints: _, - thread: tpu_quic_t, - key_updater, - } = spawn_server_multi( - "solQuicTpu", - "quic_streamer_tpu", - transactions_quic_sockets, - keypair, - packet_sender, - exit.clone(), - staked_nodes.clone(), - tpu_quic_server_config, - ) - .unwrap(); + let (tpu_quic_t, key_updater) = if vortexor_receivers.is_none() { + // Streamer for TPU + let SpawnServerResult { + endpoints: _, + thread: tpu_quic_t, + key_updater, + } = spawn_server_multi( + "solQuicTpu", + "quic_streamer_tpu", + transactions_quic_sockets, + keypair, + 
packet_sender, + exit.clone(), + staked_nodes.clone(), + tpu_quic_server_config, + ) + .unwrap(); + (Some(tpu_quic_t), Some(key_updater)) + } else { + (None, None) + }; - // Streamer for TPU forward - let SpawnServerResult { - endpoints: _, - thread: tpu_forwards_quic_t, - key_updater: forwards_key_updater, - } = spawn_server_multi( - "solQuicTpuFwd", - "quic_streamer_tpu_forwards", - transactions_forwards_quic_sockets, - keypair, - forwarded_packet_sender, - exit.clone(), - staked_nodes.clone(), - tpu_fwd_quic_server_config, - ) - .unwrap(); + let (tpu_forwards_quic_t, forwards_key_updater) = if vortexor_receivers.is_none() { + // Streamer for TPU forward + let SpawnServerResult { + endpoints: _, + thread: tpu_forwards_quic_t, + key_updater: forwards_key_updater, + } = spawn_server_multi( + "solQuicTpuFwd", + "quic_streamer_tpu_forwards", + transactions_forwards_quic_sockets, + keypair, + forwarded_packet_sender, + exit.clone(), + staked_nodes.clone(), + tpu_fwd_quic_server_config, + ) + .unwrap(); + (Some(tpu_forwards_quic_t), Some(forwards_key_updater)) + } else { + (None, None) + }; let (forward_stage_sender, forward_stage_receiver) = bounded(1024); - let sigverify_stage = { + let sig_verifier = if let Some(vortexor_receivers) = vortexor_receivers { + info!("starting vortexor adapter"); + let sockets = vortexor_receivers.into_iter().map(Arc::new).collect(); + let adapter = VortexorReceiverAdapter::new( + sockets, + Duration::from_millis(5), + tpu_coalesce, + non_vote_sender, + enable_block_production_forwarding.then(|| forward_stage_sender.clone()), + exit.clone(), + ); + SigVerifier::Remote(adapter) + } else { + info!("starting regular sigverify stage"); let verifier = TransactionSigVerifier::new( non_vote_sender, enable_block_production_forwarding.then(|| forward_stage_sender.clone()), ); - SigVerifyStage::new(packet_receiver, verifier, "solSigVerTpu", "tpu-verifier") + SigVerifier::Local(SigVerifyStage::new( + packet_receiver, + verifier, + "solSigVerTpu", + 
"tpu-verifier", + )) }; let vote_sigverify_stage = { @@ -329,14 +375,19 @@ impl Tpu { ); let mut key_notifiers = key_notifiers.write().unwrap(); - key_notifiers.add(KeyUpdaterType::Tpu, key_updater); - key_notifiers.add(KeyUpdaterType::TpuForwards, forwards_key_updater); + if let Some(key_updater) = key_updater { + key_notifiers.add(KeyUpdaterType::Tpu, key_updater); + } + if let Some(forwards_key_updater) = forwards_key_updater { + key_notifiers.add(KeyUpdaterType::TpuForwards, forwards_key_updater); + } key_notifiers.add(KeyUpdaterType::TpuVote, vote_streamer_key_updater); + key_notifiers.add(KeyUpdaterType::Forward, client_updater); Self { fetch_stage, - sigverify_stage, + sig_verifier, vote_sigverify_stage, banking_stage, forwarding_stage, @@ -354,14 +405,14 @@ impl Tpu { pub fn join(self) -> thread::Result<()> { let results = vec![ self.fetch_stage.join(), - self.sigverify_stage.join(), + self.sig_verifier.join(), self.vote_sigverify_stage.join(), self.cluster_info_vote_listener.join(), self.banking_stage.join(), self.forwarding_stage.join(), self.staked_nodes_updater_service.join(), - self.tpu_quic_t.join(), - self.tpu_forwards_quic_t.join(), + self.tpu_quic_t.map_or(Ok(()), |t| t.join()), + self.tpu_forwards_quic_t.map_or(Ok(()), |t| t.join()), self.tpu_vote_quic_t.join(), ]; let broadcast_result = self.broadcast_stage.join(); diff --git a/core/src/validator.rs b/core/src/validator.rs index d3063df6479d56..14dc77dade397a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1612,6 +1612,7 @@ impl Validator { transactions_forwards_quic: node.sockets.tpu_forwards_quic, vote_quic: node.sockets.tpu_vote_quic, vote_forwarding_client: node.sockets.tpu_vote_forwarding_client, + vortexor_receivers: node.sockets.vortexor_receivers, }, &rpc_subscriptions, transaction_status_sender, diff --git a/core/src/vortexor_receiver_adapter.rs b/core/src/vortexor_receiver_adapter.rs new file mode 100644 index 00000000000000..9e243057ceb0c6 --- /dev/null +++ 
b/core/src/vortexor_receiver_adapter.rs @@ -0,0 +1,131 @@ +//! Vortexor receiver adapter which wraps the VerifiedPacketReceiver +//! to receive packet batches from the remote and sends the packets to the +//! banking stage. + +use { + crate::banking_trace::TracedSender, + agave_banking_stage_ingress_types::BankingPacketBatch, + agave_verified_packet_receiver::receiver::VerifiedPacketReceiver, + crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, + solana_perf::packet::PacketBatch, + std::{ + net::UdpSocket, + sync::{atomic::AtomicBool, Arc}, + thread::{self, Builder, JoinHandle}, + time::{Duration, Instant}, + }, +}; + +#[inline] +fn send(sender: &TracedSender, batch: Arc>, count: usize) -> Result<(), String> { + match sender.send(batch) { + Ok(_) => { + trace!("Sent batch: {count} received from vortexor successfully"); + Ok(()) + } + Err(err) => Err(format!("Failed to send batch {count} down {err:?}")), + } +} + +pub struct VortexorReceiverAdapter { + thread_hdl: JoinHandle<()>, + receiver: VerifiedPacketReceiver, +} + +const MAX_PACKET_BATCH_SIZE: usize = 8; + +impl VortexorReceiverAdapter { + pub fn new( + sockets: Vec>, + recv_timeout: Duration, + tpu_coalesce: Duration, + packets_sender: TracedSender, + forward_stage_sender: Option>, + exit: Arc, + ) -> Self { + let (batch_sender, batch_receiver) = unbounded(); + + let receiver = + VerifiedPacketReceiver::new(sockets, &batch_sender, tpu_coalesce, None, exit.clone()); + + let thread_hdl = Builder::new() + .name("vtxRcvAdptr".to_string()) + .spawn(move || { + if let Err(msg) = Self::recv_send( + batch_receiver, + recv_timeout, + MAX_PACKET_BATCH_SIZE, + packets_sender, + forward_stage_sender, + ) { + info!("Quiting VortexorReceiverAdapter: {msg}"); + } + }) + .unwrap(); + Self { + thread_hdl, + receiver, + } + } + + pub fn join(self) -> thread::Result<()> { + self.thread_hdl.join()?; + self.receiver.join() + } + + fn recv_send( + packet_batch_receiver: Receiver, + recv_timeout: Duration, + 
batch_size: usize, + traced_sender: TracedSender, + forward_stage_sender: Option>, + ) -> Result<(), String> { + loop { + match Self::receive_until(packet_batch_receiver.clone(), recv_timeout, batch_size) { + Ok(packet_batch) => { + let count = packet_batch.len(); + // Send out packet batches + if let Some(forward_stage_sender) = &forward_stage_sender { + send(&traced_sender, packet_batch.clone(), count)?; + // Send out packet batches to forward stage + let _ = forward_stage_sender + .try_send((packet_batch, false /* reject non-vote */)); + } else { + send(&traced_sender, packet_batch, count)?; + } + } + Err(err) => match err { + RecvTimeoutError::Timeout => { + continue; + } + RecvTimeoutError::Disconnected => { + return Err("Disconnected from the input channel".to_string()); + } + }, + } + } + } + + /// Receives packet batches from VerifiedPacketReceiver with a timeout + fn receive_until( + packet_batch_receiver: Receiver, + recv_timeout: Duration, + batch_size: usize, + ) -> Result { + let start = Instant::now(); + + let message = packet_batch_receiver.recv_timeout(recv_timeout)?; + let mut packet_batches = Vec::new(); + packet_batches.push(message); + + while let Ok(message) = packet_batch_receiver.try_recv() { + packet_batches.push(message); + + if start.elapsed() >= recv_timeout || packet_batches.len() >= batch_size { + break; + } + } + + Ok(Arc::new(packet_batches)) + } +} diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index bb90975da55879..7cc07fb981b3be 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2331,6 +2331,7 @@ pub struct Sockets { pub quic_vote_client: UdpSocket, /// Client-side socket for RPC/SendTransactionService. 
pub rpc_sts_client: UdpSocket, + pub vortexor_receivers: Option>, } pub struct NodeConfig { @@ -2343,6 +2344,8 @@ pub struct NodeConfig { pub bind_ip_addr: IpAddr, pub public_tpu_addr: Option, pub public_tpu_forwards_addr: Option, + pub vortexor_receiver_addr: Option, + /// The number of TVU receive sockets to create pub num_tvu_receive_sockets: NonZeroUsize, /// The number of TVU retransmit sockets to create @@ -2504,6 +2507,7 @@ impl Node { tpu_transaction_forwarding_client, quic_vote_client, rpc_sts_client, + vortexor_receivers: None, }, } } @@ -2651,6 +2655,7 @@ impl Node { quic_vote_client, tpu_transaction_forwarding_client, rpc_sts_client, + vortexor_receivers: None, }, } } @@ -2666,6 +2671,7 @@ impl Node { num_tvu_receive_sockets, num_tvu_retransmit_sockets, num_quic_endpoints, + vortexor_receiver_addr, } = config; let gossip_addr = SocketAddr::new(advertised_ip, gossip_port); @@ -2781,6 +2787,23 @@ impl Node { info.set_serve_repair(QUIC, (advertised_ip, serve_repair_quic_port)) .unwrap(); + let vortexor_receivers = vortexor_receiver_addr.map(|vortexor_receiver_addr| { + multi_bind_in_range_with_config( + vortexor_receiver_addr.ip(), + ( + vortexor_receiver_addr.port(), + vortexor_receiver_addr.port() + 1, + ), + socket_config_reuseport, + 32, + ) + .unwrap_or_else(|_| { + panic!("Could not bind to the set vortexor_receiver_addr {vortexor_receiver_addr}") + }) + .1 + }); + + info!("vortexor_receivers is {vortexor_receivers:?}"); trace!("new ContactInfo: {:?}", info); let sockets = Sockets { gossip, @@ -2805,6 +2828,7 @@ impl Node { quic_vote_client, tpu_transaction_forwarding_client, rpc_sts_client, + vortexor_receivers, }; info!("Bound all network sockets as follows: {:#?}", &sockets); Node { info, sockets } @@ -3284,6 +3308,7 @@ mod tests { num_tvu_receive_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, num_tvu_retransmit_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, num_quic_endpoints: DEFAULT_NUM_QUIC_ENDPOINTS, + vortexor_receiver_addr: None, }; let node = 
Node::new_with_external_ip(&solana_pubkey::new_rand(), config); @@ -3308,6 +3333,7 @@ mod tests { num_tvu_receive_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, num_tvu_retransmit_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, num_quic_endpoints: DEFAULT_NUM_QUIC_ENDPOINTS, + vortexor_receiver_addr: None, }; let node = Node::new_with_external_ip(&solana_pubkey::new_rand(), config); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 553520d3417e31..9b77ccaacedc30 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -229,6 +229,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "agave-verified-packet-receiver" +version = "3.0.0" +dependencies = [ + "solana-perf", + "solana-streamer", +] + [[package]] name = "agave-xdp" version = "3.0.0" @@ -6036,6 +6044,7 @@ dependencies = [ "agave-banking-stage-ingress-types", "agave-feature-set", "agave-transaction-view", + "agave-verified-packet-receiver", "ahash 0.8.11", "anyhow", "arrayvec", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 2e3838da906cdd..28f40734667469 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -150,6 +150,14 @@ dependencies = [ "solana-svm-transaction", ] +[[package]] +name = "agave-verified-packet-receiver" +version = "3.0.0" +dependencies = [ + "solana-perf", + "solana-streamer", +] + [[package]] name = "agave-xdp" version = "3.0.0" @@ -5883,6 +5891,7 @@ dependencies = [ "agave-banking-stage-ingress-types", "agave-feature-set", "agave-transaction-view", + "agave-verified-packet-receiver", "ahash 0.8.11", "anyhow", "arrayvec", diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index ff00da32eb8fe5..462815b9a9c476 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -407,6 +407,15 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, --entrypoint or localhostwhen --entrypoint is not provided]", ), ) + .arg( + 
Arg::with_name("tpu_vortexor_receiver_address") + .long("tpu-vortexor-receiver-address") + .value_name("HOST:PORT") + .takes_value(true) + .hidden(hidden_unless_forced()) + .validator(solana_net_utils::is_host_port) + .help("TPU Vortexor Receiver address to which verified transaction packet will be forwarded."), + ) .arg( Arg::with_name("public_rpc_addr") .long("public-rpc-address") diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 1920db86ac6eca..09c50597d6d7db 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -1122,6 +1122,19 @@ pub fn execute( }) .transpose()?; + let tpu_vortexor_receiver_address = + matches + .value_of("tpu_vortexor_receiver_address") + .map(|tpu_vortexor_receiver_address| { + solana_net_utils::parse_host_port(tpu_vortexor_receiver_address).unwrap_or_else( + |err| { + eprintln!("Failed to parse --tpu-vortexor-receiver-address: {err}"); + exit(1); + }, + ) + }); + + info!("tpu_vortexor_receiver_address is {tpu_vortexor_receiver_address:?}"); let num_quic_endpoints = value_t_or_exit!(matches, "num_quic_endpoints", NonZeroUsize); let tpu_max_connections_per_peer = @@ -1149,6 +1162,7 @@ pub fn execute( num_tvu_receive_sockets: tvu_receive_threads, num_tvu_retransmit_sockets: tvu_retransmit_threads, num_quic_endpoints, + vortexor_receiver_addr: tpu_vortexor_receiver_address, }; let cluster_entrypoints = entrypoint_addrs diff --git a/verified-packet-receiver/Cargo.toml b/verified-packet-receiver/Cargo.toml new file mode 100644 index 00000000000000..166a0fc0497252 --- /dev/null +++ b/verified-packet-receiver/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "agave-verified-packet-receiver" +description = "Agave Verified Packet Receiver Receiver" +documentation = "https://docs.rs/agave-verified-packet-receiver" +publish = true +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } 
+license = { workspace = true }
+edition = { workspace = true }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+crate-type = ["lib"]
+name = "agave_verified_packet_receiver"
+
+[dependencies]
+solana-perf = { workspace = true }
+solana-streamer = { workspace = true }
+
+[dev-dependencies]
+assert_matches = { workspace = true }
+solana-streamer = { workspace = true, features = ["dev-context-only-utils"] }
diff --git a/verified-packet-receiver/Readme.md b/verified-packet-receiver/Readme.md
new file mode 100644
index 00000000000000..90834f86469ee3
--- /dev/null
+++ b/verified-packet-receiver/Readme.md
@@ -0,0 +1,60 @@
+# Introduction
+The Vortexor is a service that can offload the tasks of receiving transactions
+from the public, performing signature verifications, and deduplications from the
+core validator, enabling it to focus on processing and executing the
+transactions. The verified and filtered transactions will then be forwarded to
+the validators linked with the Vortexor. This setup makes the TPU transaction
+ingestion and verification more scalable compared to a single-node solution.
+
+This module implements the VerifiedPacketReceiver in the below architecture
+which encapsulates the functionality of receiving the verified packet batches
+from the vortexor. In the first implementation, we use UDP to receive the
+verified packets from the vortexor. It is designed to support other protocol
+options such as using QUIC.
+
+# Architecture
+Figure 1 describes the architecture diagram of the Vortexor and its
+relationship with the validator.
+ + +---------------------+ + | Solana | + | RPC / Web Socket | + | Service | + +---------------------+ + | + v + +--------------------- VORTEXOR ------------------------+ + | | | + | +------------------+ | + | | StakedKeyUpdater | | + | +------------------+ | + | | | + | v | + | +-------------+ +--------------------+ | + TPU --> | | TPU Streamer| -----> | SigVerifier/Dedup | | + /QUIC | +-------------+ +--------------------+ | + | | | | + | v v | + | +----------------+ +------------------------+ | + | | Subscription |<----| VerifiedPacketForwarder| | + | | Management | +------------------------+ | + | +----------------+ | | + +--------------------------------|----------------------+ + ^ | (UDP/QUIC) + Heartbeat/subscriptions | | + | v + +-------------------- AGAVE VALIDATOR ------------------+ + | | + | +----------------+ +-----------------------+ | + Config-> | | Subscription | | VerifiedPacketReceiver| | + Admin RPC | | Management | | | | + | +----------------+ +-----------------------+ | + | | | | + | | v | + | v +-----------+ | + | +--------------------+ | Banking | | + Gossip <--------|--| Gossip/Contact Info| | Stage | | + | +--------------------+ +-----------+ | + +-------------------------------------------------------+ + + Figure 1. \ No newline at end of file diff --git a/verified-packet-receiver/src/lib.rs b/verified-packet-receiver/src/lib.rs new file mode 100644 index 00000000000000..4c0db7eaa35a61 --- /dev/null +++ b/verified-packet-receiver/src/lib.rs @@ -0,0 +1 @@ +pub mod receiver; diff --git a/verified-packet-receiver/src/receiver.rs b/verified-packet-receiver/src/receiver.rs new file mode 100644 index 00000000000000..4a8c3490498591 --- /dev/null +++ b/verified-packet-receiver/src/receiver.rs @@ -0,0 +1,58 @@ +/// This is responsible for receiving the verified and deduplicated transactions +/// from the vortexor and sending down to the banking stage. 
+use { + solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, + solana_streamer::streamer::{self, PacketBatchSender, StreamerReceiveStats}, + std::{ + net::UdpSocket, + sync::{atomic::AtomicBool, Arc}, + thread::{self, JoinHandle}, + time::Duration, + }, +}; + +pub struct VerifiedPacketReceiver { + thread_hdls: Vec>, +} + +impl VerifiedPacketReceiver { + pub fn new( + sockets: Vec>, + sender: &PacketBatchSender, + coalesce: Duration, + in_vote_only_mode: Option>, + exit: Arc, + ) -> Self { + let recycler: PacketBatchRecycler = Recycler::warmed(1000, 1024); + + let tpu_stats = Arc::new(StreamerReceiveStats::new("vortexor_receiver")); + + let thread_hdls = sockets + .into_iter() + .enumerate() + .map(|(i, socket)| { + streamer::receiver( + format!("solVtxRcvr{i:02}"), + socket, + exit.clone(), + sender.clone(), + recycler.clone(), + tpu_stats.clone(), + Some(coalesce), + true, + in_vote_only_mode.clone(), + false, // is_staked_service + ) + }) + .collect(); + + Self { thread_hdls } + } + + pub fn join(self) -> thread::Result<()> { + for thread_hdl in self.thread_hdls { + thread_hdl.join()?; + } + Ok(()) + } +} diff --git a/vortexor/src/sender.rs b/vortexor/src/sender.rs index c5526cce311fea..89812ee8b7607d 100644 --- a/vortexor/src/sender.rs +++ b/vortexor/src/sender.rs @@ -83,7 +83,9 @@ impl PacketBatchSender { for batch in &packet_batches { for packet_batch in batch.iter() { for packet in packet_batch { - packets.push(packet.data(0..).unwrap()); + if let Some(data) = packet.data(0..) 
{ + packets.push(data); + } } } } From dd8c84237a8787cc8de8a5ff7272724ed24a1842 Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Wed, 25 Jun 2025 10:54:24 +0200 Subject: [PATCH 085/124] ledger, tests: remove remaining parts of legacy shreds in shred tests (#6713) * remove legacy shreds --- ledger/tests/shred.rs | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index f5ab214b7c1aeb..63f5ed12e300f6 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -7,7 +7,6 @@ use { solana_ledger::shred::{ max_entries_per_n_shred, recover, verify_test_data_shred, ProcessShredsStats, ReedSolomonCache, Shred, ShredData, Shredder, DATA_SHREDS_PER_FEC_BLOCK, - LEGACY_SHRED_DATA_CAPACITY, }, solana_signer::Signer, solana_system_transaction as system_transaction, @@ -197,17 +196,14 @@ fn setup_different_sized_fec_blocks( let keypair1 = Keypair::new(); let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); let entry = Entry::new(&Hash::default(), 1, vec![tx0]); + let merkle_capacity = ShredData::capacity(Some((6, true, true))).unwrap(); + let chained_merkle_root = Some(Hash::default()); - // Make enough entries for `DATA_SHREDS_PER_FEC_BLOCK + 2` shreds so one - // fec set will have `DATA_SHREDS_PER_FEC_BLOCK` shreds and the next - // will have 2 shreds. 
assert!(DATA_SHREDS_PER_FEC_BLOCK > 2); - let num_shreds_per_iter = DATA_SHREDS_PER_FEC_BLOCK + 2; - let num_entries = max_entries_per_n_shred( - &entry, - num_shreds_per_iter as u64, - Some(LEGACY_SHRED_DATA_CAPACITY), - ); + let num_shreds_per_iter = DATA_SHREDS_PER_FEC_BLOCK; + let num_entries = + max_entries_per_n_shred(&entry, num_shreds_per_iter as u64, Some(merkle_capacity)); + let entries: Vec<_> = (0..num_entries) .map(|_| { let keypair0 = Keypair::new(); @@ -234,10 +230,10 @@ fn setup_different_sized_fec_blocks( &keypair, &entries, is_last, - None, // chained_merkle_root + chained_merkle_root, next_shred_index, next_code_index, - false, // merkle_variant + true, // merkle_variant &reed_solomon_cache, &mut ProcessShredsStats::default(), ); From 0214bd539a296318722a94082fa9d37e5c0136d8 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 25 Jun 2025 18:24:36 +0800 Subject: [PATCH 086/124] fix logic in version bump script (#6691) * replace the whole script, not just the first match * ensure running git checkout with tracked files --- scripts/increment-cargo-version.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/increment-cargo-version.sh b/scripts/increment-cargo-version.sh index 56350436820a71..71b47e8bb40dcb 100755 --- a/scripts/increment-cargo-version.sh +++ b/scripts/increment-cargo-version.sh @@ -129,7 +129,7 @@ for Cargo_toml in "${Cargo_tomls[@]}"; do # Set new crate version ( set -x - sed -i "$Cargo_toml" -e "0,/^version =/{s/^version = \"[^\"]*\"$/version = \"$newVersion\"/}" + sed -i "$Cargo_toml" -e "s/^version = \"$currentVersion\"$/version = \"$newVersion\"/" ) # Fix up the version references to other internal crates @@ -183,7 +183,7 @@ scripts/cargo-for-all-lock-files.sh tree >/dev/null done mv "$tmp_file" filtered-cargo-lock-patch - git checkout ./**/Cargo.lock + git ls-files -- **/Cargo.lock | xargs -I {} git checkout {} git apply --unidiff-zero filtered-cargo-lock-patch rm cargo-lock-patch 
filtered-cargo-lock-patch ) From 40dcedf28ec928846428d69f58f3291597816613 Mon Sep 17 00:00:00 2001 From: Alex Pyattaev Date: Wed, 25 Jun 2025 15:00:22 +0300 Subject: [PATCH 087/124] fix merge conflict due to reuseport change (#6730) --- gossip/src/cluster_info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 7cc07fb981b3be..f0f6e5d7a812a9 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2794,7 +2794,7 @@ impl Node { vortexor_receiver_addr.port(), vortexor_receiver_addr.port() + 1, ), - socket_config_reuseport, + socket_config, 32, ) .unwrap_or_else(|_| { From 4e285b8a7eac6aefedcf383fe717f40376337a1f Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Wed, 25 Jun 2025 09:11:51 -0500 Subject: [PATCH 088/124] Multihoming: add multiple bind addresses. (#6522) * add multiple bind addrs. first is primary. no functional change here * rename secondary to others * add comments/update test validator * switch to wrapper around Vec and remove multihoming from test-validator --- gossip/src/cluster_info.rs | 62 ++++++++++++++++++++++++--- validator/src/commands/run/args.rs | 5 ++- validator/src/commands/run/execute.rs | 45 +++++++++++-------- 3 files changed, 88 insertions(+), 24 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index f0f6e5d7a812a9..3830dc6bc45de3 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2340,8 +2340,8 @@ pub struct NodeConfig { /// The gossip port advertised to the cluster pub gossip_port: u16, pub port_range: PortRange, - /// The IP address the node binds to - pub bind_ip_addr: IpAddr, + /// Multihoming: The IP addresses the node can bind to + pub bind_ip_addrs: BindIpAddrs, pub public_tpu_addr: Option, pub public_tpu_forwards_addr: Option, pub vortexor_receiver_addr: Option, @@ -2354,6 +2354,57 @@ pub struct NodeConfig { pub num_quic_endpoints: NonZeroUsize, } +#[derive(Debug, Clone)] 
+pub struct BindIpAddrs { + /// The IP addresses this node may bind to + /// Index 0 is the primary address + /// Index 1+ are secondary addresses + addrs: Vec, +} + +impl BindIpAddrs { + pub fn new(addrs: Vec) -> Result { + if addrs.is_empty() { + return Err( + "BindIpAddrs requires at least one IP address (--bind-address)".to_string(), + ); + } + if addrs.len() > 1 { + for ip in &addrs { + if ip.is_loopback() || ip.is_unspecified() || ip.is_multicast() { + return Err(format!( + "Invalid configuration: {:?} is not allowed with multiple --bind-address values (loopback, unspecified, or multicast)", + ip + )); + } + } + } + + Ok(Self { addrs }) + } + + #[inline] + pub fn primary(&self) -> IpAddr { + self.addrs[0] + } +} + +// Makes BindIpAddrs behave like &[IpAddr] +impl Deref for BindIpAddrs { + type Target = [IpAddr]; + + fn deref(&self) -> &Self::Target { + &self.addrs + } +} + +// For generic APIs expecting something like AsRef<[IpAddr]> +impl AsRef<[IpAddr]> for BindIpAddrs { + fn as_ref(&self) -> &[IpAddr] { + &self.addrs + } +} + #[derive(Debug)] pub struct Node { pub info: ContactInfo, @@ -2665,7 +2716,7 @@ impl Node { advertised_ip, gossip_port, port_range, - bind_ip_addr, + bind_ip_addrs, public_tpu_addr, public_tpu_forwards_addr, num_tvu_receive_sockets, @@ -2673,6 +2724,7 @@ impl Node { num_quic_endpoints, vortexor_receiver_addr, } = config; + let bind_ip_addr = bind_ip_addrs.primary(); let gossip_addr = SocketAddr::new(advertised_ip, gossip_port); let (gossip_port, (gossip, ip_echo)) = @@ -3302,7 +3354,7 @@ mod tests { advertised_ip: IpAddr::V4(ip), gossip_port: 0, port_range, - bind_ip_addr: IpAddr::V4(ip), + bind_ip_addrs: BindIpAddrs::new(vec![IpAddr::V4(ip)]).unwrap(), public_tpu_addr: None, public_tpu_forwards_addr: None, num_tvu_receive_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, @@ -3327,7 +3379,7 @@ mod tests { advertised_ip: ip, gossip_port: port, port_range, - bind_ip_addr: ip, + bind_ip_addrs: BindIpAddrs::new(vec![ip]).unwrap(), 
public_tpu_addr: None, public_tpu_forwards_addr: None, num_tvu_receive_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index 462815b9a9c476..0bbf9a56a209e9 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -946,8 +946,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .validator(solana_net_utils::is_host) .default_value(&default_args.bind_address) - .help("IP address to bind the validator ports"), - ) + .multiple(true) + .help("Repeatable. IP addresses to bind the validator ports on. First is primary (used on startup), the rest may be switched to during operation."), + ) .arg( Arg::with_name("rpc_bind_address") .long("rpc-bind-address") diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 09c50597d6d7db..7df7a0002879cc 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -38,7 +38,7 @@ use { }, }, solana_gossip::{ - cluster_info::{Node, NodeConfig}, + cluster_info::{BindIpAddrs, Node, NodeConfig}, contact_info::ContactInfo, }, solana_hash::Hash, @@ -296,15 +296,22 @@ pub fn execute( "--gossip-validator", )?; - let bind_address = solana_net_utils::parse_host(matches.value_of("bind_address").unwrap()) - .expect("invalid bind_address"); + let bind_addresses = { + let parsed = matches + .values_of("bind_address") + .expect("bind_address should always be present due to default") + .map(solana_net_utils::parse_host) + .collect::, _>>()?; + BindIpAddrs::new(parsed).map_err(|err| format!("invalid bind_addresses: {err}"))? 
+ }; + let rpc_bind_address = if matches.is_present("rpc_bind_address") { solana_net_utils::parse_host(matches.value_of("rpc_bind_address").unwrap()) .expect("invalid rpc_bind_address") } else if private_rpc { solana_net_utils::parse_host("127.0.0.1").unwrap() } else { - bind_address + bind_addresses.primary() }; let contact_debug_interval = value_t_or_exit!(matches, "contact_debug_interval", u64); @@ -359,7 +366,7 @@ pub fn execute( // version can then be deleted from gossip and get_rpc_node above. let expected_shred_version = value_t!(matches, "expected_shred_version", u16) .ok() - .or_else(|| get_cluster_shred_version(&entrypoint_addrs, bind_address)); + .or_else(|| get_cluster_shred_version(&entrypoint_addrs, bind_addresses.primary())); let tower_path = value_t!(matches, "tower", PathBuf) .ok() @@ -1074,8 +1081,9 @@ pub fn execute( let advertised_ip = if let Some(ip) = gossip_host { ip - } else if !bind_address.is_unspecified() && !bind_address.is_loopback() { - bind_address + } else if !bind_addresses.primary().is_unspecified() && !bind_addresses.primary().is_loopback() + { + bind_addresses.primary() } else if !entrypoint_addrs.is_empty() { let mut order: Vec<_> = (0..entrypoint_addrs.len()).collect(); order.shuffle(&mut thread_rng()); @@ -1088,21 +1096,24 @@ pub fn execute( "Contacting {} to determine the validator's public IP address", entrypoint_addr ); - solana_net_utils::get_public_ip_addr_with_binding(entrypoint_addr, bind_address) - .map_or_else( - |err| { - warn!("Failed to contact cluster entrypoint {entrypoint_addr}: {err}"); - None - }, - Some, - ) + solana_net_utils::get_public_ip_addr_with_binding( + entrypoint_addr, + bind_addresses.primary(), + ) + .map_or_else( + |err| { + warn!("Failed to contact cluster entrypoint {entrypoint_addr}: {err}"); + None + }, + Some, + ) }) .ok_or_else(|| "unable to determine the validator's public IP address".to_string())? 
} else { IpAddr::V4(Ipv4Addr::LOCALHOST) }; let gossip_port = value_t!(matches, "gossip_port", u16).or_else(|_| { - solana_net_utils::find_available_port_in_range(bind_address, (0, 1)) + solana_net_utils::find_available_port_in_range(bind_addresses.primary(), (0, 1)) .map_err(|err| format!("unable to find an available gossip port: {err}")) })?; @@ -1156,7 +1167,7 @@ pub fn execute( advertised_ip, gossip_port, port_range: dynamic_port_range, - bind_ip_addr: bind_address, + bind_ip_addrs: bind_addresses, public_tpu_addr, public_tpu_forwards_addr, num_tvu_receive_sockets: tvu_receive_threads, From ebe554559c1986450f9acdd4f1841d92c2904f4b Mon Sep 17 00:00:00 2001 From: Kristofer Peterson Date: Wed, 25 Jun 2025 15:56:59 +0100 Subject: [PATCH 089/124] solana-stake-accounts: support configurable commitment level and no wait (#6644) support configurable commitment level via configuration file and command line option promote no wait for transaction confirmation to global command line option --- Cargo.lock | 1 + stake-accounts/Cargo.toml | 1 + stake-accounts/src/arg_parser.rs | 30 +++++++++++++++++++++++++----- stake-accounts/src/args.rs | 7 +++++++ stake-accounts/src/main.rs | 21 +++++++++++++++------ 5 files changed, 49 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78ab52bacd6ce7..ffda0a4b3c9d26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10500,6 +10500,7 @@ dependencies = [ "solana-cli-config", "solana-client-traits", "solana-clock", + "solana-commitment-config", "solana-fee-calculator", "solana-genesis-config", "solana-instruction", diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml index 6513c5f05d2e44..799db6536de53e 100644 --- a/stake-accounts/Cargo.toml +++ b/stake-accounts/Cargo.toml @@ -18,6 +18,7 @@ solana-account = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } solana-clock = { workspace = true } +solana-commitment-config = { workspace = true } 
solana-fee-calculator = { workspace = true } solana-genesis-config = { workspace = true } solana-instruction = { workspace = true } diff --git a/stake-accounts/src/arg_parser.rs b/stake-accounts/src/arg_parser.rs index c45d0662b6e6d4..173006c0e60ec2 100644 --- a/stake-accounts/src/arg_parser.rs +++ b/stake-accounts/src/arg_parser.rs @@ -141,6 +141,7 @@ where .arg( Arg::with_name("config_file") .long("config") + .global(true) .takes_value(true) .value_name("FILEPATH") .default_value(default_config_file) @@ -154,6 +155,25 @@ where .value_name("URL") .help("RPC entrypoint address. i.e. http://api.devnet.solana.com"), ) + .arg( + Arg::with_name("commitment") + .long("commitment") + .global(true) + .takes_value(true) + .value_name("COMMITMENT_LEVEL") + .possible_values(&["processed", "confirmed", "finalized"]) + .hide_possible_values(true) + .help( + "Return information at the selected commitment level \ + [possible values: processed, confirmed, finalized]", + ), + ) + .arg( + Arg::with_name("no_wait") + .long("no-wait") + .global(true) + .help("Send transactions without waiting for confirmation"), + ) .subcommand( SubCommand::with_name("new") .about("Create derived stake accounts") @@ -242,11 +262,6 @@ where .arg(lockup_date_arg()) .arg(new_custodian_arg()) .arg(num_accounts_arg()) - .arg( - Arg::with_name("no_wait") - .long("no-wait") - .help("Send transactions without waiting for confirmation"), - ) .arg( Arg::with_name("unlock_years") .long("unlock-years") @@ -288,6 +303,7 @@ fn parse_new_args(matches: &ArgMatches<'_>) -> NewArgs { stake_authority: value_t_or_exit!(matches, "stake_authority", String), withdraw_authority: value_t_or_exit!(matches, "withdraw_authority", String), index: value_t_or_exit!(matches, "index", usize), + no_wait: matches.is_present("no_wait"), } } @@ -313,6 +329,7 @@ fn parse_authorize_args(matches: &ArgMatches<'_>) -> AuthorizeArgs) -> RebaseArgs { new_base_keypair: value_t_or_exit!(matches, "new_base_keypair", String), stake_authority: 
value_t_or_exit!(matches, "stake_authority", String), num_accounts: value_t_or_exit!(matches, "num_accounts", usize), + no_wait: matches.is_present("no_wait"), } } @@ -355,6 +373,7 @@ where let matches = get_matches(args); let config_file = matches.value_of("config_file").unwrap().to_string(); let url = matches.value_of("url").map(|x| x.to_string()); + let commitment = matches.value_of("commitment").map(|x| x.to_string()); let command = match matches.subcommand() { ("new", Some(matches)) => Command::New(parse_new_args(matches)), @@ -373,6 +392,7 @@ where Args { config_file, url, + commitment, command, } } diff --git a/stake-accounts/src/args.rs b/stake-accounts/src/args.rs index cce90874a10277..c15daccf2ea059 100644 --- a/stake-accounts/src/args.rs +++ b/stake-accounts/src/args.rs @@ -16,6 +16,7 @@ pub(crate) struct NewArgs { pub stake_authority: P, pub withdraw_authority: P, pub index: usize, + pub no_wait: bool, } pub(crate) struct CountArgs

{ @@ -35,6 +36,7 @@ pub(crate) struct AuthorizeArgs { pub new_stake_authority: P, pub new_withdraw_authority: P, pub num_accounts: usize, + pub no_wait: bool, } pub(crate) struct SetLockupArgs { @@ -55,6 +57,7 @@ pub(crate) struct RebaseArgs { pub new_base_keypair: K, pub stake_authority: K, pub num_accounts: usize, + pub no_wait: bool, } pub(crate) struct MoveArgs { @@ -76,6 +79,7 @@ pub(crate) enum Command { pub(crate) struct Args { pub config_file: String, pub url: Option, + pub commitment: Option, pub command: Command, } @@ -176,6 +180,7 @@ fn resolve_authorize_args( &args.new_withdraw_authority, )?, num_accounts: args.num_accounts, + no_wait: args.no_wait, }; Ok(resolved_args) } @@ -208,6 +213,7 @@ fn resolve_rebase_args( new_base_keypair: resolve_new_base_keypair(wallet_manager, &args.new_base_keypair)?, stake_authority: resolve_stake_authority(wallet_manager, &args.stake_authority)?, num_accounts: args.num_accounts, + no_wait: args.no_wait, }; Ok(resolved_args) } @@ -247,6 +253,7 @@ pub(crate) fn resolve_command( )?, lamports: args.lamports, index: args.index, + no_wait: args.no_wait, }; Ok(Command::New(resolved_args)) } diff --git a/stake-accounts/src/main.rs b/stake-accounts/src/main.rs index b6b783b19de49e..f3c3461fa7e9f1 100644 --- a/stake-accounts/src/main.rs +++ b/stake-accounts/src/main.rs @@ -11,6 +11,7 @@ use { }, }, solana_cli_config::Config, + solana_commitment_config::CommitmentConfig, solana_message::Message, solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, @@ -21,7 +22,7 @@ use { solana_stake_interface::{instruction::LockupArgs, state::Lockup}, solana_stake_program::stake_state, solana_transaction::Transaction, - std::{env, error::Error}, + std::{env, error::Error, str::FromStr}, }; fn get_balance_at(client: &RpcClient, pubkey: &Pubkey, i: usize) -> Result { @@ -83,7 +84,7 @@ fn process_new_stake_account( &*args.funding_keypair, &*args.base_keypair, ]); - let signature = send_and_confirm_message(client, message, &signers, false)?; + 
let signature = send_and_confirm_message(client, message, &signers, args.no_wait)?; Ok(signature) } @@ -105,7 +106,7 @@ fn process_authorize_stake_accounts( &*args.stake_authority, &*args.withdraw_authority, ]); - send_and_confirm_messages(client, messages, &signers, false)?; + send_and_confirm_messages(client, messages, &signers, args.no_wait)?; Ok(()) } @@ -161,7 +162,7 @@ fn process_rebase_stake_accounts( &*args.new_base_keypair, &*args.stake_authority, ]); - send_and_confirm_messages(client, messages, &signers, false)?; + send_and_confirm_messages(client, messages, &signers, args.no_wait)?; Ok(()) } @@ -194,7 +195,7 @@ fn process_move_stake_accounts( &*args.stake_authority, &*authorize_args.withdraw_authority, ]); - send_and_confirm_messages(client, messages, &signers, false)?; + send_and_confirm_messages(client, messages, &signers, args.no_wait)?; Ok(()) } @@ -235,7 +236,15 @@ fn main() -> Result<(), Box> { let command_args = parse_args(env::args_os()); let config = Config::load(&command_args.config_file).unwrap_or_default(); let json_rpc_url = command_args.url.unwrap_or(config.json_rpc_url); - let client = RpcClient::new(json_rpc_url); + let client = RpcClient::new_with_commitment( + json_rpc_url, + CommitmentConfig::from_str( + command_args + .commitment + .as_ref() + .unwrap_or(&config.commitment), + )?, + ); match resolve_command(&command_args.command)? 
{ Command::New(args) => { From f6d3dcc7c0a87b5fa27fa274d99b26fd47835130 Mon Sep 17 00:00:00 2001 From: Emmanuel Thomas <50878033+nuel77@users.noreply.github.com> Date: Wed, 25 Jun 2025 21:24:54 +0530 Subject: [PATCH 090/124] dont call epoch info always (#6682) Fix a bug that leads to calling get_epoch_info rpc call too often --- tpu-client/src/nonblocking/tpu_client.rs | 37 ++++++++++++++++-------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index e3b8afbdfc6498..7e97a74ccfc599 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -87,13 +87,14 @@ struct LeaderTpuCache { leaders: Vec, leader_tpu_map: HashMap, slots_in_epoch: Slot, - last_epoch_info_slot: Slot, + epoch_slot_boundary: Slot, } impl LeaderTpuCache { pub fn new( first_slot: Slot, slots_in_epoch: Slot, + epoch_slot_boundary: Slot, leaders: Vec, cluster_nodes: Vec, protocol: Protocol, @@ -105,7 +106,7 @@ impl LeaderTpuCache { leaders, leader_tpu_map, slots_in_epoch, - last_epoch_info_slot: first_slot, + epoch_slot_boundary, } } @@ -117,7 +118,7 @@ impl LeaderTpuCache { pub fn slot_info(&self) -> (Slot, Slot, Slot) { ( self.last_slot(), - self.last_epoch_info_slot, + self.epoch_slot_boundary, self.slots_in_epoch, ) } @@ -234,7 +235,10 @@ impl LeaderTpuCache { if let Some(Ok(epoch_info)) = cache_update_info.maybe_epoch_info { self.slots_in_epoch = epoch_info.slots_in_epoch; - self.last_epoch_info_slot = estimated_current_slot; + self.epoch_slot_boundary = epoch_info + .absolute_slot + .saturating_sub(epoch_info.slot_index) + .saturating_add(epoch_info.slots_in_epoch); } if let Some(slot_leaders) = cache_update_info.maybe_slot_leaders { @@ -741,7 +745,16 @@ impl LeaderTpuService { .await?; let recent_slots = RecentLeaderSlots::new(start_slot); - let slots_in_epoch = rpc_client.get_epoch_info().await?.slots_in_epoch; + let EpochInfo { + absolute_slot, + 
slots_in_epoch, + slot_index, + .. + } = rpc_client.get_epoch_info().await?; + + let epoch_boundary_slot = absolute_slot + .saturating_sub(slot_index) + .saturating_add(slots_in_epoch); // When a cluster is starting, we observe an invalid slot range failure that goes away after a // retry. It seems as if the leader schedule is not available, but it should be. The logic @@ -802,6 +815,7 @@ impl LeaderTpuService { let leader_tpu_cache = Arc::new(RwLock::new(LeaderTpuCache::new( start_slot, slots_in_epoch, + epoch_boundary_slot, leaders, cluster_nodes, protocol, @@ -973,16 +987,15 @@ async fn maybe_fetch_cache_info( }; let estimated_current_slot = recent_slots.estimated_current_slot(); - let (last_slot, last_epoch_info_slot, slots_in_epoch) = { + let (last_slot, epoch_slot_boundary, slots_in_epoch) = { let leader_tpu_cache = leader_tpu_cache.read().unwrap(); leader_tpu_cache.slot_info() }; - let maybe_epoch_info = - if estimated_current_slot >= last_epoch_info_slot.saturating_sub(slots_in_epoch) { - Some(rpc_client.get_epoch_info().await) - } else { - None - }; + let maybe_epoch_info = if estimated_current_slot >= epoch_slot_boundary { + Some(rpc_client.get_epoch_info().await) + } else { + None + }; let maybe_slot_leaders = if estimated_current_slot >= last_slot.saturating_sub(MAX_FANOUT_SLOTS) { From 9786c5edde6bbf88cb795eea25e2d9fc7ed891bc Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 25 Jun 2025 11:08:53 -0500 Subject: [PATCH 091/124] validator: Revert --wait-for-exit flag on exit subcommand (#6723) As written, agave-validator exit will always error out when run against a running agave-validator bin that did not have the change. The exit is actually initiated, but this could break automation that depends on the return code of agave-validator exit. 
The flag will be re-added to be more upgrade friendly in the future --- validator/Cargo.toml | 1 - validator/src/admin_rpc_service.rs | 19 ++--- validator/src/commands/exit/mod.rs | 108 +++-------------------------- validator/src/commands/mod.rs | 3 - 4 files changed, 15 insertions(+), 116 deletions(-) diff --git a/validator/Cargo.toml b/validator/Cargo.toml index a79bc818587614..c7d01f78545072 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -27,7 +27,6 @@ jsonrpc-core = { workspace = true } jsonrpc-core-client = { workspace = true, features = ["ipc"] } jsonrpc-derive = { workspace = true } jsonrpc-ipc-server = { workspace = true } -libc = { workspace = true } libloading = { workspace = true } log = { workspace = true } num_cpus = { workspace = true } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 41f3e25ad5501b..75a1412e4eba4b 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -152,8 +152,7 @@ pub trait AdminRpc { type Metadata; #[rpc(meta, name = "exit")] - /// Initiates validator exit and returns the PID - fn exit(&self, meta: Self::Metadata) -> Result; + fn exit(&self, meta: Self::Metadata) -> Result<()>; #[rpc(meta, name = "reloadPlugin")] fn reload_plugin( @@ -257,7 +256,7 @@ pub struct AdminRpcImpl; impl AdminRpc for AdminRpcImpl { type Metadata = AdminRpcRequestMetadata; - fn exit(&self, meta: Self::Metadata) -> Result { + fn exit(&self, meta: Self::Metadata) -> Result<()> { debug!("exit admin rpc request received"); thread::Builder::new() @@ -267,7 +266,7 @@ impl AdminRpc for AdminRpcImpl { // receive a confusing error as the validator shuts down before a response is sent back. 
thread::sleep(Duration::from_millis(100)); - info!("validator exit requested"); + warn!("validator exit requested"); meta.validator_exit.write().unwrap().exit(); if !meta.validator_exit_backpressure.is_empty() { @@ -309,7 +308,7 @@ impl AdminRpc for AdminRpcImpl { }) .unwrap(); - Ok(std::process::id()) + Ok(()) } fn reload_plugin( @@ -1515,13 +1514,9 @@ mod tests { expected_validator_id.pubkey().to_string() ); - let expected_parsed_response: Value = serde_json::from_str(&format!( - r#"{{"id": 1, "jsonrpc": "2.0", "result": {} }}"#, - std::process::id() - )) - .unwrap(); - let exit_request = r#"{"jsonrpc":"2.0","id":1,"method":"exit","params":[]}"#.to_string(); - let exit_response = test_validator.handle_request(&exit_request); + let contact_info_request = + r#"{"jsonrpc":"2.0","id":1,"method":"exit","params":[]}"#.to_string(); + let exit_response = test_validator.handle_request(&contact_info_request); let actual_parsed_response: Value = serde_json::from_str(&exit_response.expect("actual response")) .expect("actual response deserialization"); diff --git a/validator/src/commands/exit/mod.rs b/validator/src/commands/exit/mod.rs index 5db7088e20f675..bea065c5078fbb 100644 --- a/validator/src/commands/exit/mod.rs +++ b/validator/src/commands/exit/mod.rs @@ -1,5 +1,3 @@ -#[cfg(target_os = "linux")] -use {crate::commands::Error, std::io, std::thread, std::time::Duration}; use { crate::{ admin_rpc_service, @@ -15,18 +13,10 @@ const COMMAND: &str = "exit"; const DEFAULT_MIN_IDLE_TIME: &str = "10"; const DEFAULT_MAX_DELINQUENT_STAKE: &str = "5"; -#[derive(Debug, PartialEq)] -pub enum PostExitAction { - // Run the agave-validator monitor command indefinitely - Monitor, - // Block until the exiting validator process has terminated - Wait, -} - #[derive(Debug, PartialEq)] pub struct ExitArgs { pub force: bool, - pub post_exit_action: Option, + pub monitor: bool, pub min_idle_time: usize, pub max_delinquent_stake: u8, pub skip_new_snapshot_check: bool, @@ -35,17 +25,9 @@ pub 
struct ExitArgs { impl FromClapArgMatches for ExitArgs { fn from_clap_arg_match(matches: &ArgMatches) -> Result { - let post_exit_action = if matches.is_present("monitor") { - Some(PostExitAction::Monitor) - } else if matches.is_present("wait_for_exit") { - Some(PostExitAction::Wait) - } else { - None - }; - Ok(ExitArgs { force: matches.is_present("force"), - post_exit_action, + monitor: matches.is_present("monitor"), min_idle_time: value_t_or_exit!(matches, "min_idle_time", usize), max_delinquent_stake: value_t_or_exit!(matches, "max_delinquent_stake", u8), skip_new_snapshot_check: matches.is_present("skip_new_snapshot_check"), @@ -73,12 +55,6 @@ pub fn command<'a>() -> App<'a, 'a> { .takes_value(false) .help("Monitor the validator after sending the exit request"), ) - .arg( - Arg::with_name("wait_for_exit") - .long("wait-for-exit") - .conflicts_with("monitor") - .help("Wait for the validator to terminate after sending the exit request"), - ) .arg( Arg::with_name("min_idle_time") .long("min-idle-time") @@ -126,75 +102,16 @@ pub fn execute(matches: &ArgMatches, ledger_path: &Path) -> Result<()> { } let admin_client = admin_rpc_service::connect(ledger_path); - let validator_pid = - admin_rpc_service::runtime().block_on(async move { admin_client.await?.exit().await })?; - + admin_rpc_service::runtime().block_on(async move { admin_client.await?.exit().await })?; println!("Exit request sent"); - match exit_args.post_exit_action { - None => Ok(()), - Some(PostExitAction::Monitor) => monitor::execute(matches, ledger_path), - Some(PostExitAction::Wait) => poll_until_pid_terminates(validator_pid), - }?; - - Ok(()) -} - -#[cfg(target_os = "linux")] -fn poll_until_pid_terminates(pid: u32) -> Result<()> { - let pid = i32::try_from(pid)?; - - println!("Waiting for agave-validator process {pid} to terminate"); - loop { - // From man kill(2) - // - // If sig is 0, then no signal is sent, but existence and permission - // checks are still performed; this can be used to check for 
the - // existence of a process ID or process group ID that the caller is - // permitted to signal. - let result = unsafe { - libc::kill(pid, /*sig:*/ 0) - }; - if result >= 0 { - // Give the process some time to exit before checking again - thread::sleep(Duration::from_millis(500)); - } else { - let errno = io::Error::last_os_error() - .raw_os_error() - .ok_or(Error::Dynamic("unable to read raw os error".into()))?; - match errno { - libc::ESRCH => { - println!("Done, agave-validator process {pid} has terminated"); - break; - } - libc::EINVAL => { - // An invalid signal was specified, we only pass sig=0 so - // this should not be possible - Err(Error::Dynamic( - format!("unexpected invalid signal error for kill({pid}, 0)").into(), - ))?; - } - libc::EPERM => { - Err(io::Error::from(io::ErrorKind::PermissionDenied))?; - } - unknown => { - Err(Error::Dynamic( - format!("unexpected errno for kill({pid}, 0): {unknown}").into(), - ))?; - } - } - } + if exit_args.monitor { + monitor::execute(matches, ledger_path)?; } Ok(()) } -#[cfg(not(target_os = "linux"))] -fn poll_until_pid_terminates(_pid: u32) -> Result<()> { - println!("Unable to wait for agave-validator process termination on this platform"); - Ok(()) -} - #[cfg(test)] mod tests { use {super::*, crate::commands::tests::verify_args_struct_by_command}; @@ -209,7 +126,7 @@ mod tests { .parse() .expect("invalid DEFAULT_MAX_DELINQUENT_STAKE"), force: false, - post_exit_action: None, + monitor: false, skip_new_snapshot_check: false, skip_health_check: false, } @@ -234,21 +151,12 @@ mod tests { } #[test] - fn verify_args_struct_by_command_exit_with_post_exit_action() { + fn verify_args_struct_by_command_exit_with_monitor() { verify_args_struct_by_command( command(), vec![COMMAND, "--monitor"], ExitArgs { - post_exit_action: Some(PostExitAction::Monitor), - ..ExitArgs::default() - }, - ); - - verify_args_struct_by_command( - command(), - vec![COMMAND, "--wait-for-exit"], - ExitArgs { - post_exit_action: 
Some(PostExitAction::Wait), + monitor: true, ..ExitArgs::default() }, ); diff --git a/validator/src/commands/mod.rs b/validator/src/commands/mod.rs index 815e44fad11243..43cae3731e1ef5 100644 --- a/validator/src/commands/mod.rs +++ b/validator/src/commands/mod.rs @@ -27,9 +27,6 @@ pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), - - #[error(transparent)] - TryFromInt(#[from] std::num::TryFromIntError), } pub type Result = std::result::Result; From 080febc8db3bd067bb18f775b91e64da9207910d Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 25 Jun 2025 09:56:34 -0700 Subject: [PATCH 092/124] Add options to bench-vote to tweak server and client connection limits (#6648) * Added options to allow better control on connection limiting * Passing connection_pool_size to producer in bench-vote * keep the original connection pool size unchanged * Update bench-vote/src/main.rs Co-authored-by: Alex Pyattaev * Update bench-vote/src/main.rs Co-authored-by: Alex Pyattaev * added option for max connections per ipaddr per min --------- Co-authored-by: Alex Pyattaev --- bench-vote/src/main.rs | 57 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/bench-vote/src/main.rs b/bench-vote/src/main.rs index 92b50fc14109c8..5bed9927b196b6 100644 --- a/bench-vote/src/main.rs +++ b/bench-vote/src/main.rs @@ -17,7 +17,10 @@ use { solana_signer::Signer, solana_streamer::{ packet::PacketBatchRecycler, - quic::{spawn_server_multi, QuicServerParams}, + quic::{ + spawn_server_multi, QuicServerParams, DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER, + DEFAULT_MAX_STAKED_CONNECTIONS, + }, streamer::{receiver, PacketBatchReceiver, StakedNodes, StreamerReceiveStats}, }, solana_transaction::Transaction, @@ -92,6 +95,34 @@ fn main() -> Result<()> { .takes_value(true) .help("Use this many producer threads."), ) + .arg( + Arg::with_name("max-connections") + .long("max-connections") + 
.value_name("NUM") + .takes_value(true) + .help("Maximum concurrent client connections allowed on the server side."), + ) + .arg( + Arg::with_name("max-connections-per-peer") + .long("max-connections-per-peer") + .value_name("NUM") + .takes_value(true) + .help("Maximum concurrent client connections per peer allowed on the server side."), + ) + .arg( + Arg::with_name("max-connections-per-ipaddr-per-min") + .long("max-connections-per-ipaddr-per-min") + .value_name("NUM") + .takes_value(true) + .help("Maximum client connections per ipaddr per minute allowed on the server side."), + ) + .arg( + Arg::with_name("connection-pool-size") + .long("connection-pool-size") + .value_name("NUM") + .takes_value(true) + .help("Maximum concurrent client connections on the client side."), + ) .arg( Arg::with_name("server-only") .long("server-only") @@ -145,6 +176,16 @@ fn main() -> Result<()> { let vote_use_quic = value_t_or_exit!(matches, "use-quic", bool); let num_producers: u64 = value_t!(matches, "num-producers", u64).unwrap_or(4); + + let max_connections: usize = + value_t!(matches, "max-connections", usize).unwrap_or(DEFAULT_MAX_STAKED_CONNECTIONS); + let max_connections_per_peer: usize = value_t!(matches, "max-connections-per-peer", usize) + .unwrap_or(DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER); + let max_connections_per_ipaddr_per_min: usize = + value_t!(matches, "max-connections-per-ipaddr-per-min", usize).unwrap_or(1024); // Default value for max connections per ipaddr per minute + let connection_pool_size: usize = + value_t!(matches, "connection-pool-size", usize).unwrap_or(256); + let use_connection_cache = matches.is_present("use-connection-cache"); let server_only = matches.is_present("server-only"); let client_only = matches.is_present("client-only"); @@ -180,7 +221,8 @@ fn main() -> Result<()> { QuicParams { identity_keypair, - staked_nodes + staked_nodes, + connection_pool_size } }); @@ -202,8 +244,12 @@ fn main() -> Result<()> { if let Some(quic_params) = &quic_params { 
let quic_server_params = QuicServerParams { - max_connections_per_ipaddr_per_min: 1024, - max_connections_per_peer: 1024, + max_connections_per_ipaddr_per_min: max_connections_per_ipaddr_per_min + .try_into() + .unwrap(), + max_connections_per_peer, + max_staked_connections: max_connections, + max_unstaked_connections: 0, ..Default::default() }; let (s_reader, r_reader) = unbounded(); @@ -316,6 +362,7 @@ enum Transporter { struct QuicParams { identity_keypair: Keypair, staked_nodes: Arc>, + connection_pool_size: usize, } fn producer( @@ -330,7 +377,7 @@ fn producer( if let Some(quic_params) = &quic_params { Transporter::Cache(Arc::new(ConnectionCache::new_with_client_options( "connection_cache_vote_quic", - 256, // connection_pool_size + quic_params.connection_pool_size, None, // client_endpoint Some(( &quic_params.identity_keypair, From 80aa1ed9801131ca71ddd7a1565675475bb7e365 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 25 Jun 2025 14:50:08 -0500 Subject: [PATCH 093/124] validator: Deprecate blockstore-processor for replay (#4728) Emit a deprecation warning if blockstore-processor is set when using --block-verification-method --- CHANGELOG.md | 1 + validator/src/commands/run/execute.rs | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 116ad2b1d96900..bcdf0cfa1e1cc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ Release channels have their own copy of this changelog: #### Deprecations * Using `--snapshot-interval-slots 0` to disable generating snapshots is now deprecated. +* Using `blockstore-processor` for `--block-verification-method` is now deprecated. 
### Platform Tools SDK diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index 7df7a0002879cc..f48a1fcc61c359 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -1003,6 +1003,17 @@ pub fn execute( "block_verification_method", BlockVerificationMethod ); + match validator_config.block_verification_method { + BlockVerificationMethod::BlockstoreProcessor => { + warn!( + "The value \"blockstore-processor\" for --block-verification-method has been \ + deprecated. The value \"blockstore-processor\" is still allowed for now, but \ + is planned for removal in the near future. To update, either set the value \ + \"unified-scheduler\" or remove the --block-verification-method argument" + ); + } + BlockVerificationMethod::UnifiedScheduler => {} + } validator_config.block_production_method = value_t_or_exit!( matches, // comment to align formatting... "block_production_method", From e632b9eeadbce7533bc6664cd1e405c6fb9872f0 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Wed, 25 Jun 2025 16:34:46 -0500 Subject: [PATCH 094/124] clean up generate index (#6733) * clean up rent collector * rm geneisis config * clean up import --- accounts-db/src/accounts_db.rs | 12 ------------ accounts-db/src/accounts_db/tests.rs | 6 ++---- runtime/src/serde_snapshot.rs | 3 --- runtime/src/serde_snapshot/tests.rs | 5 ----- 4 files changed, 2 insertions(+), 24 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e462f138e54f8e..204923188a32ab 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -85,7 +85,6 @@ use { solana_account::{Account, AccountSharedData, ReadableAccount}, solana_clock::{BankId, Epoch, Slot}, solana_epoch_schedule::EpochSchedule, - solana_genesis_config::GenesisConfig, solana_hash::Hash, solana_lattice_hash::lt_hash::LtHash, solana_measure::{meas_dur, measure::Measure, 
measure_us}, @@ -7905,7 +7904,6 @@ impl AccountsDb { &self, limit_load_slot_count_from_snapshot: Option, verify: bool, - genesis_config: &GenesisConfig, should_calculate_duplicates_lt_hash: bool, ) -> IndexGenerationInfo { let mut total_time = Measure::start("generate_index"); @@ -7914,14 +7912,6 @@ impl AccountsDb { if let Some(limit) = limit_load_slot_count_from_snapshot { slots.truncate(limit); // get rid of the newer slots and keep just the older } - let max_slot = slots.last().cloned().unwrap_or_default(); - let schedule = &genesis_config.epoch_schedule; - let rent_collector = RentCollector::new( - schedule.get_epoch(max_slot), - schedule.clone(), - genesis_config.slots_per_year(), - genesis_config.rent.clone(), - ); let accounts_data_len = AtomicU64::new(0); let zero_lamport_pubkeys = Mutex::new(HashSet::new()); @@ -8083,8 +8073,6 @@ impl AccountsDb { .sum(); index_time.stop(); - info!("rent_collector: {:?}", rent_collector); - let mut index_flush_us = 0; let total_duplicate_slot_keys = AtomicU64::default(); let mut populate_duplicate_keys_us = 0; diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index e45d13af2e5b76..d60483941ffcdb 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -177,9 +177,8 @@ fn run_generate_index_duplicates_within_slot_test(db: AccountsDb, reverse: bool) // construct append vec with account to generate an index from append_vec.accounts.append_accounts(&storable_accounts, 0); - let genesis_config = GenesisConfig::default(); assert!(!db.accounts_index.contains(&pubkey)); - let result = db.generate_index(None, false, &genesis_config, false); + let result = db.generate_index(None, false, false); // index entry should only contain a single entry for the pubkey since index cannot hold more than 1 entry per slot let entry = db.accounts_index.get_cloned(&pubkey).unwrap(); assert_eq!(entry.slot_list.read().unwrap().len(), 1); @@ -217,9 +216,8 @@ fn 
test_generate_index_for_single_ref_zero_lamport_slot() { let data = [(&pubkey, &account)]; let storable_accounts = (slot0, &data[..]); append_vec.accounts.append_accounts(&storable_accounts, 0); - let genesis_config = GenesisConfig::default(); assert!(!db.accounts_index.contains(&pubkey)); - let result = db.generate_index(None, false, &genesis_config, false); + let result = db.generate_index(None, false, false); let entry = db.accounts_index.get_cloned(&pubkey).unwrap(); assert_eq!(entry.slot_list.read().unwrap().len(), 1); assert_eq!(append_vec.alive_bytes(), aligned_stored_size(0)); diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 2f9423db479a66..d82982aea0c7e3 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -889,7 +889,6 @@ where snapshot_accounts_db_fields, account_paths, storage_and_next_append_vec_id, - genesis_config, limit_load_slot_count_from_snapshot, verify_index, accounts_db_config, @@ -1055,7 +1054,6 @@ fn reconstruct_accountsdb_from_fields( snapshot_accounts_db_fields: SnapshotAccountsDbFields, account_paths: &[PathBuf], storage_and_next_append_vec_id: StorageAndNextAccountsFileId, - genesis_config: &GenesisConfig, limit_load_slot_count_from_snapshot: Option, verify_index: bool, accounts_db_config: Option, @@ -1255,7 +1253,6 @@ where } = accounts_db.generate_index( limit_load_slot_count_from_snapshot, verify_index, - genesis_config, is_accounts_lt_hash_enabled, ); info!("Building accounts index... 
Done in {:?}", start.elapsed()); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 10c4ada4b2256f..eb8e041cbe31f3 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -27,7 +27,6 @@ mod serde_snapshot_tests { }, solana_clock::Slot, solana_epoch_schedule::EpochSchedule, - solana_genesis_config::{ClusterType, GenesisConfig}, solana_hash::Hash, solana_nohash_hasher::BuildNoHashHasher, solana_pubkey::Pubkey, @@ -72,10 +71,6 @@ mod serde_snapshot_tests { snapshot_accounts_db_fields, account_paths, storage_and_next_append_vec_id, - &GenesisConfig { - cluster_type: ClusterType::Development, - ..GenesisConfig::default() - }, None, false, Some(solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), From a32c018c772bbbb7466f87b23d709326b5258ac0 Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Wed, 25 Jun 2025 23:57:59 +0200 Subject: [PATCH 095/124] core, bench: move bench_deserialize_hdr to merkle variant (#6732) move bench_deserialize_hdr to merkle variant --- core/benches/shredder.rs | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index bb196cee7826a4..3b3cfe06d98290 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -10,8 +10,7 @@ use { solana_keypair::Keypair, solana_ledger::shred::{ get_data_shred_bytes_per_batch_typical, max_entries_per_n_shred, max_ticks_per_n_shreds, - ProcessShredsStats, ReedSolomonCache, Shred, ShredFlags, Shredder, - DATA_SHREDS_PER_FEC_BLOCK, + ProcessShredsStats, ReedSolomonCache, Shred, Shredder, DATA_SHREDS_PER_FEC_BLOCK, }, solana_perf::test_tx, test::{black_box, Bencher}, @@ -121,9 +120,25 @@ fn bench_deshredder(bencher: &mut Bencher) { #[bench] fn bench_deserialize_hdr(bencher: &mut Bencher) { - let data = vec![0; SHRED_SIZE_TYPICAL]; - - let shred = Shred::new_from_data(2, 1, 1, &data, 
ShredFlags::LAST_SHRED_IN_SLOT, 0, 0, 1); + let keypair = Keypair::new(); + let shredder = Shredder::new(2, 1, 0, 0).unwrap(); + let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let mut stats = ProcessShredsStats::default(); + let reed_solomon_cache = ReedSolomonCache::default(); + let mut shreds = shredder + .make_merkle_shreds_from_entries( + &keypair, + &[], + true, // is_last_in_slot + merkle_root, + 1, // next_shred_index + 0, // next_code_index + &reed_solomon_cache, + &mut stats, + ) + .filter(Shred::is_data) + .collect::>(); + let shred = shreds.remove(0); bencher.iter(|| { let payload = shred.payload().clone(); From e72266ca6ff6505a9b21d608d81becbba2c38db9 Mon Sep 17 00:00:00 2001 From: Steven Luscher Date: Wed, 25 Jun 2025 16:09:03 -0700 Subject: [PATCH 096/124] Airgap`TransactionError` type from RPC (#6435) * Add `UiTransactionError` and `UiTransactionResult` * Fixup callsites and tests * Review feedback * Changed CLI types to use `UiTransactionError` * Eliminated `UiTransactionResult` in favour of `Result<(), UiTransactionError>` * Implemented `std::error::Error` on `UiTransactionError` which just forwards to `TransactionError` * Implemented `fmt::Display` on `UiTransactionError` which just forwards to `TransactionError` * Made the inner error private on `UiTransactionError` * Eliminated `Deref` implementation, requiring explicit conversion * * Results, all the way down * Make the `InstructionError` deserialize actually work by taking all of the bytes * Remove stray dependency * Fix the deserializer --- Cargo.lock | 6 + cli-output/Cargo.toml | 1 + cli-output/src/cli_output.rs | 6 +- cli-output/src/display.rs | 4 +- cli/Cargo.toml | 1 + cli/src/wallet.rs | 2 +- client/Cargo.toml | 1 + ...nd_and_confirm_transactions_in_parallel.rs | 32 ++-- ledger-tool/src/bigtable.rs | 2 +- programs/sbf/Cargo.lock | 4 + rpc-client-api/src/client_error.rs | 2 +- rpc-client-types/src/response.rs | 14 +- rpc/src/rpc.rs | 38 ++--- 
rpc/src/rpc_subscriptions.rs | 6 +- svm/examples/Cargo.lock | 4 + .../json-rpc/server/src/rpc_process.rs | 2 +- transaction-status-client-types/Cargo.toml | 5 + transaction-status-client-types/src/lib.rs | 153 +++++++++++++++++- transaction-status/src/lib.rs | 8 +- 19 files changed, 228 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ffda0a4b3c9d26..99bcbf223e9cfb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7449,6 +7449,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-transaction-status", + "solana-transaction-status-client-types", "solana-udp-client", "solana-version", "solana-vote-program", @@ -7514,6 +7515,7 @@ dependencies = [ "solana-transaction-context", "solana-transaction-error", "solana-transaction-status", + "solana-transaction-status-client-types", "solana-vote-program", "spl-memo", ] @@ -7559,6 +7561,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction", "solana-transaction-error", + "solana-transaction-status-client-types", "solana-udp-client", "thiserror 2.0.12", "tokio", @@ -11386,12 +11389,15 @@ dependencies = [ "serde_json", "solana-account-decoder-client-types", "solana-commitment-config", + "solana-instruction", "solana-message", + "solana-pubkey", "solana-reward-info", "solana-signature", "solana-transaction", "solana-transaction-context", "solana-transaction-error", + "test-case", "thiserror 2.0.12", ] diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 8af951f23c92b9..f35df3c5e3cdb8 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -46,6 +46,7 @@ solana-sysvar = { workspace = true } solana-transaction = { workspace = true, features = ["verify"] } solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } +solana-transaction-status-client-types = { workspace = true } solana-vote-program = { workspace = true } spl-memo = { workspace = true, features = ["no-entrypoint"] } diff --git a/cli-output/src/cli_output.rs 
b/cli-output/src/cli_output.rs index 4aad5021051af7..a34af79a43d211 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -35,11 +35,11 @@ use { solana_stake_interface::state::{Authorized, Lockup}, solana_sysvar::stake_history::StakeHistoryEntry, solana_transaction::{versioned::VersionedTransaction, Transaction}, - solana_transaction_error::TransactionError, solana_transaction_status::{ EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus, UiTransactionStatusMeta, }, + solana_transaction_status_client_types::UiTransactionError, solana_vote_program::{ authorized_voters::AuthorizedVoters, vote_state::{BlockTimestamp, LandedVote, MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY}, @@ -876,7 +876,7 @@ impl fmt::Display for CliHistorySignature { pub struct CliHistoryVerbose { pub slot: Slot, pub block_time: Option, - pub err: Option, + pub err: Option, pub confirmation_status: Option, pub memo: Option, } @@ -2925,7 +2925,7 @@ pub struct CliTransactionConfirmation { #[serde(skip_serializing)] pub get_transaction_error: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub err: Option, + pub err: Option, } impl QuietDisplay for CliTransactionConfirmation {} diff --git a/cli-output/src/display.rs b/cli-output/src/display.rs index 5613746c0fc1b4..4245d9cfecf613 100644 --- a/cli-output/src/display.rs +++ b/cli-output/src/display.rs @@ -15,10 +15,10 @@ use { solana_signature::Signature, solana_stake_interface as stake, solana_transaction::versioned::{TransactionVersion, VersionedTransaction}, - solana_transaction_error::TransactionError, solana_transaction_status::{ Rewards, UiReturnDataEncoding, UiTransactionReturnData, UiTransactionStatusMeta, }, + solana_transaction_status_client_types::UiTransactionError, spl_memo::{id as spl_memo_id, v1::id as spl_memo_v1_id}, std::{collections::HashMap, fmt, io, time::Duration}, }; @@ -541,7 +541,7 @@ fn write_rewards( fn write_status( w: &mut W, - transaction_status: &Result<(), 
TransactionError>, + transaction_status: &Result<(), UiTransactionError>, prefix: &str, ) -> io::Result<()> { writeln!( diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 65acf02be571e7..b1811dae7fd456 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -92,6 +92,7 @@ solana-tpu-client = { workspace = true, features = ["default"] } solana-transaction = "=2.2.3" solana-transaction-error = "=2.2.1" solana-transaction-status = { workspace = true } +solana-transaction-status-client-types = { workspace = true } solana-udp-client = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs index dd7cd097cbe890..32e3658fb71972 100644 --- a/cli/src/wallet.rs +++ b/cli/src/wallet.rs @@ -793,7 +793,7 @@ pub fn process_confirm( confirmation_status: Some(transaction_status.confirmation_status()), transaction, get_transaction_error, - err: transaction_status.err.clone(), + err: transaction_status.err.clone().map(Into::into), } } else { CliTransactionConfirmation { diff --git a/client/Cargo.toml b/client/Cargo.toml index b9bf4f9604e658..32d683114863b0 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -48,6 +48,7 @@ solana-time-utils = { workspace = true } solana-tpu-client = { workspace = true, features = ["default"] } solana-transaction = { workspace = true } solana-transaction-error = { workspace = true } +solana-transaction-status-client-types = { workspace = true } solana-udp-client = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/client/src/send_and_confirm_transactions_in_parallel.rs b/client/src/send_and_confirm_transactions_in_parallel.rs index 8e754b5d01b627..f6ea54655961ed 100644 --- a/client/src/send_and_confirm_transactions_in_parallel.rs +++ b/client/src/send_and_confirm_transactions_in_parallel.rs @@ -284,32 +284,32 @@ async fn send_transaction_with_rpc_fallback( ErrorKind::Io(_) | 
ErrorKind::Reqwest(_) => { // fall through on io error, we will retry the transaction } - ErrorKind::TransactionError(TransactionError::BlockhashNotFound) - | ErrorKind::RpcError(RpcError::RpcResponseError { - data: - RpcResponseErrorData::SendTransactionPreflightFailure( - RpcSimulateTransactionResult { - err: Some(TransactionError::BlockhashNotFound), - .. - }, - ), - .. - }) => { + ErrorKind::TransactionError(TransactionError::BlockhashNotFound) => { // fall through so that we will resend with another blockhash } - ErrorKind::TransactionError(transaction_error) - | ErrorKind::RpcError(RpcError::RpcResponseError { + ErrorKind::TransactionError(transaction_error) => { + // if we get other than blockhash not found error the transaction is invalid + context.error_map.insert(index, transaction_error.clone()); + } + ErrorKind::RpcError(RpcError::RpcResponseError { data: RpcResponseErrorData::SendTransactionPreflightFailure( RpcSimulateTransactionResult { - err: Some(transaction_error), + err: Some(ui_transaction_error), .. }, ), .. 
}) => { - // if we get other than blockhash not found error the transaction is invalid - context.error_map.insert(index, transaction_error.clone()); + match TransactionError::from(ui_transaction_error.clone()) { + TransactionError::BlockhashNotFound => { + // fall through so that we will resend with another blockhash + } + err => { + // if we get other than blockhash not found error the transaction is invalid + context.error_map.insert(index, err); + } + } } _ => { return Err(TpuSenderError::from(e)); diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 4721cba29d9e89..3c872f5596b10c 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -519,7 +519,7 @@ async fn confirm( confirmation_status: Some(transaction_status.confirmation_status()), transaction, get_transaction_error, - err: transaction_status.err.clone(), + err: transaction_status.err.clone().map(Into::into), }; println!("{}", output_format.formatted_string(&cli_transaction)); Ok(()) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9b77ccaacedc30..79190609fb621c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5837,6 +5837,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-transaction-status", + "solana-transaction-status-client-types", "solana-vote-program", "spl-memo", ] @@ -5880,6 +5881,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction", "solana-transaction-error", + "solana-transaction-status-client-types", "solana-udp-client", "thiserror 2.0.12", "tokio", @@ -9539,7 +9541,9 @@ dependencies = [ "serde_json", "solana-account-decoder-client-types", "solana-commitment-config", + "solana-instruction", "solana-message", + "solana-pubkey", "solana-reward-info", "solana-signature", "solana-transaction", diff --git a/rpc-client-api/src/client_error.rs b/rpc-client-api/src/client_error.rs index fbe2fcb46b93c0..00c228471ea184 100644 --- a/rpc-client-api/src/client_error.rs +++ 
b/rpc-client-api/src/client_error.rs @@ -39,7 +39,7 @@ impl ErrorKind { }, ), .. - }) => Some(tx_err.clone()), + }) => Some(tx_err.clone().into()), Self::TransactionError(tx_err) => Some(tx_err.clone()), _ => None, } diff --git a/rpc-client-types/src/response.rs b/rpc-client-types/src/response.rs index 117c5be75976bd..ff96616e127b58 100644 --- a/rpc-client-types/src/response.rs +++ b/rpc-client-types/src/response.rs @@ -4,10 +4,10 @@ use { solana_clock::{Epoch, Slot, UnixTimestamp}, solana_fee_calculator::{FeeCalculator, FeeRateGovernor}, solana_inflation::Inflation, - solana_transaction_error::{TransactionError, TransactionResult as Result}, + solana_transaction_error::TransactionResult as Result, solana_transaction_status_client_types::{ ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock, - UiInnerInstructions, UiTransactionReturnData, + UiInnerInstructions, UiTransactionError, UiTransactionReturnData, }, std::{collections::HashMap, fmt, net::SocketAddr, str::FromStr}, thiserror::Error, @@ -240,14 +240,14 @@ pub enum RpcSignatureResult { #[serde(rename_all = "camelCase")] pub struct RpcLogsResponse { pub signature: String, // Signature as base58 string - pub err: Option, + pub err: Option, pub logs: Vec, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct ProcessedSignatureResult { - pub err: Option, + pub err: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] @@ -397,7 +397,7 @@ pub struct RpcSignatureConfirmation { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct RpcSimulateTransactionResult { - pub err: Option, + pub err: Option, pub logs: Option>, pub accounts: Option>>, pub units_consumed: Option, @@ -452,7 +452,7 @@ pub struct RpcTokenAccountBalance { pub struct RpcConfirmedTransactionStatusWithSignature { pub signature: String, pub slot: Slot, - pub err: Option, + pub err: 
Option, pub memo: Option, pub block_time: Option, pub confirmation_status: Option, @@ -507,7 +507,7 @@ impl From for RpcConfirmedTransactionSt Self { signature: signature.to_string(), slot, - err, + err: err.map(Into::into), memo, block_time, confirmation_status: None, diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index b1193802da6b60..1df8307ebd9bc3 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -3924,7 +3924,7 @@ pub mod rpc_full { return Err(RpcCustomError::SendTransactionPreflightFailure { message: format!("Transaction simulation failed: {err}"), result: RpcSimulateTransactionResult { - err: Some(err), + err: Some(err.into()), logs: Some(logs), accounts: None, units_consumed: Some(units_consumed), @@ -4073,7 +4073,7 @@ pub mod rpc_full { Ok(new_response( bank, RpcSimulateTransactionResult { - err: result.err(), + err: result.err().map(Into::into), logs: Some(logs), accounts, units_consumed: Some(units_consumed), @@ -7276,17 +7276,17 @@ pub mod tests { let meta = meta.unwrap(); assert_eq!( meta.err, - Some(TransactionError::InstructionError( - 0, - InstructionError::Custom(1) - )) + Some( + TransactionError::InstructionError(0, InstructionError::Custom(1)) + .into() + ) ); assert_eq!( meta.status, - Err(TransactionError::InstructionError( - 0, - InstructionError::Custom(1) - )) + Err( + TransactionError::InstructionError(0, InstructionError::Custom(1)) + .into() + ), ); } else { assert_eq!(meta, None); @@ -7322,17 +7322,17 @@ pub mod tests { let meta = meta.unwrap(); assert_eq!( meta.err, - Some(TransactionError::InstructionError( - 0, - InstructionError::Custom(1) - )) + Some( + TransactionError::InstructionError(0, InstructionError::Custom(1)) + .into() + ) ); assert_eq!( meta.status, - Err(TransactionError::InstructionError( - 0, - InstructionError::Custom(1) - )) + Err( + TransactionError::InstructionError(0, InstructionError::Custom(1)) + .into() + ), ); } else { assert_eq!(meta, None); @@ -8533,7 +8533,7 @@ pub mod tests { }, ]); } - 
assert_eq!(result["result"]["value"]["data"], expected_value); + assert_eq!(result["result"]["value"]["data"], expected_value,); // Test Mint let req = format!( diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 76a44d9e94d28d..6bc6aa51d2f3be 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -402,7 +402,9 @@ fn filter_signature_result( ) -> (Option, Slot) { ( result.map(|result| { - RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: result.err() }) + RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { + err: result.err().map(Into::into), + }) }), last_notified_slot, ) @@ -446,7 +448,7 @@ fn filter_logs_results( ) -> (impl Iterator, Slot) { let responses = logs.into_iter().flatten().map(|log| RpcLogsResponse { signature: log.signature.to_string(), - err: log.result.err(), + err: log.result.err().map(Into::into), logs: log.log_messages, }); (responses, last_notified_slot) diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 28f40734667469..f2a9f352f3f7d7 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -5684,6 +5684,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-transaction-status", + "solana-transaction-status-client-types", "solana-vote-program", "spl-memo", ] @@ -5727,6 +5728,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction", "solana-transaction-error", + "solana-transaction-status-client-types", "solana-udp-client", "thiserror 2.0.12", "tokio", @@ -8639,7 +8641,9 @@ dependencies = [ "serde_json", "solana-account-decoder-client-types", "solana-commitment-config", + "solana-instruction", "solana-message", + "solana-pubkey", "solana-reward-info", "solana-signature", "solana-transaction", diff --git a/svm/examples/json-rpc/server/src/rpc_process.rs b/svm/examples/json-rpc/server/src/rpc_process.rs index 00b266891f31e7..9b9c19905197a8 100644 --- a/svm/examples/json-rpc/server/src/rpc_process.rs 
+++ b/svm/examples/json-rpc/server/src/rpc_process.rs @@ -757,7 +757,7 @@ pub mod rpc { Ok(new_response( 0, RpcSimulateTransactionResult { - err: result.err(), + err: result.err().map(Into::into), logs: Some(logs), accounts, units_consumed: Some(units_consumed), diff --git a/transaction-status-client-types/Cargo.toml b/transaction-status-client-types/Cargo.toml index d67ad700d692c4..09b85aba647754 100644 --- a/transaction-status-client-types/Cargo.toml +++ b/transaction-status-client-types/Cargo.toml @@ -21,10 +21,15 @@ serde_derive = { workspace = true } serde_json = { workspace = true } solana-account-decoder-client-types = { workspace = true } solana-commitment-config = { workspace = true } +solana-instruction = { workspace = true } solana-message = { workspace = true } +solana-pubkey = { workspace = true } solana-reward-info = { workspace = true, features = ["serde"] } solana-signature = { workspace = true, default-features = false } solana-transaction = { workspace = true, features = ["serde"] } solana-transaction-context = { workspace = true } solana-transaction-error = { workspace = true, features = ["serde"] } thiserror = { workspace = true } + +[dev-dependencies] +test-case = { workspace = true } diff --git a/transaction-status-client-types/src/lib.rs b/transaction-status-client-types/src/lib.rs index 6036f9c8fc37df..2daca8be5289df 100644 --- a/transaction-status-client-types/src/lib.rs +++ b/transaction-status-client-types/src/lib.rs @@ -3,10 +3,16 @@ use { crate::option_serializer::OptionSerializer, base64::{prelude::BASE64_STANDARD, Engine}, core::fmt, + serde::{ + de::{self, Deserialize as DeserializeTrait, Error as DeserializeError}, + ser::{Serialize as SerializeTrait, SerializeTupleVariant}, + Deserializer, + }, serde_derive::{Deserialize, Serialize}, - serde_json::Value, + serde_json::{from_value, Value}, solana_account_decoder_client_types::token::UiTokenAmount, solana_commitment_config::CommitmentConfig, + 
solana_instruction::error::InstructionError, solana_message::{ compiled_instruction::CompiledInstruction, v0::{LoadedAddresses, MessageAddressTableLookup}, @@ -226,12 +232,94 @@ impl From<&MessageAddressTableLookup> for UiAddressTableLookup { } } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct UiTransactionError(TransactionError); + +impl fmt::Display for UiTransactionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl std::error::Error for UiTransactionError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.0) + } +} + +impl From for UiTransactionError { + fn from(value: TransactionError) -> Self { + UiTransactionError(value) + } +} + +impl From for TransactionError { + fn from(value: UiTransactionError) -> Self { + value.0 + } +} + +impl SerializeTrait for UiTransactionError { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match &self.0 { + TransactionError::InstructionError(outer_instruction_index, err) => { + let mut state = serializer.serialize_tuple_variant( + "TransactionError", + 8, + "InstructionError", + 2, + )?; + state.serialize_field(outer_instruction_index)?; + state.serialize_field(err)?; + state.end() + } + err => TransactionError::serialize(err, serializer), + } + } +} + +impl<'de> DeserializeTrait<'de> for UiTransactionError { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let value = serde_json::Value::deserialize(deserializer)?; + if let Some(obj) = value.as_object() { + if let Some(arr) = obj.get("InstructionError").and_then(|v| v.as_array()) { + let outer_instruction_index: u8 = arr + .first() + .ok_or_else(|| { + DeserializeError::invalid_length(0, &"Expected the first element to exist") + })? + .as_u64() + .ok_or_else(|| { + DeserializeError::custom("Expected the first element to be a u64") + })? 
as u8; + let instruction_error = arr.get(1).ok_or_else(|| { + DeserializeError::invalid_length(1, &"Expected there to be at least 2 elements") + })?; + let err: InstructionError = from_value(instruction_error.clone()) + .map_err(|e| DeserializeError::custom(e.to_string()))?; + return Ok(UiTransactionError(TransactionError::InstructionError( + outer_instruction_index, + err, + ))); + } + } + let err = TransactionError::deserialize(value).map_err(de::Error::custom)?; + Ok(UiTransactionError(err)) + } +} + /// A duplicate representation of TransactionStatusMeta with `err` field #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct UiTransactionStatusMeta { - pub err: Option, - pub status: TransactionResult<()>, // This field is deprecated. See https://github.com/solana-labs/solana/issues/9302 + pub err: Option, + pub status: Result<(), UiTransactionError>, // This field is deprecated. See https://github.com/solana-labs/solana/issues/9302 pub fee: u64, pub pre_balances: Vec, pub post_balances: Vec, @@ -285,8 +373,8 @@ pub struct UiTransactionStatusMeta { impl From for UiTransactionStatusMeta { fn from(meta: TransactionStatusMeta) -> Self { Self { - err: meta.status.clone().err(), - status: meta.status, + err: meta.status.clone().map_err(Into::into).err(), + status: meta.status.map_err(Into::into), fee: meta.fee, pre_balances: meta.pre_balances, post_balances: meta.post_balances, @@ -660,7 +748,11 @@ impl TransactionStatus { #[cfg(test)] mod test { - use {super::*, serde_json::json}; + use { + super::*, + serde_json::{from_value, json, to_value}, + test_case::test_case, + }; #[test] fn test_decode_invalid_transaction() { @@ -812,4 +904,53 @@ mod test { }"; test_serde::(json_input, expected_json_output); } + + #[test_case( + TransactionError::InstructionError (42, InstructionError::Custom(0xdeadbeef)), + json!({"InstructionError": [ + 42, + { "Custom": 0xdeadbeef_u32 }, + ]}); + "`InstructionError`" + )] + 
#[test_case(TransactionError::InsufficientFundsForRent { + account_index: 42, + }, json!({"InsufficientFundsForRent": { + "account_index": 42, + }}); "Struct variant error")] + #[test_case(TransactionError::DuplicateInstruction(42), json!({ "DuplicateInstruction": 42 }); "Single-value tuple variant error")] + #[test_case(TransactionError::InsufficientFundsForFee, json!("InsufficientFundsForFee"); "Named variant error")] + fn test_serialize_ui_transaction_error( + transaction_error: TransactionError, + expected_serialization: Value, + ) { + let actual_serialization = to_value(UiTransactionError(transaction_error)) + .expect("Failed to serialize `UiTransactionError"); + assert_eq!(actual_serialization, expected_serialization); + } + + #[test_case( + TransactionError::InstructionError (42, InstructionError::Custom(0xdeadbeef)), + json!({"InstructionError": [ + 42, + { "Custom": 0xdeadbeef_u32 }, + ]}); + "`InstructionError`" + )] + #[test_case(TransactionError::InsufficientFundsForRent { + account_index: 42, + }, json!({"InsufficientFundsForRent": { + "account_index": 42, + }}); "Struct variant error")] + #[test_case(TransactionError::DuplicateInstruction(42), json!({ "DuplicateInstruction": 42 }); "Single-value tuple variant error")] + #[test_case(TransactionError::InsufficientFundsForFee, json!("InsufficientFundsForFee"); "Named variant error")] + fn test_deserialize_ui_transaction_error( + expected_transaction_error: TransactionError, + serialized_value: Value, + ) { + let UiTransactionError(actual_transaction_error) = + from_value::(serialized_value) + .expect("Failed to deserialize `UiTransactionError"); + assert_eq!(actual_transaction_error, expected_transaction_error); + } } diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 449fe355f6f6d4..9776a8a9a915a3 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -163,8 +163,8 @@ fn build_simple_ui_transaction_status_meta( show_rewards: bool, ) -> 
UiTransactionStatusMeta { UiTransactionStatusMeta { - err: meta.status.clone().err(), - status: meta.status, + err: meta.status.clone().map_err(Into::into).err(), + status: meta.status.map_err(Into::into), fee: meta.fee, pre_balances: meta.pre_balances, post_balances: meta.post_balances, @@ -197,8 +197,8 @@ fn parse_ui_transaction_status_meta( ) -> UiTransactionStatusMeta { let account_keys = AccountKeys::new(static_keys, Some(&meta.loaded_addresses)); UiTransactionStatusMeta { - err: meta.status.clone().err(), - status: meta.status, + err: meta.status.clone().map_err(Into::into).err(), + status: meta.status.map_err(Into::into), fee: meta.fee, pre_balances: meta.pre_balances, post_balances: meta.post_balances, From ba40986960515513dd95922090319cb6ba3c9f41 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Thu, 26 Jun 2025 11:42:20 -0500 Subject: [PATCH 097/124] optimize zero lamort pubkeys at index generation (#6699) * optimize zero lamort pubkeys at index generation * drop * append * mut * pr:var rename --- accounts-db/src/accounts_db.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 204923188a32ab..fa3c8b69c05cf8 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7947,6 +7947,7 @@ impl AccountsDb { let mut scan_time_sum = 0; let mut all_accounts_are_zero_lamports_slots_inner = 0; let mut all_zeros_slots_inner = vec![]; + let mut local_zero_lamport_pubkeys = Vec::new(); let mut insert_time_sum = 0; let mut total_including_duplicates_sum = 0; let mut accounts_data_len_sum = 0; @@ -7972,7 +7973,7 @@ impl AccountsDb { insert_time_us: insert_us, num_accounts: total_this_slot, accounts_data_len: accounts_data_len_this_slot, - zero_lamport_pubkeys: zero_pubkeys_this_slot, + zero_lamport_pubkeys: mut zero_lamport_pubkeys_this_slot, all_accounts_are_zero_lamports, num_did_not_exist, 
num_existed_in_mem, @@ -7993,10 +7994,7 @@ impl AccountsDb { all_accounts_are_zero_lamports_slots_inner += 1; all_zeros_slots_inner.push((*slot, Arc::clone(&storage))); } - let mut zero_pubkeys = zero_lamport_pubkeys.lock().unwrap(); - zero_pubkeys_this_slot.into_iter().for_each(|k| { - zero_pubkeys.insert(k); - }); + local_zero_lamport_pubkeys.append(&mut zero_lamport_pubkeys_this_slot); insert_us } else { @@ -8030,6 +8028,11 @@ impl AccountsDb { } if pass == 0 { + let mut zero_lamport_pubkeys_lock = zero_lamport_pubkeys.lock().unwrap(); + zero_lamport_pubkeys_lock.reserve(local_zero_lamport_pubkeys.len()); + zero_lamport_pubkeys_lock.extend(local_zero_lamport_pubkeys.into_iter()); + drop(zero_lamport_pubkeys_lock); + // This thread has finished processing its chunk of slots. // Update the index stats now. let index_stats = self.accounts_index.bucket_map_holder_stats(); From b9799676fdff2733103c629be0c5e778f1fcf563 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 26 Jun 2025 12:51:04 -0400 Subject: [PATCH 098/124] Adds `Offset` to callback on scan_accounts() family (#6744) --- accounts-db/src/accounts_db.rs | 10 ++++---- .../src/accounts_db/scan_account_storage.rs | 6 ++--- accounts-db/src/accounts_db/tests.rs | 2 +- accounts-db/src/accounts_file.rs | 19 +++++++++++---- accounts-db/src/ancient_append_vecs.rs | 15 +++++------- accounts-db/src/append_vec.rs | 22 +++++++++++++---- accounts-db/src/tiered_storage.rs | 2 +- accounts-db/src/tiered_storage/hot.rs | 24 ++++++++++++++----- accounts-db/src/tiered_storage/readable.rs | 13 ++++++++-- runtime/src/bank/accounts_lt_hash.rs | 2 +- 10 files changed, 79 insertions(+), 36 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index fa3c8b69c05cf8..05ff007754d687 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2509,7 +2509,7 @@ impl AccountsDb { return; } if let Some(storage) = self.storage.get_slot_storage_entry(slot) { - 
storage.accounts.scan_accounts(|account| { + storage.accounts.scan_accounts(|_offset, account| { let pk = account.pubkey(); match pubkey_refcount.entry(*pk) { dashmap::mapref::entry::Entry::Occupied(mut occupied_entry) => { @@ -4400,12 +4400,12 @@ impl AccountsDb { self.scan_cache_storage_fallback(slot, cache_map_func, |retval, storage| { match scan_account_storage_data { ScanAccountStorageData::NoData => { - storage.scan_accounts_without_data(|account_without_data| { + storage.scan_accounts_without_data(|_offset, account_without_data| { storage_scan_func(retval, &account_without_data, None); }); } ScanAccountStorageData::DataRefForStorage => { - storage.scan_accounts(|account| { + storage.scan_accounts(|_offset, account| { let account_without_data = StoredAccountInfoWithoutData::new_from(&account); storage_scan_func(retval, &account_without_data, Some(account.data)); }); @@ -6225,7 +6225,7 @@ impl AccountsDb { let mut lt_hash = storages .par_iter() .fold(LtHash::identity, |mut accum, storage| { - storage.accounts.scan_accounts(|account| { + storage.accounts.scan_accounts(|_offset, account| { let account_lt_hash = Self::lt_hash_account(&account, account.pubkey()); accum.mix_in(&account_lt_hash.0); }); @@ -7846,7 +7846,7 @@ impl AccountsDb { }; if secondary { // scan storage a second time to update the secondary index - storage.accounts.scan_accounts(|stored_account| { + storage.accounts.scan_accounts(|_offset, stored_account| { self.accounts_index.update_secondary_indexes( stored_account.pubkey(), &stored_account, diff --git a/accounts-db/src/accounts_db/scan_account_storage.rs b/accounts-db/src/accounts_db/scan_account_storage.rs index 388d22d8906af0..53d718e910022f 100644 --- a/accounts-db/src/accounts_db/scan_account_storage.rs +++ b/accounts-db/src/accounts_db/scan_account_storage.rs @@ -380,7 +380,7 @@ impl AccountsDb { where S: AppendVecScan, { - storage.accounts.scan_accounts(|account| { + storage.accounts.scan_accounts(|_offset, account| { if 
scanner.filter(account.pubkey()) { scanner.found_account(&LoadedAccount::Stored(account)) } @@ -707,7 +707,7 @@ mod tests { let slot = storage.slot(); let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test"); let mut all_accounts = Vec::default(); - storage.accounts.scan_accounts(|acct| { + storage.accounts.scan_accounts(|_offset, acct| { all_accounts.push((*acct.pubkey(), acct.to_account_shared_data())); }); let accounts = all_accounts @@ -741,7 +741,7 @@ mod tests { let slot = storage.slot() + max_slot; let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test"); let mut all_accounts = Vec::default(); - storage.accounts.scan_accounts(|acct| { + storage.accounts.scan_accounts(|_offset, acct| { all_accounts.push((*acct.pubkey(), acct.to_account_shared_data())); }); let accounts = all_accounts diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index d60483941ffcdb..594ba050d0442f 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -6581,7 +6581,7 @@ fn get_all_accounts_from_storages<'a>( storages .flat_map(|storage| { let mut vec = Vec::default(); - storage.accounts.scan_accounts(|account| { + storage.accounts.scan_accounts(|_offset, account| { vec.push((*account.pubkey(), account.to_account_shared_data())); }); // make sure scan_pubkeys results match diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 387f7b53911297..2b781568da90c9 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -2,7 +2,7 @@ use crate::append_vec::StoredAccountMeta; use { crate::{ - account_info::AccountInfo, + account_info::{AccountInfo, Offset}, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, accounts_db::AccountsFileId, accounts_update_notifier_interface::AccountForGeyser, @@ -293,10 +293,14 @@ impl AccountsFile { /// Iterate over all accounts and call `callback` 
with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfoWithoutData: the account itself, without account data + /// /// Note that account data is not read/passed to the callback. pub fn scan_accounts_without_data( &self, - callback: impl for<'local> FnMut(StoredAccountInfoWithoutData<'local>), + callback: impl for<'local> FnMut(Offset, StoredAccountInfoWithoutData<'local>), ) { match self { Self::AppendVec(av) => av.scan_accounts_without_data(callback), @@ -310,9 +314,16 @@ impl AccountsFile { /// Iterate over all accounts and call `callback` with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfo: the account itself, with account data + /// /// Prefer scan_accounts_without_data() when account data is not needed, /// as it can potentially read less and be faster. - pub fn scan_accounts(&self, callback: impl for<'local> FnMut(StoredAccountInfo<'local>)) { + pub fn scan_accounts( + &self, + callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), + ) { match self { Self::AppendVec(av) => av.scan_accounts(callback), Self::TieredStorage(ts) => { @@ -346,7 +357,7 @@ impl AccountsFile { &self, mut callback: impl for<'local> FnMut(AccountForGeyser<'local>), ) { - self.scan_accounts(|account| { + self.scan_accounts(|_offset, account| { let account_for_geyser = AccountForGeyser { pubkey: account.pubkey(), lamports: account.lamports(), diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 7386373bdff6d8..506de947025ccc 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -2079,7 +2079,7 @@ pub mod tests { shrink_in_progress .new_storage() .accounts - .scan_accounts(|_| { + .scan_accounts(|_, _| { count += 1; }); assert_eq!(count, 1); @@ -2240,7 +2240,7 @@ pub mod tests { }) .unwrap(); let mut count = 0; - 
storage.accounts.scan_accounts(|_| { + storage.accounts.scan_accounts(|_, _| { count += 1; }); assert_eq!(count, 2); @@ -3175,14 +3175,11 @@ pub mod tests { ); // make sure the single new append vec contains all the same accounts let mut two = Vec::default(); - one.first() - .unwrap() - .1 - .new_storage() - .accounts - .scan_accounts(|meta| { + one.first().unwrap().1.new_storage().accounts.scan_accounts( + |_offset, meta| { two.push((*meta.pubkey(), meta.to_account_shared_data())); - }); + }, + ); compare_all_accounts(&initial_accounts, &two[..]); } diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 1aad92819eac19..cc41e07589af4e 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -16,6 +16,7 @@ pub use meta::{AccountMeta, StoredMeta}; use meta::{AccountMeta, StoredMeta}; use { crate::{ + account_info::Offset, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, accounts_file::{ AccountsFileError, InternalsForArchive, MatchAccountOwnerError, Result, StorageAccess, @@ -989,12 +990,17 @@ impl AppendVec { /// Iterate over all accounts and call `callback` with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfoWithoutData: the account itself, without account data + /// /// Note that account data is not read/passed to the callback. 
pub fn scan_accounts_without_data( &self, - mut callback: impl for<'local> FnMut(StoredAccountInfoWithoutData<'local>), + mut callback: impl for<'local> FnMut(Offset, StoredAccountInfoWithoutData<'local>), ) { self.scan_stored_accounts_no_data(|stored_account| { + let offset = stored_account.offset(); let account = StoredAccountInfoWithoutData { pubkey: stored_account.pubkey(), lamports: stored_account.lamports(), @@ -1003,16 +1009,24 @@ impl AppendVec { executable: stored_account.executable(), rent_epoch: stored_account.rent_epoch(), }; - callback(account); + callback(offset, account); }) } /// Iterate over all accounts and call `callback` with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfo: the account itself, with account data + /// /// Prefer scan_accounts_without_data() when account data is not needed, /// as it can potentially read less and be faster. - pub fn scan_accounts(&self, mut callback: impl for<'local> FnMut(StoredAccountInfo<'local>)) { + pub fn scan_accounts( + &self, + mut callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), + ) { self.scan_accounts_stored_meta(|stored_account_meta| { + let offset = stored_account_meta.offset(); let account = StoredAccountInfo { pubkey: stored_account_meta.pubkey(), lamports: stored_account_meta.lamports(), @@ -1021,7 +1035,7 @@ impl AppendVec { executable: stored_account_meta.executable(), rent_epoch: stored_account_meta.rent_epoch(), }; - callback(account); + callback(offset, account); }) } diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 71d542c6d3c73c..355b385caa2ab1 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -384,7 +384,7 @@ mod tests { let mut max_pubkey = MIN_PUBKEY; reader - .scan_accounts(|stored_account| { + .scan_accounts(|_offset, stored_account| { if let Some(account) = 
expected_accounts_map.get(stored_account.pubkey()) { verify_test_account_with_footer( &stored_account, diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index fe5a0d213ae4d3..ca8952f60e4fae 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -2,7 +2,7 @@ use { crate::{ - account_info::AccountInfo, + account_info::{AccountInfo, Offset}, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, accounts_file::{MatchAccountOwnerError, StoredAccountsInfo}, append_vec::{IndexInfo, IndexInfoInner}, @@ -644,27 +644,39 @@ impl HotStorageReader { /// Iterate over all accounts and call `callback` with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfoWithoutData: the account itself, without account data + /// /// Note that account data is not read/passed to the callback. pub fn scan_accounts_without_data( &self, - mut callback: impl for<'local> FnMut(StoredAccountInfoWithoutData<'local>), + mut callback: impl for<'local> FnMut(Offset, StoredAccountInfoWithoutData<'local>), ) -> TieredStorageResult<()> { for i in 0..self.footer.account_entry_count { - self.get_stored_account_without_data_callback(IndexOffset(i), &mut callback)?; + self.get_stored_account_without_data_callback(IndexOffset(i), |account| { + callback(AccountInfo::reduced_offset_to_offset(i), account) + })?; } Ok(()) } /// Iterate over all accounts and call `callback` with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfo: the account itself, with account data + /// /// Prefer scan_accounts_without_data() when account data is not needed, /// as it can potentially read less and be faster. 
pub fn scan_accounts( &self, - mut callback: impl for<'local> FnMut(StoredAccountInfo<'local>), + mut callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), ) -> TieredStorageResult<()> { for i in 0..self.footer.account_entry_count { - self.get_stored_account_callback(IndexOffset(i), &mut callback)?; + self.get_stored_account_callback(IndexOffset(i), |account| { + callback(AccountInfo::reduced_offset_to_offset(i), account) + })?; } Ok(()) } @@ -1674,7 +1686,7 @@ mod tests { // verify everything let mut i = 0; hot_storage - .scan_accounts(|stored_account| { + .scan_accounts(|_offset, stored_account| { storable_accounts.account_default_if_zero_lamport(i, |account| { verify_test_account( &stored_account, diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 941881390fa934..21e8efc63b92cc 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -1,5 +1,6 @@ use { crate::{ + account_info::Offset, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, accounts_file::MatchAccountOwnerError, append_vec::IndexInfo, @@ -150,10 +151,14 @@ impl TieredStorageReader { /// Iterate over all accounts and call `callback` with each account. /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfoWithoutData: the account itself, without account data + /// /// Note that account data is not read/passed to the callback. pub fn scan_accounts_without_data( &self, - callback: impl for<'local> FnMut(StoredAccountInfoWithoutData<'local>), + callback: impl for<'local> FnMut(Offset, StoredAccountInfoWithoutData<'local>), ) -> TieredStorageResult<()> { match self { Self::Hot(hot) => hot.scan_accounts_without_data(callback), @@ -162,11 +167,15 @@ impl TieredStorageReader { /// Iterate over all accounts and call `callback` with each account. 
/// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfo: the account itself, with account data + /// /// Prefer scan_accounts_without_data() when account data is not needed, /// as it can potentially read less and be faster. pub fn scan_accounts( &self, - callback: impl for<'local> FnMut(StoredAccountInfo<'local>), + callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), ) -> TieredStorageResult<()> { match self { Self::Hot(hot) => hot.scan_accounts(callback), diff --git a/runtime/src/bank/accounts_lt_hash.rs b/runtime/src/bank/accounts_lt_hash.rs index ad87254cc00b55..75cd39578a416e 100644 --- a/runtime/src/bank/accounts_lt_hash.rs +++ b/runtime/src/bank/accounts_lt_hash.rs @@ -922,7 +922,7 @@ mod tests { // get all the lt hashes for each version of all accounts let mut stored_accounts_map = HashMap::<_, Vec<_>>::new(); for storage in &storages { - storage.accounts.scan_accounts(|account| { + storage.accounts.scan_accounts(|_offset, account| { let pubkey = account.pubkey(); let account_lt_hash = AccountsDb::lt_hash_account(&account, pubkey); stored_accounts_map From 1f147b837a497194977dcd1ed6b5aa25e81de831 Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Thu, 26 Jun 2025 19:15:51 +0200 Subject: [PATCH 099/124] turbine, bench: move cluster_info::broadcast_shreds_bench to merkle shreds (#6729) * move broadcast_shreds_bench to merkle variant * replace entries_to_shreds by merkle variant --- turbine/benches/cluster_info.rs | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/turbine/benches/cluster_info.rs b/turbine/benches/cluster_info.rs index 32dacbf969c798..6cf1b2e3564e6b 100644 --- a/turbine/benches/cluster_info.rs +++ b/turbine/benches/cluster_info.rs @@ -4,14 +4,16 @@ extern crate test; use { rand::{thread_rng, Rng}, + solana_entry::entry::Entry, solana_gossip::{ cluster_info::{ClusterInfo, Node}, 
contact_info::ContactInfo, }, + solana_hash::Hash, solana_keypair::Keypair, solana_ledger::{ genesis_utils::{create_genesis_config, GenesisConfigInfo}, - shred::{Shred, ShredFlags}, + shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, }, solana_net_utils::bind_to_unspecified, solana_pubkey as pubkey, @@ -38,17 +40,38 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { let leader_info = Node::new_localhost_with_pubkey(&leader_keypair.pubkey()); let cluster_info = ClusterInfo::new( leader_info.info, - leader_keypair, + leader_keypair.clone(), SocketAddrSpace::Unspecified, ); let socket = bind_to_unspecified().unwrap(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_benches(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); + let root_bank = bank_forks.read().unwrap().root_bank(); const NUM_SHREDS: usize = 32; - let shred = Shred::new_from_data(0, 0, 0, &[], ShredFlags::empty(), 0, 0, 0); - let shreds = vec![shred; NUM_SHREDS]; + + let shredder = Shredder::new( + root_bank.slot(), + root_bank.parent_slot(), + 0, // reference_tick + 0, // version + ) + .unwrap(); + + let entries = vec![Entry::new(&Hash::default(), 0, vec![])]; + let data_shreds = shredder.make_merkle_shreds_from_entries( + &leader_keypair, + &entries, + true, // is_last_in_slot + None, // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index + &ReedSolomonCache::default(), + &mut ProcessShredsStats::default(), + ); + let shreds: Vec<_> = data_shreds.take(NUM_SHREDS).collect(); + let mut stakes = HashMap::new(); const NUM_PEERS: usize = 200; for _ in 0..NUM_PEERS { From 16b5957456b289bc734561bce684e5b58c04d24b Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Thu, 26 Jun 2025 14:32:56 -0500 Subject: [PATCH 100/124] Changelog instructions (#6756) * Remove unused patch release instructions from changelog * Add changelog instructions for pruning the changelog when a new release branch is created --- CHANGELOG.md | 13 
+++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bcdf0cfa1e1cc7..b7b5324070c340 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -262,11 +262,8 @@ This simplifies the process of diffing between versions of the log. * Update the edge, beta, and stable links * Create new section: `vx.y+1.0 - Unreleased` * Remove `Unreleased` annotation from vx.y.0 section. -* Create vx.y branch starting at that commit -* Tag that commit as vx.y.0 - -### When creating a new patch release: -* Commit to the release branch updating the changelog: - * Remove `Unreleased` annotation from `vx.y.z` section - * Add a new section at the top for `vx.y.z+1 - Unreleased` -* Tag that new commit as the new release +* Create vx.y branch starting at that commit. +* Commit to `vx.y` updating the changelog: + * Remove the `vx.y+1.0 - Unreleased` section + * Remove the channel links +* Tag vx.y.0 on the new branch From b19af553ab5d532389dffb07dc718ee617786df6 Mon Sep 17 00:00:00 2001 From: Kamil Skalski Date: Thu, 26 Jun 2025 21:37:30 +0200 Subject: [PATCH 101/124] Use more canonical imports with io::Result instead of IoResult (#6754) * Use more canonical imports with io::Result instead of IoResult * Remove other IoResult uses --- accounts-db/src/accounts_db.rs | 9 +++---- accounts-db/src/buffered_reader.rs | 6 ++--- accounts-db/src/hardened_unpack.rs | 18 +++++++------- accounts-db/src/tiered_storage/byte_block.rs | 14 +++++------ accounts-db/src/tiered_storage/file.rs | 26 ++++++++++---------- accounts-db/src/tiered_storage/mmap_utils.rs | 8 +++--- core/src/accounts_hash_verifier.rs | 6 ++--- runtime/src/snapshot_utils.rs | 14 +++++------ 8 files changed, 50 insertions(+), 51 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 05ff007754d687..739789c3d1217e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -99,8 +99,7 @@ use { collections::{BTreeSet, HashMap, 
HashSet, VecDeque}, fs, hash::{Hash as StdHash, Hasher as StdHasher}, - io::Result as IoResult, - iter, mem, + io, iter, mem, num::{NonZeroUsize, Saturating}, ops::{Range, RangeBounds}, path::{Path, PathBuf}, @@ -1273,11 +1272,11 @@ impl AccountStorageEntry { } } -pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec, Vec)> { - let temp_dirs: IoResult> = (0..count).map(|_| TempDir::new()).collect(); +pub fn get_temp_accounts_paths(count: u32) -> io::Result<(Vec, Vec)> { + let temp_dirs: io::Result> = (0..count).map(|_| TempDir::new()).collect(); let temp_dirs = temp_dirs?; - let paths: IoResult> = temp_dirs + let paths: io::Result> = temp_dirs .iter() .map(|temp_dir| { utils::create_accounts_run_and_snapshot_dirs(temp_dir) diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs index f0c36e00e10d35..dde980acbbdc4d 100644 --- a/accounts-db/src/buffered_reader.rs +++ b/accounts-db/src/buffered_reader.rs @@ -12,7 +12,7 @@ use { crate::{append_vec::ValidSlice, file_io::read_more_buffer}, std::{ fs::File, - io::{BufRead, BufReader, Result as IoResult}, + io::{self, BufRead, BufReader}, mem::MaybeUninit, ops::Range, path::Path, @@ -128,7 +128,7 @@ where T: Backing, { /// read to make sure we have the minimum amount of data - pub fn read(&mut self) -> IoResult { + pub fn read(&mut self) -> io::Result { let must_read = self .read_requirements .unwrap_or(self.default_min_read_requirement); @@ -191,7 +191,7 @@ impl<'a, const N: usize> BufferedReader<'a, Stack> { pub fn large_file_buf_reader( path: impl AsRef, buf_size: usize, -) -> IoResult> { +) -> io::Result> { #[cfg(target_os = "linux")] if agave_io_uring::io_uring_supported() { use crate::io_uring::sequential_file_reader::SequentialFileReader; diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index 2e0adaa42d32f1..638e6a151e3645 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -7,7 +7,7 @@ use { std::{ 
collections::{HashMap, VecDeque}, fs::{self, File}, - io::{BufReader, Read, Result as IoResult}, + io::{self, BufReader, Read}, path::{ Component::{self, CurDir, Normal}, Path, PathBuf, @@ -73,7 +73,7 @@ impl Default for MultiBytes { } impl Read for MultiBytes { - fn read(&mut self, mut buf: &mut [u8]) -> IoResult { + fn read(&mut self, mut buf: &mut [u8]) -> io::Result { let mut copied_len = 0; while let Some(bytes) = self.0.front_mut() { let to_copy_len = bytes.len().min(buf.len()); @@ -107,7 +107,7 @@ impl BytesChannelReader { } impl Read for BytesChannelReader { - fn read(&mut self, buf: &mut [u8]) -> IoResult { + fn read(&mut self, buf: &mut [u8]) -> io::Result { while self.current_bytes.is_empty() { let Ok(new_bytes) = self.receiver.recv() else { return Ok(0); @@ -149,7 +149,7 @@ impl ArchiveChunker { pub fn decode_and_send_chunks( mut self, chunk_sender: crossbeam_channel::Sender, - ) -> IoResult<()> { + ) -> io::Result<()> { // Bytes for chunk of archive to be sent to workers for unpacking let mut current_chunk = MultiBytes::new(); while self.refill_decoded_buf()? { @@ -174,7 +174,7 @@ impl ArchiveChunker { } /// Take as many bytes as possible from decoded data until last entry boundary. - fn take_complete_archive(&mut self) -> IoResult { + fn take_complete_archive(&mut self) -> io::Result { let mut archive = Archive::new(self.current_decoded.as_ref()); let mut completed_entry_end = 0; @@ -224,7 +224,7 @@ impl ArchiveChunker { /// Re-fill decoded buffer such that it has minimum bytes to decode TAR header. /// /// Return `false` on EOF - fn refill_decoded_buf(&mut self) -> IoResult { + fn refill_decoded_buf(&mut self) -> io::Result { if self.current_decoded.len() < Self::TAR_BLOCK_SIZE { let mut next_buffer = self.get_next_buffer(); if !self.current_decoded.is_empty() { @@ -247,7 +247,7 @@ impl ArchiveChunker { } /// Fill `decode_buf` with data from `self.input`. 
- fn decode_bytes(&mut self, mut decode_buf: BytesMut) -> IoResult { + fn decode_bytes(&mut self, mut decode_buf: BytesMut) -> io::Result { let mut_slice = unsafe { std::slice::from_raw_parts_mut(decode_buf.as_mut_ptr(), decode_buf.capacity()) }; @@ -418,7 +418,7 @@ where return Ok(()); #[cfg(unix)] - fn set_perms(dst: &Path, mode: u32) -> IoResult<()> { + fn set_perms(dst: &Path, mode: u32) -> io::Result<()> { use std::os::unix::fs::PermissionsExt; let perm = fs::Permissions::from_mode(mode as _); @@ -426,7 +426,7 @@ where } #[cfg(windows)] - fn set_perms(dst: &Path, _mode: u32) -> IoResult<()> { + fn set_perms(dst: &Path, _mode: u32) -> io::Result<()> { let mut perm = fs::metadata(dst)?.permissions(); // This is OK for Windows, but clippy doesn't realize we're doing this // only on Windows. diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index de783b3f5e2a8f..7436ae7b9710c1 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -4,7 +4,7 @@ use { crate::tiered_storage::{footer::AccountBlockFormat, meta::AccountMetaOptionalFields}, std::{ - io::{Cursor, Read, Result as IoResult, Write}, + io::{self, Cursor, Read, Write}, mem, ptr, }, }; @@ -56,7 +56,7 @@ impl ByteBlockWriter { /// Write plain ol' data to the internal buffer of the ByteBlockWriter instance /// /// Prefer this over `write_type()`, as it prevents some undefined behavior. - pub fn write_pod(&mut self, value: &T) -> IoResult { + pub fn write_pod(&mut self, value: &T) -> io::Result { // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. unsafe { self.write_type(value) } } @@ -72,7 +72,7 @@ impl ByteBlockWriter { /// Caller must ensure casting T to bytes is safe. /// Refer to the Safety sections in std::slice::from_raw_parts() /// and bytemuck's Pod and NoUninit for more information. 
- pub unsafe fn write_type(&mut self, value: &T) -> IoResult { + pub unsafe fn write_type(&mut self, value: &T) -> io::Result { let size = mem::size_of::(); let ptr = ptr::from_ref(value).cast(); // SAFETY: The caller ensures that `value` contains no uninitialized bytes, @@ -90,7 +90,7 @@ impl ByteBlockWriter { pub fn write_optional_fields( &mut self, opt_fields: &AccountMetaOptionalFields, - ) -> IoResult { + ) -> io::Result { let mut size = 0; if let Some(rent_epoch) = opt_fields.rent_epoch { size += self.write_pod(&rent_epoch)?; @@ -103,7 +103,7 @@ impl ByteBlockWriter { /// Write the specified typed bytes to the internal buffer of the /// ByteBlockWriter instance. - pub fn write(&mut self, buf: &[u8]) -> IoResult<()> { + pub fn write(&mut self, buf: &[u8]) -> io::Result<()> { match &mut self.encoder { ByteBlockEncoder::Raw(cursor) => cursor.write_all(buf)?, ByteBlockEncoder::Lz4(lz4_encoder) => lz4_encoder.write_all(buf)?, @@ -114,7 +114,7 @@ impl ByteBlockWriter { /// Flush the internal byte buffer that collects all the previous writes /// into an encoded byte array. - pub fn finish(self) -> IoResult> { + pub fn finish(self) -> io::Result> { match self.encoder { ByteBlockEncoder::Raw(cursor) => Ok(cursor.into_inner()), ByteBlockEncoder::Lz4(lz4_encoder) => { @@ -173,7 +173,7 @@ impl ByteBlockReader { /// /// Note that calling this function with AccountBlockFormat::AlignedRaw encoding /// will result in panic as the input is already decoded. 
- pub fn decode(encoding: AccountBlockFormat, input: &[u8]) -> IoResult> { + pub fn decode(encoding: AccountBlockFormat, input: &[u8]) -> io::Result> { match encoding { AccountBlockFormat::Lz4 => { let mut decoder = lz4::Decoder::new(input).unwrap(); diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 4728134a4f990c..aecb0b3a18b5d1 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -3,7 +3,7 @@ use { bytemuck::{AnyBitPattern, NoUninit, Zeroable}, std::{ fs::{File, OpenOptions}, - io::{BufWriter, Read, Result as IoResult, Seek, SeekFrom, Write}, + io::{self, BufWriter, Read, Seek, SeekFrom, Write}, mem, path::Path, ptr, @@ -43,7 +43,7 @@ impl TieredReadableFile { Ok(file) } - pub fn new_writable(file_path: impl AsRef) -> IoResult { + pub fn new_writable(file_path: impl AsRef) -> io::Result { Ok(Self( OpenOptions::new() .create_new(true) @@ -68,7 +68,7 @@ impl TieredReadableFile { /// Reads a value of type `T` from the file. /// /// Type T must be plain ol' data. - pub fn read_pod(&self, value: &mut T) -> IoResult<()> { + pub fn read_pod(&self, value: &mut T) -> io::Result<()> { // SAFETY: Since T is AnyBitPattern, it is safe to cast bytes to T. unsafe { self.read_type(value) } } @@ -83,7 +83,7 @@ impl TieredReadableFile { /// Caller must ensure casting bytes to T is safe. /// Refer to the Safety sections in std::slice::from_raw_parts() /// and bytemuck's Pod and AnyBitPattern for more information. 
- pub unsafe fn read_type(&self, value: &mut T) -> IoResult<()> { + pub unsafe fn read_type(&self, value: &mut T) -> io::Result<()> { let ptr = ptr::from_mut(value).cast(); // SAFETY: The caller ensures it is safe to cast bytes to T, // we ensure the size is safe by querying T directly, @@ -92,15 +92,15 @@ impl TieredReadableFile { self.read_bytes(bytes) } - pub fn seek(&self, offset: u64) -> IoResult { + pub fn seek(&self, offset: u64) -> io::Result { (&self.0).seek(SeekFrom::Start(offset)) } - pub fn seek_from_end(&self, offset: i64) -> IoResult { + pub fn seek_from_end(&self, offset: i64) -> io::Result { (&self.0).seek(SeekFrom::End(offset)) } - pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { + pub fn read_bytes(&self, buffer: &mut [u8]) -> io::Result<()> { (&self.0).read_exact(buffer) } } @@ -109,7 +109,7 @@ impl TieredReadableFile { pub struct TieredWritableFile(pub BufWriter); impl TieredWritableFile { - pub fn new(file_path: impl AsRef) -> IoResult { + pub fn new(file_path: impl AsRef) -> io::Result { Ok(Self(BufWriter::new( OpenOptions::new() .create_new(true) @@ -121,7 +121,7 @@ impl TieredWritableFile { /// Writes `value` to the file. /// /// `value` must be plain ol' data. - pub fn write_pod(&mut self, value: &T) -> IoResult { + pub fn write_pod(&mut self, value: &T) -> io::Result { // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. unsafe { self.write_type(value) } } @@ -136,21 +136,21 @@ impl TieredWritableFile { /// Caller must ensure casting T to bytes is safe. /// Refer to the Safety sections in std::slice::from_raw_parts() /// and bytemuck's Pod and NoUninit for more information. 
- pub unsafe fn write_type(&mut self, value: &T) -> IoResult { + pub unsafe fn write_type(&mut self, value: &T) -> io::Result { let ptr = ptr::from_ref(value).cast(); let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; self.write_bytes(bytes) } - pub fn seek(&mut self, offset: u64) -> IoResult { + pub fn seek(&mut self, offset: u64) -> io::Result { self.0.seek(SeekFrom::Start(offset)) } - pub fn seek_from_end(&mut self, offset: i64) -> IoResult { + pub fn seek_from_end(&mut self, offset: i64) -> io::Result { self.0.seek(SeekFrom::End(offset)) } - pub fn write_bytes(&mut self, bytes: &[u8]) -> IoResult { + pub fn write_bytes(&mut self, bytes: &[u8]) -> io::Result { self.0.write_all(bytes)?; Ok(bytes.len()) diff --git a/accounts-db/src/tiered_storage/mmap_utils.rs b/accounts-db/src/tiered_storage/mmap_utils.rs index 4047f1c6f4d1c4..d14b8c0f6a8e70 100644 --- a/accounts-db/src/tiered_storage/mmap_utils.rs +++ b/accounts-db/src/tiered_storage/mmap_utils.rs @@ -1,9 +1,9 @@ -use {crate::u64_align, log::*, memmap2::Mmap, std::io::Result as IoResult}; +use {crate::u64_align, log::*, memmap2::Mmap, std::io}; /// Borrows a value of type `T` from `mmap` /// /// Type T must be plain ol' data to ensure no undefined behavior. -pub fn get_pod(mmap: &Mmap, offset: usize) -> IoResult<(&T, usize)> { +pub fn get_pod(mmap: &Mmap, offset: usize) -> io::Result<(&T, usize)> { // SAFETY: Since T is AnyBitPattern, it is safe to cast bytes to T. unsafe { get_type::(mmap, offset) } } @@ -17,7 +17,7 @@ pub fn get_pod(mmap: &Mmap, offset: usize) -> IoResu /// Caller must ensure casting bytes to T is safe. /// Refer to the Safety sections in std::slice::from_raw_parts() /// and bytemuck's Pod and AnyBitPattern for more information. 
-pub unsafe fn get_type(mmap: &Mmap, offset: usize) -> IoResult<(&T, usize)> { +pub unsafe fn get_type(mmap: &Mmap, offset: usize) -> io::Result<(&T, usize)> { let (data, next) = get_slice(mmap, offset, std::mem::size_of::())?; let ptr = data.as_ptr().cast(); debug_assert!(ptr as usize % std::mem::align_of::() == 0); @@ -31,7 +31,7 @@ pub unsafe fn get_type(mmap: &Mmap, offset: usize) -> IoResult<(&T, usize)> { /// doesn't overrun the internal buffer. Otherwise return an Error. /// Also return the offset of the first byte after the requested data that /// falls on a 64-byte boundary. -pub fn get_slice(mmap: &Mmap, offset: usize, size: usize) -> IoResult<(&[u8], usize)> { +pub fn get_slice(mmap: &Mmap, offset: usize, size: usize) -> io::Result<(&[u8], usize)> { let (next, overflow) = offset.overflowing_add(size); if overflow || next > mmap.len() { error!( diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 082bf59ba9e996..a2c64d62fa17c4 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -24,7 +24,7 @@ use { snapshot_utils, }, std::{ - io::Result as IoResult, + io, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, @@ -217,7 +217,7 @@ impl AccountsHashVerifier { accounts_package: AccountsPackage, pending_snapshot_packages: &Mutex, snapshot_config: &SnapshotConfig, - ) -> IoResult<()> { + ) -> io::Result<()> { let (merkle_or_lattice_accounts_hash, bank_incremental_snapshot_persistence) = Self::calculate_and_verify_accounts_hash(&accounts_package, snapshot_config)?; @@ -239,7 +239,7 @@ impl AccountsHashVerifier { fn calculate_and_verify_accounts_hash( accounts_package: &AccountsPackage, snapshot_config: &SnapshotConfig, - ) -> IoResult<( + ) -> io::Result<( MerkleOrLatticeAccountsHash, Option, )> { diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 2505056d89bf1b..675f6b5ddc38ff 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ 
-38,7 +38,7 @@ use { cmp::Ordering, collections::{HashMap, HashSet}, fmt, fs, - io::{BufReader, BufWriter, Error as IoError, Read, Result as IoResult, Seek, Write}, + io::{self, BufReader, BufWriter, Error as IoError, Read, Seek, Write}, mem, num::{NonZeroU64, NonZeroUsize}, ops::RangeInclusive, @@ -587,7 +587,7 @@ pub enum GetSnapshotAccountsHardLinkDirError { pub fn clean_orphaned_account_snapshot_dirs( bank_snapshots_dir: impl AsRef, account_snapshot_paths: &[PathBuf], -) -> IoResult<()> { +) -> io::Result<()> { // Create the HashSet of the account snapshot hardlink directories referenced by the snapshot dirs. // This is used to clean up any hardlinks that are no longer referenced by the snapshot dirs. let mut account_snapshot_dirs_referenced = HashSet::new(); @@ -678,7 +678,7 @@ fn is_bank_snapshot_complete(bank_snapshot_dir: impl AsRef) -> bool { } /// Marks the bank snapshot as complete -fn write_snapshot_state_complete_file(bank_snapshot_dir: impl AsRef) -> IoResult<()> { +fn write_snapshot_state_complete_file(bank_snapshot_dir: impl AsRef) -> io::Result<()> { let state_complete_path = bank_snapshot_dir .as_ref() .join(SNAPSHOT_STATE_COMPLETE_FILENAME); @@ -695,7 +695,7 @@ fn write_snapshot_state_complete_file(bank_snapshot_dir: impl AsRef) -> Io pub fn write_full_snapshot_slot_file( bank_snapshot_dir: impl AsRef, full_snapshot_slot: Slot, -) -> IoResult<()> { +) -> io::Result<()> { let full_snapshot_slot_path = bank_snapshot_dir .as_ref() .join(SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME); @@ -712,7 +712,7 @@ pub fn write_full_snapshot_slot_file( } // Reads the full snapshot slot file from the bank snapshot dir -pub fn read_full_snapshot_slot_file(bank_snapshot_dir: impl AsRef) -> IoResult { +pub fn read_full_snapshot_slot_file(bank_snapshot_dir: impl AsRef) -> io::Result { const SLOT_SIZE: usize = std::mem::size_of::(); let full_snapshot_slot_path = bank_snapshot_dir .as_ref() @@ -735,7 +735,7 @@ pub fn read_full_snapshot_slot_file(bank_snapshot_dir: impl AsRef) 
-> IoRe } /// Writes the 'snapshot storages have been flushed' file to the bank snapshot dir -pub fn write_storages_flushed_file(bank_snapshot_dir: impl AsRef) -> IoResult<()> { +pub fn write_storages_flushed_file(bank_snapshot_dir: impl AsRef) -> io::Result<()> { let flushed_storages_path = bank_snapshot_dir .as_ref() .join(SNAPSHOT_STORAGES_FLUSHED_FILENAME); @@ -1740,7 +1740,7 @@ fn streaming_unarchive_snapshot( fn archive_chunker_from_path( archive_path: &Path, archive_format: ArchiveFormat, -) -> IoResult>>> { +) -> io::Result>>> { const INPUT_READER_BUF_SIZE: usize = 128 * 1024 * 1024; let buf_reader = solana_accounts_db::large_file_buf_reader(archive_path, INPUT_READER_BUF_SIZE) .map_err(|err| { From 4e33b7896317fbde84da29c5ce51ce1efe416d27 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Thu, 26 Jun 2025 15:00:51 -0500 Subject: [PATCH 102/124] Multihoming: Hotswap gossip socketaddr (#6474) * add admin rpc for gossip ip/port change. add channel for relaying adminrpc command to gossipservice * wip. add streamer atomicudpsocket for receiver_atomic * finish up streamer responder * address alex's comments * need to know when socket is initially set * remove rebind socket channel. 
bind in adminrpc * change GossipSocket to SocketKind * remove GossipRebinder now that channel is gone * rm double arc and switch Socket::gossip from UdpSocket to AtomicUdpSocket --- Cargo.lock | 5 +- Cargo.toml | 1 + core/src/admin_rpc_post_init.rs | 2 + core/src/validator.rs | 3 +- gossip/src/cluster_info.rs | 20 +++- gossip/src/gossip_service.rs | 12 ++- programs/sbf/Cargo.lock | 1 + streamer/Cargo.toml | 1 + streamer/src/atomic_udp_socket.rs | 90 ++++++++++++++++ streamer/src/lib.rs | 1 + streamer/src/streamer.rs | 158 ++++++++++++++++++++++------- svm/examples/Cargo.lock | 1 + validator/src/admin_rpc_service.rs | 36 ++++++- validator/src/bootstrap.rs | 12 ++- 14 files changed, 290 insertions(+), 53 deletions(-) create mode 100644 streamer/src/atomic_udp_socket.rs diff --git a/Cargo.lock b/Cargo.lock index 99bcbf223e9cfb..dcc4281fcdf5a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -703,9 +703,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "ark-bn254" @@ -10680,6 +10680,7 @@ dependencies = [ name = "solana-streamer" version = "3.0.0" dependencies = [ + "arc-swap", "assert_matches", "async-channel", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 6699f9b296812e..189d6d901ac248 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -192,6 +192,7 @@ ahash = "0.8.11" anyhow = "1.0.98" aquamarine = "0.6.0" arbitrary = "1.4.1" +arc-swap = "1.7.1" ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs index 3227bc4e0d5a8c..61d1f1e5123a62 100644 --- a/core/src/admin_rpc_post_init.rs +++ b/core/src/admin_rpc_post_init.rs @@ -7,6 +7,7 @@ use { solana_pubkey::Pubkey, solana_quic_definitions::NotifyKeyUpdate, 
solana_runtime::bank_forks::BankForks, + solana_streamer::atomic_udp_socket::AtomicUdpSocket, std::{ collections::{HashMap, HashSet}, net::UdpSocket, @@ -78,4 +79,5 @@ pub struct AdminRpcRequestMetadataPostInit { pub repair_socket: Arc, pub outstanding_repair_requests: Arc>>, pub cluster_slots: Arc, + pub gossip_socket: Option, } diff --git a/core/src/validator.rs b/core/src/validator.rs index 14dc77dade397a..6722cc48c511a5 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1320,7 +1320,7 @@ impl Validator { let gossip_service = GossipService::new( &cluster_info, Some(bank_forks.clone()), - node.sockets.gossip, + node.sockets.gossip.clone(), config.gossip_validators.clone(), should_check_duplicate_instance, Some(stats_reporter_sender.clone()), @@ -1681,6 +1681,7 @@ impl Validator { repair_socket: Arc::new(node.sockets.repair), outstanding_repair_requests, cluster_slots, + gossip_socket: Some(node.sockets.gossip.clone()), }); Ok(Self { diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 3830dc6bc45de3..8e6d4d6ead7781 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -73,6 +73,7 @@ use { solana_signature::Signature, solana_signer::Signer, solana_streamer::{ + atomic_udp_socket::AtomicUdpSocket, packet, quic::DEFAULT_QUIC_ENDPOINTS, socket::SocketAddrSpace, @@ -392,6 +393,15 @@ impl ClusterInfo { self.refresh_my_gossip_contact_info(); } + pub fn set_gossip_socket(&self, gossip_addr: SocketAddr) -> Result<(), ContactInfoError> { + self.my_contact_info + .write() + .unwrap() + .set_gossip(gossip_addr)?; + self.refresh_my_gossip_contact_info(); + Ok(()) + } + pub fn set_tpu(&self, tpu_addr: SocketAddr) -> Result<(), ContactInfoError> { self.my_contact_info.write().unwrap().set_tpu(tpu_addr)?; self.refresh_my_gossip_contact_info(); @@ -2298,7 +2308,7 @@ impl ClusterInfo { #[derive(Debug)] pub struct Sockets { - pub gossip: UdpSocket, + pub gossip: AtomicUdpSocket, pub ip_echo: Option, pub tvu: Vec, pub 
tvu_quic: UdpSocket, @@ -2536,7 +2546,7 @@ impl Node { Node { info, sockets: Sockets { - gossip, + gossip: AtomicUdpSocket::new(gossip), ip_echo: Some(ip_echo), tvu: vec![tvu], tvu_quic, @@ -2684,7 +2694,7 @@ impl Node { Node { info, sockets: Sockets { - gossip, + gossip: AtomicUdpSocket::new(gossip), ip_echo: Some(ip_echo), tvu: vec![tvu], tvu_quic, @@ -2858,7 +2868,7 @@ impl Node { info!("vortexor_receivers is {vortexor_receivers:?}"); trace!("new ContactInfo: {:?}", info); let sockets = Sockets { - gossip, + gossip: AtomicUdpSocket::new(gossip), tvu: tvu_sockets, tvu_quic, tpu: tpu_sockets, @@ -3338,7 +3348,7 @@ mod tests { } fn check_node_sockets(node: &Node, ip: IpAddr, range: (u16, u16)) { - check_socket(&node.sockets.gossip, ip, range); + check_socket(&node.sockets.gossip.load(), ip, range); check_socket(&node.sockets.repair, ip, range); check_socket(&node.sockets.tvu_quic, ip, range); diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 3d0b277ad8ec9e..26a912a546d6c5 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -18,6 +18,7 @@ use { solana_runtime::bank_forks::BankForks, solana_signer::Signer, solana_streamer::{ + atomic_udp_socket::AtomicUdpSocket, evicting_sender::EvictingSender, socket::SocketAddrSpace, streamer::{self, StreamerReceiveStats}, @@ -25,7 +26,7 @@ use { solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig}, std::{ collections::HashSet, - net::{SocketAddr, TcpListener, UdpSocket}, + net::{SocketAddr, TcpListener}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -45,7 +46,7 @@ impl GossipService { pub fn new( cluster_info: &Arc, bank_forks: Option>>, - gossip_socket: UdpSocket, + gossip_socket: AtomicUdpSocket, gossip_validators: Option>, should_check_duplicate_instance: bool, stats_reporter_sender: Option>>, @@ -61,7 +62,7 @@ impl GossipService { ); let socket_addr_space = *cluster_info.socket_addr_space(); let gossip_receiver_stats = 
Arc::new(StreamerReceiveStats::new("gossip_receiver")); - let t_receiver = streamer::receiver( + let t_receiver = streamer::receiver_atomic( "solRcvrGossip".to_string(), gossip_socket.clone(), exit.clone(), @@ -96,9 +97,9 @@ impl GossipService { gossip_validators, exit.clone(), ); - let t_responder = streamer::responder( + let t_responder = streamer::responder_atomic( "Gossip", - gossip_socket, + gossip_socket.clone(), response_receiver, socket_addr_space, stats_reporter_sender, @@ -371,6 +372,7 @@ pub fn make_gossip_node( if let Some(entrypoint) = entrypoint { cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint)); } + let gossip_socket = AtomicUdpSocket::new(gossip_socket); let cluster_info = Arc::new(cluster_info); let gossip_service = GossipService::new( &cluster_info, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 79190609fb621c..2df55255bff329 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -9032,6 +9032,7 @@ dependencies = [ name = "solana-streamer" version = "3.0.0" dependencies = [ + "arc-swap", "async-channel", "bytes", "crossbeam-channel", diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index b3ad45df3bdbba..c8716ae9bbe6aa 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -20,6 +20,7 @@ name = "solana_streamer" dev-context-only-utils = [] [dependencies] +arc-swap = { workspace = true } async-channel = { workspace = true } bytes = { workspace = true } crossbeam-channel = { workspace = true } diff --git a/streamer/src/atomic_udp_socket.rs b/streamer/src/atomic_udp_socket.rs new file mode 100644 index 00000000000000..e6ff4a838f7ba4 --- /dev/null +++ b/streamer/src/atomic_udp_socket.rs @@ -0,0 +1,90 @@ +use { + arc_swap::ArcSwap, + std::{ + net::{SocketAddr, UdpSocket}, + sync::Arc, + }, +}; + +/// Wrapper around UdpSocket that allows for atomic swapping of the socket. 
+#[derive(Clone, Debug)] +pub struct AtomicUdpSocket { + inner: Arc>, +} +impl AtomicUdpSocket { + pub fn new(sock: UdpSocket) -> Self { + Self { + inner: Arc::new(ArcSwap::from_pointee(sock)), + } + } + #[inline] + pub fn load(&self) -> Arc { + self.inner.load_full() + } + #[inline] + pub fn swap(&self, new_sock: UdpSocket) { + self.inner.store(Arc::new(new_sock)); + } + + pub fn local_addr(&self) -> std::io::Result { + self.inner.load().local_addr() + } +} + +pub enum CurrentSocket<'a> { + Same(&'a UdpSocket), + Changed(&'a UdpSocket), +} + +/// Trait for providing a socket. +pub trait SocketProvider { + fn current_socket(&mut self) -> CurrentSocket; + + #[inline] + fn current_socket_ref(&mut self) -> &UdpSocket { + match self.current_socket() { + CurrentSocket::Same(sock) | CurrentSocket::Changed(sock) => sock, + } + } +} + +/// Fixed UDP Socket -> default +pub struct FixedSocketProvider { + socket: Arc, +} +impl FixedSocketProvider { + pub fn new(socket: Arc) -> Self { + Self { socket } + } +} +impl SocketProvider for FixedSocketProvider { + #[inline] + fn current_socket(&mut self) -> CurrentSocket { + CurrentSocket::Same(&self.socket) + } +} + +/// Hot-swappable `AtomicUdpSocket` +pub struct AtomicSocketProvider { + atomic: Arc, + current: Arc, +} +impl AtomicSocketProvider { + pub fn new(atomic: Arc) -> Self { + let s = atomic.load(); + Self { atomic, current: s } + } +} +impl SocketProvider for AtomicSocketProvider { + // Check if the socket has changed since the last call + #[inline] + fn current_socket(&mut self) -> CurrentSocket { + let sock = self.atomic.load(); + if !Arc::ptr_eq(&sock, &self.current) { + self.current = sock; + CurrentSocket::Changed(&self.current) + } else { + CurrentSocket::Same(&self.current) + } + } +} diff --git a/streamer/src/lib.rs b/streamer/src/lib.rs index 60ee2b753f77e3..25fa9ae721d1b4 100644 --- a/streamer/src/lib.rs +++ b/streamer/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] +pub mod 
atomic_udp_socket; pub mod evicting_sender; pub mod msghdr; pub mod nonblocking; diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 7b05b9d31d0d3b..8cb8b80bdab98e 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -3,6 +3,10 @@ use { crate::{ + atomic_udp_socket::{ + AtomicSocketProvider, AtomicUdpSocket, CurrentSocket, FixedSocketProvider, + SocketProvider, + }, packet::{ self, PacketBatch, PacketBatchRecycler, PacketRef, PinnedPacketBatch, PACKETS_PER_BATCH, }, @@ -146,8 +150,8 @@ impl StreamerReceiveStats { pub type Result = std::result::Result; -fn recv_loop( - socket: &UdpSocket, +fn recv_loop( + provider: &mut P, exit: &AtomicBool, packet_batch_sender: &impl ChannelSend, recycler: &PacketBatchRecycler, @@ -157,7 +161,22 @@ fn recv_loop( in_vote_only_mode: Option>, is_staked_service: bool, ) -> Result<()> { + let mut has_set_timeout = false; loop { + let socket = match provider.current_socket() { + CurrentSocket::Changed(sock) => { + sock.set_read_timeout(Some(Duration::from_secs(1)))?; + has_set_timeout = true; + sock + } + CurrentSocket::Same(sock) => { + if !has_set_timeout { + sock.set_read_timeout(Some(Duration::from_secs(1)))?; + has_set_timeout = true; + } + sock + } + }; let mut packet_batch = if use_pinned_memory { PinnedPacketBatch::new_with_recycler(recycler, PACKETS_PER_BATCH, stats.name) } else { @@ -230,8 +249,41 @@ pub fn receiver( Builder::new() .name(thread_name) .spawn(move || { + let mut provider = FixedSocketProvider::new(socket); let _ = recv_loop( - &socket, + &mut provider, + &exit, + &packet_batch_sender, + &recycler, + &stats, + coalesce, + use_pinned_memory, + in_vote_only_mode, + is_staked_service, + ); + }) + .unwrap() +} + +#[allow(clippy::too_many_arguments)] +pub fn receiver_atomic( + thread_name: String, + socket: Arc, + exit: Arc, + packet_batch_sender: impl ChannelSend, + recycler: PacketBatchRecycler, + stats: Arc, + coalesce: Option, + use_pinned_memory: bool, + in_vote_only_mode: 
Option>, + is_staked_service: bool, +) -> JoinHandle<()> { + Builder::new() + .name(thread_name) + .spawn(move || { + let mut provider = AtomicSocketProvider::new(socket); + let _ = recv_loop( + &mut provider, &exit, &packet_batch_sender, &recycler, @@ -460,6 +512,27 @@ pub fn recv_packet_batches( Ok((packet_batches, num_packets, recv_duration)) } +pub fn responder_atomic( + name: &'static str, + sock: Arc, + r: PacketBatchReceiver, + socket_addr_space: SocketAddrSpace, + stats_reporter_sender: Option>>, +) -> JoinHandle<()> { + Builder::new() + .name(format!("solRspndr{name}")) + .spawn(move || { + responder_loop( + AtomicSocketProvider::new(sock), + name, + r, + socket_addr_space, + stats_reporter_sender, + ); + }) + .unwrap() +} + pub fn responder( name: &'static str, sock: Arc, @@ -470,41 +543,58 @@ pub fn responder( Builder::new() .name(format!("solRspndr{name}")) .spawn(move || { - let mut errors = 0; - let mut last_error = None; - let mut last_print = 0; - let mut stats = None; + responder_loop( + FixedSocketProvider::new(sock), + name, + r, + socket_addr_space, + stats_reporter_sender, + ); + }) + .unwrap() +} - if stats_reporter_sender.is_some() { - stats = Some(StreamerSendStats::default()); - } +fn responder_loop( + mut provider: P, + name: &'static str, + r: PacketBatchReceiver, + socket_addr_space: SocketAddrSpace, + stats_reporter_sender: Option>>, +) { + let mut errors = 0; + let mut last_error = None; + let mut last_print = 0; + let mut stats = None; + + if stats_reporter_sender.is_some() { + stats = Some(StreamerSendStats::default()); + } - loop { - if let Err(e) = recv_send(&sock, &r, &socket_addr_space, &mut stats) { - match e { - StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break, - StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (), - _ => { - errors += 1; - last_error = Some(e); - } - } - } - let now = timestamp(); - if now - last_print > 1000 && errors != 0 { - datapoint_info!(name, ("errors", errors, i64),); - 
info!("{} last-error: {:?} count: {}", name, last_error, errors); - last_print = now; - errors = 0; - } - if let Some(ref stats_reporter_sender) = stats_reporter_sender { - if let Some(ref mut stats) = stats { - stats.maybe_submit(name, stats_reporter_sender); - } + loop { + let sock = provider.current_socket_ref(); + if let Err(e) = recv_send(sock, &r, &socket_addr_space, &mut stats) { + match e { + StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break, + StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (), + _ => { + errors += 1; + last_error = Some(e); } } - }) - .unwrap() + } + let now = timestamp(); + if now - last_print > 1000 && errors != 0 { + datapoint_info!(name, ("errors", errors, i64),); + info!("{} last-error: {:?} count: {}", name, last_error, errors); + last_print = now; + errors = 0; + } + if let Some(ref stats_reporter_sender) = stats_reporter_sender { + if let Some(ref mut stats) = stats { + stats.maybe_submit(name, stats_reporter_sender); + } + } + } } #[cfg(test)] diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index f2a9f352f3f7d7..7565178de7cf5f 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -8097,6 +8097,7 @@ dependencies = [ name = "solana-streamer" version = "3.0.0" dependencies = [ + "arc-swap", "async-channel", "bytes", "crossbeam-channel", diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 75a1412e4eba4b..f127542634bc05 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -18,6 +18,7 @@ use { solana_geyser_plugin_manager::GeyserPluginManagerRequest, solana_gossip::contact_info::{ContactInfo, Protocol, SOCKET_ADDR_UNSPECIFIED}, solana_keypair::{read_keypair_file, Keypair}, + solana_net_utils::sockets::bind_to, solana_pubkey::Pubkey, solana_rpc::rpc::verify_pubkey, solana_rpc_client_api::{config::RpcAccountIndex, custom_error::RpcCustomError}, @@ -27,7 +28,7 @@ use { collections::{HashMap, HashSet}, 
env, error, fmt::{self, Display}, - net::SocketAddr, + net::{IpAddr, SocketAddr}, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, Ordering}, @@ -215,6 +216,9 @@ pub trait AdminRpc { #[rpc(meta, name = "contactInfo")] fn contact_info(&self, meta: Self::Metadata) -> Result; + #[rpc(meta, name = "setGossipSocket")] + fn set_gossip_socket(&self, meta: Self::Metadata, ip: String, port: u16) -> Result<()>; + #[rpc(meta, name = "repairShredFromPeer")] fn repair_shred_from_peer( &self, @@ -534,6 +538,35 @@ impl AdminRpc for AdminRpcImpl { meta.with_post_init(|post_init| Ok(post_init.cluster_info.my_contact_info().into())) } + fn set_gossip_socket(&self, meta: Self::Metadata, ip: String, port: u16) -> Result<()> { + let ip: IpAddr = ip + .parse() + .map_err(|e| jsonrpc_core::Error::invalid_params(format!("Invalid IP address: {e}")))?; + let new_addr = SocketAddr::new(ip, port); + + meta.with_post_init(|post_init| { + if let Some(socket) = &post_init.gossip_socket { + let new_socket = bind_to(new_addr.ip(), new_addr.port()).map_err(|e| { + jsonrpc_core::Error::invalid_params(format!("Gossip socket rebind failed: {e}")) + })?; + + // hot-swap new socket + socket.swap(new_socket); + + // update gossip socket in cluster info + post_init + .cluster_info + .set_gossip_socket(new_addr) + .map_err(|e| { + jsonrpc_core::Error::invalid_params(format!( + "Failed to refresh gossip ContactInfo: {e}" + )) + })?; + } + Ok(()) + }) + } + fn repair_shred_from_peer( &self, meta: Self::Metadata, @@ -994,6 +1027,7 @@ mod tests { cluster_slots: Arc::new( solana_core::cluster_slots_service::cluster_slots::ClusterSlots::default(), ), + gossip_socket: None, }))), staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), rpc_to_plugin_manager_sender: None, diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 9ed819668a796d..08e9ef7b3889eb 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -24,10 +24,10 @@ use { snapshot_utils, }, 
solana_signer::Signer, - solana_streamer::socket::SocketAddrSpace, + solana_streamer::{atomic_udp_socket::AtomicUdpSocket, socket::SocketAddrSpace}, std::{ collections::{hash_map::RandomState, HashMap, HashSet}, - net::{SocketAddr, TcpListener, TcpStream, UdpSocket}, + net::{SocketAddr, TcpListener, TcpStream}, path::Path, process::exit, sync::{ @@ -77,7 +77,9 @@ fn verify_reachable_ports( .map(|addr| socket_addr_space.check(addr)) .unwrap_or_default() }; - let mut udp_sockets = vec![&node.sockets.gossip, &node.sockets.repair]; + + let gossip_socket = node.sockets.gossip.load(); + let mut udp_sockets = vec![&gossip_socket, &node.sockets.repair]; if verify_address(&node.info.serve_repair(Protocol::UDP)) { udp_sockets.push(&node.sockets.serve_repair); @@ -141,7 +143,7 @@ fn start_gossip_node( cluster_entrypoints: &[ContactInfo], ledger_path: &Path, gossip_addr: &SocketAddr, - gossip_socket: UdpSocket, + gossip_socket: AtomicUdpSocket, expected_shred_version: u16, gossip_validators: Option>, should_check_duplicate_instance: bool, @@ -611,7 +613,7 @@ pub fn rpc_bootstrap( .info .gossip() .expect("Operator must spin up node with valid gossip address"), - node.sockets.gossip.try_clone().unwrap(), + node.sockets.gossip.clone(), validator_config .expected_shred_version .expect("expected_shred_version should not be None"), From cfa4c2ce0005c82c3998ca4999ae9910ab0422a8 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 26 Jun 2025 17:22:33 -0400 Subject: [PATCH 103/124] Scans each storage only once when building accounts index (#6745) --- accounts-db/src/accounts_db.rs | 43 ++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 739789c3d1217e..7c1fb9d50408f3 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -61,7 +61,7 @@ use { active_stats::{ActiveStatItem, ActiveStats}, ancestors::Ancestors, 
ancient_append_vecs::get_ancient_append_vec_capacity, - append_vec::{aligned_stored_size, STORE_META_OVERHEAD}, + append_vec::{aligned_stored_size, IndexInfo, IndexInfoInner, STORE_META_OVERHEAD}, cache_hash_data::{CacheHashData, DeletionPolicy as CacheHashDeletionPolicy}, contains::Contains, epoch_accounts_hash::EpochAccountsHashManager, @@ -7818,7 +7818,8 @@ impl AccountsDb { let (dirty_pubkeys, insert_time_us, mut generate_index_results) = { let mut items_local = Vec::default(); - storage.accounts.scan_index(|info| { + // this closure is the shared code when scanning the storage + let mut itemizer = |info: IndexInfo| { stored_size_alive += info.stored_size_aligned; if info.index_info.lamports > 0 { accounts_data_len += info.index_info.data_len; @@ -7828,8 +7829,34 @@ impl AccountsDb { zero_lamport_pubkeys.push(info.index_info.pubkey); } items_local.push(info.index_info); - }); + }; + if secondary { + // WITH secondary indexes -- scan accounts WITH account data + storage.accounts.scan_accounts(|offset, account| { + let data_len = account.data.len() as u64; + let stored_size_aligned = + storage.accounts.calculate_stored_size(data_len as usize); + let info = IndexInfo { + stored_size_aligned, + index_info: IndexInfoInner { + offset, + pubkey: *account.pubkey, + lamports: account.lamports, + data_len, + }, + }; + itemizer(info); + self.accounts_index.update_secondary_indexes( + account.pubkey, + &account, + &self.account_indexes, + ); + }); + } else { + // withOUT secondary indexes -- scan accounts withOUT account data + storage.accounts.scan_index(itemizer); + } let items_len = items_local.len(); let items = items_local.into_iter().map(|info| { ( @@ -7843,16 +7870,6 @@ impl AccountsDb { self.accounts_index .insert_new_if_missing_into_primary_index(slot, items_len, items) }; - if secondary { - // scan storage a second time to update the secondary index - storage.accounts.scan_accounts(|_offset, stored_account| { - self.accounts_index.update_secondary_indexes( - 
stored_account.pubkey(), - &stored_account, - &self.account_indexes, - ); - }); - } if let Some(duplicates_this_slot) = std::mem::take(&mut generate_index_results.duplicates) { // there were duplicate pubkeys in this same slot From eb4306a89a0ff35a6e91a8a879054b07f519ba6a Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 27 Jun 2025 11:42:25 +0800 Subject: [PATCH 104/124] ci: add 'NO_INSTALL' flag to ci/rust-version.sh (#6663) --- ci/docker/env.sh | 2 +- ci/rust-version.sh | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ci/docker/env.sh b/ci/docker/env.sh index 8631ec4b72b53b..a54c3620e2c28e 100755 --- a/ci/docker/env.sh +++ b/ci/docker/env.sh @@ -3,7 +3,7 @@ ci_docker_env_sh_here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # shellcheck disable=SC1091 -source "${ci_docker_env_sh_here}/../rust-version.sh" +NO_INSTALL=1 source "${ci_docker_env_sh_here}/../rust-version.sh" if [[ -z "${rust_stable}" || -z "${rust_nightly}" ]]; then echo "Error: rust_stable or rust_nightly is empty. Please check rust-version.sh." >&2 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 3cdc89b1cfc0e4..8bb41d9003717c 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -37,6 +37,10 @@ export rust_stable="$stable_version" export rust_nightly=nightly-"$nightly_version" +if [[ -n $NO_INSTALL ]]; then + return +fi + [[ -z $1 ]] || ( rustup_install() { From b362948243d59218165b350429cb9e6a6fa41264 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 18:17:54 +0800 Subject: [PATCH 105/124] build(deps): bump indexmap from 2.9.0 to 2.10.0 (#6760) * build(deps): bump indexmap from 2.9.0 to 2.10.0 Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.9.0 to 2.10.0. 
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.9.0...2.10.0) --- updated-dependencies: - dependency-name: indexmap dependency-version: 2.10.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 30 +++++++++++++++--------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 24 ++++++++++++------------ svm/examples/Cargo.lock | 26 +++++++++++++------------- 4 files changed, 41 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dcc4281fcdf5a7..9a85c066cc6459 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3157,7 +3157,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util 0.7.15", @@ -3776,9 +3776,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", "hashbrown 0.15.1", @@ -4689,7 +4689,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "memchr", ] @@ -6309,7 +6309,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "itoa", "ryu", "serde", @@ -6740,7 +6740,7 @@ dependencies = [ "criterion", "crossbeam-channel", "dashmap", - "indexmap 2.9.0", + "indexmap 2.10.0", 
"io-uring", "itertools 0.12.1", "libc", @@ -7530,7 +7530,7 @@ dependencies = [ "dashmap", "futures 0.3.31", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "quinn", @@ -7770,7 +7770,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "rand 0.8.5", @@ -8528,7 +8528,7 @@ dependencies = [ "criterion", "crossbeam-channel", "flate2", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "log", "lru", @@ -10690,7 +10690,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "libc", "log", @@ -11087,7 +11087,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "pickledb", "serde", @@ -11161,7 +11161,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "rayon", @@ -11580,7 +11580,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "libc", "log", @@ -13002,7 +13002,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "toml_datetime", "winnow 0.5.16", ] @@ -13013,7 +13013,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index 189d6d901ac248..7a9b8edbaeb335 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ humantime = "2.2.0" hyper = "0.14.32" hyper-proxy = "0.9.1" im = "15.1.0" -indexmap = "2.9.0" +indexmap = "2.10.0" indicatif = "0.17.11" io-uring = "0.7.8" itertools = 
"0.12.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2df55255bff329..1863642e6aa8dc 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2284,7 +2284,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util 0.7.15", @@ -2871,9 +2871,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", "hashbrown 0.15.1", @@ -3798,7 +3798,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "memchr", ] @@ -5147,7 +5147,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "itoa", "ryu", "serde", @@ -5440,7 +5440,7 @@ dependencies = [ "bzip2", "crossbeam-channel", "dashmap", - "indexmap 2.9.0", + "indexmap 2.10.0", "io-uring", "itertools 0.12.1", "libc", @@ -5851,7 +5851,7 @@ dependencies = [ "dashmap", "futures 0.3.31", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "quinn", @@ -6026,7 +6026,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "log", "rand 0.8.5", "rayon", @@ -6575,7 +6575,7 @@ dependencies = [ "clap", "crossbeam-channel", "flate2", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "log", "lru", @@ -9041,7 +9041,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "libc", "log", @@ -9372,7 
+9372,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "rayon", @@ -10886,7 +10886,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "toml_datetime", "winnow", ] diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 7565178de7cf5f..1ea8b24c38657a 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -2157,7 +2157,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util 0.7.15", @@ -2745,9 +2745,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -3670,7 +3670,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.2", - "indexmap 2.9.0", + "indexmap 2.10.0", "memchr", ] @@ -3928,7 +3928,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.9.0", + "indexmap 2.10.0", ] [[package]] @@ -4995,7 +4995,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "itoa", "ryu", "serde", @@ -5287,7 +5287,7 @@ dependencies = [ "bzip2", "crossbeam-channel", "dashmap", - "indexmap 2.9.0", + "indexmap 2.10.0", "io-uring", 
"itertools 0.12.1", "libc", @@ -5698,7 +5698,7 @@ dependencies = [ "dashmap", "futures 0.3.31", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "quinn", @@ -5873,7 +5873,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "log", "rand 0.8.5", "rayon", @@ -6387,7 +6387,7 @@ dependencies = [ "clap", "crossbeam-channel", "flate2", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "log", "lru", @@ -8106,7 +8106,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.9.0", + "indexmap 2.10.0", "itertools 0.12.1", "libc", "log", @@ -8472,7 +8472,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.9.0", + "indexmap 2.10.0", "indicatif", "log", "rayon", @@ -9983,7 +9983,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "toml_datetime", "winnow", ] From d676f0e110d1ab141bd260eec18583ac83e7b11f Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 27 Jun 2025 23:34:54 +0800 Subject: [PATCH 106/124] ci: improve step descriptions (#6664) * get steps more explicit * Update ci/stable/run-local-cluster-partially.sh Co-authored-by: Will Hickey * Update ci/stable/run-partition.sh Co-authored-by: Will Hickey --------- Co-authored-by: Will Hickey --- .buildkite/scripts/build-stable.sh | 34 +++++++++++++++--------- ci/stable/run-local-cluster-partially.sh | 19 ++++++++----- ci/stable/run-partition.sh | 26 +++++++++--------- 3 files changed, 45 insertions(+), 34 deletions(-) diff --git a/.buildkite/scripts/build-stable.sh b/.buildkite/scripts/build-stable.sh index c6d1e4442be42d..5c9e66647fd025 100755 --- a/.buildkite/scripts/build-stable.sh +++ b/.buildkite/scripts/build-stable.sh @@ -8,31 +8,37 @@ source "$here"/common.sh agent="${1-solana}" -partitions=$( - cat < 
Date: Fri, 27 Jun 2025 08:54:52 -0700 Subject: [PATCH 107/124] Removing info from calc_accounts_to_combine loop (#6710) --- accounts-db/src/ancient_append_vecs.rs | 53 +++++++++++++++----------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 506de947025ccc..5b1ec697156bff 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -759,11 +759,6 @@ impl AccountsDb { tuning: &PackedAncientStorageTuning, mut many_ref_slots: IncludeManyRefSlots, ) -> AccountsToCombine<'a> { - let mut alive_bytes = accounts_per_storage - .iter() - .map(|a| a.0.alive_bytes) - .sum::(); - // reverse sort by slot # accounts_per_storage.sort_unstable_by(|a, b| b.0.slot.cmp(&a.0.slot)); let mut accounts_keep_slots = HashMap::default(); @@ -788,25 +783,26 @@ impl AccountsDb { }) .collect::>(); + let mut alive_bytes = accounts_to_combine + .iter() + .map(|a| a.alive_total_bytes) + .sum::(); + let mut many_refs_old_alive_count = 0; let mut remove = Vec::default(); let mut last_slot = None; - for (i, (shrink_collect, (info, _unique_accounts))) in accounts_to_combine - .iter_mut() - .zip(accounts_per_storage.iter()) - .enumerate() - { + for (i, shrink_collect) in accounts_to_combine.iter_mut().enumerate() { // If 0 < alive_bytes < `ideal_storage_size`, then `min_resulting_packed_slots` = 0. // We obviously require 1 packed slot if we have at least 1 alive byte. // We want ceiling, so we add 1. let min_resulting_packed_slots = - alive_bytes.saturating_sub(1) / u64::from(tuning.ideal_storage_size) + 1; + alive_bytes.saturating_sub(1) as u64 / u64::from(tuning.ideal_storage_size) + 1; // assert that iteration is in descending slot order since the code below relies on this. 
if let Some(last_slot) = last_slot { - assert!(last_slot > info.slot); + assert!(last_slot > shrink_collect.slot); } - last_slot = Some(info.slot); + last_slot = Some(shrink_collect.slot); let many_refs_old_alive = &mut shrink_collect.alive_accounts.many_refs_old_alive; if many_ref_slots == IncludeManyRefSlots::Skip @@ -836,7 +832,7 @@ impl AccountsDb { .many_ref_slots_skipped .fetch_add(1, Ordering::Relaxed); // since we're skipping this one, we don't count it as required target storages - alive_bytes = alive_bytes.saturating_sub(info.alive_bytes); + alive_bytes = alive_bytes.saturating_sub(shrink_collect.alive_total_bytes); remove.push(i); continue; } @@ -872,11 +868,12 @@ impl AccountsDb { remove.push(i); continue; } - accounts_keep_slots.insert(info.slot, std::mem::take(many_refs_old_alive)); + accounts_keep_slots + .insert(shrink_collect.slot, std::mem::take(many_refs_old_alive)); } else { // No alive accounts in this slot have a ref_count > 1. So, ALL alive accounts in this slot can be written to any other slot // we find convenient. There is NO other instance of any account to conflict with. 
- target_slots_sorted.push(info.slot); + target_slots_sorted.push(shrink_collect.slot); } } let unpackable_slots_count = remove.len(); @@ -1612,7 +1609,8 @@ pub mod tests { // all accounts have 1 ref or all accounts have 2 refs solana_logger::setup(); - let alive_bytes_per_slot = 2; + let data_size = 48; + let alive_bytes_per_slot = aligned_stored_size(data_size as usize) as u64; // pack 2.5 ancient slots into 1 packed slot ideally let tuning = PackedAncientStorageTuning { @@ -1624,7 +1622,7 @@ pub mod tests { for unsorted_slots in [false, true] { for two_refs in [false, true] { let (db, mut storages, _slots, mut infos) = - get_sample_storages(num_slots, None); + get_sample_storages(num_slots, Some(data_size)); infos.iter_mut().for_each(|a| { a.alive_bytes += alive_bytes_per_slot; @@ -1708,6 +1706,15 @@ pub mod tests { // n storages // 1 account each // all accounts have 1 ref or all accounts have 2 refs + let data_size = 48; + let alive_bytes_per_account = aligned_stored_size(data_size as usize) as u64; + + // pack 1 account into a slot ideally + let tuning = PackedAncientStorageTuning { + ideal_storage_size: NonZeroU64::new(alive_bytes_per_account).unwrap(), + ..default_tuning() + }; + for many_ref_slots in [IncludeManyRefSlots::Skip, IncludeManyRefSlots::Include] { for add_dead_account in [true, false] { for method in TestWriteMultipleRefs::iter() { @@ -1715,10 +1722,10 @@ pub mod tests { for unsorted_slots in [false, true] { for two_refs in [false, true] { let (db, mut storages, slots, mut infos) = - get_sample_storages(num_slots, None); - infos.iter_mut().for_each(|a| { - a.alive_bytes += 1; - }); + get_sample_storages(num_slots, Some(data_size)); + infos + .iter_mut() + .for_each(|a| a.alive_bytes += alive_bytes_per_account); let slots_vec; if unsorted_slots { @@ -1776,7 +1783,7 @@ pub mod tests { let accounts_to_combine = db.calc_accounts_to_combine( &mut accounts_per_storage, - &default_tuning(), + &tuning, many_ref_slots, ); // if we are only trying to 
pack a single slot of multi-refs, it will succeed From b948b97d2a08850f56146074c0be9727202ceeff Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Fri, 27 Jun 2025 19:14:32 +0300 Subject: [PATCH 108/124] Update Solana staking economics link to current official URL (#6764) --- docs/src/operations/guides/vote-accounts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/operations/guides/vote-accounts.md b/docs/src/operations/guides/vote-accounts.md index b90ad74c496efd..636a3806bfaaae 100644 --- a/docs/src/operations/guides/vote-accounts.md +++ b/docs/src/operations/guides/vote-accounts.md @@ -40,7 +40,7 @@ The address of a vote account is never needed to sign any transactions, but is just used to look up the account information. When someone wants to -[delegate tokens in a stake account](https://solana.com/docs/economics/staking), +[delegate tokens in a stake account](https://solana.com/staking), the delegation command is pointed at the vote account address of the validator to whom the token-holder wants to delegate. From 517971f427aac9d48648452f2e4b5ba4e84b7491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Sat, 28 Jun 2025 19:31:46 +0200 Subject: [PATCH 109/124] Fix - Direct mapping CPI caller privilege escalation after ownership transfer (#6709) * Demonstrate the issue by giving a no-op callee a readonly instruction account instead of a writable one. * Fixes the issue by updating the MemoryRegion of the caller if it changed the owner at the CPI call edge. * Feature gates the change. 
--- programs/bpf_loader/src/syscalls/cpi.rs | 13 ++++++++----- programs/sbf/rust/invoke/src/lib.rs | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index e640760da79896..e54a8b6a8849dd 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -881,11 +881,12 @@ where direct_mapping, )?; - let caller_account = if instruction_account.is_writable || update_caller { - Some(caller_account) - } else { - None - }; + let caller_account = + if instruction_account.is_writable || (direct_mapping && update_caller) { + Some(caller_account) + } else { + None + }; accounts.push((instruction_account.index_in_caller, caller_account)); } else { ic_msg!( @@ -1184,6 +1185,8 @@ fn update_callee_account( // Change the owner at the end so that we are allowed to change the lamports and data before if callee_account.get_owner() != caller_account.owner { callee_account.set_owner(caller_account.owner.as_ref())?; + // caller gave ownership and thus write access away, so caller must be updated + must_update_caller = true; } Ok(must_update_caller) diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index 3701a3c523d00a..b75cce77aabe66 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ b/programs/sbf/rust/invoke/src/lib.rs @@ -742,7 +742,7 @@ fn process_instruction<'a>( &create_instruction( *invoked_program_id, &[ - (accounts[ARGUMENT_INDEX].key, true, false), + (accounts[ARGUMENT_INDEX].key, false, false), (invoked_program_id, false, false), ], vec![RETURN_OK], From 528d984671c18e870ee94461a400751c8feddfbd Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 30 Jun 2025 10:37:09 +0800 Subject: [PATCH 110/124] chore: use local solana-zk-sdk (#6767) use local solana-zk-sdk --- programs/sbf/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml 
index 17f3bdfbe5b10f..0e5dc9a8d362f5 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -168,7 +168,7 @@ solana-transaction-status = { path = "../../transaction-status", version = "=3.0 solana-type-overrides = { path = "../../type-overrides", version = "=3.0.0" } solana-vote = { path = "../../vote", version = "=3.0.0" } solana-vote-program = { path = "../../programs/vote", version = "=3.0.0" } -solana-zk-sdk = "=2.2.1" +solana-zk-sdk = { path = "../../zk-sdk", version = "=3.0.0" } thiserror = "1.0" [features] From 0f647dd4a6bff9eff2ac6da15c0a286adaf03d6e Mon Sep 17 00:00:00 2001 From: puhtaytow <18026645+puhtaytow@users.noreply.github.com> Date: Mon, 30 Jun 2025 22:35:46 +0200 Subject: [PATCH 111/124] turbine, bench: move remaining benches to merkle variant (#6747) * move get_retransmit_peers_deterministic to merkle variant * drop unused imports * move imports to correct place * use hash default instead of unique / not needed * remove num_simulated_shreds entirely * one iter less * the possible arithmetics side effects is a lie / clippy override --- turbine/benches/cluster_nodes.rs | 44 +++++++++++++++++--------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/turbine/benches/cluster_nodes.rs b/turbine/benches/cluster_nodes.rs index 385e6e7a558944..c576cf96df4ce4 100644 --- a/turbine/benches/cluster_nodes.rs +++ b/turbine/benches/cluster_nodes.rs @@ -7,7 +7,9 @@ use { solana_clock::Slot, solana_cluster_type::ClusterType, solana_gossip::contact_info::ContactInfo, - solana_ledger::shred::{Shred, ShredFlags}, + solana_hash::Hash, + solana_keypair::Keypair, + solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, solana_pubkey::Pubkey, solana_streamer::socket::SocketAddrSpace, solana_turbine::{ @@ -17,8 +19,6 @@ use { test::Bencher, }; -const NUM_SIMULATED_SHREDS: usize = 4; - fn make_cluster_nodes( rng: &mut R, unstaked_ratio: Option<(u32, u32)>, @@ -29,25 +29,31 @@ fn make_cluster_nodes( (nodes, cluster_nodes) 
} +#[allow(clippy::arithmetic_side_effects)] fn get_retransmit_peers_deterministic( cluster_nodes: &ClusterNodes, slot: Slot, slot_leader: &Pubkey, - num_simulated_shreds: usize, ) { - let parent_offset = u16::from(slot != 0); - for i in 0..num_simulated_shreds { - let index = i as u32; - let shred = Shred::new_from_data( - slot, - index, - parent_offset, - &[], - ShredFlags::empty(), - 0, - 0, - 0, - ); + let keypair = Keypair::new(); + let merkle_root = Some(Hash::default()); + let reed_solomon_cache = ReedSolomonCache::default(); + let mut stats = ProcessShredsStats::default(); + let parent_slot = if slot > 0 { slot - 1 } else { 0 }; + let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap(); + + let shreds = shredder.make_merkle_shreds_from_entries( + &keypair, + &[], // entries + true, // is_last_in_slot + merkle_root, + 0, // next_shred_index + 0, // next_code_index + &reed_solomon_cache, + &mut stats, + ); + + for shred in shreds { let _retransmit_peers = cluster_nodes.get_retransmit_addrs( slot_leader, &shred.id(), @@ -62,9 +68,7 @@ fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: O let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio); let slot_leader = *nodes[1..].choose(&mut rng).unwrap().pubkey(); let slot = rand::random::(); - b.iter(|| { - get_retransmit_peers_deterministic(&cluster_nodes, slot, &slot_leader, NUM_SIMULATED_SHREDS) - }); + b.iter(|| get_retransmit_peers_deterministic(&cluster_nodes, slot, &slot_leader)); } #[bench] From 34efa812feca4ecfe01dd3ec266de07bf224b802 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Mon, 30 Jun 2025 17:34:25 -0400 Subject: [PATCH 112/124] ff cleanup: vote_only_full_fec_sets (#6741) --- ledger/src/blockstore.rs | 49 ++++------------------------------------ 1 file changed, 4 insertions(+), 45 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index f8ae798b090945..69023321c30a30 100644 --- a/ledger/src/blockstore.rs +++ 
b/ledger/src/blockstore.rs @@ -203,9 +203,7 @@ impl LastFECSetCheckResults { &self, feature_set: &FeatureSet, ) -> std::result::Result, BlockstoreProcessorError> { - if feature_set.is_active(&agave_feature_set::vote_only_full_fec_sets::id()) - && self.last_fec_set_merkle_root.is_none() - { + if self.last_fec_set_merkle_root.is_none() { return Err(BlockstoreProcessorError::IncompleteFinalFecSet); } else if feature_set .is_active(&agave_feature_set::vote_only_retransmitter_signed_fec_sets::id()) @@ -3787,10 +3785,7 @@ impl Blockstore { "Unable to check the last fec set for slot {slot} {bank_hash}, \ marking as dead: {results:?}", ); - if feature_set.is_active(&agave_feature_set::vote_only_full_fec_sets::id()) { - return Err(BlockstoreProcessorError::IncompleteFinalFecSet); - } - return Ok(None); + return Err(BlockstoreProcessorError::IncompleteFinalFecSet); }; // Update metrics if results.last_fec_set_merkle_root.is_none() { @@ -5347,7 +5342,6 @@ pub mod tests { leader_schedule::{FixedSchedule, IdentityKeyedLeaderSchedule}, shred::{max_ticks_per_n_shreds, ShredFlags, LEGACY_SHRED_DATA_CAPACITY}, }, - agave_feature_set::{vote_only_full_fec_sets, vote_only_retransmitter_signed_fec_sets}, assert_matches::assert_matches, bincode::{serialize, Options}, crossbeam_channel::unbounded, @@ -11859,11 +11853,7 @@ pub mod tests { #[test] fn test_last_fec_set_check_results() { let enabled_feature_set = FeatureSet::all_enabled(); - let disabled_feature_set = FeatureSet::default(); - let mut full_only = FeatureSet::default(); - full_only.activate(&vote_only_full_fec_sets::id(), 0); - let mut retransmitter_only = FeatureSet::default(); - retransmitter_only.activate(&vote_only_retransmitter_signed_fec_sets::id(), 0); + let full_only = FeatureSet::default(); let results = LastFECSetCheckResults { last_fec_set_merkle_root: None, @@ -11877,14 +11867,6 @@ pub mod tests { results.get_last_fec_set_merkle_root(&full_only), Err(BlockstoreProcessorError::IncompleteFinalFecSet) ); - 
assert_matches!( - results.get_last_fec_set_merkle_root(&retransmitter_only), - Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet) - ); - assert!(results - .get_last_fec_set_merkle_root(&disabled_feature_set) - .unwrap() - .is_none()); let block_id = Hash::new_unique(); let results = LastFECSetCheckResults { @@ -11899,16 +11881,6 @@ pub mod tests { results.get_last_fec_set_merkle_root(&full_only).unwrap(), Some(block_id) ); - assert_matches!( - results.get_last_fec_set_merkle_root(&retransmitter_only), - Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet) - ); - assert_eq!( - results - .get_last_fec_set_merkle_root(&disabled_feature_set) - .unwrap(), - Some(block_id) - ); let results = LastFECSetCheckResults { last_fec_set_merkle_root: None, @@ -11922,26 +11894,13 @@ pub mod tests { results.get_last_fec_set_merkle_root(&full_only), Err(BlockstoreProcessorError::IncompleteFinalFecSet) ); - assert!(results - .get_last_fec_set_merkle_root(&retransmitter_only) - .unwrap() - .is_none()); - assert!(results - .get_last_fec_set_merkle_root(&disabled_feature_set) - .unwrap() - .is_none()); let block_id = Hash::new_unique(); let results = LastFECSetCheckResults { last_fec_set_merkle_root: Some(block_id), is_retransmitter_signed: true, }; - for feature_set in [ - enabled_feature_set, - disabled_feature_set, - full_only, - retransmitter_only, - ] { + for feature_set in [enabled_feature_set, full_only] { assert_eq!( results.get_last_fec_set_merkle_root(&feature_set).unwrap(), Some(block_id) From 825f16d3d5d0349b012a3a53e627d136312859f0 Mon Sep 17 00:00:00 2001 From: Jon C Date: Mon, 30 Jun 2025 19:35:36 -0400 Subject: [PATCH 113/124] accounts-db: Remove usage of RentCollector (#6781) #### Problem The RentCollector type is included in accounts-db types, but it's not actually used for anything. 
The only place it *is* used is to figure out the number of slots in an epoch, but it looks like that call site only ever passes in `0` anyway, from `RentCollector::default()`. #### Summary of changes Remove RentCollector from types and interfaces in accounts-db. --- accounts-bench/src/main.rs | 2 - accounts-db/benches/accounts.rs | 4 +- accounts-db/src/accounts_db.rs | 16 ++++---- .../src/accounts_db/scan_account_storage.rs | 5 +-- accounts-db/src/accounts_db/tests.rs | 40 ++++++------------- accounts-db/src/accounts_hash.rs | 6 +-- core/src/accounts_hash_verifier.rs | 10 ++++- core/tests/epoch_accounts_hash.rs | 2 +- ledger-tool/src/main.rs | 1 - runtime/src/accounts_background_service.rs | 2 +- runtime/src/bank.rs | 9 +---- runtime/src/serde_snapshot/tests.rs | 16 +++----- runtime/src/snapshot_bank_utils.rs | 2 +- 13 files changed, 45 insertions(+), 70 deletions(-) diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 8af7b9a0b54aa8..0b2052065963b6 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -16,7 +16,6 @@ use { solana_epoch_schedule::EpochSchedule, solana_measure::measure::Measure, solana_pubkey::Pubkey, - solana_rent_collector::RentCollector, std::{env, fs, path::PathBuf, sync::Arc}, }; @@ -131,7 +130,6 @@ fn main() { &ancestors, None, &EpochSchedule::default(), - &RentCollector::default(), true, ); time_store.stop(); diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index c9151fe1903263..bbab6a5c1d88ff 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -18,9 +18,9 @@ use { accounts_index::ScanConfig, ancestors::Ancestors, }, + solana_clock::Epoch, solana_hash::Hash, solana_pubkey::Pubkey, - solana_rent_collector::RentCollector, solana_sysvar::epoch_schedule::EpochSchedule, std::{ collections::{HashMap, HashSet}, @@ -64,7 +64,7 @@ fn bench_accounts_hash_bank_hash(bencher: &mut Bencher) { ancestors: &ancestors, test_hash_calculation: false, 
epoch_schedule: &EpochSchedule::default(), - rent_collector: &RentCollector::default(), + epoch: Epoch::default(), ignore_mismatch: false, store_detailed_debug_info: false, use_bg_thread_pool: false, diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7c1fb9d50408f3..bc5c7609e04976 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -91,7 +91,6 @@ use { solana_nohash_hasher::{BuildNoHashHasher, IntMap, IntSet}, solana_pubkey::Pubkey, solana_rayon_threadlimit::get_thread_count, - solana_rent_collector::RentCollector, solana_transaction::sanitized::SanitizedTransaction, std::{ borrow::Cow, @@ -185,8 +184,8 @@ pub struct VerifyAccountsHashAndLamportsConfig<'a> { pub test_hash_calculation: bool, /// epoch_schedule pub epoch_schedule: &'a EpochSchedule, - /// rent_collector - pub rent_collector: &'a RentCollector, + /// epoch + pub epoch: Epoch, /// true to ignore mismatches pub ignore_mismatch: bool, /// true to dump debug log if mismatch happens @@ -6256,7 +6255,6 @@ impl AccountsDb { ancestors, None, &EpochSchedule::default(), - &RentCollector::default(), is_startup, ) } @@ -6417,9 +6415,9 @@ impl AccountsDb { ancestors: &Ancestors, expected_capitalization: Option, epoch_schedule: &EpochSchedule, - rent_collector: &RentCollector, is_startup: bool, ) -> (AccountsHash, u64) { + let epoch = epoch_schedule.get_epoch(slot); let (accounts_hash, total_lamports) = self.calculate_accounts_hash_with_verify_from( data_source, debug_verify, @@ -6428,7 +6426,7 @@ impl AccountsDb { use_bg_thread_pool: !is_startup, ancestors: Some(ancestors), epoch_schedule, - rent_collector, + epoch, store_detailed_debug_info_on_failure: false, }, expected_capitalization, @@ -6772,7 +6770,7 @@ impl AccountsDb { use_bg_thread_pool: config.use_bg_thread_pool, ancestors: Some(config.ancestors), epoch_schedule: config.epoch_schedule, - rent_collector: config.rent_collector, + epoch: config.epoch, store_detailed_debug_info_on_failure: 
config.store_detailed_debug_info, }; let hash_mismatch_is_error = !config.ignore_mismatch; @@ -8788,13 +8786,13 @@ impl<'a> VerifyAccountsHashAndLamportsConfig<'a> { pub fn new_for_test( ancestors: &'a Ancestors, epoch_schedule: &'a EpochSchedule, - rent_collector: &'a RentCollector, + epoch: Epoch, ) -> Self { Self { ancestors, test_hash_calculation: true, epoch_schedule, - rent_collector, + epoch, ignore_mismatch: false, store_detailed_debug_info: false, use_bg_thread_pool: false, diff --git a/accounts-db/src/accounts_db/scan_account_storage.rs b/accounts-db/src/accounts_db/scan_account_storage.rs index 53d718e910022f..335883e058382c 100644 --- a/accounts-db/src/accounts_db/scan_account_storage.rs +++ b/accounts-db/src/accounts_db/scan_account_storage.rs @@ -224,10 +224,7 @@ impl AccountsDb { config.epoch_schedule, snapshot_storages.max_slot_inclusive(), ); - let slots_per_epoch = config - .rent_collector - .epoch_schedule - .get_slots_in_epoch(config.rent_collector.epoch); + let slots_per_epoch = config.epoch_schedule.get_slots_in_epoch(config.epoch); let one_epoch_old = snapshot_storages .range() .end diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index 594ba050d0442f..5276cf784c9da9 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -2105,8 +2105,6 @@ fn test_hash_stored_account() { pub static EPOCH_SCHEDULE: std::sync::LazyLock = std::sync::LazyLock::new(EpochSchedule::default); -pub static RENT_COLLECTOR: std::sync::LazyLock = - std::sync::LazyLock::new(RentCollector::default); impl CalcAccountsHashConfig<'_> { pub(crate) fn default() -> Self { @@ -2114,7 +2112,7 @@ impl CalcAccountsHashConfig<'_> { use_bg_thread_pool: false, ancestors: None, epoch_schedule: &EPOCH_SCHEDULE, - rent_collector: &RENT_COLLECTOR, + epoch: 0, store_detailed_debug_info_on_failure: false, } } @@ -2131,17 +2129,14 @@ fn test_verify_accounts_hash() { let account = AccountSharedData::new(1, 
some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); let epoch_schedule = EpochSchedule::default(); - let rent_collector = RentCollector::default(); + let epoch = Epoch::default(); db.store_for_tests(some_slot, &[(&key, &account)]); db.add_root_and_flush_write_cache(some_slot); let (_, capitalization) = db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); - let config = VerifyAccountsHashAndLamportsConfig::new_for_test( - &ancestors, - &epoch_schedule, - &rent_collector, - ); + let config = + VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); assert_matches!( db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()), @@ -2181,12 +2176,9 @@ fn test_verify_bank_capitalization() { let account = AccountSharedData::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); let epoch_schedule = EpochSchedule::default(); - let rent_collector = RentCollector::default(); - let config = VerifyAccountsHashAndLamportsConfig::new_for_test( - &ancestors, - &epoch_schedule, - &rent_collector, - ); + let epoch = Epoch::default(); + let config = + VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); db.store_for_tests(some_slot, &[(&key, &account)]); if pass == 0 { @@ -2235,12 +2227,9 @@ fn test_verify_accounts_hash_no_account() { db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true); let epoch_schedule = EpochSchedule::default(); - let rent_collector = RentCollector::default(); - let config = VerifyAccountsHashAndLamportsConfig::new_for_test( - &ancestors, - &epoch_schedule, - &rent_collector, - ); + let epoch = Epoch::default(); + let config = + VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); assert_matches!( db.verify_accounts_hash_and_lamports_for_tests(some_slot, 0, config), @@ -2267,12 +2256,9 @@ fn test_verify_accounts_hash_bad_account_hash() { 
db.add_root_and_flush_write_cache(some_slot); let epoch_schedule = EpochSchedule::default(); - let rent_collector = RentCollector::default(); - let config = VerifyAccountsHashAndLamportsConfig::new_for_test( - &ancestors, - &epoch_schedule, - &rent_collector, - ); + let epoch = Epoch::default(); + let config = + VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); assert_matches!( db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config), diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index a892c72ed8720f..b960118c372fb1 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -8,12 +8,11 @@ use { bytemuck_derive::{Pod, Zeroable}, log::*, rayon::prelude::*, - solana_clock::Slot, + solana_clock::{Epoch, Slot}, solana_hash::{Hash, HASH_BYTES}, solana_lattice_hash::lt_hash::LtHash, solana_measure::{measure::Measure, measure_us}, solana_pubkey::Pubkey, - solana_rent_collector::RentCollector, solana_sha256_hasher::Hasher, solana_sysvar::epoch_schedule::EpochSchedule, std::{ @@ -97,9 +96,10 @@ pub struct CalcAccountsHashConfig<'a> { /// does hash calc need to consider account data that exists in the write cache? /// if so, 'ancestors' will be used for this purpose as well as storages. 
pub epoch_schedule: &'a EpochSchedule, - pub rent_collector: &'a RentCollector, /// used for tracking down hash mismatches after the fact pub store_detailed_debug_info_on_failure: bool, + /// used to calculate the number of slots in the given epoch + pub epoch: Epoch, } // smallest, 3 quartiles, largest, average diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index a2c64d62fa17c4..e824fb74eb8605 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -335,11 +335,14 @@ impl AccountsHashVerifier { }; timings.calc_storage_size_quartiles(&accounts_package.snapshot_storages); + let epoch = accounts_package + .epoch_schedule + .get_epoch(accounts_package.slot); let calculate_accounts_hash_config = CalcAccountsHashConfig { use_bg_thread_pool: true, ancestors: None, epoch_schedule: &accounts_package.epoch_schedule, - rent_collector: &accounts_package.rent_collector, + epoch, store_detailed_debug_info_on_failure: false, }; @@ -403,11 +406,14 @@ impl AccountsHashVerifier { }); let sorted_storages = SortedStorages::new_with_slots(incremental_storages, None, None); + let epoch = accounts_package + .epoch_schedule + .get_epoch(accounts_package.slot); let calculate_accounts_hash_config = CalcAccountsHashConfig { use_bg_thread_pool: true, ancestors: None, epoch_schedule: &accounts_package.epoch_schedule, - rent_collector: &accounts_package.rent_collector, + epoch, store_detailed_debug_info_on_failure: false, }; diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 9f33a1cdf81030..3afebaf9cd9f19 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -321,7 +321,7 @@ fn test_epoch_accounts_hash_basic(test_environment: TestEnvironment) { use_bg_thread_pool: false, ancestors: Some(&bank.ancestors), epoch_schedule: bank.epoch_schedule(), - rent_collector: bank.rent_collector(), + epoch: bank.epoch(), store_detailed_debug_info_on_failure: 
false, }, ); diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index f13ea3b53600cd..b2069893b48b4e 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -3118,7 +3118,6 @@ fn main() { assert_capitalization(&bank); println!("Inflation: {:?}", bank.inflation()); - println!("RentCollector: {:?}", bank.rent_collector()); println!("Capitalization: {}", Sol(bank.capitalization())); } } diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 2e0b7ccdd348ca..c7c8d654e74a40 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -338,7 +338,7 @@ impl SnapshotRequestHandler { use_bg_thread_pool: true, ancestors: None, epoch_schedule: snapshot_root_bank.epoch_schedule(), - rent_collector: snapshot_root_bank.rent_collector(), + epoch: snapshot_root_bank.epoch(), store_detailed_debug_info_on_failure: false, }, ); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8ccc5073523e9b..c7dc8caa797f1c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5262,7 +5262,7 @@ impl Bank { let verify_config = VerifyAccountsHashAndLamportsConfig { ancestors: &self.ancestors, epoch_schedule: self.epoch_schedule(), - rent_collector: self.rent_collector(), + epoch: self.epoch(), test_hash_calculation: config.test_hash_calculation, ignore_mismatch: config.ignore_mismatch, store_detailed_debug_info: config.store_hash_raw_data_for_debug, @@ -5278,7 +5278,6 @@ impl Bank { let accounts_ = Arc::clone(&accounts); let ancestors = self.ancestors.clone(); let epoch_schedule = self.epoch_schedule().clone(); - let rent_collector = self.rent_collector().clone(); let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone(); accounts.accounts_db.verify_accounts_hash_in_bg.start(|| { Builder::new() @@ -5328,7 +5327,6 @@ impl Bank { VerifyAccountsHashAndLamportsConfig { ancestors: &ancestors, epoch_schedule: &epoch_schedule, - rent_collector: 
&rent_collector, ..verify_config }, )); @@ -5533,7 +5531,6 @@ impl Bank { &self.ancestors, None, self.epoch_schedule(), - &self.rent_collector, is_startup, ) .1 @@ -5668,7 +5665,6 @@ impl Bank { &self.ancestors, Some(self.capitalization()), self.epoch_schedule(), - &self.rent_collector, is_startup, ); if total_lamports != self.capitalization() { @@ -5692,7 +5688,6 @@ impl Bank { &self.ancestors, Some(self.capitalization()), self.epoch_schedule(), - &self.rent_collector, is_startup, ); @@ -5712,7 +5707,7 @@ impl Bank { use_bg_thread_pool: true, ancestors: None, // does not matter, will not be used epoch_schedule: &self.epoch_schedule, - rent_collector: &self.rent_collector, + epoch: self.epoch, store_detailed_debug_info_on_failure: false, }; let storages = self.get_snapshot_storages(Some(base_slot)); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index eb8e041cbe31f3..9fd5c085d88b47 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -25,12 +25,11 @@ mod serde_snapshot_tests { accounts_hash::AccountsHash, ancestors::Ancestors, }, - solana_clock::Slot, + solana_clock::{Epoch, Slot}, solana_epoch_schedule::EpochSchedule, solana_hash::Hash, solana_nohash_hasher::BuildNoHashHasher, solana_pubkey::Pubkey, - solana_rent_collector::RentCollector, std::{ fs::File, io::{self, BufReader, Cursor, Read, Write}, @@ -524,12 +523,9 @@ mod serde_snapshot_tests { let ancestors = Ancestors::default(); let epoch_schedule = EpochSchedule::default(); - let rent_collector = RentCollector::default(); - let config = VerifyAccountsHashAndLamportsConfig::new_for_test( - &ancestors, - &epoch_schedule, - &rent_collector, - ); + let epoch = Epoch::default(); + let config = + VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch); accounts .verify_accounts_hash_and_lamports_for_tests(4, 1222, config) @@ -819,11 +815,11 @@ mod serde_snapshot_tests { let no_ancestors = 
Ancestors::default(); let epoch_schedule = EpochSchedule::default(); - let rent_collector = RentCollector::default(); + let epoch = Epoch::default(); let config = VerifyAccountsHashAndLamportsConfig::new_for_test( &no_ancestors, &epoch_schedule, - &rent_collector, + epoch, ); accounts.update_accounts_hash_for_tests(current_slot, &no_ancestors, false, false); diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 6f4c282acfd248..5758890ea42722 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -2191,7 +2191,7 @@ mod tests { use_bg_thread_pool: false, ancestors: None, epoch_schedule: deserialized_bank.epoch_schedule(), - rent_collector: deserialized_bank.rent_collector(), + epoch: deserialized_bank.epoch(), store_detailed_debug_info_on_failure: false, }, &SortedStorages::new(&other_incremental_snapshot_storages), From 6d6d5147980183d1ed1dc20b6b938dc287cc9b88 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Jul 2025 19:51:20 +0800 Subject: [PATCH 114/124] build(deps): bump reqwest from 0.12.20 to 0.12.21 (#6785) * build(deps): bump reqwest from 0.12.20 to 0.12.21 Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.12.20 to 0.12.21. - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/commits) --- updated-dependencies: - dependency-name: reqwest dependency-version: 0.12.21 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ svm/examples/Cargo.lock | 12 ++++++------ 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a85c066cc6459..75393ab3e132be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -173,7 +173,7 @@ dependencies = [ "dirs-next", "indicatif", "nix", - "reqwest 0.12.20", + "reqwest 0.12.21", "scopeguard", "semver 1.0.26", "serde", @@ -5757,9 +5757,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "4c8cea6b35bcceb099f30173754403d2eba0a5dc18cea3630fccd88251909288" dependencies = [ "async-compression", "base64 0.22.1", @@ -5806,7 +5806,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.20", + "reqwest 0.12.21", "serde", "thiserror 1.0.69", "tower-service", @@ -7280,7 +7280,7 @@ dependencies = [ "log", "predicates", "regex", - "reqwest 0.12.20", + "reqwest 0.12.21", "semver 1.0.26", "serial_test", "solana-file-download", @@ -7382,7 +7382,7 @@ dependencies = [ "log", "num-traits", "pretty-hex", - "reqwest 0.12.20", + "reqwest 0.12.21", "semver 1.0.26", "serde", "serde_derive", @@ -9077,7 +9077,7 @@ dependencies = [ "gethostname", "log", "rand 0.8.5", - "reqwest 0.12.20", + "reqwest 0.12.21", "serial_test", "solana-cluster-type", "solana-sha256-hasher", @@ -9171,7 +9171,7 @@ name = "solana-notifier" version = "3.0.0" dependencies = [ "log", - "reqwest 0.12.20", + "reqwest 0.12.21", "serde_json", "solana-hash", ] @@ -9897,7 +9897,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-http-server", "log", - "reqwest 0.12.20", + "reqwest 0.12.21", 
"reqwest-middleware", "semver 1.0.26", "serde", @@ -9935,7 +9935,7 @@ version = "3.0.0" dependencies = [ "anyhow", "jsonrpc-core", - "reqwest 0.12.20", + "reqwest 0.12.21", "reqwest-middleware", "serde", "serde_derive", @@ -10012,7 +10012,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "log", - "reqwest 0.12.20", + "reqwest 0.12.21", "serde", "serde_json", "solana-account-decoder", diff --git a/Cargo.toml b/Cargo.toml index 7a9b8edbaeb335..6c24d5265932d1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -345,7 +345,7 @@ rand_chacha0-2 = { package = "rand_chacha", version = "0.2.2" } rayon = "1.10.0" reed-solomon-erasure = "6.0.0" regex = "1.11.1" -reqwest = { version = "0.12.20", default-features = false } +reqwest = { version = "0.12.21", default-features = false } reqwest-middleware = "0.4.2" rolling-file = "0.2.0" rpassword = "7.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1863642e6aa8dc..bbfbb4f4f5ef89 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4675,9 +4675,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "4c8cea6b35bcceb099f30173754403d2eba0a5dc18cea3630fccd88251909288" dependencies = [ "async-compression", "base64 0.22.1", @@ -4724,7 +4724,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.2.0", - "reqwest 0.12.20", + "reqwest 0.12.21", "serde", "thiserror 1.0.69", "tower-service", @@ -6994,7 +6994,7 @@ dependencies = [ "crossbeam-channel", "gethostname", "log", - "reqwest 0.12.20", + "reqwest 0.12.21", "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", @@ -7677,7 +7677,7 @@ dependencies = [ "futures 0.3.31", "indicatif", "log", - "reqwest 0.12.20", + "reqwest 0.12.21", "reqwest-middleware", "semver", "serde", @@ -7710,7 +7710,7 @@ version = "3.0.0" dependencies = [ "anyhow", 
"jsonrpc-core", - "reqwest 0.12.20", + "reqwest 0.12.21", "reqwest-middleware", "serde", "serde_derive", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 1ea8b24c38657a..322f4216a63527 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -4523,9 +4523,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "4c8cea6b35bcceb099f30173754403d2eba0a5dc18cea3630fccd88251909288" dependencies = [ "async-compression", "base64 0.22.1", @@ -4572,7 +4572,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.2.0", - "reqwest 0.12.20", + "reqwest 0.12.21", "serde", "thiserror 1.0.69", "tower-service", @@ -6806,7 +6806,7 @@ dependencies = [ "crossbeam-channel", "gethostname", "log", - "reqwest 0.12.20", + "reqwest 0.12.21", "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", @@ -7489,7 +7489,7 @@ dependencies = [ "futures 0.3.31", "indicatif", "log", - "reqwest 0.12.20", + "reqwest 0.12.21", "reqwest-middleware", "semver", "serde", @@ -7522,7 +7522,7 @@ version = "3.0.0" dependencies = [ "anyhow", "jsonrpc-core", - "reqwest 0.12.20", + "reqwest 0.12.21", "reqwest-middleware", "serde", "serde_derive", From 6ecde287f225374b06f035e047db8442cbbbb2db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Jul 2025 19:52:07 +0800 Subject: [PATCH 115/124] build(deps): bump indicatif from 0.17.11 to 0.17.12 (#6786) * build(deps): bump indicatif from 0.17.11 to 0.17.12 Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.11 to 0.17.12. 
- [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/compare/0.17.11...0.17.12) --- updated-dependencies: - dependency-name: indicatif dependency-version: 0.17.12 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 124 ++++++++++++++++++++++++++++++++++------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 118 ++++++++++++++++++++++++++++++++------ svm/examples/Cargo.lock | 114 +++++++++++++++++++++++++++++++----- 4 files changed, 308 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75393ab3e132be..4ea79bc45b55e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -167,7 +167,7 @@ dependencies = [ "bzip2", "chrono", "clap 2.33.3", - "console", + "console 0.15.11", "crossbeam-channel", "ctrlc", "dirs-next", @@ -408,7 +408,7 @@ dependencies = [ "assert_cmd", "chrono", "clap 2.33.3", - "console", + "console 0.15.11", "core_affinity", "crossbeam-channel", "fd-lock", @@ -1880,6 +1880,19 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "console" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.60.2", +] + [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -2366,7 +2379,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87" dependencies = [ - "console", + "console 0.15.11", "shell-words", "tempfile", "zeroize", @@ -3787,14 +3800,14 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.11" +version = 
"0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" +checksum = "4adb2ee6ad319a912210a36e56e3623555817bcc877a7e6e8802d1d69c4d8056" dependencies = [ - "console", - "number_prefix", + "console 0.16.0", "portable-atomic", "unicode-width 0.2.0", + "unit-prefix", "web-time", ] @@ -4666,12 +4679,6 @@ dependencies = [ "libc", ] -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - [[package]] name = "object" version = "0.31.1" @@ -7372,7 +7379,7 @@ dependencies = [ "bincode", "bs58", "clap 2.33.3", - "console", + "console 0.15.11", "const_format", "criterion-stats", "crossbeam-channel", @@ -7483,7 +7490,7 @@ dependencies = [ "base64 0.22.1", "chrono", "clap 2.33.3", - "console", + "console 0.15.11", "ed25519-dalek", "humantime", "indicatif", @@ -8356,7 +8363,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05a9744774fdbd7ae8575e5bd6d5df6946f321fb9b6019410b300a515369a37d" dependencies = [ - "console", + "console 0.15.11", "indicatif", "log", "reqwest 0.11.27", @@ -9714,7 +9721,7 @@ name = "solana-remote-wallet" version = "3.0.0" dependencies = [ "assert_matches", - "console", + "console 0.15.11", "dialoguer", "hidapi", "log", @@ -11084,7 +11091,7 @@ dependencies = [ "bincode", "chrono", "clap 2.33.3", - "console", + "console 0.15.11", "csv", "ctrlc", "indexmap 2.10.0", @@ -13302,6 +13309,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +[[package]] +name = "unit-prefix" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817" + [[package]] name = 
"universal-hash" version = "0.5.1" @@ -13755,6 +13768,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -13794,13 +13816,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -13819,6 +13857,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -13837,6 +13881,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -13855,12 +13905,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -13879,6 +13941,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -13897,6 +13965,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -13915,6 +13989,12 
@@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -13933,6 +14013,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.5.16" diff --git a/Cargo.toml b/Cargo.toml index 6c24d5265932d1..faf215df5015b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -281,7 +281,7 @@ hyper = "0.14.32" hyper-proxy = "0.9.1" im = "15.1.0" indexmap = "2.10.0" -indicatif = "0.17.11" +indicatif = "0.17.12" io-uring = "0.7.8" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.6.0", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bbfbb4f4f5ef89..f30a417a03f09e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -157,7 +157,7 @@ dependencies = [ "agave-geyser-plugin-interface", "chrono", "clap", - "console", + "console 0.15.11", "core_affinity", "crossbeam-channel", "fd-lock", @@ -1218,6 +1218,19 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "console" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.60.2", +] + [[package]] name = 
"console_error_panic_hook" version = "0.1.7" @@ -1591,7 +1604,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87" dependencies = [ - "console", + "console 0.15.11", "shell-words", "tempfile", "zeroize", @@ -2882,14 +2895,14 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.11" +version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" +checksum = "4adb2ee6ad319a912210a36e56e3623555817bcc877a7e6e8802d1d69c4d8056" dependencies = [ - "console", - "number_prefix", + "console 0.16.0", "portable-atomic", "unicode-width 0.2.0", + "unit-prefix", "web-time", ] @@ -3775,12 +3788,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - [[package]] name = "object" version = "0.31.1" @@ -5809,7 +5816,7 @@ dependencies = [ "base64 0.22.1", "chrono", "clap", - "console", + "console 0.15.11", "humantime", "indicatif", "pretty-hex", @@ -6485,7 +6492,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05a9744774fdbd7ae8575e5bd6d5df6946f321fb9b6019410b300a515369a37d" dependencies = [ - "console", + "console 0.15.11", "indicatif", "log", "reqwest 0.11.27", @@ -7524,7 +7531,7 @@ dependencies = [ name = "solana-remote-wallet" version = "3.0.0" dependencies = [ - "console", + "console 0.15.11", "dialoguer", "hidapi", "log", @@ -11168,6 +11175,12 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "unit-prefix" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817" + [[package]] name = "universal-hash" version = "0.5.1" @@ -11571,6 +11584,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -11610,13 +11632,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -11635,6 +11673,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -11653,6 +11697,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -11671,12 +11721,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -11695,6 +11757,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -11713,6 +11781,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -11731,6 +11805,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -11749,6 +11829,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.5.25" diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 322f4216a63527..db2cab37c86258 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -1118,6 +1118,19 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "console" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.60.2", +] + [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -1483,7 +1496,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87" dependencies = [ - "console", + "console 0.15.11", "shell-words", "tempfile", "zeroize", @@ -2756,14 +2769,14 @@ dependencies = [ [[package]] name = "indicatif" -version = 
"0.17.11" +version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" +checksum = "4adb2ee6ad319a912210a36e56e3623555817bcc877a7e6e8802d1d69c4d8056" dependencies = [ - "console", - "number_prefix", + "console 0.16.0", "portable-atomic", "unicode-width 0.2.0", + "unit-prefix", "web-time", ] @@ -3656,12 +3669,6 @@ dependencies = [ "syn 2.0.96", ] -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - [[package]] name = "object" version = "0.36.7" @@ -5656,7 +5663,7 @@ dependencies = [ "base64 0.22.1", "chrono", "clap", - "console", + "console 0.15.11", "humantime", "indicatif", "pretty-hex", @@ -7336,7 +7343,7 @@ dependencies = [ name = "solana-remote-wallet" version = "3.0.0" dependencies = [ - "console", + "console 0.15.11", "dialoguer", "hidapi", "log", @@ -10261,6 +10268,12 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "unit-prefix" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817" + [[package]] name = "universal-hash" version = "0.5.1" @@ -10673,6 +10686,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -10712,13 +10734,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + 
"windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -10737,6 +10775,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -10755,6 +10799,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -10773,12 +10823,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -10797,6 +10859,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -10815,6 +10883,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -10833,6 +10907,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -10851,6 +10931,12 @@ version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.6.25" From a54a182196d41baefb15257c365aa624227f0ed1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Jul 2025 19:56:02 +0800 Subject: [PATCH 116/124] build(deps): bump serde_with from 3.13.0 to 3.14.0 (#6787) * build(deps): bump serde_with from 3.13.0 to 3.14.0 Bumps [serde_with](https://github.com/jonasbb/serde_with) from 3.13.0 to 3.14.0. - [Release notes](https://github.com/jonasbb/serde_with/releases) - [Commits](https://github.com/jonasbb/serde_with/compare/v3.13.0...v3.14.0) --- updated-dependencies: - dependency-name: serde_with dependency-version: 3.14.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- svm/examples/Cargo.lock | 8 ++++---- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ea79bc45b55e0..2304cf2ce39b57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6277,9 +6277,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "serde", "serde_derive", @@ -6288,9 +6288,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index faf215df5015b9..5266b1fa197c83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -358,7 +358,7 @@ serde-big-array = "0.5.1" serde_bytes = "0.11.17" serde_derive = "1.0.219" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.140" -serde_with = { version = "3.13.0", default-features = false } +serde_with = { version = "3.14.0", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" sha2 = "0.10.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f30a417a03f09e..0059c2914932cf 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5127,9 +5127,9 @@ dependencies = [ [[package]] name = "serde_with" -version 
= "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "serde", "serde_derive", @@ -5138,9 +5138,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index db2cab37c86258..68e3f045323767 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -4975,9 +4975,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "serde", "serde_derive", @@ -4986,9 +4986,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", From 9f1dd1c51b044c942f07d9a24513f5ba7dc8471e Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Tue, 1 Jul 2025 11:53:28 -0300 Subject: [PATCH 117/124] Update SIMDs for SBPF versions. (#6784) * Update SIMDs * Fix other SIMDs * Didn't you forget your own SIMD-0179? 
--- feature-set/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/feature-set/src/lib.rs b/feature-set/src/lib.rs index 6d30154e916d7a..9278a0cba0a375 100644 --- a/feature-set/src/lib.rs +++ b/feature-set/src/lib.rs @@ -1324,11 +1324,11 @@ pub static FEATURE_NAMES: LazyLock> = LazyLock::n (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), (partitioned_epoch_rewards_superfeature::id(), "SIMD-0118: replaces enable_partitioned_epoch_reward to enable partitioned rewards at epoch boundary"), - (disable_sbpf_v0_execution::id(), "SIMD-0161: Disables execution of SBPFv1 programs"), - (reenable_sbpf_v0_execution::id(), "Re-enables execution of SBPFv1 programs"), - (enable_sbpf_v1_deployment_and_execution::id(), "SIMD-0161: Enables deployment and execution of SBPFv1 programs"), - (enable_sbpf_v2_deployment_and_execution::id(), "SIMD-0161: Enables deployment and execution of SBPFv2 programs"), - (enable_sbpf_v3_deployment_and_execution::id(), "SIMD-0161: Enables deployment and execution of SBPFv3 programs"), + (disable_sbpf_v0_execution::id(), "SIMD-0161: Disables execution of SBPFv0 programs"), + (reenable_sbpf_v0_execution::id(), "Re-enables execution of SBPFv0 programs"), + (enable_sbpf_v1_deployment_and_execution::id(), "SIMD-0166: Enable deployment and execution of SBPFv1 programs"), + (enable_sbpf_v2_deployment_and_execution::id(), "SIMD-0173 and SIMD-0174: Enable deployment and execution of SBPFv2 programs"), + (enable_sbpf_v3_deployment_and_execution::id(), "SIMD-0178, SIMD-0179 and SIMD-0189: Enable deployment and execution of SBPFv3 programs"), (remove_accounts_executable_flag_checks::id(), "SIMD-0162: Remove checks of accounts is_executable flag"), (lift_cpi_caller_restriction::id(), "Lift the restriction in CPI that the caller must have the callee as an instruction account #2202"), 
(disable_account_loader_special_case::id(), "Disable account loader special case #3513"), From 898b76e861d1f04d1352b772599cf26088976f0e Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 1 Jul 2025 12:10:22 -0400 Subject: [PATCH 118/124] Fixes AppendVecError::SizeMismatch error message (#6792) --- accounts-db/src/append_vec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index cc41e07589af4e..ce3e65f1a33d2b 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -100,7 +100,7 @@ pub enum AppendVecError { #[error("offset ({0}) is larger than file size ({1})")] OffsetOutOfBounds(usize, usize), - #[error("file size ({1}) and current length ({0}) do not match for '{0}'")] + #[error("file size ({2}) and current length ({1}) do not match for '{0}'")] SizeMismatch(PathBuf, usize, u64), } From af62b255465f4632e6e6ab4e035ee1fa3acb583e Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Tue, 1 Jul 2025 13:02:30 -0500 Subject: [PATCH 119/124] Add a sanity assert for stored_size at index generation time. 
(#5804) * assert store alive size * align * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks --------- Co-authored-by: Brooks --- accounts-db/src/accounts_db.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index bc5c7609e04976..8c6aeb7e90ff71 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -72,7 +72,7 @@ use { read_only_accounts_cache::ReadOnlyAccountsCache, sorted_storages::SortedStorages, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, - utils, + u64_align, utils, verify_accounts_hash_in_background::VerifyAccountsHashInBackground, }, crossbeam_channel::{unbounded, Receiver, Sender, TryRecvError}, @@ -7893,6 +7893,13 @@ impl AccountsDb { let mut info = storage_info.entry(store_id).or_default(); info.stored_size += stored_size_alive; info.count += generate_index_results.count; + + // sanity check that stored_size is not larger than the u64 aligned size of the accounts files. + // Note that the stored_size is aligned, so it can be larger than the size of the accounts file. 
+ assert!(info.stored_size <= u64_align!(storage.accounts.len()), + "Stored size ({}) is larger than the size of the accounts file ({}) for store_id: {}", + info.stored_size, storage.accounts.len(), store_id + ); } // dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for From cec82c970125374dcb749fe701f495beded59805 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 1 Jul 2025 15:42:12 -0400 Subject: [PATCH 120/124] Boxes inner tonic::Status inside BigTableError::Rpc (#6624) --- storage-bigtable/src/bigtable.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index ec1c117b44f47e..3a21b037da9a0f 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -78,7 +78,7 @@ pub enum Error { ObjectCorrupt(String), #[error("RPC: {0}")] - Rpc(tonic::Status), + Rpc(Box), #[error("Timeout")] Timeout, @@ -107,7 +107,7 @@ impl std::convert::From for Error { impl std::convert::From for Error { fn from(err: tonic::Status) -> Self { - Self::Rpc(err) + Self::Rpc(Box::new(err)) } } From 21f770aaf78c10368caf025748b1e8e3a3518fec Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 1 Jul 2025 15:45:09 -0400 Subject: [PATCH 121/124] Boxes inner SnapshotError in BankForksUtilsError::BankFromSnapshotsArchive (#6625) --- ledger/src/bank_forks_utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index d2e882471656a8..8df2408394e040 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -39,7 +39,7 @@ pub enum BankForksUtilsError { incremental snapshot archive: {incremental_snapshot_archive}" )] BankFromSnapshotsArchive { - source: snapshot_utils::SnapshotError, + source: Box, full_snapshot_archive: String, incremental_snapshot_archive: String, }, @@ -320,7 +320,7 @@ fn bank_forks_from_snapshot( exit, ) .map_err(|err| 
BankForksUtilsError::BankFromSnapshotsArchive { - source: err, + source: Box::new(err), full_snapshot_archive: full_snapshot_archive_info.path().display().to_string(), incremental_snapshot_archive: incremental_snapshot_archive_info .as_ref() From f8fe0aedc3ebcf79a0b5d10f4a4f2bc08516ea59 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 1 Jul 2025 22:34:12 -0400 Subject: [PATCH 122/124] clippy: ptr_eq (#6798) ``` error: use `std::ptr::eq` when comparing raw pointers --> core/src/cluster_slots_service/slot_supporters.rs:118:26 | 118 | let same_epoch = Arc::as_ptr(index_map) == Arc::as_ptr(&self.pubkey_to_index_map); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `std::ptr::eq(Arc::as_ptr(index_map), Arc::as_ptr(&self.pubkey_to_index_map))` | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_eq = note: `-D clippy::ptr-eq` implied by `-D warnings` = help: to override `-D warnings` add `#[allow(clippy::ptr_eq)]` ``` --- core/src/cluster_slots_service/slot_supporters.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/src/cluster_slots_service/slot_supporters.rs b/core/src/cluster_slots_service/slot_supporters.rs index c8117236b21ff7..d64842a4f4d5a8 100644 --- a/core/src/cluster_slots_service/slot_supporters.rs +++ b/core/src/cluster_slots_service/slot_supporters.rs @@ -4,6 +4,7 @@ use { std::{ collections::HashMap, hash::RandomState, + ptr, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -115,7 +116,10 @@ impl SlotSupporters { pub(crate) fn recycle(mut self, total_stake: Stake, index_map: &Arc) -> Self { self.total_stake = total_stake; self.total_support.store(0, Ordering::Relaxed); - let same_epoch = Arc::as_ptr(index_map) == Arc::as_ptr(&self.pubkey_to_index_map); + let same_epoch = ptr::eq( + Arc::as_ptr(index_map), + Arc::as_ptr(&self.pubkey_to_index_map), + ); if !same_epoch { let old_len = self.supporting_stakes.len(); let new_len = index_map.len(); From 
50168ff35a367b15ba7664b97f323c9238bc7268 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 2 Jul 2025 10:47:00 +0800 Subject: [PATCH 123/124] chore: bump itertools from 0.10.1 to 0.12.1 (#6774) --- programs/sbf/Cargo.lock | 2 +- programs/sbf/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 0059c2914932cf..ab3b78b479f915 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7939,7 +7939,7 @@ dependencies = [ "borsh 1.5.7", "byteorder 1.5.0", "elf", - "itertools 0.10.5", + "itertools 0.12.1", "log", "miow", "net2", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 0e5dc9a8d362f5..9847af25b61401 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -100,7 +100,7 @@ borsh = "1.5.1" byteorder = "1.3.2" elf = "0.0.10" getrandom = "0.2.10" -itertools = "0.10.1" +itertools = "0.12.1" libsecp256k1 = { version = "0.7.0", default-features = false } log = "0.4.11" miow = "0.3.6" From 41b0729b885d11d1489744072d83e08cbeacc343 Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Wed, 2 Jul 2025 07:55:05 +0200 Subject: [PATCH 124/124] ledger: Make use of `#[test_matrix]` (#6772) Some tests were using `#[test_case]` and listing all the combinations explicitly. To simplify them, use `#[test_matrix]` instead. 
--- ledger/src/shred.rs | 18 +++++------ ledger/src/shred/merkle.rs | 39 ++++++++--------------- ledger/src/shred/wire.rs | 15 ++++----- ledger/src/shredder.rs | 58 +++++++++++++++++----------------- ledger/src/sigverify_shreds.rs | 18 +++++------ 5 files changed, 67 insertions(+), 81 deletions(-) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 774fefc3874bc0..85c370f418d10a 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -1053,7 +1053,7 @@ mod tests { solana_keypair::keypair_from_seed, solana_signer::Signer, std::io::{Cursor, Seek, SeekFrom, Write}, - test_case::test_case, + test_case::{test_case, test_matrix}, }; const SIZE_OF_SHRED_INDEX: usize = 4; @@ -1215,10 +1215,10 @@ mod tests { ); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_should_discard_shred(chained: bool, is_last_in_slot: bool) { solana_logger::setup(); let mut rng = rand::thread_rng(); @@ -1868,10 +1868,10 @@ mod tests { assert!(flags.contains(ShredFlags::LAST_SHRED_IN_SLOT)); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_is_shred_duplicate(chained: bool, is_last_in_slot: bool) { fn fill_retransmitter_signature( rng: &mut R, diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index f5fb5123ae499a..09b4274cd92815 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -1321,7 +1321,7 @@ mod test { solana_packet::PACKET_DATA_SIZE, solana_signer::Signer, std::{cmp::Ordering, collections::HashMap, iter::repeat_with}, - test_case::test_case, + test_case::{test_case, test_matrix}, }; // Total size of a data shred including headers and merkle proof. 
@@ -1623,22 +1623,11 @@ mod test { } } - #[test_case(0, false, false)] - #[test_case(0, false, true)] - #[test_case(0, true, false)] - #[test_case(0, true, true)] - #[test_case(15600, false, false)] - #[test_case(15600, false, true)] - #[test_case(15600, true, false)] - #[test_case(15600, true, true)] - #[test_case(31200, false, false)] - #[test_case(31200, false, true)] - #[test_case(31200, true, false)] - #[test_case(31200, true, true)] - #[test_case(46800, false, false)] - #[test_case(46800, false, true)] - #[test_case(46800, true, false)] - #[test_case(46800, true, true)] + #[test_matrix( + [0, 15600, 31200, 46800], + [true, false], + [true, false] + )] fn test_make_shreds_from_data(data_size: usize, chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let data_size = data_size.saturating_sub(16); @@ -1654,10 +1643,10 @@ mod test { } } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_make_shreds_from_data_rand(chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); @@ -1674,10 +1663,10 @@ mod test { } #[ignore] - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_make_shreds_from_data_paranoid(chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); diff --git a/ledger/src/shred/wire.rs b/ledger/src/shred/wire.rs index f3325ae5bbe31f..47eee42124a977 100644 --- a/ledger/src/shred/wire.rs +++ b/ledger/src/shred/wire.rs @@ -447,7 +447,7 @@ mod tests { rand::Rng, solana_perf::packet::PacketFlags, std::io::{Cursor, Write}, - test_case::test_case, + test_case::test_matrix, }; fn make_dummy_signature(rng: &mut R) -> Signature { @@ -486,14 +486,11 @@ mod tests { 
packet.meta_mut().size = usize::try_from(cursor.position()).unwrap(); } - #[test_case(false, false, false)] - #[test_case(false, false, true)] - #[test_case(false, true, false)] - #[test_case(false, true, true)] - #[test_case(true, false, false)] - #[test_case(true, false, true)] - #[test_case(true, true, false)] - #[test_case(true, true, true)] + #[test_matrix( + [true, false], + [true, false], + [true, false] + )] fn test_merkle_shred_wire_layout(repaired: bool, chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let slot = 318_230_963 + rng.gen_range(0..318_230_963); diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 0b4f18c80d26b7..f4d5e170be1c39 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -558,7 +558,7 @@ mod tests { solana_signer::Signer, solana_system_transaction as system_transaction, std::{collections::HashSet, convert::TryInto, iter::repeat_with, sync::Arc}, - test_case::test_case, + test_case::{test_case, test_matrix}, }; fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) { @@ -660,18 +660,18 @@ mod tests { assert_eq!(entries, deshred_entries); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_data_shredder(chained: bool, is_last_in_slot: bool) { run_test_data_shredder(0x1234_5678_9abc_def0, chained, is_last_in_slot); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_deserialize_shred_payload(chained: bool, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let shredder = Shredder::new( @@ -709,10 +709,10 @@ mod tests { } } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn 
test_shred_reference_tick(chained: bool, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; @@ -751,10 +751,10 @@ mod tests { assert_eq!(deserialized_shred.reference_tick(), 5); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_shred_reference_tick_overflow(chained: bool, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; @@ -848,10 +848,10 @@ mod tests { } } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_data_and_code_shredder(chained: bool, is_last_in_slot: bool) { run_test_data_and_code_shredder(0x1234_5678_9abc_def0, chained, is_last_in_slot); } @@ -1170,10 +1170,10 @@ mod tests { } } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_shred_version(chained: bool, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let hash = hash(Hash::default().as_ref()); @@ -1208,10 +1208,10 @@ mod tests { .any(|s| s.version() != version)); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_shred_fec_set_index(chained: bool, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let hash = hash(Hash::default().as_ref()); diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index d1fe3ba123b4c8..6e3bd6373c0a26 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -544,7 +544,7 @@ mod tests { solana_system_transaction as system_transaction, solana_transaction::Transaction, std::iter::{once, repeat_with}, - test_case::test_case, + 
test_case::test_matrix, }; fn run_test_sigverify_shred_cpu(slot: Slot) { @@ -815,10 +815,10 @@ mod tests { packets } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_verify_shreds_fuzz(chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let cache = RwLock::new(LruCache::new(/*capacity:*/ 128)); @@ -867,10 +867,10 @@ mod tests { ); } - #[test_case(false, false)] - #[test_case(false, true)] - #[test_case(true, false)] - #[test_case(true, true)] + #[test_matrix( + [true, false], + [true, false] + )] fn test_sign_shreds_gpu(chained: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let cache = RwLock::new(LruCache::new(/*capacity:*/ 128));