diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38c46f29232578..9562034a89822d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,15 @@ Release channels have their own copy of this changelog:
## 3.0.0 - Unreleased
+### RPC
+
+#### Breaking
+* Added a `slot` property to `EpochRewardsPeriodActiveErrorData`
+* Added error data containing a `slot` property to `RpcCustomError::SlotNotEpochBoundary`
+
+#### Changes
+* The subscription server now prioritizes processing received messages before sending out responses. This ensures that new subscription requests and time-sensitive messages like `PING` opcodes take priority over notifications.
+
### Validator
#### Breaking
diff --git a/Cargo.lock b/Cargo.lock
index d84bffa971e238..bb211e9cc1dd61 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -485,6 +485,7 @@ dependencies = [
"spl-token-2022",
"symlink",
"tempfile",
+ "test-case",
"thiserror 2.0.12",
"tikv-jemallocator",
"tokio",
@@ -1494,9 +1495,9 @@ dependencies = [
[[package]]
name = "bytemuck_derive"
-version = "1.9.3"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1"
+checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4"
dependencies = [
"proc-macro2",
"quote",
@@ -2199,9 +2200,9 @@ dependencies = [
[[package]]
name = "curve25519-dalek"
-version = "4.2.0"
+version = "4.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "373b7c5dbd637569a2cca66e8d66b8c446a1e7bf064ea321d265d7b3dfe7c97e"
+checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
dependencies = [
"cfg-if 1.0.1",
"cpufeatures",
@@ -2770,9 +2771,9 @@ checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da"
[[package]]
name = "fiat-crypto"
-version = "0.3.0"
+version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24"
+checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "filedescriptor"
@@ -3881,9 +3882,9 @@ dependencies = [
[[package]]
name = "io-uring"
-version = "0.7.8"
+version = "0.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013"
+checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
dependencies = [
"bitflags 2.9.1",
"cfg-if 1.0.1",
@@ -6328,9 +6329,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.140"
+version = "1.0.141"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
+checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
dependencies = [
"itoa",
"memchr",
@@ -6762,23 +6763,6 @@ dependencies = [
"solana-pubkey",
]
-[[package]]
-name = "solana-accounts-bench"
-version = "3.0.0"
-dependencies = [
- "clap 2.33.3",
- "log",
- "rayon",
- "solana-accounts-db",
- "solana-clock",
- "solana-epoch-schedule",
- "solana-logger",
- "solana-measure",
- "solana-pubkey",
- "solana-rent-collector",
- "solana-version",
-]
-
[[package]]
name = "solana-accounts-cluster-bench"
version = "3.0.0"
@@ -7926,6 +7910,7 @@ dependencies = [
"log",
"lru",
"min-max-heap",
+ "num_cpus",
"num_enum",
"prio-graph",
"qualifier_attr",
@@ -8112,7 +8097,7 @@ checksum = "def3cfe5279edb64fc39111cff6dcf77b01fbfba2c02c13ced41e6a48baf4cbe"
dependencies = [
"bytemuck",
"bytemuck_derive",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"solana-define-syscall",
"subtle",
"thiserror 2.0.12",
@@ -8124,7 +8109,7 @@ version = "3.0.0"
dependencies = [
"bytemuck",
"bytemuck_derive",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"solana-define-syscall",
"subtle",
"thiserror 2.0.12",
@@ -8251,9 +8236,11 @@ dependencies = [
"crossbeam-channel",
"dlopen2",
"log",
+ "num_cpus",
"rand 0.8.5",
"rayon",
"serde",
+ "solana-entry",
"solana-hash",
"solana-keypair",
"solana-logger",
@@ -8264,7 +8251,6 @@ dependencies = [
"solana-packet",
"solana-perf",
"solana-pubkey",
- "solana-rayon-threadlimit",
"solana-runtime-transaction",
"solana-sha256-hasher",
"solana-signature",
@@ -9179,6 +9165,7 @@ dependencies = [
name = "solana-metrics"
version = "3.0.0"
dependencies = [
+ "bencher",
"crossbeam-channel",
"env_logger 0.11.8",
"gethostname",
@@ -9321,11 +9308,12 @@ version = "3.0.0"
dependencies = [
"ahash 0.8.11",
"assert_matches",
+ "bencher",
"bincode",
"bv",
"bytes",
"caps",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"dlopen2",
"fnv",
"libc",
@@ -9401,12 +9389,12 @@ version = "3.0.0"
dependencies = [
"clap 3.2.23",
"log",
+ "num_cpus",
"rayon",
"solana-entry",
"solana-logger",
"solana-measure",
"solana-perf",
- "solana-rayon-threadlimit",
"solana-sha256-hasher",
"solana-version",
]
@@ -9724,7 +9712,7 @@ dependencies = [
"borsh 1.5.7",
"bytemuck",
"bytemuck_derive",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"five8",
"five8_const",
"getrandom 0.2.15",
@@ -9815,6 +9803,7 @@ dependencies = [
name = "solana-rayon-threadlimit"
version = "3.0.0"
dependencies = [
+ "log",
"num_cpus",
]
@@ -10055,6 +10044,7 @@ dependencies = [
"solana-signer",
"solana-transaction-error",
"solana-transaction-status-client-types",
+ "test-case",
"thiserror 2.0.12",
]
@@ -11877,7 +11867,7 @@ dependencies = [
"agave-feature-set",
"bytemuck",
"criterion",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"num-derive",
"num-traits",
"solana-instruction",
@@ -11936,7 +11926,7 @@ dependencies = [
"bincode",
"bytemuck",
"bytemuck_derive",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"itertools 0.12.1",
"js-sys",
"lazy_static",
@@ -11971,7 +11961,7 @@ dependencies = [
"bincode",
"bytemuck",
"bytemuck_derive",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"itertools 0.12.1",
"js-sys",
"merlin",
@@ -12005,7 +11995,7 @@ dependencies = [
"agave-feature-set",
"bytemuck",
"criterion",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"num-derive",
"num-traits",
"solana-instruction",
@@ -12024,7 +12014,7 @@ dependencies = [
"bincode",
"bytemuck",
"bytemuck_derive",
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"itertools 0.12.1",
"merlin",
"num-derive",
@@ -12368,7 +12358,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae5b124840d4aed474cef101d946a798b806b46a509ee4df91021e1ab1cef3ef"
dependencies = [
- "curve25519-dalek 4.2.0",
+ "curve25519-dalek 4.1.3",
"solana-zk-sdk 2.2.15",
"thiserror 2.0.12",
]
diff --git a/Cargo.toml b/Cargo.toml
index 7ec6f54b22c72b..e293bd5d28a478 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,7 +2,6 @@
members = [
"account-decoder",
"account-decoder-client-types",
- "accounts-bench",
"accounts-cluster-bench",
"accounts-db",
"accounts-db/accounts-hash-cache-tool",
@@ -220,7 +219,7 @@ bs58 = { version = "0.5.1", default-features = false }
bv = "0.11.1"
byte-unit = "4.0.19"
bytemuck = "1.23.1"
-bytemuck_derive = "1.9.3"
+bytemuck_derive = "1.10.0"
bytes = "1.10"
bzip2 = "0.4.4"
caps = "0.5.5"
@@ -242,7 +241,7 @@ criterion-stats = "0.3.0"
crossbeam-channel = "0.5.15"
csv = "1.3.1"
ctrlc = "3.4.7"
-curve25519-dalek = { version = "4.2.0", features = ["digest", "rand_core"] }
+curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] }
dashmap = "5.5.3"
derivation-path = { version = "0.2.0", default-features = false }
derive-where = "1.5.0"
@@ -283,7 +282,7 @@ hyper-proxy = "0.9.1"
im = "15.1.0"
indexmap = "2.10.0"
indicatif = "0.18.0"
-io-uring = "0.7.8"
+io-uring = "0.7.9"
itertools = "0.12.1"
jemallocator = { package = "tikv-jemallocator", version = "0.6.0", features = [
"unprefixed_malloc_on_supported_platforms",
@@ -359,7 +358,7 @@ serde = "1.0.219" # must match the serde_derive version, see https://github.com/
serde-big-array = "0.5.1"
serde_bytes = "0.11.17"
serde_derive = "1.0.219" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251
-serde_json = "1.0.140"
+serde_json = "1.0.141"
serde_with = { version = "3.14.0", default-features = false }
serde_yaml = "0.9.34"
serial_test = "2.0.0"
diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs
index 9f58a0c4fb49b9..6cf54d11c8fd93 100644
--- a/account-decoder/src/parse_account_data.rs
+++ b/account-decoder/src/parse_account_data.rs
@@ -73,32 +73,11 @@ pub enum ParsableAccount {
Vote,
}
-#[deprecated(since = "2.0.0", note = "Use `AccountAdditionalDataV3` instead")]
-#[derive(Clone, Copy, Default)]
-pub struct AccountAdditionalData {
- pub spl_token_decimals: Option<u8>,
-}
-
-#[deprecated(since = "2.2.0", note = "Use `AccountAdditionalDataV3` instead")]
-#[derive(Clone, Copy, Default)]
-pub struct AccountAdditionalDataV2 {
- pub spl_token_additional_data: Option<SplTokenAdditionalData>,
-}
-
#[derive(Clone, Copy, Default)]
pub struct AccountAdditionalDataV3 {
pub spl_token_additional_data: Option<SplTokenAdditionalDataV2>,
}
-#[allow(deprecated)]
-impl From<AccountAdditionalDataV2> for AccountAdditionalDataV3 {
- fn from(v: AccountAdditionalDataV2) -> Self {
- Self {
- spl_token_additional_data: v.spl_token_additional_data.map(Into::into),
- }
- }
-}
-
#[derive(Clone, Copy, Default)]
pub struct SplTokenAdditionalData {
pub decimals: u8,
@@ -140,37 +119,6 @@ impl SplTokenAdditionalDataV2 {
}
}
-#[deprecated(since = "2.0.0", note = "Use `parse_account_data_v3` instead")]
-#[allow(deprecated)]
-pub fn parse_account_data(
- pubkey: &Pubkey,
- program_id: &Pubkey,
- data: &[u8],
- additional_data: Option<AccountAdditionalData>,
-) -> Result<ParsedAccount, ParseAccountError> {
- parse_account_data_v3(
- pubkey,
- program_id,
- data,
- additional_data.map(|d| AccountAdditionalDataV3 {
- spl_token_additional_data: d
- .spl_token_decimals
- .map(SplTokenAdditionalDataV2::with_decimals),
- }),
- )
-}
-
-#[deprecated(since = "2.2.0", note = "Use `parse_account_data_v3` instead")]
-#[allow(deprecated)]
-pub fn parse_account_data_v2(
- pubkey: &Pubkey,
- program_id: &Pubkey,
- data: &[u8],
- additional_data: Option<AccountAdditionalDataV2>,
-) -> Result<ParsedAccount, ParseAccountError> {
- parse_account_data_v3(pubkey, program_id, data, additional_data.map(Into::into))
-}
-
pub fn parse_account_data_v3(
pubkey: &Pubkey,
program_id: &Pubkey,
diff --git a/account-decoder/src/parse_token.rs b/account-decoder/src/parse_token.rs
index 88354eec97c793..51954246282c27 100644
--- a/account-decoder/src/parse_token.rs
+++ b/account-decoder/src/parse_token.rs
@@ -1,8 +1,6 @@
use {
crate::{
- parse_account_data::{
- ParsableAccount, ParseAccountError, SplTokenAdditionalData, SplTokenAdditionalDataV2,
- },
+ parse_account_data::{ParsableAccount, ParseAccountError, SplTokenAdditionalDataV2},
parse_token_extension::parse_extension,
},
solana_program_option::COption,
@@ -23,25 +21,6 @@ pub use {
spl_generic_token::{is_known_spl_token_id, spl_token_ids},
};
-#[deprecated(since = "2.0.0", note = "Use `parse_token_v3` instead")]
-#[allow(deprecated)]
-pub fn parse_token(
- data: &[u8],
- decimals: Option<u8>,
-) -> Result<TokenAccountType, ParseAccountError> {
- let additional_data = decimals.map(SplTokenAdditionalData::with_decimals);
- parse_token_v2(data, additional_data.as_ref())
-}
-
-#[deprecated(since = "2.2.0", note = "Use `parse_token_v3` instead")]
-pub fn parse_token_v2(
- data: &[u8],
- additional_data: Option<&SplTokenAdditionalData>,
-) -> Result<TokenAccountType, ParseAccountError> {
- let additional_data = additional_data.map(|v| (*v).into());
- parse_token_v3(data, additional_data.as_ref())
-}
-
pub fn parse_token_v3(
data: &[u8],
additional_data: Option<&SplTokenAdditionalDataV2>,
@@ -143,20 +122,6 @@ pub fn convert_account_state(state: AccountState) -> UiAccountState {
}
}
-#[deprecated(since = "2.0.0", note = "Use `token_amount_to_ui_amount_v3` instead")]
-#[allow(deprecated)]
-pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
- token_amount_to_ui_amount_v2(amount, &SplTokenAdditionalData::with_decimals(decimals))
-}
-
-#[deprecated(since = "2.2.0", note = "Use `token_amount_to_ui_amount_v3` instead")]
-pub fn token_amount_to_ui_amount_v2(
- amount: u64,
- additional_data: &SplTokenAdditionalData,
-) -> UiTokenAmount {
- token_amount_to_ui_amount_v3(amount, &(*additional_data).into())
-}
-
pub fn token_amount_to_ui_amount_v3(
amount: u64,
additional_data: &SplTokenAdditionalDataV2,
diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml
deleted file mode 100644
index 1cfe16a4052ba5..00000000000000
--- a/accounts-bench/Cargo.toml
+++ /dev/null
@@ -1,28 +0,0 @@
-[package]
-name = "solana-accounts-bench"
-publish = false
-version = { workspace = true }
-authors = { workspace = true }
-repository = { workspace = true }
-homepage = { workspace = true }
-license = { workspace = true }
-edition = { workspace = true }
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[features]
-dev-context-only-utils = []
-
-[dependencies]
-clap = { workspace = true }
-log = { workspace = true }
-rayon = { workspace = true }
-solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] }
-solana-clock = { workspace = true }
-solana-epoch-schedule = { workspace = true }
-solana-logger = { workspace = true }
-solana-measure = { workspace = true }
-solana-pubkey = { workspace = true }
-solana-rent-collector = { workspace = true }
-solana-version = { workspace = true }
diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs
deleted file mode 100644
index 0b2052065963b6..00000000000000
--- a/accounts-bench/src/main.rs
+++ /dev/null
@@ -1,158 +0,0 @@
-#![allow(clippy::arithmetic_side_effects)]
-
-#[macro_use]
-extern crate log;
-use {
- clap::{crate_description, crate_name, value_t, App, Arg},
- rayon::prelude::*,
- solana_accounts_db::{
- accounts::Accounts,
- accounts_db::{
- test_utils::{create_test_accounts, update_accounts_bench},
- AccountsDb, CalcAccountsHashDataSource, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
- },
- ancestors::Ancestors,
- },
- solana_epoch_schedule::EpochSchedule,
- solana_measure::measure::Measure,
- solana_pubkey::Pubkey,
- std::{env, fs, path::PathBuf, sync::Arc},
-};
-
-fn main() {
- solana_logger::setup();
-
- let matches = App::new(crate_name!())
- .about(crate_description!())
- .version(solana_version::version!())
- .arg(
- Arg::with_name("num_slots")
- .long("num_slots")
- .takes_value(true)
- .value_name("SLOTS")
- .help("Number of slots to store to."),
- )
- .arg(
- Arg::with_name("num_accounts")
- .long("num_accounts")
- .takes_value(true)
- .value_name("NUM_ACCOUNTS")
- .help("Total number of accounts"),
- )
- .arg(
- Arg::with_name("iterations")
- .long("iterations")
- .takes_value(true)
- .value_name("ITERATIONS")
- .help("Number of bench iterations"),
- )
- .arg(
- Arg::with_name("clean")
- .long("clean")
- .takes_value(false)
- .help("Run clean"),
- )
- .get_matches();
-
- let num_slots = value_t!(matches, "num_slots", usize).unwrap_or(4);
- let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000);
- let iterations = value_t!(matches, "iterations", usize).unwrap_or(20);
- let clean = matches.is_present("clean");
- println!("clean: {clean:?}");
-
- let path = PathBuf::from(env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_owned()))
- .join("accounts-bench");
- println!("cleaning file system: {path:?}");
- if fs::remove_dir_all(path.clone()).is_err() {
- println!("Warning: Couldn't remove {path:?}");
- }
- let accounts_db = AccountsDb::new_with_config(
- vec![path],
- Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
- None,
- Arc::default(),
- );
- let accounts = Accounts::new(Arc::new(accounts_db));
- println!("Creating {num_accounts} accounts");
- let mut create_time = Measure::start("create accounts");
- let pubkeys: Vec<_> = (0..num_slots)
- .into_par_iter()
- .map(|slot| {
- let mut pubkeys: Vec<Pubkey> = vec![];
- create_test_accounts(
- &accounts,
- &mut pubkeys,
- num_accounts / num_slots,
- slot as u64,
- );
- pubkeys
- })
- .collect();
- let pubkeys: Vec<_> = pubkeys.into_iter().flatten().collect();
- create_time.stop();
- println!(
- "created {} accounts in {} slots {}",
- (num_accounts / num_slots) * num_slots,
- num_slots,
- create_time
- );
- let mut ancestors = Vec::with_capacity(num_slots);
- ancestors.push(0);
- for i in 1..num_slots {
- ancestors.push(i as u64);
- accounts.add_root(i as u64);
- }
- let ancestors = Ancestors::from(ancestors);
- let mut elapsed = vec![0; iterations];
- let mut elapsed_store = vec![0; iterations];
- for x in 0..iterations {
- if clean {
- let mut time = Measure::start("clean");
- accounts.accounts_db.clean_accounts_for_tests();
- time.stop();
- println!("{time}");
- for slot in 0..num_slots {
- update_accounts_bench(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
- accounts.add_root((x * num_slots + slot) as u64);
- }
- } else {
- let mut pubkeys: Vec<Pubkey> = vec![];
- let mut time = Measure::start("hash");
- let results = accounts
- .accounts_db
- .update_accounts_hash_for_tests(0, &ancestors, false, false);
- time.stop();
- let mut time_store = Measure::start("hash using store");
- let results_store = accounts.accounts_db.update_accounts_hash_with_verify_from(
- CalcAccountsHashDataSource::Storages,
- false,
- solana_clock::Slot::default(),
- &ancestors,
- None,
- &EpochSchedule::default(),
- true,
- );
- time_store.stop();
- if results != results_store {
- error!("results different: \n{:?}\n{:?}", results, results_store);
- }
- println!(
- "hash,{},{},{},{}%",
- results.0 .0,
- time,
- time_store,
- (time_store.as_us() as f64 / time.as_us() as f64 * 100.0f64) as u32
- );
- create_test_accounts(&accounts, &mut pubkeys, 1, 0);
- elapsed[x] = time.as_us();
- elapsed_store[x] = time_store.as_us();
- }
- }
-
- for x in elapsed {
- info!("update_accounts_hash(us),{}", x);
- }
- for x in elapsed_store {
- info!("calculate_accounts_hash_from_storages(us),{}", x);
- }
-}
diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs
index 52b0d8e3ddc060..cfb0c89d15fdc6 100644
--- a/accounts-db/benches/accounts.rs
+++ b/accounts-db/benches/accounts.rs
@@ -11,17 +11,12 @@ use {
solana_accounts_db::{
account_info::{AccountInfo, StorageLocation},
accounts::{AccountAddressFilter, Accounts},
- accounts_db::{
- test_utils::create_test_accounts, AccountFromStorage, AccountsDb,
- VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
- },
+ accounts_db::{AccountFromStorage, AccountsDb, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS},
accounts_index::ScanConfig,
ancestors::Ancestors,
},
- solana_clock::Epoch,
solana_hash::Hash,
solana_pubkey::Pubkey,
- solana_sysvar::epoch_schedule::EpochSchedule,
std::{
collections::{HashMap, HashSet},
path::PathBuf,
@@ -44,68 +39,6 @@ fn new_accounts_db(account_paths: Vec) -> AccountsDb {
)
}
-#[bench]
-fn bench_accounts_hash_bank_hash(bencher: &mut Bencher) {
- let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]);
- let accounts = Accounts::new(Arc::new(accounts_db));
- let mut pubkeys: Vec<Pubkey> = vec![];
- let num_accounts = 60_000;
- let slot = 0;
- create_test_accounts(&accounts, &mut pubkeys, num_accounts, slot);
- let ancestors = Ancestors::from(vec![0]);
- let (_, total_lamports) = accounts
- .accounts_db
- .update_accounts_hash_for_tests(0, &ancestors, false, false);
- accounts.add_root(slot);
- accounts.accounts_db.flush_accounts_cache(true, Some(slot));
- bencher.iter(|| {
- assert!(accounts
- .accounts_db
- .verify_accounts_hash_and_lamports_for_tests(
- 0,
- total_lamports,
- VerifyAccountsHashAndLamportsConfig {
- ancestors: &ancestors,
- epoch_schedule: &EpochSchedule::default(),
- epoch: Epoch::default(),
- ignore_mismatch: false,
- store_detailed_debug_info: false,
- use_bg_thread_pool: false,
- }
- )
- .is_ok())
- });
-}
-
-#[bench]
-fn bench_update_accounts_hash(bencher: &mut Bencher) {
- solana_logger::setup();
- let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]);
- let accounts = Accounts::new(Arc::new(accounts_db));
- let mut pubkeys: Vec<Pubkey> = vec![];
- create_test_accounts(&accounts, &mut pubkeys, 50_000, 0);
- accounts.accounts_db.add_root_and_flush_write_cache(0);
- let ancestors = Ancestors::from(vec![0]);
- bencher.iter(|| {
- accounts
- .accounts_db
- .update_accounts_hash_for_tests(0, &ancestors, false, false);
- });
-}
-
-#[bench]
-fn bench_accounts_delta_hash(bencher: &mut Bencher) {
- solana_logger::setup();
- let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]);
- let accounts = Accounts::new(Arc::new(accounts_db));
- let mut pubkeys: Vec<Pubkey> = vec![];
- create_test_accounts(&accounts, &mut pubkeys, 100_000, 0);
- accounts.accounts_db.add_root_and_flush_write_cache(0);
- bencher.iter(|| {
- accounts.accounts_db.calculate_accounts_delta_hash(0);
- });
-}
-
#[bench]
fn bench_delete_dependencies(bencher: &mut Bencher) {
solana_logger::setup();
diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index c7003eafb09898..0b9022e4028873 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -3,8 +3,8 @@ use {
account_locks::{validate_account_locks, AccountLocks},
account_storage::stored_account_info::StoredAccountInfo,
accounts_db::{
- AccountStorageEntry, AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount,
- ScanAccountStorageData, ScanStorageResult, VerifyAccountsHashAndLamportsConfig,
+ AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData,
+ ScanStorageResult,
},
accounts_index::{IndexKey, ScanConfig, ScanError, ScanOrder, ScanResult},
ancestors::Ancestors,
@@ -306,30 +306,6 @@ impl Accounts {
.collect())
}
- /// Only called from startup or test code.
- #[must_use]
- pub fn verify_accounts_hash_and_lamports(
- &self,
- snapshot_storages_and_slots: (&[Arc<AccountStorageEntry>], &[Slot]),
- slot: Slot,
- total_lamports: u64,
- base: Option<(Slot, /*capitalization*/ u64)>,
- config: VerifyAccountsHashAndLamportsConfig,
- ) -> bool {
- if let Err(err) = self.accounts_db.verify_accounts_hash_and_lamports(
- snapshot_storages_and_slots,
- slot,
- total_lamports,
- base,
- config,
- ) {
- warn!("verify_accounts_hash failed: {err:?}, slot: {slot}");
- false
- } else {
- true
- }
- }
-
fn load_while_filtering<F: Fn(&AccountSharedData) -> bool>(
collector: &mut Vec,
some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>,
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 1703cfa8685eb8..3ef632bacec340 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -44,11 +44,10 @@ use {
StorageAccess,
},
accounts_hash::{
- AccountHash, AccountLtHash, AccountsDeltaHash, AccountsHash, AccountsHashKind,
- AccountsHasher, AccountsLtHash, CalcAccountsHashConfig, CalculateHashIntermediate,
- HashStats, IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash,
- SerdeIncrementalAccountsHash, ZeroLamportAccounts, ZERO_LAMPORT_ACCOUNT_HASH,
- ZERO_LAMPORT_ACCOUNT_LT_HASH,
+ AccountHash, AccountLtHash, AccountsHash, AccountsHashKind, AccountsHasher,
+ AccountsLtHash, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats,
+ IncrementalAccountsHash, SerdeAccountsHash, SerdeIncrementalAccountsHash,
+ ZeroLamportAccounts, ZERO_LAMPORT_ACCOUNT_HASH, ZERO_LAMPORT_ACCOUNT_LT_HASH,
},
accounts_index::{
in_mem_accounts_index::StartupStats, AccountSecondaryIndexes, AccountsIndex,
@@ -173,23 +172,6 @@ pub(crate) struct ShrinkCollectAliveSeparatedByRefs<'a> {
pub(crate) many_refs_old_alive: AliveAccounts<'a>,
}
-/// Configuration Parameters for running accounts hash and total lamports verification
-#[derive(Debug, Clone)]
-pub struct VerifyAccountsHashAndLamportsConfig<'a> {
- /// bank ancestors
- pub ancestors: &'a Ancestors,
- /// epoch_schedule
- pub epoch_schedule: &'a EpochSchedule,
- /// epoch
- pub epoch: Epoch,
- /// true to ignore mismatches
- pub ignore_mismatch: bool,
- /// true to dump debug log if mismatch happens
- pub store_detailed_debug_info: bool,
- /// true to use dedicated background thread pool for verification
- pub use_bg_thread_pool: bool,
-}
-
pub(crate) trait ShrinkCollectRefs<'a>: Sync + Send {
fn with_capacity(capacity: usize, slot: Slot) -> Self;
fn collect(&mut self, other: Self);
@@ -975,13 +957,6 @@ impl ReadableAccount for LoadedAccount<'_> {
}
}
-#[derive(Debug)]
-pub enum AccountsHashVerificationError {
- MissingAccountsHash,
- MismatchedAccountsHash,
- MismatchedTotalLamports(u64, u64),
-}
-
#[derive(Default)]
struct CleanKeyTimings {
collect_delta_keys_us: u64,
@@ -1365,7 +1340,6 @@ pub struct AccountsDb {
/// Thread pool for AccountsHashVerifier
pub thread_pool_hash: ThreadPool,
- accounts_delta_hashes: Mutex<HashMap<Slot, AccountsDeltaHash>>,
accounts_hashes: Mutex<HashMap<Slot, (AccountsHash, /*capitalization*/ u64)>>,
incremental_accounts_hashes:
Mutex<HashMap<Slot, (IncrementalAccountsHash, /*capitalization*/ u64)>>,
@@ -1863,7 +1837,6 @@ impl AccountsDb {
shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()),
write_version: AtomicU64::new(0),
file_size: DEFAULT_FILE_SIZE,
- accounts_delta_hashes: Mutex::new(HashMap::new()),
accounts_hashes: Mutex::new(HashMap::new()),
incremental_accounts_hashes: Mutex::new(HashMap::new()),
external_purge_slots_stats: PurgeStats::default(),
@@ -3701,8 +3674,11 @@ impl AccountsDb {
// mutating rooted slots; There should be no writers to them.
let accounts = [(slot, &shrink_collect.alive_accounts.alive_accounts()[..])];
let storable_accounts = StorableAccountsBySlot::new(slot, &accounts, self);
- stats_sub.store_accounts_timing =
- self.store_accounts_frozen(storable_accounts, shrink_in_progress.new_storage());
+ stats_sub.store_accounts_timing = self.store_accounts_frozen(
+ storable_accounts,
+ shrink_in_progress.new_storage(),
+ UpdateIndexThreadSelection::PoolWithThreshold,
+ );
rewrite_elapsed.stop();
stats_sub.rewrite_elapsed_us = Saturating(rewrite_elapsed.as_us());
@@ -3959,11 +3935,8 @@ impl AccountsDb {
&self,
dropped_roots: impl Iterator<Item = Slot>,
) {
- let mut accounts_delta_hashes = self.accounts_delta_hashes.lock().unwrap();
-
dropped_roots.for_each(|slot| {
self.accounts_index.clean_dead_slot(slot);
- accounts_delta_hashes.remove(&slot);
// the storage has been removed from this slot and recycled or dropped
assert!(self.storage.remove(&slot, false).is_none());
debug_assert!(
@@ -5766,8 +5739,12 @@ impl AccountsDb {
flush_stats.num_bytes_flushed.0,
"flush_slot_cache",
);
- let (store_accounts_timing_inner, store_accounts_total_inner_us) =
- measure_us!(self.store_accounts_frozen((slot, &accounts[..]), &flushed_store,));
+ let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self
+ .store_accounts_frozen(
+ (slot, &accounts[..]),
+ &flushed_store,
+ UpdateIndexThreadSelection::PoolWithThreshold,
+ ));
flush_stats.store_accounts_timing = store_accounts_timing_inner;
flush_stats.store_accounts_total_us = Saturating(store_accounts_total_inner_us);
@@ -5947,100 +5924,6 @@ impl AccountsDb {
AccountsHasher::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::<u128>())
}
- pub fn calculate_accounts_hash_from_index(
- &self,
- max_slot: Slot,
- config: &CalcAccountsHashConfig<'_>,
- ) -> (AccountsHash, u64) {
- let mut collect = Measure::start("collect");
- let keys: Vec<_> = self
- .accounts_index
- .account_maps
- .iter()
- .flat_map(|map| {
- let mut keys = map.keys();
- keys.sort_unstable(); // hashmap is not ordered, but bins are relative to each other
- keys
- })
- .collect();
- collect.stop();
-
- // Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size.
- // We'll also accumulate the lamports within each chunk and fewer chunks results in less contention to accumulate the sum.
- let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4);
- let total_lamports = Mutex::<u64>::new(0);
-
- let get_account_hashes = || {
- keys.par_chunks(chunks)
- .map(|pubkeys| {
- let mut sum = 0u128;
- let account_hashes: Vec<Hash> = pubkeys
- .iter()
- .filter_map(|pubkey| {
- let index_entry = self.accounts_index.get_cloned(pubkey)?;
- self.accounts_index
- .get_account_info_with_and_then(
- &index_entry,
- config.ancestors,
- Some(max_slot),
- |(slot, account_info)| {
- if account_info.is_zero_lamport() {
- return None;
- }
- self.get_account_accessor(
- slot,
- pubkey,
- &account_info.storage_location(),
- )
- .get_loaded_account(|loaded_account| {
- let mut loaded_hash = loaded_account.loaded_hash();
- let balance = loaded_account.lamports();
- let hash_is_missing =
- loaded_hash == AccountHash(Hash::default());
- if hash_is_missing {
- let computed_hash = Self::hash_account(
- &loaded_account,
- loaded_account.pubkey(),
- );
- loaded_hash = computed_hash;
- }
- sum += balance as u128;
- loaded_hash.0
- })
- },
- )
- .flatten()
- })
- .collect();
- let mut total = total_lamports.lock().unwrap();
- *total = AccountsHasher::checked_cast_for_capitalization(*total as u128 + sum);
- account_hashes
- })
- .collect()
- };
-
- let mut scan = Measure::start("scan");
- let account_hashes: Vec<Vec<Hash>> = self.thread_pool_clean.install(get_account_hashes);
- scan.stop();
-
- let total_lamports = *total_lamports.lock().unwrap();
-
- let mut hash_time = Measure::start("hash");
- let (accumulated_hash, hash_total) = AccountsHasher::calculate_hash(account_hashes);
- hash_time.stop();
-
- datapoint_info!(
- "calculate_accounts_hash_from_index",
- ("accounts_scan", scan.as_us(), i64),
- ("hash", hash_time.as_us(), i64),
- ("hash_total", hash_total, i64),
- ("collect", collect.as_us(), i64),
- );
-
- let accounts_hash = AccountsHash(accumulated_hash);
- (accounts_hash, total_lamports)
- }
-
/// Calculates the accounts lt hash
///
/// Only intended to be called at startup (or by tests).
@@ -6189,26 +6072,6 @@ impl AccountsDb {
.expect("capitalization cannot overflow")
}
- /// This is only valid to call from tests.
- /// run the accounts hash calculation and store the results
- pub fn update_accounts_hash_for_tests(
- &self,
- slot: Slot,
- ancestors: &Ancestors,
- debug_verify: bool,
- is_startup: bool,
- ) -> (AccountsHash, u64) {
- self.update_accounts_hash_with_verify_from(
- CalcAccountsHashDataSource::IndexForTests,
- debug_verify,
- slot,
- ancestors,
- None,
- &EpochSchedule::default(),
- is_startup,
- )
- }
-
fn update_old_slot_stats(&self, stats: &HashStats, storage: Option<&Arc<AccountStorageEntry>>) {
if let Some(storage) = storage {
stats.roots_older_than_epoch.fetch_add(1, Ordering::Relaxed);
@@ -6276,155 +6139,6 @@ impl AccountsDb {
true
}
- pub fn calculate_accounts_hash_from(
- &self,
- data_source: CalcAccountsHashDataSource,
- slot: Slot,
- config: &CalcAccountsHashConfig<'_>,
- ) -> (AccountsHash, u64) {
- match data_source {
- CalcAccountsHashDataSource::Storages => {
- if self.accounts_cache.contains_any_slots(slot) {
- // this indicates a race condition
- inc_new_counter_info!("accounts_hash_items_in_write_cache", 1);
- }
-
- let mut collect_time = Measure::start("collect");
- let (combined_maps, slots) = self.get_storages(..=slot);
- collect_time.stop();
-
- let mut sort_time = Measure::start("sort_storages");
- let min_root = self.accounts_index.min_alive_root();
- let storages = SortedStorages::new_with_slots(
- combined_maps.iter().zip(slots),
- min_root,
- Some(slot),
- );
- sort_time.stop();
-
- let mut timings = HashStats {
- collect_snapshots_us: collect_time.as_us(),
- storage_sort_us: sort_time.as_us(),
- ..HashStats::default()
- };
- timings.calc_storage_size_quartiles(&combined_maps);
-
- self.calculate_accounts_hash(config, &storages, timings)
- }
- CalcAccountsHashDataSource::IndexForTests => {
- self.calculate_accounts_hash_from_index(slot, config)
- }
- }
- }
-
- fn calculate_accounts_hash_with_verify_from(
- &self,
- data_source: CalcAccountsHashDataSource,
- debug_verify: bool,
- slot: Slot,
- config: CalcAccountsHashConfig<'_>,
- expected_capitalization: Option,
- ) -> (AccountsHash, u64) {
- let (accounts_hash, total_lamports) =
- self.calculate_accounts_hash_from(data_source, slot, &config);
- if debug_verify {
- // calculate the other way (store or non-store) and verify results match.
- let data_source_other = match data_source {
- CalcAccountsHashDataSource::IndexForTests => CalcAccountsHashDataSource::Storages,
- CalcAccountsHashDataSource::Storages => CalcAccountsHashDataSource::IndexForTests,
- };
- let (accounts_hash_other, total_lamports_other) =
- self.calculate_accounts_hash_from(data_source_other, slot, &config);
-
- let success = accounts_hash == accounts_hash_other
- && total_lamports == total_lamports_other
- && total_lamports == expected_capitalization.unwrap_or(total_lamports);
- assert!(
- success,
- "calculate_accounts_hash_with_verify mismatch. hashes: {}, {}; lamports: {}, {}; \
- expected lamports: {:?}, data source: {:?}, slot: {}",
- accounts_hash.0,
- accounts_hash_other.0,
- total_lamports,
- total_lamports_other,
- expected_capitalization,
- data_source,
- slot
- );
- }
- (accounts_hash, total_lamports)
- }
-
- /// run the accounts hash calculation and store the results
- #[allow(clippy::too_many_arguments)]
- pub fn update_accounts_hash_with_verify_from(
- &self,
- data_source: CalcAccountsHashDataSource,
- debug_verify: bool,
- slot: Slot,
- ancestors: &Ancestors,
- expected_capitalization: Option,
- epoch_schedule: &EpochSchedule,
- is_startup: bool,
- ) -> (AccountsHash, u64) {
- let epoch = epoch_schedule.get_epoch(slot);
- let (accounts_hash, total_lamports) = self.calculate_accounts_hash_with_verify_from(
- data_source,
- debug_verify,
- slot,
- CalcAccountsHashConfig {
- use_bg_thread_pool: !is_startup,
- ancestors: Some(ancestors),
- epoch_schedule,
- epoch,
- store_detailed_debug_info_on_failure: false,
- },
- expected_capitalization,
- );
- self.set_accounts_hash(slot, (accounts_hash, total_lamports));
- (accounts_hash, total_lamports)
- }
-
- /// Calculate the full accounts hash for `storages` and save the results at `slot`
- pub fn update_accounts_hash(
- &self,
- config: &CalcAccountsHashConfig<'_>,
- storages: &SortedStorages<'_>,
- slot: Slot,
- stats: HashStats,
- ) -> (AccountsHash, /*capitalization*/ u64) {
- let accounts_hash = self.calculate_accounts_hash(config, storages, stats);
- let old_accounts_hash = self.set_accounts_hash(slot, accounts_hash);
- if let Some(old_accounts_hash) = old_accounts_hash {
- warn!(
- "Accounts hash was already set for slot {slot}! old: {old_accounts_hash:?}, new: \
- {accounts_hash:?}"
- );
- }
- accounts_hash
- }
-
- /// Calculate the incremental accounts hash for `storages` and save the results at `slot`
- pub fn update_incremental_accounts_hash(
- &self,
- config: &CalcAccountsHashConfig<'_>,
- storages: &SortedStorages<'_>,
- slot: Slot,
- stats: HashStats,
- ) -> (IncrementalAccountsHash, /*capitalization*/ u64) {
- let incremental_accounts_hash =
- self.calculate_incremental_accounts_hash(config, storages, stats);
- let old_incremental_accounts_hash =
- self.set_incremental_accounts_hash(slot, incremental_accounts_hash);
- if let Some(old_incremental_accounts_hash) = old_incremental_accounts_hash {
- warn!(
- "Incremental accounts hash was already set for slot {slot}! old: \
- {old_incremental_accounts_hash:?}, new: {incremental_accounts_hash:?}"
- );
- }
- incremental_accounts_hash
- }
-
/// Set the accounts hash for `slot`
///
/// returns the previous accounts hash for `slot`
@@ -6558,6 +6272,7 @@ impl AccountsDb {
///
/// This is intended to be used by startup verification, and also AccountsHashVerifier.
/// Uses account storage files as the data source for the calculation.
+ // obsolete, will be removed next
pub fn calculate_accounts_hash(
&self,
config: &CalcAccountsHashConfig<'_>,
@@ -6584,6 +6299,7 @@ impl AccountsDb {
/// included in the incremental snapshot. This ensures reconstructing the AccountsDb is
/// still correct when using this incremental accounts hash.
/// - `storages` must be the same as the ones going into the incremental snapshot.
+ // obsolete, will be removed next
pub fn calculate_incremental_accounts_hash(
&self,
config: &CalcAccountsHashConfig<'_>,
@@ -6604,6 +6320,7 @@ impl AccountsDb {
/// The shared code for calculating accounts hash from storages.
/// Used for both full accounts hash and incremental accounts hash calculation.
+ // obsolete, will be removed next
fn calculate_accounts_hash_from_storages(
&self,
config: &CalcAccountsHashConfig<'_>,
@@ -6702,99 +6419,6 @@ impl AccountsDb {
result
}
- /// Verify accounts hash at startup (or tests)
- ///
- /// Calculate accounts hash(es) and compare them to the values set at startup.
- /// If `base` is `None`, only calculates the full accounts hash for `[0, slot]`.
- /// If `base` is `Some`, calculate the full accounts hash for `[0, base slot]`
- /// and then calculate the incremental accounts hash for `(base slot, slot]`.
- pub fn verify_accounts_hash_and_lamports(
- &self,
- snapshot_storages_and_slots: (&[Arc], &[Slot]),
- slot: Slot,
- total_lamports: u64,
- base: Option<(Slot, /*capitalization*/ u64)>,
- config: VerifyAccountsHashAndLamportsConfig,
- ) -> Result<(), AccountsHashVerificationError> {
- let calc_config = CalcAccountsHashConfig {
- use_bg_thread_pool: config.use_bg_thread_pool,
- ancestors: Some(config.ancestors),
- epoch_schedule: config.epoch_schedule,
- epoch: config.epoch,
- store_detailed_debug_info_on_failure: config.store_detailed_debug_info,
- };
- let hash_mismatch_is_error = !config.ignore_mismatch;
-
- if let Some((base_slot, base_capitalization)) = base {
- self.verify_accounts_hash_and_lamports(
- snapshot_storages_and_slots,
- base_slot,
- base_capitalization,
- None,
- config,
- )?;
-
- let storages_and_slots = snapshot_storages_and_slots
- .0
- .iter()
- .zip(snapshot_storages_and_slots.1.iter())
- .filter(|storage_and_slot| *storage_and_slot.1 > base_slot)
- .map(|(storage, slot)| (storage, *slot));
- let sorted_storages = SortedStorages::new_with_slots(storages_and_slots, None, None);
- let calculated_incremental_accounts_hash = self.calculate_incremental_accounts_hash(
- &calc_config,
- &sorted_storages,
- HashStats::default(),
- );
- let found_incremental_accounts_hash = self
- .get_incremental_accounts_hash(slot)
- .ok_or(AccountsHashVerificationError::MissingAccountsHash)?;
- if calculated_incremental_accounts_hash != found_incremental_accounts_hash {
- warn!(
- "mismatched incremental accounts hash for slot {slot}: \
- {calculated_incremental_accounts_hash:?} (calculated) != \
- {found_incremental_accounts_hash:?} (expected)"
- );
- if hash_mismatch_is_error {
- return Err(AccountsHashVerificationError::MismatchedAccountsHash);
- }
- }
- } else {
- let storages_and_slots = snapshot_storages_and_slots
- .0
- .iter()
- .zip(snapshot_storages_and_slots.1.iter())
- .filter(|storage_and_slot| *storage_and_slot.1 <= slot)
- .map(|(storage, slot)| (storage, *slot));
- let sorted_storages = SortedStorages::new_with_slots(storages_and_slots, None, None);
- let (calculated_accounts_hash, calculated_lamports) =
- self.calculate_accounts_hash(&calc_config, &sorted_storages, HashStats::default());
- if calculated_lamports != total_lamports {
- warn!(
- "Mismatched total lamports: {total_lamports} calculated: {calculated_lamports}"
- );
- return Err(AccountsHashVerificationError::MismatchedTotalLamports(
- calculated_lamports,
- total_lamports,
- ));
- }
- let (found_accounts_hash, _) = self
- .get_accounts_hash(slot)
- .ok_or(AccountsHashVerificationError::MissingAccountsHash)?;
- if calculated_accounts_hash != found_accounts_hash {
- warn!(
- "Mismatched accounts hash for slot {slot}: {calculated_accounts_hash:?} \
- (calculated) != {found_accounts_hash:?} (expected)"
- );
- if hash_mismatch_is_error {
- return Err(AccountsHashVerificationError::MismatchedAccountsHash);
- }
- }
- }
-
- Ok(())
- }
-
/// Returns all of the accounts' pubkeys for a given slot
pub fn get_pubkeys_for_slot(&self, slot: Slot) -> Vec {
let scan_result = self.scan_cache_storage_fallback(
@@ -6925,69 +6549,6 @@ impl AccountsDb {
}
}
- /// Calculate accounts delta hash for `slot`
- pub fn calculate_accounts_delta_hash_internal(
- &self,
- slot: Slot,
- ignore: Option,
- ) -> AccountsDeltaHash {
- let (mut hashes, scan_us, mut accumulate) = self.get_pubkey_hash_for_slot(slot);
-
- if let Some(ignore) = ignore {
- hashes.retain(|k| k.0 != ignore);
- }
-
- let accounts_delta_hash = self
- .thread_pool
- .install(|| AccountsDeltaHash(AccountsHasher::accumulate_account_hashes(hashes)));
- accumulate.stop();
-
- self.set_accounts_delta_hash(slot, accounts_delta_hash);
-
- self.stats
- .delta_hash_scan_time_total_us
- .fetch_add(scan_us, Ordering::Relaxed);
- self.stats
- .delta_hash_accumulate_time_total_us
- .fetch_add(accumulate.as_us(), Ordering::Relaxed);
- self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed);
-
- accounts_delta_hash
- }
-
- /// Set the accounts delta hash for `slot` in the `accounts_delta_hashes` map
- ///
- /// returns the previous accounts delta hash for `slot`
- #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
- fn set_accounts_delta_hash(
- &self,
- slot: Slot,
- accounts_delta_hash: AccountsDeltaHash,
- ) -> Option {
- self.accounts_delta_hashes
- .lock()
- .unwrap()
- .insert(slot, accounts_delta_hash)
- }
-
- /// After deserializing a snapshot, set the accounts delta hash for the new AccountsDb
- pub fn set_accounts_delta_hash_from_snapshot(
- &mut self,
- slot: Slot,
- accounts_delta_hash: SerdeAccountsDeltaHash,
- ) -> Option {
- self.set_accounts_delta_hash(slot, accounts_delta_hash.into())
- }
-
- /// Get the accounts delta hash for `slot` in the `accounts_delta_hashes` map
- pub fn get_accounts_delta_hash(&self, slot: Slot) -> Option {
- self.accounts_delta_hashes
- .lock()
- .unwrap()
- .get(&slot)
- .cloned()
- }
-
fn update_index<'a>(
&self,
infos: Vec,
@@ -7213,13 +6774,6 @@ impl AccountsDb {
) {
let mut measure = Measure::start("remove_dead_slots_metadata-ms");
self.clean_dead_slots_from_accounts_index(dead_slots_iter.clone());
-
- let mut accounts_delta_hashes = self.accounts_delta_hashes.lock().unwrap();
- for slot in dead_slots_iter {
- accounts_delta_hashes.remove(slot);
- }
- drop(accounts_delta_hashes);
-
measure.stop();
inc_new_counter_info!("remove_dead_slots_metadata-ms", measure.as_ms() as usize);
}
@@ -7619,6 +7173,7 @@ impl AccountsDb {
&self,
accounts: impl StorableAccounts<'a>,
storage: &Arc,
+ update_index_thread_selection: UpdateIndexThreadSelection,
) -> StoreAccountsTiming {
let slot = accounts.target_slot();
let mut store_accounts_time = Measure::start("store_accounts");
@@ -7655,7 +7210,7 @@ impl AccountsDb {
infos,
&accounts,
UpsertReclaim::IgnoreReclaims,
- UpdateIndexThreadSelection::PoolWithThreshold,
+ update_index_thread_selection,
&self.thread_pool_clean,
);
@@ -8522,7 +8077,7 @@ impl CalcAccountsHashKind {
}
}
-pub(crate) enum UpdateIndexThreadSelection {
+pub enum UpdateIndexThreadSelection {
/// Use current thread only
Inline,
/// Use a thread-pool if the number of updates exceeds a threshold
@@ -8559,11 +8114,6 @@ impl AccountsDb {
self.flush_root_write_cache(slot);
}
- /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.)
- pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash {
- self.calculate_accounts_delta_hash_internal(slot, None)
- }
-
pub fn load_without_fixed_root(
&self,
ancestors: &Ancestors,
@@ -8579,10 +8129,6 @@ impl AccountsDb {
)
}
- pub fn accounts_delta_hashes(&self) -> &Mutex> {
- &self.accounts_delta_hashes
- }
-
pub fn accounts_hashes(&self) -> &Mutex> {
&self.accounts_hashes
}
@@ -8726,50 +8272,11 @@ impl AccountsDb {
}
}
- pub fn verify_accounts_hash_and_lamports_for_tests(
- &self,
- slot: Slot,
- total_lamports: u64,
- config: VerifyAccountsHashAndLamportsConfig,
- ) -> Result<(), AccountsHashVerificationError> {
- let snapshot_storages = self.get_storages(..);
- let snapshot_storages_and_slots = (
- snapshot_storages.0.as_slice(),
- snapshot_storages.1.as_slice(),
- );
- self.verify_accounts_hash_and_lamports(
- snapshot_storages_and_slots,
- slot,
- total_lamports,
- None,
- config,
- )
- }
-
pub fn uncleaned_pubkeys(&self) -> &DashMap, BuildNoHashHasher> {
&self.uncleaned_pubkeys
}
}
-// These functions/fields are only usable from a dev context (i.e. tests and benches)
-#[cfg(feature = "dev-context-only-utils")]
-impl<'a> VerifyAccountsHashAndLamportsConfig<'a> {
- pub fn new_for_test(
- ancestors: &'a Ancestors,
- epoch_schedule: &'a EpochSchedule,
- epoch: Epoch,
- ) -> Self {
- Self {
- ancestors,
- epoch_schedule,
- epoch,
- ignore_mismatch: false,
- store_detailed_debug_info: false,
- use_bg_thread_pool: false,
- }
- }
-}
-
/// A set of utility functions used for testing and benchmarking
#[cfg(feature = "dev-context-only-utils")]
pub mod test_utils {
diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
index 0fec62a8a74df4..61f076fb7359bb 100644
--- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs
+++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
@@ -144,7 +144,7 @@ pub mod tests {
};
impl AccountsDb {
- pub fn set_geyser_plugin_notifer(&mut self, notifier: Option) {
+ pub fn set_geyser_plugin_notifier(&mut self, notifier: Option) {
self.accounts_update_notifier = notifier;
}
}
@@ -218,7 +218,7 @@ pub mod tests {
// Do the notification
let notifier = GeyserTestPlugin::default();
let notifier = Arc::new(notifier);
- accounts.set_geyser_plugin_notifer(Some(notifier.clone()));
+ accounts.set_geyser_plugin_notifier(Some(notifier.clone()));
accounts.notify_account_restore_from_snapshot();
// Ensure key1 was notified twice in different slots
@@ -253,7 +253,7 @@ pub mod tests {
let notifier = GeyserTestPlugin::default();
let notifier = Arc::new(notifier);
- accounts.set_geyser_plugin_notifer(Some(notifier.clone()));
+ accounts.set_geyser_plugin_notifier(Some(notifier.clone()));
// Account with key1 is updated twice in two different slots -- should only get notified twice.
// Account with key2 is updated slot0, should get notified once
diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs
index 7d7bfbcdc506b4..b30b9b562e847e 100644
--- a/accounts-db/src/accounts_db/tests.rs
+++ b/accounts-db/src/accounts_db/tests.rs
@@ -13,14 +13,12 @@ use {
},
storable_accounts::AccountForStorage,
},
- assert_matches::assert_matches,
itertools::Itertools,
rand::{prelude::SliceRandom, thread_rng, Rng},
solana_account::{
accounts_equal, Account, AccountSharedData, InheritableAccountFields, ReadableAccount,
WritableAccount, DUMMY_INHERITABLE_ACCOUNT_FIELDS,
},
- solana_hash::HASH_BYTES,
solana_pubkey::PUBKEY_BYTES,
std::{
hash::DefaultHasher,
@@ -747,7 +745,6 @@ define_accounts_db_test!(test_accountsdb_count_stores, |db| {
db.store_for_tests(1, &[(&pubkey, &account)]);
db.store_for_tests(1, &[(&pubkeys[0], &account)]);
// adding root doesn't change anything
- db.calculate_accounts_delta_hash(1);
db.add_root_and_flush_write_cache(1);
{
let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap();
@@ -1033,7 +1030,6 @@ fn test_lazy_gc_slot() {
|(_slot, account_info)| account_info.store_id(),
)
.unwrap();
- accounts.calculate_accounts_delta_hash(0);
//slot is still there, since gc is lazy
assert_eq!(accounts.storage.get_slot_storage_entry(0).unwrap().id(), id);
@@ -1041,9 +1037,6 @@ fn test_lazy_gc_slot() {
//store causes clean
accounts.store_for_tests(1, &[(&pubkey, &account)]);
- // generate delta state for slot 1, so clean operates on it.
- accounts.calculate_accounts_delta_hash(1);
-
//slot is gone
accounts.print_accounts_stats("pre-clean");
accounts.add_root_and_flush_write_cache(1);
@@ -1113,11 +1106,8 @@ fn test_clean_zero_lamport_and_dead_slot() {
// Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so
// slot 1 should be purged
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
- accounts.calculate_accounts_delta_hash(2);
accounts.add_root_and_flush_write_cache(2);
// Slot 1 should be removed, slot 0 cannot be removed because it still has
@@ -1198,11 +1188,9 @@ fn test_remove_zero_lamport_multi_ref_accounts_panic() {
let slot = 1;
accounts.store_for_tests(slot, &[(&pubkey_zero, &one_lamport_account)]);
- accounts.calculate_accounts_delta_hash(slot);
accounts.add_root_and_flush_write_cache(slot);
accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]);
- accounts.calculate_accounts_delta_hash(slot + 1);
accounts.add_root_and_flush_write_cache(slot + 1);
// This should panic because there are 2 refs for pubkey_zero.
@@ -1231,7 +1219,6 @@ fn test_remove_zero_lamport_single_ref_accounts_after_shrink() {
);
// Simulate rooting the zero-lamport account, writes it to storage
- accounts.calculate_accounts_delta_hash(slot);
accounts.add_root_and_flush_write_cache(slot);
if pass > 0 {
@@ -1239,7 +1226,6 @@ fn test_remove_zero_lamport_single_ref_accounts_after_shrink() {
accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]);
if pass == 2 {
// move to a storage (causing ref count to increase)
- accounts.calculate_accounts_delta_hash(slot + 1);
accounts.add_root_and_flush_write_cache(slot + 1);
}
}
@@ -1352,7 +1338,6 @@ fn test_shrink_zero_lamport_single_ref_account() {
// Simulate rooting the zero-lamport account, should be a
// candidate for cleaning
- accounts.calculate_accounts_delta_hash(slot);
accounts.add_root_and_flush_write_cache(slot);
// for testing, we need to cause shrink to think this will be productive.
@@ -1430,11 +1415,8 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
accounts.store_for_tests(1, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_for_tests(2, &[(&pubkey1, &zero_lamport_account)]);
// Root all slots
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
- accounts.calculate_accounts_delta_hash(2);
accounts.add_root_and_flush_write_cache(2);
// Account ref counts should match how many slots they were stored in
@@ -1477,9 +1459,7 @@ fn test_clean_zero_lamport_and_old_roots() {
// Simulate rooting the zero-lamport account, should be a
// candidate for cleaning
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
// Slot 0 should be removed, and
@@ -1515,9 +1495,7 @@ fn test_clean_old_with_normal_account() {
accounts.store_for_tests(1, &[(&pubkey, &account)]);
// simulate slots are rooted after while
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
//even if rooted, old state isn't cleaned up
@@ -1547,9 +1525,7 @@ fn test_clean_old_with_zero_lamport_account() {
accounts.store_for_tests(1, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
//even if rooted, old state isn't cleaned up
@@ -1598,11 +1574,8 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
accounts.store_for_tests(2, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
- accounts.calculate_accounts_delta_hash(2);
accounts.add_root_and_flush_write_cache(2);
//even if rooted, old state isn't cleaned up
@@ -1720,9 +1693,7 @@ fn test_clean_max_slot_zero_lamport_account() {
accounts.store_for_tests(1, &[(&pubkey, &zero_account)]);
// simulate slots are rooted after while
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root_and_flush_write_cache(1);
// Only clean up to account 0, should not purge slot 0 based on
@@ -1766,7 +1737,6 @@ fn test_accounts_db_purge_keep_live() {
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single_for_tests();
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root_and_flush_write_cache(0);
// Step A
@@ -1775,7 +1745,6 @@ fn test_accounts_db_purge_keep_live() {
// Store another live account to slot 1 which will prevent any purge
// since the store count will not be zero
accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]);
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
let (slot1, account_info1) = accounts
.accounts_index
@@ -1797,13 +1766,11 @@ fn test_accounts_db_purge_keep_live() {
current_slot += 1;
let zero_lamport_slot = current_slot;
accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]);
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.assert_load_account(current_slot, pubkey, zero_lamport);
current_slot += 1;
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.print_accounts_stats("pre_purge");
@@ -1849,31 +1816,28 @@ fn test_accounts_db_purge1() {
let mut current_slot = 1;
accounts.store_for_tests(current_slot, &[(&pubkey, &account)]);
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
current_slot += 1;
accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]);
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.assert_load_account(current_slot, pubkey, zero_lamport);
// Otherwise slot 2 will not be removed
current_slot += 1;
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.print_accounts_stats("pre_purge");
let ancestors = linear_ancestors(current_slot);
info!("ancestors: {ancestors:?}");
- let hash = accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true);
+ let hash = accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot);
accounts.clean_accounts_for_tests();
assert_eq!(
- accounts.update_accounts_hash_for_tests(current_slot, &ancestors, true, true),
+ accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot),
hash
);
@@ -2140,52 +2104,6 @@ impl CalcAccountsHashConfig<'_> {
}
}
-#[test]
-fn test_verify_accounts_hash() {
- solana_logger::setup();
- let db = AccountsDb::new_single_for_tests();
-
- let key = solana_pubkey::new_rand();
- let some_data_len = 0;
- let some_slot: Slot = 0;
- let account = AccountSharedData::new(1, some_data_len, &key);
- let ancestors = vec![(some_slot, 0)].into_iter().collect();
- let epoch_schedule = EpochSchedule::default();
- let epoch = Epoch::default();
-
- db.store_for_tests(some_slot, &[(&key, &account)]);
- db.add_root_and_flush_write_cache(some_slot);
- let (_, capitalization) = db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true);
-
- let config =
- VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch);
-
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()),
- Ok(_)
- );
-
- db.accounts_hashes.lock().unwrap().remove(&some_slot);
-
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()),
- Err(AccountsHashVerificationError::MissingAccountsHash)
- );
-
- db.set_accounts_hash(
- some_slot,
- (
- AccountsHash(Hash::new_from_array([0xca; HASH_BYTES])),
- capitalization,
- ),
- );
-
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config),
- Err(AccountsHashVerificationError::MismatchedAccountsHash)
- );
-}
-
#[test]
fn test_verify_bank_capitalization() {
for pass in 0..2 {
@@ -2197,19 +2115,14 @@ fn test_verify_bank_capitalization() {
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
- let epoch_schedule = EpochSchedule::default();
- let epoch = Epoch::default();
- let config =
- VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch);
db.store_for_tests(some_slot, &[(&key, &account)]);
if pass == 0 {
db.add_root_and_flush_write_cache(some_slot);
- db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true);
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config.clone()),
- Ok(_)
+ assert_eq!(
+ db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot),
+ 1
);
continue;
}
@@ -2223,71 +2136,13 @@ fn test_verify_bank_capitalization() {
)],
);
db.add_root_and_flush_write_cache(some_slot);
- db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true);
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 2, config.clone()),
- Ok(_)
- );
-
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 10, config),
- Err(AccountsHashVerificationError::MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10
+ assert_eq!(
+ db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot),
+ 2
);
}
}
-
-#[test]
-fn test_verify_accounts_hash_no_account() {
- solana_logger::setup();
- let db = AccountsDb::new_single_for_tests();
-
- let some_slot: Slot = 0;
- let ancestors = vec![(some_slot, 0)].into_iter().collect();
-
- db.add_root(some_slot);
- db.update_accounts_hash_for_tests(some_slot, &ancestors, true, true);
-
- let epoch_schedule = EpochSchedule::default();
- let epoch = Epoch::default();
- let config =
- VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch);
-
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 0, config),
- Ok(_)
- );
-}
-
-#[test]
-fn test_verify_accounts_hash_bad_account_hash() {
- solana_logger::setup();
- let db = AccountsDb::new_single_for_tests();
-
- let key = Pubkey::default();
- let some_data_len = 0;
- let some_slot: Slot = 0;
- let account = AccountSharedData::new(1, some_data_len, &key);
- let ancestors = vec![(some_slot, 0)].into_iter().collect();
-
- let accounts = &[(&key, &account)][..];
- db.update_accounts_hash_for_tests(some_slot, &ancestors, false, false);
-
- // provide bogus account hashes
- db.store_cached((some_slot, accounts));
- db.add_root_and_flush_write_cache(some_slot);
-
- let epoch_schedule = EpochSchedule::default();
- let epoch = Epoch::default();
- let config =
- VerifyAccountsHashAndLamportsConfig::new_for_test(&ancestors, &epoch_schedule, epoch);
-
- assert_matches!(
- db.verify_accounts_hash_and_lamports_for_tests(some_slot, 1, config),
- Err(AccountsHashVerificationError::MismatchedAccountsHash)
- );
-}
-
#[test]
fn test_storage_finder() {
solana_logger::setup();
@@ -2448,7 +2303,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]);
accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]);
}
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
info!("post A");
@@ -2465,7 +2319,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
// Stores to same pubkey, same slot only count once towards the
// ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.print_accounts_stats("Post-B pre-clean");
@@ -2483,7 +2336,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
accounts.store_for_tests(current_slot, &[(&pubkey3, &account4)]);
accounts.add_root_and_flush_write_cache(current_slot);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
- accounts.calculate_accounts_delta_hash(current_slot);
info!("post C");
@@ -2504,7 +2356,6 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
info!("post D");
accounts.print_accounts_stats("Post-D");
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.clean_accounts_for_tests();
@@ -2585,7 +2436,6 @@ fn test_shrink_candidate_slots() {
accounts.store_for_tests(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
current_slot += 1;
@@ -2595,7 +2445,6 @@ fn test_shrink_candidate_slots() {
for pubkey in updated_pubkeys {
accounts.store_for_tests(current_slot, &[(pubkey, &account)]);
}
- accounts.calculate_accounts_delta_hash(current_slot);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.clean_accounts_for_tests();
@@ -2673,7 +2522,6 @@ fn test_shrink_candidate_slots_with_dead_ancient_account() {
current_slot,
&[(&modified_account_pubkey, &modified_account)],
);
- db.calculate_accounts_delta_hash(current_slot);
db.add_root_and_flush_write_cache(current_slot);
// This should remove the dead ancient account from the index.
db.clean_accounts_for_tests();
@@ -3093,7 +2941,6 @@ fn test_store_clean_after_shrink() {
accounts.store_cached((1, &[(&pubkey1, &zero_account)][..]));
// Add root 0 and flush separately
- accounts.calculate_accounts_delta_hash(0);
accounts.add_root(0);
accounts.flush_accounts_cache(true, None);
@@ -3101,7 +2948,6 @@ fn test_store_clean_after_shrink() {
accounts.clean_accounts_for_tests();
// flush 1
- accounts.calculate_accounts_delta_hash(1);
accounts.add_root(1);
accounts.flush_accounts_cache(true, None);
@@ -3135,7 +2981,6 @@ fn test_wrapping_storage_id() {
keys.iter().enumerate().for_each(|(slot, key)| {
let slot = slot as Slot;
db.store_for_tests(slot, &[(key, &zero_lamport_account)]);
- db.calculate_accounts_delta_hash(slot);
db.add_root_and_flush_write_cache(slot);
});
assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire));
@@ -3161,7 +3006,6 @@ fn test_reuse_storage_id() {
keys.iter().enumerate().for_each(|(slot, key)| {
let slot = slot as Slot;
db.store_for_tests(slot, &[(key, &zero_lamport_account)]);
- db.calculate_accounts_delta_hash(slot);
db.add_root_and_flush_write_cache(slot);
// reset next_id to what it was previously to cause us to re-use the same id
db.next_id.store(AccountsFileId::MAX, Ordering::Release);
@@ -3181,9 +3025,7 @@ fn test_zero_lamport_new_root_not_cleaned() {
// Store zero lamport account into slots 0 and 1, root both slots
db.store_for_tests(0, &[(&account_key, &zero_lamport_account)]);
db.store_for_tests(1, &[(&account_key, &zero_lamport_account)]);
- db.calculate_accounts_delta_hash(0);
db.add_root_and_flush_write_cache(0);
- db.calculate_accounts_delta_hash(1);
db.add_root_and_flush_write_cache(1);
// Only clean zero lamport accounts up to slot 0
@@ -3787,7 +3629,6 @@ fn test_scan_flush_accounts_cache_then_clean_drop() {
// Fodder for the scan so that the lock on `account_key` is not held
db.store_cached((1, &[(&account_key2, &slot1_account)][..]));
db.store_cached((2, &[(&account_key, &slot2_account)][..]));
- db.calculate_accounts_delta_hash(0);
let max_scan_root = 0;
db.add_root(max_scan_root);
@@ -3797,7 +3638,6 @@ fn test_scan_flush_accounts_cache_then_clean_drop() {
// Add a new root 2
let new_root = 2;
- db.calculate_accounts_delta_hash(new_root);
db.add_root(new_root);
// Check that the scan is properly set up
@@ -4377,8 +4217,6 @@ fn test_shrink_unref() {
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
- db.calculate_accounts_delta_hash(0);
- db.calculate_accounts_delta_hash(1);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1), false, &EpochSchedule::default());
@@ -4399,7 +4237,6 @@ fn test_shrink_unref() {
// Should be one store before clean for slot 0
db.get_and_assert_single_storage(0);
- db.calculate_accounts_delta_hash(2);
db.clean_accounts(Some(2), false, &EpochSchedule::default());
// No stores should exist for slot 0 after clean
@@ -4431,8 +4268,6 @@ fn test_clean_drop_dead_zero_lamport_single_ref_accounts() {
accounts_db.add_root(slot);
accounts_db.flush_accounts_cache(true, None);
- accounts_db.calculate_accounts_delta_hash(0);
- accounts_db.calculate_accounts_delta_hash(1);
// run clean
accounts_db.clean_accounts(Some(1), false, &epoch_schedule);
@@ -4462,8 +4297,6 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() {
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
- db.calculate_accounts_delta_hash(0);
- db.calculate_accounts_delta_hash(1);
// Clean should mark slot 0 dead and drop it. During the dropping, it
// will find that slot 1 has a single ref zero accounts and mark it.
@@ -4504,8 +4337,6 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
- db.calculate_accounts_delta_hash(0);
- db.calculate_accounts_delta_hash(1);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1), false, &EpochSchedule::default());
@@ -4541,7 +4372,6 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
// Should be one store before clean for slot 0 and slot 1
db.get_and_assert_single_storage(0);
db.get_and_assert_single_storage(1);
- db.calculate_accounts_delta_hash(2);
db.clean_accounts(Some(2), false, &EpochSchedule::default());
// No stores should exist for slot 0 after clean
@@ -5275,14 +5105,12 @@ define_accounts_db_test!(test_purge_alive_unrooted_slots_after_clean, |accounts|
// Simulate adding dirty pubkeys on bank freeze. Note this is
// not a rooted slot
- accounts.calculate_accounts_delta_hash(slot0);
// On the next *rooted* slot, update the `shared_key` account to zero lamports
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_for_tests(slot1, &[(&shared_key, &zero_lamport_account)]);
// Simulate adding dirty pubkeys on bank freeze, set root
- accounts.calculate_accounts_delta_hash(slot1);
accounts.add_root_and_flush_write_cache(slot1);
// The later rooted zero-lamport update to `shared_key` cannot be cleaned
@@ -5328,19 +5156,16 @@ define_accounts_db_test!(
let slot1: Slot = 1;
let account = AccountSharedData::new(111, space, &owner);
accounts_db.store_cached((slot1, &[(&pubkey, &account)][..]));
- accounts_db.calculate_accounts_delta_hash(slot1);
accounts_db.add_root_and_flush_write_cache(slot1);
let slot2: Slot = 2;
let account = AccountSharedData::new(222, space, &owner);
accounts_db.store_cached((slot2, &[(&pubkey, &account)][..]));
- accounts_db.calculate_accounts_delta_hash(slot2);
accounts_db.add_root_and_flush_write_cache(slot2);
let slot3: Slot = 3;
let account = AccountSharedData::new(0, space, &owner);
accounts_db.store_cached((slot3, &[(&pubkey, &account)][..]));
- accounts_db.calculate_accounts_delta_hash(slot3);
accounts_db.add_root_and_flush_write_cache(slot3);
assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3);
@@ -7170,153 +6995,6 @@ fn test_handle_dropped_roots_for_ancient_assert() {
db.handle_dropped_roots_for_ancient(dropped_roots.into_iter());
}
-define_accounts_db_test!(test_calculate_incremental_accounts_hash, |accounts_db| {
- let owner = Pubkey::new_unique();
- let mut accounts: Vec<_> = (0..10)
- .map(|_| (Pubkey::new_unique(), AccountSharedData::new(0, 0, &owner)))
- .collect();
-
- // store some accounts into slot 0
- let slot = 0;
- {
- accounts[0].1.set_lamports(0);
- accounts[1].1.set_lamports(1);
- accounts[2].1.set_lamports(10);
- accounts[3].1.set_lamports(100);
- //accounts[4].1.set_lamports(1_000); <-- will be added next slot
-
- let accounts = vec![
- (&accounts[0].0, &accounts[0].1),
- (&accounts[1].0, &accounts[1].1),
- (&accounts[2].0, &accounts[2].1),
- (&accounts[3].0, &accounts[3].1),
- ];
- accounts_db.store_cached((slot, accounts.as_slice()));
- accounts_db.add_root_and_flush_write_cache(slot);
- }
-
- // store some accounts into slot 1
- let slot = slot + 1;
- {
- //accounts[0].1.set_lamports(0); <-- unchanged
- accounts[1].1.set_lamports(0); /* <-- drain account */
- //accounts[2].1.set_lamports(10); <-- unchanged
- //accounts[3].1.set_lamports(100); <-- unchanged
- accounts[4].1.set_lamports(1_000); /* <-- add account */
-
- let accounts = vec![
- (&accounts[1].0, &accounts[1].1),
- (&accounts[4].0, &accounts[4].1),
- ];
- accounts_db.store_cached((slot, accounts.as_slice()));
- accounts_db.add_root_and_flush_write_cache(slot);
- }
-
- // calculate the full accounts hash
- let full_accounts_hash = {
- accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default());
- let (storages, _) = accounts_db.get_storages(..=slot);
- let storages = SortedStorages::new(&storages);
- accounts_db.calculate_accounts_hash(
- &CalcAccountsHashConfig::default(),
- &storages,
- HashStats::default(),
- )
- };
- assert_eq!(full_accounts_hash.1, 1_110);
- let full_accounts_hash_slot = slot;
-
- // Calculate the expected full accounts hash here and ensure it matches.
- // Ensure the zero-lamport accounts are NOT included in the full accounts hash.
- let full_account_hashes = [(2, 0), (3, 0), (4, 1)].into_iter().map(|(index, _slot)| {
- let (pubkey, account) = &accounts[index];
- AccountsDb::hash_account(account, pubkey).0
- });
- let expected_accounts_hash = AccountsHash(compute_merkle_root(full_account_hashes));
- assert_eq!(full_accounts_hash.0, expected_accounts_hash);
-
- // store accounts into slot 2
- let slot = slot + 1;
- {
- //accounts[0].1.set_lamports(0); <-- unchanged
- //accounts[1].1.set_lamports(0); <-- unchanged
- accounts[2].1.set_lamports(0); /* <-- drain account */
- //accounts[3].1.set_lamports(100); <-- unchanged
- //accounts[4].1.set_lamports(1_000); <-- unchanged
- accounts[5].1.set_lamports(10_000); /* <-- add account */
- accounts[6].1.set_lamports(100_000); /* <-- add account */
- //accounts[7].1.set_lamports(1_000_000); <-- will be added next slot
-
- let accounts = vec![
- (&accounts[2].0, &accounts[2].1),
- (&accounts[5].0, &accounts[5].1),
- (&accounts[6].0, &accounts[6].1),
- ];
- accounts_db.store_cached((slot, accounts.as_slice()));
- accounts_db.add_root_and_flush_write_cache(slot);
- }
-
- // store accounts into slot 3
- let slot = slot + 1;
- {
- //accounts[0].1.set_lamports(0); <-- unchanged
- //accounts[1].1.set_lamports(0); <-- unchanged
- //accounts[2].1.set_lamports(0); <-- unchanged
- accounts[3].1.set_lamports(0); /* <-- drain account */
- //accounts[4].1.set_lamports(1_000); <-- unchanged
- accounts[5].1.set_lamports(0); /* <-- drain account */
- //accounts[6].1.set_lamports(100_000); <-- unchanged
- accounts[7].1.set_lamports(1_000_000); /* <-- add account */
-
- let accounts = vec![
- (&accounts[3].0, &accounts[3].1),
- (&accounts[5].0, &accounts[5].1),
- (&accounts[7].0, &accounts[7].1),
- ];
- accounts_db.store_cached((slot, accounts.as_slice()));
- accounts_db.add_root_and_flush_write_cache(slot);
- }
-
- // calculate the incremental accounts hash
- let incremental_accounts_hash = {
- accounts_db.set_latest_full_snapshot_slot(full_accounts_hash_slot);
- accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default());
- let (storages, _) = accounts_db.get_storages(full_accounts_hash_slot + 1..=slot);
- let storages = SortedStorages::new(&storages);
- accounts_db.calculate_incremental_accounts_hash(
- &CalcAccountsHashConfig::default(),
- &storages,
- HashStats::default(),
- )
- };
- assert_eq!(incremental_accounts_hash.1, 1_100_000);
-
- // Ensure the zero-lamport accounts are included in the IAH.
- // Accounts 2, 3, and 5 are all zero-lamports.
- let incremental_account_hashes =
- [(2, 2), (3, 3), (5, 3), (6, 2), (7, 3)]
- .into_iter()
- .map(|(index, _slot)| {
- let (pubkey, account) = &accounts[index];
- if account.is_zero_lamport() {
- // For incremental accounts hash, the hash of a zero lamport account is the hash of its pubkey.
- // Ensure this implementation detail remains in sync with AccountsHasher::de_dup_in_parallel().
- let hash = blake3::hash(bytemuck::bytes_of(pubkey));
- Hash::new_from_array(hash.into())
- } else {
- AccountsDb::hash_account(account, pubkey).0
- }
- });
- let expected_accounts_hash =
- IncrementalAccountsHash(compute_merkle_root(incremental_account_hashes));
- assert_eq!(incremental_accounts_hash.0, expected_accounts_hash);
-});
-
-fn compute_merkle_root(hashes: impl IntoIterator
- ) -> Hash {
- let hashes = hashes.into_iter().collect();
- AccountsHasher::compute_merkle_root_recurse(hashes, MERKLE_FANOUT)
-}
-
/// Test that `clean` reclaims old accounts when cleaning old storages
///
/// When `clean` constructs candidates from old storages, pubkeys in these storages may have other
diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs
index 1d07af561a74a3..2999d7a4db72cd 100644
--- a/accounts-db/src/accounts_hash.rs
+++ b/accounts-db/src/accounts_hash.rs
@@ -1246,15 +1246,6 @@ pub const ZERO_LAMPORT_ACCOUNT_LT_HASH: AccountLtHash = AccountLtHash(LtHash::id
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AccountsLtHash(pub LtHash);
-/// Hash of accounts
-#[derive(Debug, Clone, Eq, PartialEq)]
-pub enum MerkleOrLatticeAccountsHash {
- /// Merkle-based hash of accounts
- Merkle(AccountsHashKind),
- /// Lattice-based hash of accounts
- Lattice,
-}
-
/// Hash of accounts
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum AccountsHashKind {
@@ -1288,26 +1279,6 @@ pub struct AccountsHash(pub Hash);
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct IncrementalAccountsHash(pub Hash);
-/// Hash of accounts written in a single slot
-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub struct AccountsDeltaHash(pub Hash);
-
-/// Snapshot serde-safe accounts delta hash
-#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
-#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
-pub struct SerdeAccountsDeltaHash(pub Hash);
-
-impl From for AccountsDeltaHash {
- fn from(accounts_delta_hash: SerdeAccountsDeltaHash) -> Self {
- Self(accounts_delta_hash.0)
- }
-}
-impl From for SerdeAccountsDeltaHash {
- fn from(accounts_delta_hash: AccountsDeltaHash) -> Self {
- Self(accounts_delta_hash.0)
- }
-}
-
/// Snapshot serde-safe accounts hash
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 36dfde9fc661c3..4e77f7b5a1aa62 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -11,6 +11,7 @@ use {
stats::{ShrinkAncientStats, ShrinkStatsSub},
AccountFromStorage, AccountStorageEntry, AccountsDb, AliveAccounts,
GetUniqueAccountsResult, ShrinkCollect, ShrinkCollectAliveSeparatedByRefs,
+ UpdateIndexThreadSelection,
},
active_stats::ActiveStatItem,
storable_accounts::{StorableAccounts, StorableAccountsBySlot},
@@ -544,9 +545,11 @@ impl AccountsDb {
let target_slot = accounts_to_write.target_slot();
let (shrink_in_progress, create_and_insert_store_elapsed_us) =
measure_us!(self.get_store_for_shrink(target_slot, bytes));
- let (store_accounts_timing, rewrite_elapsed_us) = measure_us!(
- self.store_accounts_frozen(accounts_to_write, shrink_in_progress.new_storage(),)
- );
+ let (store_accounts_timing, rewrite_elapsed_us) = measure_us!(self.store_accounts_frozen(
+ accounts_to_write,
+ shrink_in_progress.new_storage(),
+ UpdateIndexThreadSelection::PoolWithThreshold
+ ));
write_ancient_accounts.metrics.accumulate(&ShrinkStatsSub {
store_accounts_timing,
diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs
index 742f0b42e2bea7..2ab20b0599c0b4 100644
--- a/accounts-db/src/append_vec.rs
+++ b/accounts-db/src/append_vec.rs
@@ -23,7 +23,7 @@ use {
StoredAccountsInfo,
},
accounts_hash::AccountHash,
- buffered_reader::{BufferedReader, Stack},
+ buffered_reader::{BufferedReader, ContiguousBufFileRead, Stack},
file_io::read_into_buffer,
is_zero_lamport::IsZeroLamport,
storable_accounts::StorableAccounts,
@@ -1049,19 +1049,17 @@ impl AppendVec {
{}
}
AppendVecFileBacking::File(file) => {
- let self_len = self.len();
const BUFFER_SIZE: usize = PAGE_SIZE * 8;
- let mut reader = BufferedReader::>::new_stack(
- self_len,
- file,
- STORE_META_OVERHEAD,
- );
+ let mut reader = BufferedReader::>::new_stack(self.len(), file);
+ let mut min_buf_len = STORE_META_OVERHEAD;
// Buffer for account data that doesn't fit within the stack allocated buffer.
// This will be re-used for each account that doesn't fit within the stack allocated buffer.
let mut data_overflow_buffer = vec![];
loop {
- let offset = reader.get_offset();
- let bytes = match reader.fill_buf() {
+ let offset = reader.get_file_offset();
+ let bytes = match reader
+ .fill_buf_required_or_overflow(min_buf_len, &mut data_overflow_buffer)
+ {
Ok([]) => break,
Ok(bytes) => ValidSlice::new(bytes),
Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
@@ -1087,53 +1085,26 @@ impl AppendVec {
};
callback(account);
reader.consume(stored_size);
- } else if STORE_META_OVERHEAD + data_len <= BUFFER_SIZE {
- reader.set_required_data_len(STORE_META_OVERHEAD + data_len);
+ // restore default required buffer size
+ min_buf_len = STORE_META_OVERHEAD;
} else {
- const MAX_CAPACITY: usize = MAX_PERMITTED_DATA_LENGTH as usize;
- // 128KiB covers a reasonably large distribution of typical account sizes.
- // In a recent sample, 99.98% of accounts' data lengths were less than or equal to 128KiB.
- const MIN_CAPACITY: usize = 1024 * 128;
- let capacity = data_overflow_buffer.capacity();
- if data_len > capacity {
- let next_cap = data_len
- .next_power_of_two()
- .clamp(MIN_CAPACITY, MAX_CAPACITY);
- data_overflow_buffer.reserve_exact(next_cap - capacity);
- // SAFETY: We only write to the uninitialized portion of the buffer via `copy_from_slice` and `read_into_buffer`.
- // Later, we ensure we only read from the initialized portion of the buffer.
- unsafe {
- data_overflow_buffer.set_len(next_cap);
+ // repeat loop with required buffer size holding whole account data
+ min_buf_len = STORE_META_OVERHEAD + data_len;
+
+ if min_buf_len > BUFFER_SIZE {
+ const MAX_CAPACITY: usize =
+ STORE_META_OVERHEAD + MAX_PERMITTED_DATA_LENGTH as usize;
+ // 128KiB covers a reasonably large distribution of typical account sizes.
+ // In a recent sample, 99.98% of accounts' data lengths were less than or equal to 128KiB.
+ const MIN_CAPACITY: usize = 1024 * 128;
+ if min_buf_len > data_overflow_buffer.capacity() {
+ let next_cap = min_buf_len
+ .next_power_of_two()
+ .clamp(MIN_CAPACITY, MAX_CAPACITY);
+ data_overflow_buffer
+ .reserve_exact(next_cap - data_overflow_buffer.len());
}
}
-
- // Copy already read data to overflow buffer.
- data_overflow_buffer[..leftover].copy_from_slice(&bytes.0[next..]);
-
- // Read remaining data into overflow buffer.
- let Ok(bytes_read) = read_into_buffer(
- file,
- self_len,
- offset + next + leftover,
- &mut data_overflow_buffer[leftover..data_len],
- ) else {
- break;
- };
- if bytes_read + leftover < data_len {
- break;
- }
- let data = &data_overflow_buffer[..data_len];
- let stored_size = aligned_stored_size(data_len);
- let account = StoredAccountMeta {
- meta,
- account_meta,
- data,
- offset,
- stored_size,
- hash,
- };
- callback(account);
- reader.consume(stored_size);
}
}
}
@@ -1252,14 +1223,12 @@ impl AppendVec {
AppendVecFileBacking::File(file) => {
// Heuristic observed in benchmarking that maintains a reasonable balance between syscalls and data waste
const BUFFER_SIZE: usize = PAGE_SIZE * 4;
- let mut reader = BufferedReader::>::new_stack(
- self_len,
- file,
- mem::size_of::() + mem::size_of::(),
- );
+ let mut reader = BufferedReader::>::new_stack(self_len, file);
+ const REQUIRED_READ_LEN: usize =
+ mem::size_of::() + mem::size_of::();
loop {
- let offset = reader.get_offset();
- let bytes = match reader.fill_buf() {
+ let offset = reader.get_file_offset();
+ let bytes = match reader.fill_buf_required(REQUIRED_READ_LEN) {
Ok([]) => break,
Ok(bytes) => ValidSlice::new(bytes),
Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
@@ -1703,12 +1672,17 @@ pub mod tests {
let mut test_accounts = Vec::with_capacity(num_accounts);
let mut file_size = 0;
+ let special_file_interval = num_accounts / 8;
for i in 0..num_accounts {
let data_len = match i {
- // ensure one max size account
- 0 => MAX_PERMITTED_DATA_LENGTH as usize,
- // ensure one 64KiB account
- x if x == num_accounts - 1 => 1 << 16,
+ // Create several spread out accounts with varying sizes:
+ // for (x / special_file_interval) in 0..7 range
+ x if x % special_file_interval == 0 => {
+ // mult increases in 0 to 3 range twice
+ let mult = (x / special_file_interval) % 4;
+ // and data_len goes over 0..MAX_PERMITTED_DATA_LENGTH range also twice
+ mult * (MAX_PERMITTED_DATA_LENGTH as usize) / 3
+ }
// Otherwise use a reasonably small account to avoid long test times
x => x % 256,
};
diff --git a/accounts-db/src/blockhash_queue.rs b/accounts-db/src/blockhash_queue.rs
index bc7ff77d4c145a..a971869568c7a4 100644
--- a/accounts-db/src/blockhash_queue.rs
+++ b/accounts-db/src/blockhash_queue.rs
@@ -69,12 +69,6 @@ impl BlockhashQueue {
.map(|hash_age| hash_age.fee_calculator.lamports_per_signature)
}
- /// Check if the age of the hash is within the queue's max age
- #[deprecated(since = "2.0.0", note = "Please use `is_hash_valid_for_age` instead")]
- pub fn is_hash_valid(&self, hash: &Hash) -> bool {
- self.hashes.contains_key(hash)
- }
-
/// Check if the age of the hash is within the specified age
pub fn is_hash_valid_for_age(&self, hash: &Hash, max_age: usize) -> bool {
self.get_hash_info_if_valid(hash, max_age).is_some()
diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs
index d1a649a762d067..e038f700b782a5 100644
--- a/accounts-db/src/buffered_reader.rs
+++ b/accounts-db/src/buffered_reader.rs
@@ -9,7 +9,7 @@
//! `set_required_data_len(len)`, the whole account data is buffered _linearly_ in memory and available to
//! be returned.
use {
- crate::file_io::read_more_buffer,
+ crate::file_io::{read_into_buffer, read_more_buffer},
std::{
fs::File,
io::{self, BufRead, BufReader},
@@ -26,6 +26,7 @@ use {
/// caller may be able to opt for a stack-allocated buffer rather than a heap-allocated buffer, or
/// vice versa.
pub(crate) trait Backing {
+ fn capacity(&self) -> usize;
unsafe fn as_slice(&self) -> &[u8];
unsafe fn as_mut_slice(&mut self) -> &mut [u8];
}
@@ -46,6 +47,10 @@ impl Stack {
}
impl Backing for Stack {
+ fn capacity(&self) -> usize {
+ N
+ }
+
#[inline(always)]
unsafe fn as_slice(&self) -> &[u8] {
slice::from_raw_parts(self.0.as_ptr() as *const u8, N)
@@ -57,6 +62,48 @@ impl Backing for Stack {
}
}
+/// An extension of the `BufRead` trait for file readers that require stronger control
+/// over returned buffer size and tracking of the file offset.
+///
+/// Unlike the standard `fill_buf`, which only guarantees a non-empty buffer,
+/// this trait allows callers to:
+/// - Enforce a minimum number of contiguous bytes to be made available.
+/// - Fall back to an overflow buffer if the internal buffer cannot satisfy the request.
+/// - Retrieve the current file offset corresponding to the start of the next buffer.
+pub(crate) trait ContiguousBufFileRead<'a>: BufRead {
+ /// Returns the current file offset corresponding to the start of the buffer
+ /// that will be returned by the next call to `fill_buf_*`.
+ ///
+ /// This offset represents the position within the underlying file where data
+ /// will be consumed from.
+ fn get_file_offset(&self) -> usize;
+
+ /// Ensures the internal buffer contains at least `required_len` contiguous bytes,
+ /// and returns a slice to that buffer.
+ ///
+ /// Returns `Err(io::ErrorKind::UnexpectedEof)` if the end of file is reached
+ /// before the required number of bytes is available.
+ fn fill_buf_required(&mut self, required_len: usize) -> io::Result<&[u8]>;
+
+ /// Attempts to provide at least `required_len` contiguous bytes by using
+ /// the internal buffer or the provided `overflow_buffer` if needed.
+ ///
+ /// If the internal buffer alone does not satisfy the requirement, additional
+ /// bytes are read and appended to `overflow_buffer`, which is resized to fit the data.
+ ///
+ /// Returns a slice containing all the required data (may point to either buffer).
+ ///
+ /// Returns `Err(io::ErrorKind::UnexpectedEof)` if the end of file is reached
+ /// before the required number of bytes can be read.
+ fn fill_buf_required_or_overflow<'b>(
+ &'b mut self,
+ required_len: usize,
+ overflow_buffer: &'b mut Vec,
+ ) -> io::Result<&'b [u8]>
+ where
+ 'a: 'b;
+}
+
/// read a file a large buffer at a time and provide access to a slice in that buffer
pub struct BufferedReader<'a, T> {
/// when we are next asked to read from file, start at this offset
@@ -67,105 +114,126 @@ pub struct BufferedReader<'a, T> {
buf_valid_bytes: Range,
/// offset in the file of the `buf_valid_bytes`.`start`
file_last_offset: usize,
- /// how many contiguous bytes caller needs
- read_requirements: Option,
/// how many bytes are valid in the file. The file's len may be longer.
file_len_valid: usize,
/// reference to file handle
file: &'a File,
- /// we always want at least this many contiguous bytes available or we must read more into the buffer.
- default_min_read_requirement: usize,
}
impl<'a, T> BufferedReader<'a, T> {
/// `buffer_size`: how much to try to read at a time
/// `file_len_valid`: # bytes that are valid in the file, may be less than overall file len
/// `default_min_read_requirement`: make sure we always have this much data available if we're asked to read
- pub fn new(
- backing: T,
- file_len_valid: usize,
- file: &'a File,
- default_min_read_requirement: usize,
- ) -> Self {
+ pub fn new(backing: T, file_len_valid: usize, file: &'a File) -> Self {
Self {
file_offset_of_next_read: 0,
buf: backing,
buf_valid_bytes: 0..0,
file_last_offset: 0,
- read_requirements: None,
file_len_valid,
file,
- default_min_read_requirement,
}
}
+}
- /// specify the amount of data required to read next time `read` is called
+impl<'a, T: Backing> ContiguousBufFileRead<'a> for BufferedReader<'a, T> {
#[inline(always)]
- pub fn set_required_data_len(&mut self, len: usize) {
- self.read_requirements = Some(len);
+ fn get_file_offset(&self) -> usize {
+ if self.buf_valid_bytes.is_empty() {
+ self.file_offset_of_next_read
+ } else {
+ self.file_last_offset + self.buf_valid_bytes.start
+ }
}
-}
-impl<'a, T> BufferedReader<'a, T>
-where
- T: Backing,
-{
- /// read to make sure we have the minimum amount of data
- fn read_required_bytes(&mut self) -> io::Result<()> {
- let must_read = self
- .read_requirements
- .unwrap_or(self.default_min_read_requirement);
- if self.buf_valid_bytes.len() < must_read {
- // we haven't used all the bytes we read last time, so adjust the effective offset
- debug_assert!(self.buf_valid_bytes.len() <= self.file_offset_of_next_read);
- self.file_last_offset = self.file_offset_of_next_read - self.buf_valid_bytes.len();
- read_more_buffer(
- self.file,
- self.file_len_valid,
- &mut self.file_offset_of_next_read,
- // SAFETY: `read_more_buffer` will only _write_ to uninitialized memory and lifetime is tied to self.
- unsafe { self.buf.as_mut_slice() },
- &mut self.buf_valid_bytes,
- )?;
- if self.buf_valid_bytes.len() < must_read {
+ fn fill_buf_required(&mut self, required_len: usize) -> io::Result<&[u8]> {
+ if self.buf_valid_bytes.len() < required_len {
+ self.read_more_bytes()?;
+ if self.buf_valid_bytes.len() < required_len {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"unable to read enough data",
));
}
}
- // reset this once we have checked that we had this much data once
- self.read_requirements = None;
- Ok(())
+ Ok(self.valid_slice())
}
- /// Return file offset within `file` of the current consume position.
- ///
- /// The offset is corresponding to the start of buffer that will be returned
- /// by the next `fill_buf` call.
- #[inline(always)]
- pub fn get_offset(&'a self) -> usize {
- if self.buf_valid_bytes.is_empty() {
- self.file_offset_of_next_read
- } else {
- self.file_last_offset + self.buf_valid_bytes.start
+ fn fill_buf_required_or_overflow<'b>(
+ &'b mut self,
+ required_len: usize,
+ overflow_buffer: &'b mut Vec,
+ ) -> io::Result<&'b [u8]>
+ where
+ 'a: 'b,
+ {
+ if required_len <= self.buf.capacity() {
+ return self.fill_buf_required(required_len);
+ }
+
+ if required_len > overflow_buffer.capacity() {
+ overflow_buffer.reserve_exact(required_len - overflow_buffer.len());
+ }
+ // SAFETY: We only write to the uninitialized portion of the buffer via `copy_from_slice` and `read_into_buffer`.
+ // Later, we ensure we only read from the initialized portion of the buffer.
+ unsafe {
+ overflow_buffer.set_len(required_len);
+ }
+
+ // Copy already read data to overflow buffer.
+ let available_valid_data = self.valid_slice();
+ let leftover = available_valid_data.len();
+ overflow_buffer[..leftover].copy_from_slice(available_valid_data);
+
+ // Read remaining data into overflow buffer.
+ let read_dst = &mut overflow_buffer[leftover..];
+ let bytes_read = read_into_buffer(
+ self.file,
+ self.file_len_valid,
+ self.file_offset_of_next_read,
+ read_dst,
+ )?;
+ if bytes_read < read_dst.len() {
+ return Err(io::Error::new(
+ io::ErrorKind::UnexpectedEof,
+ "unable to read required amount of data",
+ ));
}
+ Ok(overflow_buffer.as_slice())
+ }
+}
+
+impl BufferedReader<'_, T>
+where
+ T: Backing,
+{
+ /// Defragment buffer and read more bytes to make sure we have filled available
+ /// space as much as possible.
+ fn read_more_bytes(&mut self) -> io::Result<()> {
+ // we haven't used all the bytes we read last time, so adjust the effective offset
+ debug_assert!(self.buf_valid_bytes.len() <= self.file_offset_of_next_read);
+ self.file_last_offset = self.file_offset_of_next_read - self.buf_valid_bytes.len();
+ read_more_buffer(
+ self.file,
+ self.file_len_valid,
+ &mut self.file_offset_of_next_read,
+ // SAFETY: `read_more_buffer` will only _write_ to uninitialized memory and lifetime is tied to self.
+ unsafe { self.buf.as_mut_slice() },
+ &mut self.buf_valid_bytes,
+ )
+ }
+
+ fn valid_slice(&self) -> &[u8] {
+ // SAFETY: We only read from memory that has been initialized by `read_more_buffer`
+ // and lifetime is tied to self.
+ unsafe { &self.buf.as_slice()[self.buf_valid_bytes.clone()] }
}
}
impl<'a, const N: usize> BufferedReader<'a, Stack> {
/// create a new buffered reader with a stack-allocated buffer
- pub fn new_stack(
- file_len_valid: usize,
- file: &'a File,
- default_min_read_requirement: usize,
- ) -> Self {
- BufferedReader::new(
- Stack::new(),
- file_len_valid,
- file,
- default_min_read_requirement,
- )
+ pub fn new_stack(file_len_valid: usize, file: &'a File) -> Self {
+ BufferedReader::new(Stack::new(), file_len_valid, file)
}
}
@@ -185,18 +253,11 @@ impl io::Read for BufferedReader<'_, T> {
/// `BufferedReader` implements a more permissive API compared to `BufRead`
/// by allowing `consume` to advance beyond the end of the buffer returned by `fill_buf`.
impl BufRead for BufferedReader<'_, T> {
- /// Return the biggest slice of valid data starting at the current offset.
- ///
- /// Note that `fill_buf` has stronger guarantee than `BufRead::fill_buf` and returns
- /// at least the number of bytes requested by `default_min_read_requirement` and
- /// `set_required_data_len`. If that condition cannot be met
- /// `Err(io::ErrorKind::UnexpectedEof)` is returned.
fn fill_buf(&mut self) -> io::Result<&[u8]> {
- self.read_required_bytes()?;
-
- // SAFETY: We only read from memory that has been initialized by `read_more_buffer`
- // and lifetime is tied to self.
- Ok(unsafe { &self.buf.as_slice()[self.buf_valid_bytes.clone()] })
+ if self.buf_valid_bytes.is_empty() {
+ self.read_more_bytes()?;
+ }
+ Ok(self.valid_slice())
}
/// Advance the offset by `amt` to a `file` position where next `fill_buf` buffer should
@@ -225,7 +286,7 @@ pub fn large_file_buf_reader(
if agave_io_uring::io_uring_supported() {
use crate::io_uring::sequential_file_reader::SequentialFileReader;
- let io_uring_reader = SequentialFileReader::with_capacity(buf_size, path.as_ref());
+ let io_uring_reader = SequentialFileReader::with_capacity(buf_size, &path);
match io_uring_reader {
Ok(reader) => return Ok(Box::new(reader)),
Err(error) => {
@@ -262,10 +323,9 @@ mod tests {
// First read 16 bytes to fill buffer
let file_len_valid = 32;
let default_min_read = 8;
- let mut reader =
- BufferedReader::new(backing, file_len_valid, &sample_file, default_min_read);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file);
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(default_min_read).unwrap());
let mut expected_offset = 0;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), buffer_size);
@@ -275,31 +335,34 @@ mod tests {
let advance = 16;
let mut required_len = 32;
reader.consume(advance);
- reader.set_required_data_len(required_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_len)
+ .expect_err("should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
// Continue reading should yield EOF.
reader.consume(advance);
- reader.set_required_data_len(required_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_len)
+ .expect_err("should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
// set_required_data to zero and offset should not change, and slice should be empty.
required_len = 0;
- reader.set_required_data_len(required_len);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(required_len).unwrap());
let expected_offset = file_len_valid;
assert_eq!(offset, expected_offset);
let expected_slice_len = 0;
@@ -319,10 +382,9 @@ mod tests {
// First read 16 bytes to fill buffer
let default_min_read_size = 8;
- let mut reader =
- BufferedReader::new(backing, valid_len, &sample_file, default_min_read_size);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let mut reader = BufferedReader::new(backing, valid_len, &sample_file);
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(default_min_read_size).unwrap());
let mut expected_offset = 0;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), buffer_size);
@@ -332,12 +394,14 @@ mod tests {
let mut advance = 16;
let mut required_data_len = 32;
reader.consume(advance);
- reader.set_required_data_len(required_data_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_data_len)
+ .expect_err("should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
@@ -345,12 +409,14 @@ mod tests {
advance = 14;
required_data_len = 32;
reader.consume(advance);
- reader.set_required_data_len(required_data_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_data_len)
+ .expect_err("should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
@@ -358,12 +424,14 @@ mod tests {
advance = 1;
required_data_len = 8;
reader.consume(advance);
- reader.set_required_data_len(required_data_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_data_len)
+ .expect_err("should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
@@ -371,12 +439,14 @@ mod tests {
advance = 3;
required_data_len = 8;
reader.consume(advance);
- reader.set_required_data_len(required_data_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("Should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_data_len)
+ .expect_err("Should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
}
@@ -392,10 +462,9 @@ mod tests {
// First read 16 bytes to fill buffer
let file_len_valid = 32;
let default_min_read_size = 8;
- let mut reader =
- BufferedReader::new(backing, file_len_valid, &sample_file, default_min_read_size);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file);
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(default_min_read_size).unwrap());
let mut expected_offset = 0;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), buffer_size);
@@ -405,9 +474,8 @@ mod tests {
let mut advance = 8;
let mut required_len = 8;
reader.consume(advance);
- reader.set_required_data_len(required_len);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(required_len).unwrap());
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), required_len);
@@ -420,9 +488,8 @@ mod tests {
advance = 8;
required_len = 16;
reader.consume(advance);
- reader.set_required_data_len(required_len);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(required_len).unwrap());
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), required_len);
@@ -435,12 +502,14 @@ mod tests {
advance = 16;
required_len = 32;
reader.consume(advance);
- reader.set_required_data_len(required_len);
- let offset = reader.get_offset();
+ let offset = reader.get_file_offset();
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(
- reader.fill_buf().expect_err("should hit EOF").kind(),
+ reader
+ .fill_buf_required(required_len)
+ .expect_err("should hit EOF")
+ .kind(),
io::ErrorKind::UnexpectedEof
);
}
@@ -456,9 +525,9 @@ mod tests {
// First read 16 bytes to fill buffer
let valid_len = 32;
let default_min_read = 8;
- let mut reader = BufferedReader::new(backing, valid_len, &sample_file, default_min_read);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let mut reader = BufferedReader::new(backing, valid_len, &sample_file);
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(default_min_read).unwrap());
let mut expected_offset = 0;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), buffer_size);
@@ -469,9 +538,8 @@ mod tests {
let mut advance = 8;
let mut required_data_len = 16;
reader.consume(advance);
- reader.set_required_data_len(required_data_len);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(required_data_len).unwrap());
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), required_data_len);
@@ -484,9 +552,8 @@ mod tests {
advance = 16;
required_data_len = 8;
reader.consume(advance);
- reader.set_required_data_len(required_data_len);
- let offset = reader.get_offset();
- let slice = ValidSlice::new(reader.fill_buf().unwrap());
+ let offset = reader.get_file_offset();
+ let slice = ValidSlice::new(reader.fill_buf_required(required_data_len).unwrap());
expected_offset += advance;
assert_eq!(offset, expected_offset);
assert_eq!(slice.len(), required_data_len);
@@ -495,4 +562,60 @@ mod tests {
&bytes[expected_offset..expected_offset + required_data_len]
);
}
+
+ #[test_case(Stack::<16>::new(), 16)]
+ fn test_fill_buf_required_or_overflow(backing: impl Backing, buffer_size: usize) {
+ // Setup a sample file with 32 bytes of data
+ const FILE_SIZE: usize = 32;
+ let mut sample_file = tempfile().unwrap();
+ let bytes = rand_bytes::<FILE_SIZE>();
+ sample_file.write_all(&bytes).unwrap();
+
+ let file_len_valid = 32;
+ let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file);
+
+ // Case 1: required_len <= buffer_size (no overflow needed)
+ let mut overflow = Vec::new();
+ let required_len = 8;
+ let slice = reader
+ .fill_buf_required_or_overflow(required_len, &mut overflow)
+ .unwrap();
+ assert_eq!(&slice[..required_len], &bytes[..required_len]);
+ assert!(overflow.is_empty());
+
+ // Consume part of the buffer to simulate partial reading
+ reader.consume(required_len);
+
+ // Case 2: required_len > buffer_size (overflow required)
+ let mut overflow = Vec::new();
+ let required_len = buffer_size + 8;
+ let slice = reader
+ .fill_buf_required_or_overflow(required_len, &mut overflow)
+ .unwrap();
+
+ // Internal buffer is size `buffer_size`, overflow should extend with the remaining `8` bytes
+ assert_eq!(slice.len(), required_len);
+ assert_eq!(slice, &bytes[8..8 + required_len]);
+ assert_eq!(overflow.len(), required_len);
+
+ // Consume everything to reach EOF
+ reader.consume(required_len);
+
+ // Case 3: required_len larger than remaining data (expect UnexpectedEof)
+ let mut overflow = Vec::new();
+ let required_len = 64;
+ let result = reader.fill_buf_required_or_overflow(required_len, &mut overflow);
+ assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+
+ // Case 4: required_len = 0 (should return empty slice)
+ let mut overflow = Vec::new();
+ let required_len = 0;
+ let offset_before = reader.get_file_offset();
+ let slice = reader
+ .fill_buf_required_or_overflow(required_len, &mut overflow)
+ .unwrap();
+ assert_eq!(slice.len(), 0);
+ let offset_after = reader.get_file_offset();
+ assert_eq!(offset_before, offset_after);
+ }
}
diff --git a/ci/bench/part2.sh b/ci/bench/part2.sh
index cd6dffae551f45..34e617807de40c 100755
--- a/ci/bench/part2.sh
+++ b/ci/bench/part2.sh
@@ -24,7 +24,6 @@ _ cargo +"$rust_nightly" bench --manifest-path runtime/Cargo.toml ${V:+--verbose
# Run banking/accounts bench. Doesn't require nightly, but use since it is already built.
_ cargo +"$rust_nightly" run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
-_ cargo +"$rust_nightly" run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
# Run zk-elgamal-proof benches.
_ cargo +"$rust_nightly" bench --manifest-path programs/zk-elgamal-proof/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
diff --git a/ci/test-bench.sh b/ci/test-bench.sh
index 8e441be34a8759..c39d787323029b 100755
--- a/ci/test-bench.sh
+++ b/ci/test-bench.sh
@@ -63,7 +63,6 @@ _ $cargoNightly bench --manifest-path programs/sbf/Cargo.toml ${V:+--verbose} --
# Run banking/accounts bench. Doesn't require nightly, but use since it is already built.
_ $cargoNightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
-_ $cargoNightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
# reason
diff --git a/client/src/transaction_executor.rs b/client/src/transaction_executor.rs
index 23354818777788..56d5faa8b515d5 100644
--- a/client/src/transaction_executor.rs
+++ b/client/src/transaction_executor.rs
@@ -76,7 +76,7 @@ impl TransactionExecutor {
return Some((sig, timestamp(), id));
}
Err(e) => {
- info!("error: {:#?}", e);
+ info!("error: {e:#?}");
}
}
None
@@ -136,7 +136,7 @@ impl TransactionExecutor {
let mut retain = true;
let sent_ts = sigs_w[i].1;
if let Some(e) = &statuses[j] {
- debug!("error: {:?}", e);
+ debug!("error: {e:?}");
if e.status.is_ok() {
success += 1;
} else {
@@ -169,8 +169,7 @@ impl TransactionExecutor {
);
if last_log.elapsed().as_millis() > 5000 {
info!(
- "success: {} error: {} timed_out: {}",
- success, error_count, timed_out,
+ "success: {success} error: {error_count} timed_out: {timed_out}",
);
last_log = Instant::now();
}
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 3697d6fe9c707a..177a42271cf94e 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -66,6 +66,7 @@ itertools = { workspace = true }
log = { workspace = true }
lru = { workspace = true }
min-max-heap = { workspace = true }
+num_cpus = { workspace = true }
num_enum = { workspace = true }
prio-graph = { workspace = true }
qualifier_attr = { workspace = true }
diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs
index 55363acf0f7af4..a6db35e7e8dc1d 100644
--- a/core/src/accounts_hash_verifier.rs
+++ b/core/src/accounts_hash_verifier.rs
@@ -3,25 +3,14 @@
use {
crate::snapshot_packager_service::PendingSnapshotPackages,
crossbeam_channel::{Receiver, Sender},
- solana_accounts_db::{
- accounts_db::CalcAccountsHashKind,
- accounts_hash::{
- AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash,
- MerkleOrLatticeAccountsHash,
- },
- sorted_storages::SortedStorages,
- },
- solana_clock::{Slot, DEFAULT_MS_PER_SLOT},
+ solana_clock::DEFAULT_MS_PER_SLOT,
solana_measure::measure_us,
solana_runtime::{
- serde_snapshot::BankIncrementalSnapshotPersistence,
snapshot_config::SnapshotConfig,
snapshot_controller::SnapshotController,
snapshot_package::{
- self, AccountsHashAlgorithm, AccountsPackage, AccountsPackageKind, SnapshotKind,
- SnapshotPackage,
+ self, AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage,
},
- snapshot_utils,
},
std::{
io,
@@ -185,221 +174,13 @@ impl AccountsHashVerifier {
pending_snapshot_packages: &Mutex<PendingSnapshotPackages>,
snapshot_config: &SnapshotConfig,
) -> io::Result<()> {
- let (merkle_or_lattice_accounts_hash, bank_incremental_snapshot_persistence) =
- Self::calculate_and_verify_accounts_hash(&accounts_package, snapshot_config)?;
-
Self::purge_old_accounts_hashes(&accounts_package, snapshot_config);
- Self::submit_for_packaging(
- accounts_package,
- pending_snapshot_packages,
- merkle_or_lattice_accounts_hash,
- bank_incremental_snapshot_persistence,
- );
+ Self::submit_for_packaging(accounts_package, pending_snapshot_packages);
Ok(())
}
- /// returns calculated accounts hash
- fn calculate_and_verify_accounts_hash(
- accounts_package: &AccountsPackage,
- snapshot_config: &SnapshotConfig,
- ) -> io::Result<(
- MerkleOrLatticeAccountsHash,
- Option<BankIncrementalSnapshotPersistence>,
- )> {
- match accounts_package.accounts_hash_algorithm {
- AccountsHashAlgorithm::Merkle => {
- debug!(
- "calculate_and_verify_accounts_hash(): snapshots lt hash is disabled, DO \
- merkle-based accounts hash calculation",
- );
- }
- AccountsHashAlgorithm::Lattice => {
- debug!(
- "calculate_and_verify_accounts_hash(): snapshots lt hash is enabled, SKIP \
- merkle-based accounts hash calculation",
- );
- return Ok((MerkleOrLatticeAccountsHash::Lattice, None));
- }
- }
-
- let accounts_hash_calculation_kind = match accounts_package.package_kind {
- AccountsPackageKind::Snapshot(snapshot_kind) => match snapshot_kind {
- SnapshotKind::FullSnapshot => CalcAccountsHashKind::Full,
- SnapshotKind::IncrementalSnapshot(_) => CalcAccountsHashKind::Incremental,
- },
- };
-
- let (accounts_hash_kind, bank_incremental_snapshot_persistence) =
- match accounts_hash_calculation_kind {
- CalcAccountsHashKind::Full => {
- let (accounts_hash, _capitalization) =
- Self::_calculate_full_accounts_hash(accounts_package);
- (accounts_hash.into(), None)
- }
- CalcAccountsHashKind::Incremental => {
- let AccountsPackageKind::Snapshot(SnapshotKind::IncrementalSnapshot(base_slot)) =
- accounts_package.package_kind
- else {
- panic!("Calculating incremental accounts hash requires a base slot");
- };
- let accounts_db = &accounts_package.accounts.accounts_db;
- let Some((base_accounts_hash, base_capitalization)) =
- accounts_db.get_accounts_hash(base_slot)
- else {
- #[rustfmt::skip]
- panic!(
- "incremental snapshot requires accounts hash and capitalization from \
- the full snapshot it is based on\n\
- package: {accounts_package:?}\n\
- accounts hashes: {:?}\n\
- incremental accounts hashes: {:?}\n\
- full snapshot archives: {:?}\n\
- bank snapshots: {:?}",
- accounts_db.get_accounts_hashes(),
- accounts_db.get_incremental_accounts_hashes(),
- snapshot_utils::get_full_snapshot_archives(
- &snapshot_config.full_snapshot_archives_dir,
- ),
- snapshot_utils::get_bank_snapshots(&snapshot_config.bank_snapshots_dir),
- );
- };
- let (incremental_accounts_hash, incremental_capitalization) =
- Self::_calculate_incremental_accounts_hash(accounts_package, base_slot);
- let bank_incremental_snapshot_persistence =
- BankIncrementalSnapshotPersistence {
- full_slot: base_slot,
- full_hash: base_accounts_hash.into(),
- full_capitalization: base_capitalization,
- incremental_hash: incremental_accounts_hash.into(),
- incremental_capitalization,
- };
- (
- incremental_accounts_hash.into(),
- Some(bank_incremental_snapshot_persistence),
- )
- }
- };
-
- Ok((
- MerkleOrLatticeAccountsHash::Merkle(accounts_hash_kind),
- bank_incremental_snapshot_persistence,
- ))
- }
-
- fn _calculate_full_accounts_hash(
- accounts_package: &AccountsPackage,
- ) -> (AccountsHash, /*capitalization*/ u64) {
- let (sorted_storages, storage_sort_us) =
- measure_us!(SortedStorages::new(&accounts_package.snapshot_storages));
-
- let mut timings = HashStats {
- storage_sort_us,
- ..HashStats::default()
- };
- timings.calc_storage_size_quartiles(&accounts_package.snapshot_storages);
-
- let epoch = accounts_package
- .epoch_schedule
- .get_epoch(accounts_package.slot);
- let calculate_accounts_hash_config = CalcAccountsHashConfig {
- use_bg_thread_pool: true,
- ancestors: None,
- epoch_schedule: &accounts_package.epoch_schedule,
- epoch,
- store_detailed_debug_info_on_failure: false,
- };
-
- let slot = accounts_package.slot;
- let ((accounts_hash, lamports), measure_hash_us) =
- measure_us!(accounts_package.accounts.accounts_db.update_accounts_hash(
- &calculate_accounts_hash_config,
- &sorted_storages,
- slot,
- timings,
- ));
-
- if accounts_package.expected_capitalization != lamports {
- // before we assert, run the hash calc again. This helps track down whether it could have been a failure in a race condition possibly with shrink.
- // We could add diagnostics to the hash calc here to produce a per bin cap or something to help narrow down how many pubkeys are different.
- let calculate_accounts_hash_config = CalcAccountsHashConfig {
- // since we're going to assert, use the fg thread pool to go faster
- use_bg_thread_pool: false,
- // now that we've failed, store off the failing contents that produced a bad capitalization
- store_detailed_debug_info_on_failure: true,
- ..calculate_accounts_hash_config
- };
- let second_accounts_hash = accounts_package
- .accounts
- .accounts_db
- .calculate_accounts_hash(
- &calculate_accounts_hash_config,
- &sorted_storages,
- HashStats::default(),
- );
- panic!(
- "accounts hash capitalization mismatch: expected {}, but calculated {} (then \
- recalculated {})",
- accounts_package.expected_capitalization, lamports, second_accounts_hash.1,
- );
- }
-
- datapoint_info!(
- "accounts_hash_verifier",
- ("calculate_hash", measure_hash_us, i64),
- );
-
- (accounts_hash, lamports)
- }
-
- fn _calculate_incremental_accounts_hash(
- accounts_package: &AccountsPackage,
- base_slot: Slot,
- ) -> (IncrementalAccountsHash, /*capitalization*/ u64) {
- let incremental_storages =
- accounts_package
- .snapshot_storages
- .iter()
- .filter_map(|storage| {
- let storage_slot = storage.slot();
- (storage_slot > base_slot).then_some((storage, storage_slot))
- });
- let sorted_storages = SortedStorages::new_with_slots(incremental_storages, None, None);
-
- let epoch = accounts_package
- .epoch_schedule
- .get_epoch(accounts_package.slot);
- let calculate_accounts_hash_config = CalcAccountsHashConfig {
- use_bg_thread_pool: true,
- ancestors: None,
- epoch_schedule: &accounts_package.epoch_schedule,
- epoch,
- store_detailed_debug_info_on_failure: false,
- };
-
- let (incremental_accounts_hash, measure_hash_us) = measure_us!(accounts_package
- .accounts
- .accounts_db
- .update_incremental_accounts_hash(
- &calculate_accounts_hash_config,
- &sorted_storages,
- accounts_package.slot,
- HashStats::default(),
- ));
-
- datapoint_info!(
- "accounts_hash_verifier",
- (
- "calculate_incremental_accounts_hash_us",
- measure_hash_us,
- i64
- ),
- );
-
- incremental_accounts_hash
- }
-
fn purge_old_accounts_hashes(
accounts_package: &AccountsPackage,
snapshot_config: &SnapshotConfig,
@@ -434,8 +215,6 @@ impl AccountsHashVerifier {
fn submit_for_packaging(
accounts_package: AccountsPackage,
pending_snapshot_packages: &Mutex<PendingSnapshotPackages>,
- merkle_or_lattice_accounts_hash: MerkleOrLatticeAccountsHash,
- bank_incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>,
) {
if !matches!(
accounts_package.package_kind,
@@ -444,11 +223,7 @@ impl AccountsHashVerifier {
return;
}
- let snapshot_package = SnapshotPackage::new(
- accounts_package,
- merkle_or_lattice_accounts_hash,
- bank_incremental_snapshot_persistence,
- );
+ let snapshot_package = SnapshotPackage::new(accounts_package);
pending_snapshot_packages
.lock()
.unwrap()
@@ -462,7 +237,10 @@ impl AccountsHashVerifier {
#[cfg(test)]
mod tests {
- use {super::*, rand::seq::SliceRandom, solana_runtime::snapshot_package::SnapshotKind};
+ use {
+ super::*, rand::seq::SliceRandom, solana_clock::Slot,
+ solana_runtime::snapshot_package::SnapshotKind,
+ };
fn new(package_kind: AccountsPackageKind, slot: Slot) -> AccountsPackage {
AccountsPackage {
diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs
index 19e0a674a848cf..aa973a7a231c94 100644
--- a/core/src/banking_stage/decision_maker.rs
+++ b/core/src/banking_stage/decision_maker.rs
@@ -7,7 +7,7 @@ use {
solana_pubkey::Pubkey,
solana_unified_scheduler_pool::{BankingStageMonitor, BankingStageStatus},
std::{
- sync::{Arc, RwLock},
+ sync::{atomic::{AtomicBool, Ordering::Relaxed}, Arc, RwLock},
time::{Duration, Instant},
},
};
@@ -136,10 +136,30 @@ impl DecisionMaker {
}
}
-impl BankingStageMonitor for DecisionMaker {
+#[derive(Debug)]
+pub(crate) struct DecisionMakerWrapper {
+ is_exited: Arc,
+ decision_maker: DecisionMaker,
+}
+
+impl DecisionMakerWrapper {
+ pub(crate) fn new(decision_maker: DecisionMaker) -> Self {
+ // Clone-off before hand to avoid lock contentions.
+ let is_exited = decision_maker.poh_recorder.read().unwrap().is_exited.clone();
+
+ Self {
+ is_exited,
+ decision_maker,
+ }
+ }
+}
+
+impl BankingStageMonitor for DecisionMakerWrapper {
fn status(&mut self) -> BankingStageStatus {
- if matches!(
- self.make_consume_or_forward_decision(),
+ if self.is_exited.load(Relaxed) {
+ BankingStageStatus::Exited
+ } else if matches!(
+ self.decision_maker.make_consume_or_forward_decision(),
BufferedPacketsDecision::Forward,
) {
BankingStageStatus::Inactive
diff --git a/core/src/banking_stage/unified_scheduler.rs b/core/src/banking_stage/unified_scheduler.rs
index 602bc0c061096e..283af6117b6c83 100644
--- a/core/src/banking_stage/unified_scheduler.rs
+++ b/core/src/banking_stage/unified_scheduler.rs
@@ -30,7 +30,7 @@
use qualifier_attr::qualifiers;
use {
super::{
- decision_maker::{BufferedPacketsDecision, DecisionMaker},
+ decision_maker::{BufferedPacketsDecision, DecisionMaker, DecisionMakerWrapper},
packet_deserializer::PacketDeserializer,
LikeClusterInfo,
},
@@ -56,7 +56,7 @@ pub(crate) fn ensure_banking_stage_setup(
let mut root_bank_cache = RootBankCache::new(bank_forks.clone());
let unified_receiver = channels.unified_receiver().clone();
let mut decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone());
- let banking_stage_monitor = Box::new(decision_maker.clone());
+ let banking_stage_monitor = Box::new(DecisionMakerWrapper::new(decision_maker.clone()));
let banking_packet_handler = Box::new(
move |helper: &BankingStageHelper, batches: BankingPacketBatch| {
diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs
index f8dbe38565c225..68887dd1c04398 100644
--- a/core/src/cluster_info_vote_listener.rs
+++ b/core/src/cluster_info_vote_listener.rs
@@ -193,7 +193,7 @@ impl ClusterInfoVoteListener {
verified_packets_sender: BankingPacketSender,
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
- subscriptions: Arc<RpcSubscriptions>,
+ subscriptions: Option<Arc<RpcSubscriptions>>,
verified_vote_sender: VerifiedVoteSender,
gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender,
replay_votes_receiver: ReplayVoteReceiver,
@@ -230,7 +230,7 @@ impl ClusterInfoVoteListener {
vote_tracker,
&mut bank_hash_cache,
dumped_slot_subscription,
- subscriptions,
+ subscriptions.as_deref(),
gossip_verified_vote_hash_sender,
verified_vote_sender,
replay_votes_receiver,
@@ -318,7 +318,7 @@ impl ClusterInfoVoteListener {
vote_tracker: Arc<VoteTracker>,
bank_hash_cache: &mut BankHashCache,
dumped_slot_subscription: DumpedSlotSubscription,
- subscriptions: Arc<RpcSubscriptions>,
+ subscriptions: Option<&RpcSubscriptions>,
gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVoteReceiver,
@@ -355,7 +355,7 @@ impl ClusterInfoVoteListener {
&gossip_vote_txs_receiver,
&vote_tracker,
&root_bank,
- &subscriptions,
+ subscriptions,
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&replay_votes_receiver,
@@ -389,7 +389,7 @@ impl ClusterInfoVoteListener {
gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &VoteTracker,
root_bank: &Bank,
- subscriptions: &RpcSubscriptions,
+ subscriptions: Option<&RpcSubscriptions>,
gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver,
@@ -445,7 +445,7 @@ impl ClusterInfoVoteListener {
vote_transaction_signature: Signature,
vote_tracker: &VoteTracker,
root_bank: &Bank,
- subscriptions: &RpcSubscriptions,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
verified_vote_sender: &VerifiedVoteSender,
gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender,
diff: &mut HashMap>,
@@ -586,7 +586,9 @@ impl ClusterInfoVoteListener {
*latest_vote_slot = max(*latest_vote_slot, last_vote_slot);
if is_new_vote {
- subscriptions.notify_vote(*vote_pubkey, vote, vote_transaction_signature);
+ if let Some(rpc_subscriptions) = rpc_subscriptions {
+ rpc_subscriptions.notify_vote(*vote_pubkey, vote, vote_transaction_signature);
+ }
let _ = verified_vote_sender.send((*vote_pubkey, vote_slots));
}
}
@@ -597,7 +599,7 @@ impl ClusterInfoVoteListener {
gossip_vote_txs: Vec,
replayed_votes: Vec,
root_bank: &Bank,
- subscriptions: &RpcSubscriptions,
+ subscriptions: Option<&RpcSubscriptions>,
gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender,
verified_vote_sender: &VerifiedVoteSender,
bank_notification_sender: &Option,
@@ -881,7 +883,7 @@ mod tests {
&votes_receiver,
&vote_tracker,
&bank3,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&replay_votes_receiver,
@@ -916,7 +918,7 @@ mod tests {
&votes_receiver,
&vote_tracker,
&bank3,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&replay_votes_receiver,
@@ -1010,7 +1012,7 @@ mod tests {
&votes_txs_receiver,
&vote_tracker,
&bank0,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&replay_votes_receiver,
@@ -1180,7 +1182,7 @@ mod tests {
&votes_txs_receiver,
&vote_tracker,
&bank0,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&replay_votes_receiver,
@@ -1293,7 +1295,7 @@ mod tests {
&votes_receiver,
&vote_tracker,
&bank,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&replay_votes_receiver,
@@ -1389,7 +1391,7 @@ mod tests {
Signature::default(),
)],
&bank,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&None,
@@ -1438,7 +1440,7 @@ mod tests {
Signature::default(),
)],
&new_root_bank,
- &subscriptions,
+ Some(&subscriptions),
&gossip_verified_vote_hash_sender,
&verified_vote_sender,
&None,
@@ -1656,7 +1658,7 @@ mod tests {
signature,
&vote_tracker,
&bank,
- &subscriptions,
+ Some(&subscriptions),
&verified_vote_sender,
&gossip_verified_vote_hash_sender,
&mut diff,
@@ -1689,7 +1691,7 @@ mod tests {
signature,
&vote_tracker,
&bank,
- &subscriptions,
+ Some(&subscriptions),
&verified_vote_sender,
&gossip_verified_vote_hash_sender,
&mut diff,
diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs
index 54c85728310869..ec55bfaae15539 100644
--- a/core/src/commitment_service.rs
+++ b/core/src/commitment_service.rs
@@ -67,7 +67,7 @@ impl AggregateCommitmentService {
pub fn new(
exit: Arc,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
- subscriptions: Arc<RpcSubscriptions>,
+ subscriptions: Option<Arc<RpcSubscriptions>>,
) -> (Sender, Self) {
let (sender, receiver): (
Sender,
@@ -83,9 +83,12 @@ impl AggregateCommitmentService {
break;
}
- if let Err(RecvTimeoutError::Disconnected) =
- Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit)
- {
+ if let Err(RecvTimeoutError::Disconnected) = Self::run(
+ &receiver,
+ &block_commitment_cache,
+ subscriptions.as_deref(),
+ &exit,
+ ) {
break;
}
})
@@ -97,7 +100,7 @@ impl AggregateCommitmentService {
fn run(
receiver: &Receiver,
block_commitment_cache: &RwLock<BlockCommitmentCache>,
- subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
exit: &AtomicBool,
) -> Result<(), RecvTimeoutError> {
loop {
@@ -136,10 +139,12 @@ impl AggregateCommitmentService {
),
);
- // Triggers rpc_subscription notifications as soon as new commitment data is available,
- // sending just the commitment cache slot information that the notifications thread
- // needs
- subscriptions.notify_subscribers(update_commitment_slots);
+ if let Some(rpc_subscriptions) = rpc_subscriptions {
+ // Triggers rpc_subscription notifications as soon as new commitment data is
+ // available, sending just the commitment cache slot information that the
+ // notifications thread needs
+ rpc_subscriptions.notify_subscribers(update_commitment_slots);
+ }
}
}
diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index ec4d08c6dd8a61..d1da8af2a105ca 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -3246,11 +3246,11 @@ pub mod test {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(3, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(4, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(!blockstore.is_root(0));
assert!(!blockstore.is_root(1));
@@ -3282,11 +3282,11 @@ pub mod test {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(3, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(4, 1, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(4, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(std::iter::once(&3)).unwrap();
assert!(!blockstore.is_root(0));
@@ -3310,9 +3310,9 @@ pub mod test {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(3, 1, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(3, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(!blockstore.is_root(0));
assert!(!blockstore.is_root(1));
diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs
index e912037a43c55d..5662a2c3673a99 100644
--- a/core/src/cost_update_service.rs
+++ b/core/src/cost_update_service.rs
@@ -2,7 +2,6 @@
use {
crossbeam_channel::Receiver,
- solana_ledger::blockstore::Blockstore,
solana_runtime::bank::Bank,
std::{
sync::Arc,
@@ -30,11 +29,11 @@ const MAX_LOOP_COUNT: usize = 25;
const LOOP_LIMITER: Duration = Duration::from_millis(10);
impl CostUpdateService {
- pub fn new(blockstore: Arc<Blockstore>, cost_update_receiver: CostUpdateReceiver) -> Self {
+ pub fn new(cost_update_receiver: CostUpdateReceiver) -> Self {
let thread_hdl = Builder::new()
.name("solCostUpdtSvc".to_string())
.spawn(move || {
- Self::service_loop(blockstore, cost_update_receiver);
+ Self::service_loop(cost_update_receiver);
})
.unwrap();
@@ -45,13 +44,20 @@ impl CostUpdateService {
self.thread_hdl.join()
}
- fn service_loop(_blockstore: Arc, cost_update_receiver: CostUpdateReceiver) {
+ fn service_loop(cost_update_receiver: CostUpdateReceiver) {
for cost_update in cost_update_receiver.iter() {
match cost_update {
CostUpdate::FrozenBank {
bank,
is_leader_block,
} => {
+ let (total_transaction_fee, total_priority_fee) = {
+ let collector_fee_details = bank.get_collector_fee_details();
+ (
+ collector_fee_details.total_transaction_fee(),
+ collector_fee_details.total_priority_fee(),
+ )
+ };
for loop_count in 1..=MAX_LOOP_COUNT {
{
// Release the lock so that the thread that will
@@ -68,7 +74,12 @@ impl CostUpdateService {
"inflight transaction count is {in_flight_transaction_count} \
for slot {slot} after {loop_count} iteration(s)"
);
- cost_tracker.report_stats(slot, is_leader_block);
+ cost_tracker.report_stats(
+ slot,
+ is_leader_block,
+ total_transaction_fee,
+ total_priority_fee,
+ );
break;
}
}
diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs
index 32106a113efca7..50513acc49fd79 100644
--- a/core/src/repair/ancestor_hashes_service.rs
+++ b/core/src/repair/ancestor_hashes_service.rs
@@ -923,7 +923,7 @@ mod test {
blockstore::make_many_slot_entries, get_tmp_ledger_path,
get_tmp_ledger_path_auto_delete, shred::Nonce,
},
- solana_net_utils::bind_to_unspecified,
+ solana_net_utils::sockets::bind_to_localhost_unique,
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_signer::Signer,
@@ -1357,7 +1357,7 @@ mod test {
impl ManageAncestorHashesState {
fn new(bank_forks: Arc>) -> Self {
let ancestor_hashes_request_statuses = Arc::new(DashMap::new());
- let ancestor_hashes_request_socket = Arc::new(bind_to_unspecified().unwrap());
+ let ancestor_hashes_request_socket = Arc::new(bind_to_localhost_unique().expect("should bind"));
let epoch_schedule = bank_forks
.read()
.unwrap()
diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs
index 65e71e7d3d893a..3b4adb8d171d1d 100644
--- a/core/src/repair/repair_service.rs
+++ b/core/src/repair/repair_service.rs
@@ -1275,17 +1275,13 @@ mod test {
get_tmp_ledger_path_auto_delete,
shred::max_ticks_per_n_shreds,
},
- solana_net_utils::{
- bind_to_unspecified,
- sockets::{bind_to, localhost_port_range_for_tests},
- },
+ solana_net_utils::sockets::bind_to_localhost_unique,
solana_runtime::bank::Bank,
solana_signer::Signer,
solana_streamer::socket::SocketAddrSpace,
solana_time_utils::timestamp,
std::{
collections::HashSet,
- net::{IpAddr, Ipv4Addr},
},
};
@@ -1302,10 +1298,9 @@ mod test {
let pubkey = cluster_info.id();
let slot = 100;
let shred_index = 50;
- let port_range = localhost_port_range_for_tests();
- let reader = bind_to(IpAddr::V4(Ipv4Addr::LOCALHOST), port_range.0).expect("should bind");
+ let reader = bind_to_localhost_unique().expect("should bind");
let address = reader.local_addr().unwrap();
- let sender = bind_to(IpAddr::V4(Ipv4Addr::LOCALHOST), port_range.1).expect("should bind");
+ let sender = bind_to_localhost_unique().expect("should bind");
let outstanding_repair_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default()));
// Send a repair request
@@ -1354,8 +1349,8 @@ mod test {
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
// Create some orphan slots
- let (mut shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true);
- let (shreds2, _) = make_slot_entries(5, 2, 1, /*merkle_variant:*/ true);
+ let (mut shreds, _) = make_slot_entries(1, 0, 1);
+ let (shreds2, _) = make_slot_entries(5, 2, 1);
shreds.extend(shreds2);
blockstore.insert_shreds(shreds, None, false).unwrap();
let mut repair_weight = RepairWeight::new(0);
@@ -1383,7 +1378,7 @@ mod test {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- let (shreds, _) = make_slot_entries(2, 0, 1, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(2, 0, 1);
// Write this shred to slot 2, should chain to slot 0, which we haven't received
// any shreds for
@@ -1491,7 +1486,6 @@ mod test {
0, // slot
0, // parent_slot
num_entries_per_slot as u64,
- true, // merkle_variant
);
let num_shreds_per_slot = shreds.len() as u64;
@@ -1585,7 +1579,6 @@ mod test {
i, // slot
parent,
num_entries_per_slot as u64,
- true, // merkle_variant
);
blockstore.insert_shreds(shreds, None, false).unwrap();
@@ -1623,7 +1616,6 @@ mod test {
dead_slot, // slot
dead_slot - 1, // parent_slot
num_entries_per_slot,
- true, // merkle_variant
);
blockstore
.insert_shreds(shreds[..shreds.len() - 1].to_vec(), None, false)
@@ -1661,7 +1653,7 @@ mod test {
};
let mut duplicate_slot_repair_statuses = HashMap::new();
let dead_slot = 9;
- let receive_socket = &bind_to_unspecified().unwrap();
+ let receive_socket = &bind_to_localhost_unique().expect("should bind - receive socket");
let duplicate_status = DuplicateSlotRepairStatus {
correct_ancestor_to_repair: (dead_slot, Hash::default()),
start_ts: u64::MAX,
@@ -1670,12 +1662,7 @@ mod test {
// Insert some shreds to create a SlotMeta,
let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
- let (mut shreds, _) = make_slot_entries(
- dead_slot,
- dead_slot - 1,
- num_entries_per_slot,
- true, // merkle_variant
- );
+ let (mut shreds, _) = make_slot_entries(dead_slot, dead_slot - 1, num_entries_per_slot);
blockstore
.insert_shreds(shreds[..shreds.len() - 1].to_vec(), None, false)
.unwrap();
@@ -1690,7 +1677,7 @@ mod test {
&blockstore,
&serve_repair,
&mut RepairStats::default(),
- &bind_to_unspecified().unwrap(),
+ &bind_to_localhost_unique().expect("should bind - repair socket"),
&None,
&RwLock::new(OutstandingRequests::default()),
&identity_keypair,
@@ -1716,7 +1703,7 @@ mod test {
&blockstore,
&serve_repair,
&mut RepairStats::default(),
- &bind_to_unspecified().unwrap(),
+ &bind_to_localhost_unique().expect("should bind - repair socket"),
&None,
&RwLock::new(OutstandingRequests::default()),
&identity_keypair,
@@ -1735,7 +1722,7 @@ mod test {
&blockstore,
&serve_repair,
&mut RepairStats::default(),
- &bind_to_unspecified().unwrap(),
+ &bind_to_localhost_unique().expect("should bind - repair socket"),
&None,
&RwLock::new(OutstandingRequests::default()),
&identity_keypair,
@@ -1750,7 +1737,7 @@ mod test {
let bank_forks = BankForks::new_rw_arc(bank);
let dummy_addr = Some((
Pubkey::default(),
- bind_to_unspecified().unwrap().local_addr().unwrap(),
+ bind_to_localhost_unique().expect("should bind - dummy socket").local_addr().unwrap(),
));
let cluster_info = Arc::new(new_test_cluster_info());
let ledger_path = get_tmp_ledger_path_auto_delete!();
diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index 3925ace68340a9..b8b0ab39fd1334 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -280,7 +280,7 @@ pub struct ReplayStageConfig {
}
pub struct ReplaySenders {
- pub rpc_subscriptions: Arc,
+ pub rpc_subscriptions: Option>,
pub slot_status_notifier: Option,
pub transaction_status_sender: Option,
pub entry_notification_sender: Option,
@@ -715,7 +715,7 @@ impl ReplayStage {
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&slot_status_notifier,
&mut progress,
&mut replay_timing,
@@ -741,7 +741,7 @@ impl ReplayStage {
&mut heaviest_subtree_fork_choice,
&replay_vote_sender,
&bank_notification_sender,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&slot_status_notifier,
&mut duplicate_slots_tracker,
&duplicate_confirmed_slots,
@@ -1002,7 +1002,7 @@ impl ReplayStage {
&leader_schedule_cache,
&lockouts_sender,
snapshot_controller.as_deref(),
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&block_commitment_cache,
&mut heaviest_subtree_fork_choice,
&bank_notification_sender,
@@ -1160,7 +1160,7 @@ impl ReplayStage {
&bank_forks,
&poh_recorder,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&slot_status_notifier,
&mut progress,
&retransmit_slots_sender,
@@ -2079,7 +2079,7 @@ impl ReplayStage {
bank_forks: &Arc>,
poh_recorder: &Arc>,
leader_schedule_cache: &Arc,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
slot_status_notifier: &Option,
progress_map: &mut ProgressMap,
retransmit_slots_sender: &Sender,
@@ -2257,7 +2257,7 @@ impl ReplayStage {
bank: &Bank,
root: Slot,
err: &BlockstoreProcessorError,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
slot_status_notifier: &Option,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
duplicate_confirmed_slots: &DuplicateConfirmedSlots,
@@ -2309,11 +2309,13 @@ impl ReplayStage {
.notify_slot_dead(slot, parent_slot, err.clone());
}
- rpc_subscriptions.notify_slot_update(SlotUpdate::Dead {
- slot,
- err,
- timestamp: timestamp(),
- });
+ if let Some(rpc_subscriptions) = rpc_subscriptions {
+ rpc_subscriptions.notify_slot_update(SlotUpdate::Dead {
+ slot,
+ err,
+ timestamp: timestamp(),
+ });
+ }
let dead_state = DeadState::new_from_state(
slot,
@@ -2374,7 +2376,7 @@ impl ReplayStage {
leader_schedule_cache: &Arc,
lockouts_sender: &Sender,
snapshot_controller: Option<&SnapshotController>,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
block_commitment_cache: &Arc>,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
bank_notification_sender: &Option,
@@ -3051,7 +3053,7 @@ impl ReplayStage {
transaction_status_sender: Option<&TransactionStatusSender>,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
bank_notification_sender: &Option,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
slot_status_notifier: &Option,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
duplicate_confirmed_slots: &DuplicateConfirmedSlots,
@@ -3355,7 +3357,7 @@ impl ReplayStage {
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
replay_vote_sender: &ReplayVoteSender,
bank_notification_sender: &Option,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
slot_status_notifier: &Option,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
duplicate_confirmed_slots: &DuplicateConfirmedSlots,
@@ -3980,7 +3982,7 @@ impl ReplayStage {
blockstore: &Blockstore,
leader_schedule_cache: &Arc,
snapshot_controller: Option<&SnapshotController>,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
block_commitment_cache: &Arc>,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
bank_notification_sender: &Option,
@@ -4042,7 +4044,9 @@ impl ReplayStage {
drop_bank_sender,
)?;
blockstore.slots_stats.mark_rooted(new_root);
- rpc_subscriptions.notify_roots(rooted_slots);
+ if let Some(rpc_subscriptions) = rpc_subscriptions {
+ rpc_subscriptions.notify_roots(rooted_slots);
+ }
if let Some(sender) = bank_notification_sender {
sender
.sender
@@ -4127,7 +4131,7 @@ impl ReplayStage {
blockstore: &Blockstore,
bank_forks: &RwLock,
leader_schedule_cache: &Arc,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
slot_status_notifier: &Option,
progress: &mut ProgressMap,
replay_timing: &mut ReplayLoopTiming,
@@ -4222,11 +4226,13 @@ impl ReplayStage {
slot: u64,
root_slot: u64,
leader: &Pubkey,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option<&RpcSubscriptions>,
slot_status_notifier: &Option,
new_bank_options: NewBankOptions,
) -> Bank {
- rpc_subscriptions.notify_slot(slot, parent.slot(), root_slot);
+ if let Some(rpc_subscriptions) = rpc_subscriptions {
+ rpc_subscriptions.notify_slot(slot, parent.slot(), root_slot);
+ }
if let Some(slot_status_notifier) = slot_status_notifier {
slot_status_notifier
.read()
@@ -4543,13 +4549,14 @@ pub(crate) mod tests {
bank1.freeze();
bank_forks.write().unwrap().insert(bank1);
+ let rpc_subscriptions = Some(rpc_subscriptions);
+
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
// chaining to slot 1
let (shreds, _) = make_slot_entries(
NUM_CONSECUTIVE_LEADER_SLOTS, // slot
1, // parent_slot
8, // num_entries
- true, // merkle_variant
);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks
@@ -4562,7 +4569,7 @@ pub(crate) mod tests {
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&mut replay_timing,
@@ -4575,12 +4582,7 @@ pub(crate) mod tests {
// Insert shreds for slot 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
// chaining to slot 1
- let (shreds, _) = make_slot_entries(
- 2 * NUM_CONSECUTIVE_LEADER_SLOTS,
- 1,
- 8,
- true, // merkle_variant
- );
+ let (shreds, _) = make_slot_entries(2 * NUM_CONSECUTIVE_LEADER_SLOTS, 1, 8);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks
.read()
@@ -4591,7 +4593,7 @@ pub(crate) mod tests {
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&mut replay_timing,
@@ -5101,13 +5103,15 @@ pub(crate) mod tests {
SlotStatusNotifierForTest::new(dead_slots.clone()),
)));
+ let rpc_subscriptions = Some(rpc_subscriptions);
+
if let Err(err) = &res {
ReplayStage::mark_dead_slot(
&blockstore,
&bank1,
0,
err,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&slot_status_notifier,
&mut DuplicateSlotsTracker::default(),
&DuplicateConfirmedSlots::new(),
@@ -5164,13 +5168,13 @@ pub(crate) mod tests {
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
- let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
+ let rpc_subscriptions = Some(Arc::new(RpcSubscriptions::new_for_tests(
exit.clone(),
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache.clone(),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
- ));
+ )));
let (lockouts_sender, _) = AggregateCommitmentService::new(
exit,
block_commitment_cache.clone(),
@@ -6462,26 +6466,26 @@ pub(crate) mod tests {
// Simulate repair fixing slot 3 and 5
let (shreds, _) = make_slot_entries(
- 3, // slot
- 1, // parent_slot
- 8, // num_entries
- true, // merkle_variant
+ 3, // slot
+ 1, // parent_slot
+ 8, // num_entries
);
blockstore.insert_shreds(shreds, None, false).unwrap();
let (shreds, _) = make_slot_entries(
- 5, // slot
- 3, // parent_slot
- 8, // num_entries
- true, // merkle_variant
+ 5, // slot
+ 3, // parent_slot
+ 8, // num_entries
);
blockstore.insert_shreds(shreds, None, false).unwrap();
+ let rpc_subscriptions = Some(rpc_subscriptions);
+
// 3 should now be an active bank
ReplayStage::generate_new_bank_forks(
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&mut replay_timing,
@@ -6511,7 +6515,7 @@ pub(crate) mod tests {
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&mut replay_timing,
@@ -6542,7 +6546,7 @@ pub(crate) mod tests {
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&mut replay_timing,
@@ -6572,7 +6576,7 @@ pub(crate) mod tests {
&blockstore,
&bank_forks,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&mut replay_timing,
@@ -8632,12 +8636,14 @@ pub(crate) mod tests {
// this test to use true to avoid skipping the leader slot
let has_new_vote_been_rooted = true;
+ let rpc_subscriptions = Some(rpc_subscriptions);
+
assert!(!ReplayStage::maybe_start_leader(
my_pubkey,
bank_forks,
&poh_recorder,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&retransmit_slots_sender,
@@ -9253,8 +9259,7 @@ pub(crate) mod tests {
let dummy_slot = working_bank.slot() + 2;
let initial_slot = working_bank.slot();
let num_entries = 10;
- let merkle_variant = true;
- let (shreds, _) = make_slot_entries(dummy_slot, initial_slot, num_entries, merkle_variant);
+ let (shreds, _) = make_slot_entries(dummy_slot, initial_slot, num_entries);
blockstore.insert_shreds(shreds, None, false).unwrap();
// Reset PoH recorder to the completed bank to ensure consistent state
@@ -9285,6 +9290,8 @@ pub(crate) mod tests {
// this test to use true to avoid skipping the leader slot
let has_new_vote_been_rooted = true;
+ let rpc_subscriptions = Some(rpc_subscriptions);
+
// We should not attempt to start leader for the dummy_slot
assert_matches!(
poh_recorder.read().unwrap().reached_leader_slot(&my_pubkey),
@@ -9295,7 +9302,7 @@ pub(crate) mod tests {
&bank_forks,
&poh_recorder,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&retransmit_slots_sender,
@@ -9321,7 +9328,7 @@ pub(crate) mod tests {
&bank_forks,
&poh_recorder,
&leader_schedule_cache,
- &rpc_subscriptions,
+ rpc_subscriptions.as_deref(),
&None,
&mut progress,
&retransmit_slots_sender,
diff --git a/core/src/tpu.rs b/core/src/tpu.rs
index aa3130a4453c87..7411fb73c7cf1f 100644
--- a/core/src/tpu.rs
+++ b/core/src/tpu.rs
@@ -126,7 +126,7 @@ impl Tpu {
entry_receiver: Receiver,
retransmit_slots_receiver: Receiver,
sockets: TpuSockets,
- subscriptions: &Arc,
+ subscriptions: Option>,
transaction_status_sender: Option,
entry_notification_sender: Option,
blockstore: Arc,
@@ -315,7 +315,7 @@ impl Tpu {
gossip_vote_sender,
vote_tracker,
bank_forks.clone(),
- subscriptions.clone(),
+ subscriptions,
verified_vote_sender,
gossip_verified_vote_hash_sender,
replay_vote_receiver,
diff --git a/core/src/tvu.rs b/core/src/tvu.rs
index 576fd3aef2c059..0575a1d8c001a1 100644
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -134,7 +134,7 @@ impl Tvu {
sockets: TvuSockets,
blockstore: Arc,
ledger_signal_receiver: Receiver,
- rpc_subscriptions: &Arc,
+ rpc_subscriptions: Option>,
poh_recorder: &Arc>,
tower: Tower,
tower_storage: Arc,
@@ -224,7 +224,7 @@ impl Tvu {
turbine_quic_endpoint_sender,
retransmit_receiver,
max_slots.clone(),
- Some(rpc_subscriptions.clone()),
+ rpc_subscriptions.clone(),
slot_status_notifier.clone(),
tvu_config.xdp_sender,
);
@@ -295,7 +295,7 @@ impl Tvu {
let (voting_sender, voting_receiver) = unbounded();
let replay_senders = ReplaySenders {
- rpc_subscriptions: rpc_subscriptions.clone(),
+ rpc_subscriptions,
slot_status_notifier,
transaction_status_sender,
entry_notification_sender,
@@ -360,7 +360,7 @@ impl Tvu {
&exit,
);
- let cost_update_service = CostUpdateService::new(blockstore.clone(), cost_update_receiver);
+ let cost_update_service = CostUpdateService::new(cost_update_receiver);
let drop_bank_service = DropBankService::new(drop_bank_receiver);
@@ -557,13 +557,13 @@ pub mod tests {
},
blockstore,
ledger_signal_receiver,
- &Arc::new(RpcSubscriptions::new_for_tests(
+ Some(Arc::new(RpcSubscriptions::new_for_tests(
exit.clone(),
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache.clone(),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
- )),
+ ))),
&poh_recorder,
Tower::default(),
Arc::new(FileTowerStorage::default()),
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 2e54c1193b7848..cb9ba3389d87d0 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -83,7 +83,7 @@ use {
transaction_recorder::TransactionRecorder,
},
solana_pubkey::Pubkey,
- solana_rayon_threadlimit::{get_max_thread_count, get_thread_count},
+ solana_rayon_threadlimit::get_thread_count,
solana_rpc::{
max_slots::MaxSlots,
optimistically_confirmed_bank_tracker::{
@@ -299,8 +299,11 @@ pub struct ValidatorConfig {
pub repair_handler_type: RepairHandlerType,
}
-impl Default for ValidatorConfig {
- fn default() -> Self {
+impl ValidatorConfig {
+ pub fn default_for_test() -> Self {
+ let max_thread_count =
+ NonZeroUsize::new(num_cpus::get()).expect("thread count is non-zero");
+
Self {
halt_at_slot: None,
expected_genesis_hash: None,
@@ -308,10 +311,10 @@ impl Default for ValidatorConfig {
expected_shred_version: None,
voting_disabled: false,
max_ledger_shreds: None,
- blockstore_options: BlockstoreOptions::default(),
+ blockstore_options: BlockstoreOptions::default_for_tests(),
account_paths: Vec::new(),
account_snapshot_paths: Vec::new(),
- rpc_config: JsonRpcConfig::default(),
+ rpc_config: JsonRpcConfig::default_for_test(),
on_start_geyser_plugin_config_files: None,
geyser_plugin_always_enabled: false,
rpc_addrs: None,
@@ -350,49 +353,30 @@ impl Default for ValidatorConfig {
validator_exit: Arc::new(RwLock::new(Exit::default())),
validator_exit_backpressure: HashMap::default(),
no_wait_for_vote_to_start_leader: true,
- accounts_db_config: None,
+ accounts_db_config: Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
wait_to_vote_slot: None,
runtime_config: RuntimeConfig::default(),
banking_trace_dir_byte_limit: 0,
block_verification_method: BlockVerificationMethod::default(),
block_production_method: BlockProductionMethod::default(),
transaction_struct: TransactionStructure::default(),
- enable_block_production_forwarding: false,
+ // enable forwarding by default for tests
+ enable_block_production_forwarding: true,
generator_config: None,
use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(),
wen_restart_proto_path: None,
wen_restart_coordinator: None,
unified_scheduler_handler_threads: None,
ip_echo_server_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
- rayon_global_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
- replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
- replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
- tvu_shred_sigverify_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
- delay_leader_block_for_pending_fork: false,
- use_tpu_client_next: true,
- retransmit_xdp: None,
- repair_handler_type: RepairHandlerType::default(),
- }
- }
-}
-
-impl ValidatorConfig {
- pub fn default_for_test() -> Self {
- let max_thread_count =
- NonZeroUsize::new(get_max_thread_count()).expect("thread count is non-zero");
-
- Self {
- accounts_db_config: Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
- blockstore_options: BlockstoreOptions::default_for_tests(),
- rpc_config: JsonRpcConfig::default_for_test(),
- block_production_method: BlockProductionMethod::default(),
- enable_block_production_forwarding: true, // enable forwarding by default for tests
rayon_global_threads: max_thread_count,
replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
replay_transactions_threads: max_thread_count,
tvu_shred_sigverify_threads: NonZeroUsize::new(get_thread_count())
.expect("thread count is non-zero"),
- ..Self::default()
+ delay_leader_block_for_pending_fork: false,
+ use_tpu_client_next: true,
+ retransmit_xdp: None,
+ repair_handler_type: RepairHandlerType::default(),
}
}
@@ -1063,17 +1047,6 @@ impl Validator {
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
- let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config(
- exit.clone(),
- max_complete_transaction_status_slot.clone(),
- blockstore.clone(),
- bank_forks.clone(),
- block_commitment_cache.clone(),
- optimistically_confirmed_bank.clone(),
- &config.pubsub_config,
- None,
- ));
-
let max_slots = Arc::new(MaxSlots::default());
let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
@@ -1152,6 +1125,7 @@ impl Validator {
Arc::new(AtomicBool::new(config.rpc_config.disable_health_check));
let (
json_rpc_service,
+ rpc_subscriptions,
pubsub_service,
completed_data_sets_sender,
completed_data_sets_service,
@@ -1208,13 +1182,22 @@ impl Validator {
send_transaction_service_config: config.send_transaction_service_config.clone(),
max_slots: max_slots.clone(),
leader_schedule_cache: leader_schedule_cache.clone(),
- max_complete_transaction_status_slot,
+ max_complete_transaction_status_slot: max_complete_transaction_status_slot.clone(),
prioritization_fee_cache: prioritization_fee_cache.clone(),
client_option,
};
let json_rpc_service =
JsonRpcService::new_with_config(rpc_svc_config).map_err(ValidatorError::Other)?;
-
+ let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config(
+ exit.clone(),
+ max_complete_transaction_status_slot,
+ blockstore.clone(),
+ bank_forks.clone(),
+ block_commitment_cache.clone(),
+ optimistically_confirmed_bank.clone(),
+ &config.pubsub_config,
+ None,
+ ));
let pubsub_service = if !config.rpc_config.full_api {
None
} else {
@@ -1283,6 +1266,7 @@ impl Validator {
});
(
Some(json_rpc_service),
+ Some(rpc_subscriptions),
pubsub_service,
completed_data_sets_sender,
completed_data_sets_service,
@@ -1291,7 +1275,7 @@ impl Validator {
bank_notification_sender_config,
)
} else {
- (None, None, None, None, None, None, None)
+ (None, None, None, None, None, None, None, None)
};
if config.halt_at_slot.is_some() {
@@ -1522,7 +1506,7 @@ impl Validator {
},
blockstore.clone(),
ledger_signal_receiver,
- &rpc_subscriptions,
+ rpc_subscriptions.clone(),
&poh_recorder,
tower,
config.tower_storage.clone(),
@@ -1627,7 +1611,7 @@ impl Validator {
vote_forwarding_client: node.sockets.tpu_vote_forwarding_client,
vortexor_receivers: node.sockets.vortexor_receivers,
},
- &rpc_subscriptions,
+ rpc_subscriptions.clone(),
transaction_status_sender,
entry_notification_sender,
blockstore.clone(),
diff --git a/cost-model/src/block_cost_limits.rs b/cost-model/src/block_cost_limits.rs
index 74c1c1f3307213..aa54a7ce252c03 100644
--- a/cost-model/src/block_cost_limits.rs
+++ b/cost-model/src/block_cost_limits.rs
@@ -25,8 +25,7 @@ pub const INSTRUCTION_DATA_BYTES_COST: u64 = 140 /*bytes per us*/ / COMPUTE_UNIT
/// accumulated by Transactions added to it; A transaction's compute units are
/// calculated by cost_model, based on transaction's signatures, write locks,
/// data size and built-in and SBF instructions.
-pub const MAX_BLOCK_UNITS: u64 = MAX_BLOCK_UNITS_SIMD_0207;
-pub const MAX_BLOCK_UNITS_SIMD_0207: u64 = 50_000_000;
+pub const MAX_BLOCK_UNITS: u64 = MAX_BLOCK_UNITS_SIMD_0256;
pub const MAX_BLOCK_UNITS_SIMD_0256: u64 = 60_000_000;
/// Number of compute units that a writable account in a block is allowed. The
@@ -41,16 +40,3 @@ pub const MAX_VOTE_UNITS: u64 = 36_000_000;
/// The maximum allowed size, in bytes, that accounts data can grow, per block.
/// This can also be thought of as the maximum size of new allocations per block.
pub const MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA: u64 = 100_000_000;
-
-/// Return the block limits that will be used upon activation of SIMD-0256.
-/// Returns as
-/// (account_limit, block_limit, vote_limit)
-// ^ Above order is used to be consistent with the order of
-// `CostTracker::set_limits`.
-pub const fn simd_0256_block_limits() -> (u64, u64, u64) {
- (
- MAX_WRITABLE_ACCOUNT_UNITS,
- MAX_BLOCK_UNITS_SIMD_0256,
- MAX_VOTE_UNITS,
- )
-}
diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs
index a0e8196c71f699..77c27319c7c2f1 100644
--- a/cost-model/src/cost_model.rs
+++ b/cost-model/src/cost_model.rs
@@ -41,8 +41,9 @@ impl CostModel {
if transaction.is_simple_vote_transaction() {
TransactionCost::SimpleVote { transaction }
} else {
- let (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) =
- Self::get_transaction_cost(transaction, feature_set);
+ let (programs_execution_cost, loaded_accounts_data_size_cost) =
+ Self::get_estimated_execution_cost(transaction, feature_set);
+ let data_bytes_cost = Self::get_instructions_data_cost(transaction);
Self::calculate_non_vote_transaction_cost(
transaction,
transaction.program_instructions_iter(),
@@ -97,8 +98,9 @@ impl CostModel {
if transaction.is_simple_vote_transaction() {
return TransactionCost::SimpleVote { transaction };
}
- let (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) =
- Self::get_transaction_cost(transaction, feature_set);
+ let (programs_execution_cost, loaded_accounts_data_size_cost) =
+ Self::get_estimated_execution_cost(transaction, feature_set);
+ let data_bytes_cost = Self::get_instructions_data_cost(transaction);
Self::calculate_non_vote_transaction_cost(
transaction,
instructions,
@@ -181,18 +183,6 @@ impl CostModel {
WRITE_LOCK_UNITS.saturating_mul(num_write_locks)
}
- /// Return (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost)
- fn get_transaction_cost(meta: &impl StaticMeta, feature_set: &FeatureSet) -> (u64, u64, u16) {
- let data_bytes_cost = Self::get_instructions_data_cost(meta);
- let (programs_execution_cost, loaded_accounts_data_size_cost) =
- Self::get_estimated_execution_cost(meta, feature_set);
- (
- programs_execution_cost,
- loaded_accounts_data_size_cost,
- data_bytes_cost,
- )
- }
-
/// Return (programs_execution_cost, loaded_accounts_data_size_cost)
fn get_estimated_execution_cost(
transaction: &impl StaticMeta,
@@ -523,10 +513,10 @@ mod tests {
let feature_set = FeatureSet::default();
let expected_execution_cost = u64::from(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT);
- let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) =
- CostModel::get_transaction_cost(&simple_transaction, &feature_set);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&simple_transaction, &feature_set);
- assert_eq!(expected_execution_cost, program_execution_cost);
+ assert_eq!(expected_execution_cost, programs_execution_cost);
}
#[test]
@@ -553,10 +543,11 @@ mod tests {
DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64,
),
] {
- let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) =
- CostModel::get_transaction_cost(&token_transaction, &feature_set);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&token_transaction, &feature_set);
+ let data_bytes_cost = CostModel::get_instructions_data_cost(&token_transaction);
- assert_eq!(expected_execution_cost, program_execution_cost);
+ assert_eq!(expected_execution_cost, programs_execution_cost);
assert_eq!(0, data_bytes_cost);
}
}
@@ -606,10 +597,11 @@ mod tests {
(FeatureSet::default(), expected_cu_limit as u64),
(FeatureSet::all_enabled(), expected_cu_limit as u64),
] {
- let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) =
- CostModel::get_transaction_cost(&token_transaction, &feature_set);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&token_transaction, &feature_set);
+ let data_bytes_cost = CostModel::get_instructions_data_cost(&token_transaction);
- assert_eq!(expected_execution_cost, program_execution_cost);
+ assert_eq!(expected_execution_cost, programs_execution_cost);
assert_eq!(1, data_bytes_cost);
}
}
@@ -646,9 +638,9 @@ mod tests {
let token_transaction = RuntimeTransaction::from_transaction_for_tests(tx);
for feature_set in [FeatureSet::default(), FeatureSet::all_enabled()] {
- let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) =
- CostModel::get_transaction_cost(&token_transaction, &feature_set);
- assert_eq!(0, program_execution_cost);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&token_transaction, &feature_set);
+ assert_eq!(0, programs_execution_cost);
}
}
@@ -670,8 +662,9 @@ mod tests {
// expected cost for two system transfer instructions
let feature_set = FeatureSet::default();
let expected_execution_cost = 2 * u64::from(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT);
- let (programs_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) =
- CostModel::get_transaction_cost(&tx, &feature_set);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&tx, &feature_set);
+ let data_bytes_cost = CostModel::get_instructions_data_cost(&tx);
assert_eq!(expected_execution_cost, programs_execution_cost);
assert_eq!(6, data_bytes_cost);
}
@@ -709,9 +702,10 @@ mod tests {
DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 2,
),
] {
- let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) =
- CostModel::get_transaction_cost(&tx, &feature_set);
- assert_eq!(expected_cost, program_execution_cost);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&tx, &feature_set);
+ let data_bytes_cost = CostModel::get_instructions_data_cost(&tx);
+ assert_eq!(expected_cost, programs_execution_cost);
assert_eq!(0, data_bytes_cost);
}
}
@@ -827,8 +821,8 @@ mod tests {
let feature_set = FeatureSet::default();
let expected_execution_cost = u64::from(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT)
+ u64::from(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT);
- let (programs_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) =
- CostModel::get_transaction_cost(&transaction, &feature_set);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&transaction, &feature_set);
assert_eq!(expected_execution_cost, programs_execution_cost);
}
@@ -851,8 +845,8 @@ mod tests {
let feature_set = FeatureSet::default();
let expected_execution_cost = cu_limit as u64;
- let (programs_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) =
- CostModel::get_transaction_cost(&transaction, &feature_set);
+ let (programs_execution_cost, _loaded_accounts_data_size_cost) =
+ CostModel::get_estimated_execution_cost(&transaction, &feature_set);
assert_eq!(expected_execution_cost, programs_execution_cost);
}
diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs
index 210f30738dbc4b..c26062754e5c32 100644
--- a/cost-model/src/cost_tracker.rs
+++ b/cost-model/src/cost_tracker.rs
@@ -216,7 +216,13 @@ impl CostTracker {
self.transaction_count.0
}
- pub fn report_stats(&self, bank_slot: solana_clock::Slot, is_leader: bool) {
+ pub fn report_stats(
+ &self,
+ bank_slot: solana_clock::Slot,
+ is_leader: bool,
+ total_transaction_fee: u64,
+ total_priority_fee: u64,
+ ) {
// skip reporting if block is empty
if self.transaction_count.0 == 0 {
return;
@@ -227,13 +233,13 @@ impl CostTracker {
datapoint_info!(
"cost_tracker_stats",
"is_leader" => is_leader.to_string(),
- ("bank_slot", bank_slot as i64, i64),
- ("block_cost", self.block_cost as i64, i64),
- ("vote_cost", self.vote_cost as i64, i64),
- ("transaction_count", self.transaction_count.0 as i64, i64),
- ("number_of_accounts", self.number_of_accounts() as i64, i64),
+ ("bank_slot", bank_slot, i64),
+ ("block_cost", self.block_cost, i64),
+ ("vote_cost", self.vote_cost, i64),
+ ("transaction_count", self.transaction_count.0, i64),
+ ("number_of_accounts", self.number_of_accounts(), i64),
("costliest_account", costliest_account.to_string(), String),
- ("costliest_account_cost", costliest_account_cost as i64, i64),
+ ("costliest_account_cost", costliest_account_cost, i64),
(
"allocated_accounts_data_size",
self.allocated_accounts_data_size.0,
@@ -263,7 +269,9 @@ impl CostTracker {
"secp256r1_instruction_signature_count",
self.secp256r1_instruction_signature_count.0,
i64
- )
+ ),
+ ("total_transaction_fee", total_transaction_fee, i64),
+ ("total_priority_fee", total_priority_fee, i64),
);
}
diff --git a/entry/Cargo.toml b/entry/Cargo.toml
index bf72e34fdf8ab8..d516424608dacf 100644
--- a/entry/Cargo.toml
+++ b/entry/Cargo.toml
@@ -16,11 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"]
crate-type = ["lib"]
name = "solana_entry"
+[features]
+dev-context-only-utils = []
+
[dependencies]
bincode = { workspace = true }
crossbeam-channel = { workspace = true }
dlopen2 = { workspace = true }
log = { workspace = true }
+num_cpus = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
serde = { workspace = true }
@@ -30,7 +34,6 @@ solana-merkle-tree = { workspace = true }
solana-metrics = { workspace = true }
solana-packet = { workspace = true }
solana-perf = { workspace = true }
-solana-rayon-threadlimit = { workspace = true }
solana-runtime-transaction = { workspace = true }
solana-sha256-hasher = { workspace = true }
solana-transaction = { workspace = true }
@@ -39,6 +42,7 @@ solana-transaction-error = { workspace = true }
[dev-dependencies]
agave-reserved-account-keys = { workspace = true }
assert_matches = { workspace = true }
+solana-entry = { path = ".", features = ["dev-context-only-utils"] }
solana-keypair = { workspace = true }
solana-logger = { workspace = true }
solana-message = { workspace = true }
diff --git a/entry/src/entry.rs b/entry/src/entry.rs
index c5249d7e3d4ca2..11cd27ccf30532 100644
--- a/entry/src/entry.rs
+++ b/entry/src/entry.rs
@@ -22,7 +22,6 @@ use {
recycler::Recycler,
sigverify,
},
- solana_rayon_threadlimit::get_max_thread_count,
solana_runtime_transaction::transaction_with_meta::TransactionWithMeta,
solana_transaction::{
versioned::VersionedTransaction, Transaction, TransactionVerificationMode,
@@ -959,9 +958,10 @@ pub fn thread_pool_for_tests() -> ThreadPool {
.expect("new rayon threadpool")
}
+#[cfg(feature = "dev-context-only-utils")]
pub fn thread_pool_for_benches() -> ThreadPool {
rayon::ThreadPoolBuilder::new()
- .num_threads(get_max_thread_count())
+ .num_threads(num_cpus::get())
.thread_name(|i| format!("solEntryBnch{i:02}"))
.build()
.expect("new rayon threadpool")
diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs
index 1e4fb7dbba0aef..2f55978ca5999d 100644
--- a/geyser-plugin-manager/src/geyser_plugin_manager.rs
+++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs
@@ -238,7 +238,8 @@ impl GeyserPluginManager {
return Err(jsonrpc_core::Error {
code: ErrorCode::InvalidRequest,
message: format!(
- "There already exists a plugin named {} loaded, while reloading {name}. Did not load requested plugin",
+ "There already exists a plugin named {} loaded, while reloading {name}. Did \
+ not load requested plugin",
new_plugin.name()
),
data: None,
@@ -357,7 +358,8 @@ pub(crate) fn load_plugin_from_config(
Ok(file) => file,
Err(err) => {
return Err(GeyserPluginManagerError::CannotOpenConfigFile(format!(
- "Failed to open the plugin config file {geyser_plugin_config_file:?}, error: {err:?}"
+ "Failed to open the plugin config file {geyser_plugin_config_file:?}, error: \
+ {err:?}"
)));
}
};
@@ -373,7 +375,8 @@ pub(crate) fn load_plugin_from_config(
Ok(value) => value,
Err(err) => {
return Err(GeyserPluginManagerError::InvalidConfigFileFormat(format!(
- "The config file {geyser_plugin_config_file:?} is not in a valid Json5 format, error: {err:?}"
+ "The config file {geyser_plugin_config_file:?} is not in a valid Json5 format, \
+ error: {err:?}"
)));
}
};
diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs
index b0691af2196c7a..b866470a7e0717 100644
--- a/geyser-plugin-manager/src/geyser_plugin_service.rs
+++ b/geyser-plugin-manager/src/geyser_plugin_service.rs
@@ -78,10 +78,7 @@ impl GeyserPluginService {
Arc,
)>,
) -> Result {
- info!(
- "Starting GeyserPluginService from config files: {:?}",
- geyser_plugin_config_files
- );
+ info!("Starting GeyserPluginService from config files: {geyser_plugin_config_files:?}");
let mut plugin_manager = GeyserPluginManager::new();
for geyser_plugin_config_file in geyser_plugin_config_files {
diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index a3bca722f7bb6b..e9af4d259fb06c 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -1431,7 +1431,7 @@ impl ClusterInfo {
) -> JoinHandle<()> {
let thread_pool = ThreadPoolBuilder::new()
.num_threads(std::cmp::min(get_thread_count(), 8))
- .thread_name(|i| format!("solRunGossip{i:02}"))
+ .thread_name(|i| format!("solGossipRun{i:02}"))
.build()
.unwrap();
let mut epoch_specs = bank_forks.map(EpochSpecs::from);
diff --git a/ledger/src/ancestor_iterator.rs b/ledger/src/ancestor_iterator.rs
index 94333e737f1d85..d736f578c468d9 100644
--- a/ledger/src/ancestor_iterator.rs
+++ b/ledger/src/ancestor_iterator.rs
@@ -120,11 +120,11 @@ mod tests {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- let (shreds, _) = make_slot_entries(0, 0, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(0, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(1, 0, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (shreds, _) = make_slot_entries(2, 1, 42, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(2, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 8794dc19e0c5f6..9d5259f67953cd 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -5119,9 +5119,9 @@ pub fn make_slot_entries(
slot: Slot,
parent_slot: Slot,
num_entries: u64,
- merkle_variant: bool,
) -> (Vec, Vec) {
let entries = create_ticks(num_entries, 1, Hash::new_unique());
+ let merkle_variant = true;
let shreds = entries_to_test_shreds(&entries, slot, parent_slot, true, 0, merkle_variant);
(shreds, entries)
}
@@ -5137,12 +5137,7 @@ pub fn make_many_slot_entries(
for slot in start_slot..start_slot + num_slots {
let parent_slot = if slot == 0 { 0 } else { slot - 1 };
- let (slot_shreds, slot_entries) = make_slot_entries(
- slot,
- parent_slot,
- entries_per_slot,
- true, // merkle_variant
- );
+ let (slot_shreds, slot_entries) = make_slot_entries(slot, parent_slot, entries_per_slot);
shreds.extend(slot_shreds);
entries.extend(slot_entries);
}
@@ -5268,12 +5263,7 @@ pub fn make_chaining_slot_entries(
}
};
- let result = make_slot_entries(
- *slot,
- parent_slot,
- entries_per_slot,
- true, // merkle_variant
- );
+ let result = make_slot_entries(*slot, parent_slot, entries_per_slot);
slots_shreds_and_entries.push(result);
}
@@ -5339,7 +5329,7 @@ pub mod tests {
crate::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
leader_schedule::{FixedSchedule, IdentityKeyedLeaderSchedule},
- shred::{max_ticks_per_n_shreds, ShredFlags, LEGACY_SHRED_DATA_CAPACITY},
+ shred::{max_ticks_per_n_shreds, ShredFlags},
},
assert_matches::assert_matches,
bincode::{serialize, Options},
@@ -5366,7 +5356,7 @@ pub mod tests {
solana_transaction_status::{
InnerInstruction, InnerInstructions, Reward, Rewards, TransactionTokenBalance,
},
- std::{cmp::Ordering, thread::Builder, time::Duration},
+ std::{cmp::Ordering, time::Duration},
test_case::test_case,
};
@@ -5392,8 +5382,7 @@ pub mod tests {
let (shreds, _) = make_slot_entries(
slot,
parent_slot,
- 100, // num_entries
- true, // merkle_variant
+ 100, // num_entries
);
blockstore.insert_shreds(shreds, None, true).unwrap();
@@ -5441,7 +5430,6 @@ pub mod tests {
0, // slot
0, // parent_slot
num_entries,
- true, // merkle_variant
);
let ledger_path = get_tmp_ledger_path_auto_delete!();
@@ -5641,7 +5629,7 @@ pub mod tests {
#[test]
fn test_read_shred_bytes() {
let slot = 0;
- let (shreds, _) = make_slot_entries(slot, 0, 100, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(slot, 0, 100);
let num_shreds = shreds.len() as u64;
let shred_bufs: Vec<_> = shreds.iter().map(Shred::payload).cloned().collect();
@@ -5697,7 +5685,7 @@ pub mod tests {
#[test]
fn test_shred_cleanup_check() {
let slot = 1;
- let (shreds, _) = make_slot_entries(slot, 0, 100, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(slot, 0, 100);
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
@@ -5726,7 +5714,6 @@ pub mod tests {
0, // slot
0, // parent_slot
num_entries,
- true, // merkle_variant
);
let num_shreds = shreds.len() as u64;
@@ -5774,7 +5761,6 @@ pub mod tests {
0, // slot
0, // parent_slot
num_entries,
- true, // merkle_variant
);
let num_shreds = shreds.len() as u64;
@@ -5904,12 +5890,7 @@ pub mod tests {
let parent_slot = if i == 0 { 0 } else { i - 1 };
// Write entries
let num_entries = min_entries * (i + 1);
- let (shreds, original_entries) = make_slot_entries(
- slot,
- parent_slot,
- num_entries,
- true, // merkle_variant
- );
+ let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries);
let num_shreds = shreds.len() as u64;
assert!(num_shreds > 1);
@@ -6007,7 +5988,6 @@ pub mod tests {
0, // slot
0, // parent_slot
entries_per_slot,
- true, // merkle_variant
);
let shreds_per_slot = shreds.len() as u64;
@@ -6042,7 +6022,6 @@ pub mod tests {
slot,
slot - 1, // parent_slot
entries_per_slot,
- true, // merkle_variant
);
let missing_shred = slot_shreds.remove(slot as usize - 1);
shreds.extend(slot_shreds);
@@ -6087,8 +6066,7 @@ pub mod tests {
let entries_per_slot = 10;
// Create shreds for slot 0
- let (mut shreds, _) =
- make_slot_entries(0, 0, entries_per_slot, /*merkle_variant:*/ true);
+ let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot);
let shred0 = shreds.remove(0);
// Insert all but the first shred in the slot, should not be considered complete
@@ -6167,7 +6145,6 @@ pub mod tests {
disconnected_slot,
1, // parent_slot
entries_per_slot,
- true, // merkle_variant
);
let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3]
@@ -6420,12 +6397,7 @@ pub mod tests {
} else {
slot.saturating_sub(1)
};
- let (shreds, _) = make_slot_entries(
- slot,
- parent_slot,
- entries_per_slot,
- true, // merkle_variant
- );
+ let (shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot);
shreds.into_iter()
})
.collect();
@@ -6712,8 +6684,8 @@ pub mod tests {
// Write some slot that also chains to existing slots and orphan,
// nothing should change
- let (shred4, _) = make_slot_entries(4, 0, 1, /*merkle_variant:*/ true);
- let (shred5, _) = make_slot_entries(5, 1, 1, /*merkle_variant:*/ true);
+ let (shred4, _) = make_slot_entries(4, 0, 1);
+ let (shred5, _) = make_slot_entries(5, 1, 1);
blockstore.insert_shreds(shred4, None, false).unwrap();
blockstore.insert_shreds(shred5, None, false).unwrap();
assert_eq!(
@@ -6744,7 +6716,7 @@ pub mod tests {
let mut shreds = vec![];
for slot in 0..num_slots {
let parent_slot = slot.saturating_sub(1);
- let (slot_shreds, entry) = make_slot_entries(slot, parent_slot, 1, true);
+ let (slot_shreds, entry) = make_slot_entries(slot, parent_slot, 1);
shreds.extend(slot_shreds);
entries.extend(entry);
}
@@ -7249,7 +7221,7 @@ pub mod tests {
#[test]
fn test_is_data_shred_present() {
- let (shreds, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(0, 0, 200);
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let index_cf = &blockstore.index_cf;
@@ -7743,7 +7715,7 @@ pub mod tests {
#[test]
fn test_insert_multiple_is_last() {
solana_logger::setup();
- let (shreds, _) = make_slot_entries(0, 0, 18, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(0, 0, 18);
let num_shreds = shreds.len() as u64;
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
@@ -7756,7 +7728,7 @@ pub mod tests {
assert_eq!(slot_meta.last_index, Some(num_shreds - 1));
assert!(slot_meta.is_full());
- let (shreds, _) = make_slot_entries(0, 0, 600, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(0, 0, 600);
assert!(shreds.len() > num_shreds as usize);
blockstore.insert_shreds(shreds, None, false).unwrap();
let slot_meta = blockstore.meta(0).unwrap().unwrap();
@@ -7946,7 +7918,7 @@ pub mod tests {
fn test_no_insert_but_modify_slot_meta() {
// This tests correctness of the SlotMeta in various cases in which a shred
// that gets filtered out by checks
- let (shreds0, _) = make_slot_entries(0, 0, 200, /*merkle_variant:*/ true);
+ let (shreds0, _) = make_slot_entries(0, 0, 200);
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
@@ -7958,8 +7930,8 @@ pub mod tests {
// Insert a repetitive shred for slot 's', should get ignored, but also
// insert shreds that chains to 's', should see the update in the SlotMeta
// for 's'.
- let (mut shreds2, _) = make_slot_entries(2, 0, 200, /*merkle_variant:*/ true);
- let (mut shreds3, _) = make_slot_entries(3, 0, 200, /*merkle_variant:*/ true);
+ let (mut shreds2, _) = make_slot_entries(2, 0, 200);
+ let (mut shreds3, _) = make_slot_entries(3, 0, 200);
shreds2.push(shreds0[1].clone());
shreds3.insert(0, shreds0[1].clone());
blockstore.insert_shreds(shreds2, None, false).unwrap();
@@ -7976,7 +7948,7 @@ pub mod tests {
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
// Make shred for slot 1
- let (shreds1, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true);
+ let (shreds1, _) = make_slot_entries(1, 0, 1);
let max_root = 100;
blockstore.set_roots(std::iter::once(&max_root)).unwrap();
@@ -9302,7 +9274,7 @@ pub mod tests {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- let (shreds, _) = make_slot_entries(1, 0, 4, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(1, 0, 4);
blockstore.insert_shreds(shreds, None, false).unwrap();
fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec {
@@ -9989,7 +9961,7 @@ pub mod tests {
assert_eq!(blockstore.lowest_slot(), 0);
for slot in 0..10 {
- let (shreds, _) = make_slot_entries(slot, 0, 1, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(slot, 0, 1);
blockstore.insert_shreds(shreds, None, false).unwrap();
}
assert_eq!(blockstore.lowest_slot(), 1);
@@ -10005,7 +9977,7 @@ pub mod tests {
assert_eq!(blockstore.highest_slot().unwrap(), None);
for slot in 0..10 {
- let (shreds, _) = make_slot_entries(slot, 0, 1, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(slot, 0, 1);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(blockstore.highest_slot().unwrap(), Some(slot));
}
@@ -10729,8 +10701,7 @@ pub mod tests {
// Create enough entries to ensure there are at least two shreds created
let num_unique_entries = max_ticks_per_n_shreds(1, None) + 1;
- let (mut original_shreds, original_entries) =
- make_slot_entries(0, 0, num_unique_entries, /*merkle_variant:*/ true);
+ let (mut original_shreds, original_entries) = make_slot_entries(0, 0, num_unique_entries);
let mut duplicate_shreds = original_shreds.clone();
// Mutate signature so that payloads are not the same as the originals.
for shred in &mut duplicate_shreds {
@@ -10773,21 +10744,60 @@ pub mod tests {
assert!(!blockstore.is_dead(0));
}
+ /// Prepare two FEC sets of shreds for the same slot index
+ /// with reasonable shred indices, but in such a way that
+ /// both FEC sets include a shred with LAST_IN_SLOT flag set.
+ #[allow(clippy::type_complexity)]
+ fn setup_duplicate_last_in_slot(
+ slot: Slot,
+ ) -> ((Vec, Vec), (Vec, Vec)) {
+ let entries = make_slot_entries_with_transactions(1);
+ let leader_keypair = Arc::new(Keypair::new());
+ let reed_solomon_cache = ReedSolomonCache::default();
+ let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
+ let (shreds1, code1): (Vec, Vec) = shredder
+ .make_merkle_shreds_from_entries(
+ &leader_keypair,
+ &entries,
+ true, // is_last_in_slot
+ Some(Hash::new_unique()),
+ 0, // next_shred_index
+ 0, // next_code_index,
+ &reed_solomon_cache,
+ &mut ProcessShredsStats::default(),
+ )
+ .partition(Shred::is_data);
+ let last_data1 = shreds1.last().unwrap();
+ let last_code1 = code1.last().unwrap();
+
+ let (shreds2, code2) = shredder
+ .make_merkle_shreds_from_entries(
+ &leader_keypair,
+ &entries,
+ true, // is_last_in_slot
+ Some(last_data1.chained_merkle_root().unwrap()),
+ last_data1.index() + 1, // next_shred_index
+ last_code1.index() + 1, // next_code_index,
+ &reed_solomon_cache,
+ &mut ProcessShredsStats::default(),
+ )
+ .partition(Shred::is_data);
+ ((shreds1, code1), (shreds2, code2))
+ }
+
#[test]
fn test_duplicate_last_index() {
- let num_shreds = 2;
- let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let slot = 1;
- let (mut shreds, _) =
- make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false);
+ let ((shreds1, _code1), (shreds2, _code2)) = setup_duplicate_last_in_slot(slot);
- // Mark both as last shred
- shreds[0].set_last_in_slot();
- shreds[1].set_last_in_slot();
+ let last_data1 = shreds1.last().unwrap();
+ let last_data2 = shreds2.last().unwrap();
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
- blockstore.insert_shreds(shreds, None, false).unwrap();
+ blockstore
+ .insert_shreds(vec![last_data1.clone(), last_data2.clone()], None, false)
+ .unwrap();
assert!(blockstore.get_duplicate_slot(slot).is_some());
}
@@ -10795,16 +10805,13 @@ pub mod tests {
#[test]
fn test_duplicate_last_index_mark_dead() {
let num_shreds = 10;
- let smaller_last_shred_index = 5;
+ let smaller_last_shred_index = 31;
let larger_last_shred_index = 8;
let setup_test_shreds = |slot: Slot| -> Vec {
- let num_entries = max_ticks_per_n_shreds(num_shreds, Some(LEGACY_SHRED_DATA_CAPACITY));
- let (mut shreds, _) =
- make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false);
- shreds[smaller_last_shred_index].set_last_in_slot();
- shreds[larger_last_shred_index].set_last_in_slot();
- shreds
+ let ((mut shreds1, _code1), (mut shreds2, _code2)) = setup_duplicate_last_in_slot(slot);
+ shreds1.append(&mut shreds2);
+ shreds1
};
let get_expected_slot_meta_and_index_meta =
@@ -10863,38 +10870,6 @@ pub mod tests {
assert_eq!(meta, expected_slot_meta);
assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);
- // Case 2: Inserting a duplicate with an even smaller last shred index should not
- // mark the slot as dead since the Slotmeta is full.
- let even_smaller_last_shred_duplicate = {
- let mut payload = shreds[smaller_last_shred_index - 1].payload().clone();
- // Flip a byte to create a duplicate shred
- payload[0] = u8::MAX - payload[0];
- let mut shred = Shred::new_from_serialized_shred(payload).unwrap();
- shred.set_last_in_slot();
- shred
- };
- assert!(blockstore
- .is_shred_duplicate(&even_smaller_last_shred_duplicate)
- .is_some());
- blockstore
- .insert_shreds(vec![even_smaller_last_shred_duplicate], None, false)
- .unwrap();
- assert!(!blockstore.is_dead(slot));
- for i in 0..num_shreds {
- if i <= smaller_last_shred_index as u64 {
- assert_eq!(
- blockstore.get_data_shred(slot, i).unwrap().unwrap(),
- shreds[i as usize].payload().as_ref(),
- );
- } else {
- assert!(blockstore.get_data_shred(slot, i).unwrap().is_none());
- }
- }
- let mut meta = blockstore.meta(slot).unwrap().unwrap();
- meta.first_shred_timestamp = expected_slot_meta.first_shred_timestamp;
- assert_eq!(meta, expected_slot_meta);
- assert_eq!(blockstore.get_index(slot).unwrap().unwrap(), expected_index);
-
// Case 3: Insert shreds in reverse so that consumed will not be updated. Now on insert, the
// the slot should be marked as dead
slot += 1;
@@ -10964,24 +10939,6 @@ pub mod tests {
#[test]
fn test_get_slot_entries_dead_slot_race() {
- let setup_test_shreds = move |slot: Slot| -> Vec {
- let num_shreds = 10;
- let middle_shred_index = 5;
- let num_entries = max_ticks_per_n_shreds(num_shreds, None);
- let (shreds, _) =
- make_slot_entries(slot, 0, num_entries, /*merkle_variant:*/ false);
-
- // Reverse shreds so that last shred gets inserted first and sets meta.received
- let mut shreds: Vec = shreds.into_iter().rev().collect();
-
- // Push the real middle shred to the end of the shreds list
- shreds.push(shreds[middle_shred_index].clone());
-
- // Set the middle shred as a last shred to cause the slot to be marked dead
- shreds[middle_shred_index].set_last_in_slot();
- shreds
- };
-
let ledger_path = get_tmp_ledger_path_auto_delete!();
{
let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
@@ -10989,79 +10946,63 @@ pub mod tests {
let (shred_sender, shred_receiver) = unbounded::>();
let (signal_sender, signal_receiver) = unbounded();
- let t_entry_getter = {
- let blockstore = blockstore.clone();
- let signal_sender = signal_sender.clone();
- Builder::new()
- .spawn(move || {
- while let Ok(slot) = slot_receiver.recv() {
- match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
- Ok((_entries, _num_shreds, is_full)) => {
- if is_full {
- signal_sender
- .send(Err(IoError::other(
- "got full slot entries for dead slot",
- )))
- .unwrap();
- }
- }
- Err(err) => {
- assert_matches!(err, BlockstoreError::DeadSlot);
+ std::thread::scope(|scope| {
+ scope.spawn(|| {
+ while let Ok(slot) = slot_receiver.recv() {
+ match blockstore.get_slot_entries_with_shred_info(slot, 0, false) {
+ Ok((_entries, _num_shreds, is_full)) => {
+ if is_full {
+ signal_sender
+ .send(Err(IoError::other(
+ "got full slot entries for dead slot",
+ )))
+ .unwrap();
}
}
- signal_sender.send(Ok(())).unwrap();
- }
- })
- .unwrap()
- };
-
- let t_shred_inserter = {
- let blockstore = blockstore.clone();
- Builder::new()
- .spawn(move || {
- while let Ok(shreds) = shred_receiver.recv() {
- let slot = shreds[0].slot();
- // Grab this lock to block `get_slot_entries` before it fetches completed datasets
- // and then mark the slot as dead, but full, by inserting carefully crafted shreds.
-
- #[allow(clippy::readonly_write_lock)]
- // Possible clippy bug, the lock is unused so clippy shouldn't care
- // about read vs. write lock
- let _lowest_cleanup_slot =
- blockstore.lowest_cleanup_slot.write().unwrap();
- blockstore.insert_shreds(shreds, None, false).unwrap();
- assert!(blockstore.get_duplicate_slot(slot).is_some());
- assert!(blockstore.is_dead(slot));
- assert!(blockstore.meta(slot).unwrap().unwrap().is_full());
- signal_sender.send(Ok(())).unwrap();
+ Err(err) => {
+ assert_matches!(err, BlockstoreError::DeadSlot);
+ }
}
- })
- .unwrap()
- };
-
- for slot in 0..100 {
- let shreds = setup_test_shreds(slot);
+ signal_sender.send(Ok(())).unwrap();
+ }
+ });
- // Start a task on each thread to trigger a race condition
- slot_sender.send(slot).unwrap();
- shred_sender.send(shreds).unwrap();
+ scope.spawn(|| {
+ while let Ok(shreds) = shred_receiver.recv() {
+ let slot = shreds[0].slot();
+ // Grab this lock to block `get_slot_entries` before it fetches completed datasets
+ // and then mark the slot as dead, but full, by inserting carefully crafted shreds.
+
+ #[allow(clippy::readonly_write_lock)]
+ // Possible clippy bug, the lock is unused so clippy shouldn't care
+ // about read vs. write lock
+ let _lowest_cleanup_slot = blockstore.lowest_cleanup_slot.write().unwrap();
+ blockstore.insert_shreds(shreds, None, false).unwrap();
+ assert!(blockstore.get_duplicate_slot(slot).is_some());
+ assert!(blockstore.is_dead(slot));
+ signal_sender.send(Ok(())).unwrap();
+ }
+ });
- // Check that each thread processed their task before continuing
- for _ in 1..=2 {
- let res = signal_receiver.recv().unwrap();
- assert!(res.is_ok(), "race condition: {res:?}");
+ for slot in 0..100 {
+ let ((mut shreds1, _), (mut shreds2, _)) = setup_duplicate_last_in_slot(slot);
+ // compose shreds in reverse order of FEC sets to
+ // make sure slot is marked dead
+ shreds2.append(&mut shreds1);
+ // Start a task on each thread to trigger a race condition
+ slot_sender.send(slot).unwrap();
+ shred_sender.send(shreds2).unwrap();
+
+ // Check that each thread processed their task before continuing
+ for _ in 1..=2 {
+ let res = signal_receiver.recv().unwrap();
+ assert!(res.is_ok(), "race condition: {res:?}");
+ }
}
- }
-
- drop(slot_sender);
- drop(shred_sender);
- let handles = vec![t_entry_getter, t_shred_inserter];
- for handle in handles {
- assert!(handle.join().is_ok());
- }
-
- assert!(Arc::strong_count(&blockstore) == 1);
+ drop(slot_sender);
+ drop(shred_sender);
+ });
}
}
diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs
index c5ce36084ce972..bfd4462e58e316 100644
--- a/ledger/src/blockstore/blockstore_purge.rs
+++ b/ledger/src/blockstore/blockstore_purge.rs
@@ -1094,9 +1094,9 @@ pub mod tests {
let (shreds, _) = make_many_slot_entries(0, 10, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
- let (slot_11, _) = make_slot_entries(11, 4, 5, true);
+ let (slot_11, _) = make_slot_entries(11, 4, 5);
blockstore.insert_shreds(slot_11, None, false).unwrap();
- let (slot_12, _) = make_slot_entries(12, 5, 5, true);
+ let (slot_12, _) = make_slot_entries(12, 5, 5);
blockstore.insert_shreds(slot_12, None, false).unwrap();
blockstore.purge_slot_cleanup_chaining(5).unwrap();
diff --git a/ledger/src/blockstore_options.rs b/ledger/src/blockstore_options.rs
index 15a9ff1041ed40..349c5658ecebc4 100644
--- a/ledger/src/blockstore_options.rs
+++ b/ledger/src/blockstore_options.rs
@@ -7,7 +7,7 @@ use {
/// The subdirectory under ledger directory where the Blockstore lives
pub const BLOCKSTORE_DIRECTORY_ROCKS_LEVEL: &str = "rocksdb";
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub struct BlockstoreOptions {
// The access type of blockstore. Default: Primary
pub access_type: AccessType,
@@ -59,7 +59,7 @@ pub enum AccessType {
Secondary,
}
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub enum BlockstoreRecoveryMode {
TolerateCorruptedTailRecords,
AbsoluteConsistency,
@@ -99,7 +99,7 @@ impl From for DBRecoveryMode {
/// Options for LedgerColumn.
/// Each field might also be used as a tag that supports group-by operation when
/// reporting metrics.
-#[derive(Default, Debug, Clone)]
+#[derive(Default, Debug, Clone, PartialEq)]
pub struct LedgerColumnOptions {
// Determine the way to compress column families which are eligible for
// compression.
@@ -122,7 +122,7 @@ impl LedgerColumnOptions {
}
}
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub enum BlockstoreCompressionType {
None,
Snappy,
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index 05c0df82a30d6f..7119f6eed2fa7c 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -28,7 +28,6 @@ use {
solana_measure::{measure::Measure, measure_us},
solana_metrics::datapoint_error,
solana_pubkey::Pubkey,
- solana_rayon_threadlimit::get_max_thread_count,
solana_runtime::{
bank::{Bank, PreCommitResult, TransactionBalancesSet},
bank_forks::{BankForks, SetRootError},
@@ -918,7 +917,7 @@ pub(crate) fn process_blockstore_for_bank_0(
let bank_forks = BankForks::new_rw_arc(bank0);
info!("Processing ledger for slot 0...");
- let replay_tx_thread_pool = create_thread_pool(get_max_thread_count());
+ let replay_tx_thread_pool = create_thread_pool(num_cpus::get());
process_bank_0(
&bank_forks
.read()
@@ -997,7 +996,7 @@ pub fn process_blockstore_from_root(
.meta(start_slot)
.unwrap_or_else(|_| panic!("Failed to get meta for slot {start_slot}"))
{
- let replay_tx_thread_pool = create_thread_pool(get_max_thread_count());
+ let replay_tx_thread_pool = create_thread_pool(num_cpus::get());
load_frozen_forks(
bank_forks,
&start_slot_meta,
diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs
index 7a51d1582cd3c7..af63c76d26c86e 100644
--- a/ledger/src/leader_schedule_cache.rs
+++ b/ledger/src/leader_schedule_cache.rs
@@ -446,7 +446,7 @@ mod tests {
// Write a shred into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped
- let (shreds, _) = make_slot_entries(2, 1, 1, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(2, 1, 1);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
cache
@@ -457,7 +457,7 @@ mod tests {
);
// Write a shred into slot 1
- let (shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true);
+ let (shreds, _) = make_slot_entries(1, 0, 1);
// Check that slot 1 and 2 are skipped
blockstore.insert_shreds(shreds, None, false).unwrap();
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 9e12bb19ec7b3a..f3f88d07d9a723 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -412,11 +412,6 @@ impl Shred {
dispatch!(pub fn payload(&self) -> &Payload);
dispatch!(pub fn sanitize(&self) -> Result<(), Error>);
- #[deprecated(since = "2.3.0")]
- pub fn set_index(&mut self, _index: u32) {}
- #[deprecated(since = "2.3.0")]
- pub fn set_slot(&mut self, _slot: Slot) {}
-
#[cfg(any(test, feature = "dev-context-only-utils"))]
pub fn copy_to_packet(&self, packet: &mut Packet) {
let payload = self.payload();
diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml
index d81b5f61a73444..cb6e51970f9b16 100644
--- a/metrics/Cargo.toml
+++ b/metrics/Cargo.toml
@@ -26,9 +26,11 @@ solana-time-utils = { workspace = true }
thiserror = { workspace = true }
[dev-dependencies]
+bencher = { workspace = true }
env_logger = { workspace = true }
rand = { workspace = true }
serial_test = { workspace = true }
[[bench]]
name = "metrics"
+harness = false
diff --git a/metrics/benches/metrics.rs b/metrics/benches/metrics.rs
index f8038ef51200b3..e762066cdd2ecf 100644
--- a/metrics/benches/metrics.rs
+++ b/metrics/benches/metrics.rs
@@ -1,8 +1,5 @@
-#![feature(test)]
-
-extern crate test;
-
use {
+ bencher::{benchmark_group, benchmark_main, Bencher},
log::*,
rand::distributions::{Distribution, Uniform},
solana_metrics::{
@@ -10,12 +7,10 @@ use {
datapoint::DataPoint,
metrics::{serialize_points, test_mocks::MockMetricsWriter, MetricsAgent},
},
- std::{sync::Arc, time::Duration},
- test::Bencher,
+ std::{hint::black_box, sync::Arc, time::Duration},
};
-#[bench]
-fn bench_write_points(bencher: &mut Bencher) {
+fn bench_write_points(b: &mut Bencher) {
let points = (0..10)
.map(|_| {
DataPoint::new("measurement")
@@ -26,19 +21,18 @@ fn bench_write_points(bencher: &mut Bencher) {
})
.collect();
let host_id = "benchmark-host-id";
- bencher.iter(|| {
+ b.iter(|| {
for _ in 0..10 {
- test::black_box(serialize_points(&points, host_id));
+ black_box(serialize_points(&points, host_id));
}
})
}
-#[bench]
-fn bench_datapoint_submission(bencher: &mut Bencher) {
+fn bench_datapoint_submission(b: &mut Bencher) {
let writer = Arc::new(MockMetricsWriter::new());
let agent = MetricsAgent::new(writer, Duration::from_secs(10), 1000);
- bencher.iter(|| {
+ b.iter(|| {
for i in 0..1000 {
agent.submit(
DataPoint::new("measurement")
@@ -51,12 +45,11 @@ fn bench_datapoint_submission(bencher: &mut Bencher) {
})
}
-#[bench]
-fn bench_counter_submission(bencher: &mut Bencher) {
+fn bench_counter_submission(b: &mut Bencher) {
let writer = Arc::new(MockMetricsWriter::new());
let agent = MetricsAgent::new(writer, Duration::from_secs(10), 1000);
- bencher.iter(|| {
+ b.iter(|| {
for i in 0..1000 {
agent.submit_counter(CounterPoint::new("counter 1"), Level::Info, i);
}
@@ -64,14 +57,13 @@ fn bench_counter_submission(bencher: &mut Bencher) {
})
}
-#[bench]
-fn bench_random_submission(bencher: &mut Bencher) {
+fn bench_random_submission(b: &mut Bencher) {
let writer = Arc::new(MockMetricsWriter::new());
let agent = MetricsAgent::new(writer, Duration::from_secs(10), 1000);
let mut rng = rand::thread_rng();
let die = Uniform::::from(1..7);
- bencher.iter(|| {
+ b.iter(|| {
for i in 0..1000 {
let dice = die.sample(&mut rng);
@@ -89,3 +81,12 @@ fn bench_random_submission(bencher: &mut Bencher) {
agent.flush();
})
}
+
+benchmark_group!(
+ benches,
+ bench_write_points,
+ bench_datapoint_submission,
+ bench_counter_submission,
+ bench_random_submission
+);
+benchmark_main!(benches);
diff --git a/net-utils/src/ip_echo_client.rs b/net-utils/src/ip_echo_client.rs
index 4e1afeb33ee165..c1ed42ebc5e50a 100644
--- a/net-utils/src/ip_echo_client.rs
+++ b/net-utils/src/ip_echo_client.rs
@@ -98,12 +98,20 @@ fn parse_response(
[b'H', b'T', b'T', b'P'] => {
let http_response = std::str::from_utf8(body);
match http_response {
- Ok(r) => bail!("Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port replying with {r}"),
- Err(_) => bail!("Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port."),
+ Ok(r) => bail!(
+ "Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port \
+ replying with {r}"
+ ),
+ Err(_) => bail!(
+ "Invalid gossip entrypoint. {ip_echo_server_addr} looks to be an HTTP port."
+ ),
}
}
_ => {
- bail!("Invalid gossip entrypoint. {ip_echo_server_addr} provided unexpected header bytes {response_header:?} ");
+ bail!(
+ "Invalid gossip entrypoint. {ip_echo_server_addr} provided unexpected header \
+ bytes {response_header:?} "
+ );
}
};
Ok(payload)
@@ -163,7 +171,7 @@ pub(crate) async fn verify_all_reachable_tcp(
bind_address,
)
.await
- .map_err(|err| warn!("ip_echo_server request failed: {}", err));
+ .map_err(|err| warn!("ip_echo_server request failed: {err}"));
// spawn checker to wait for reply
// since we do not know if tcp_listeners are nonblocking, we have to run them in native threads.
@@ -173,7 +181,7 @@ pub(crate) async fn verify_all_reachable_tcp(
// Use blocking API since we have no idea if sockets given to us are nonblocking or not
let thread_handle = tokio::task::spawn_blocking(move || {
- debug!("Waiting for incoming connection on tcp/{}", port);
+ debug!("Waiting for incoming connection on tcp/{port}");
match tcp_listener.incoming().next() {
Some(_) => {
// ignore errors here since this can only happen if a timeout was detected.
@@ -250,10 +258,7 @@ pub(crate) async fn verify_all_reachable_udp(
for (bind_ip, ports_to_socks_map) in ip_to_ports {
let ports: Vec = ports_to_socks_map.keys().copied().collect();
- info!(
- "Checking that udp ports {:?} are reachable from bind IP {:?}",
- ports, bind_ip
- );
+ info!("Checking that udp ports {ports:?} are reachable from bind IP {bind_ip:?}");
'outer: for chunk_to_check in ports.chunks(MAX_PORT_COUNT_PER_MESSAGE) {
let ports_to_check = chunk_to_check.to_vec();
@@ -275,7 +280,7 @@ pub(crate) async fn verify_all_reachable_udp(
bind_ip,
)
.await
- .map_err(|err| warn!("ip_echo_server request failed: {}", err));
+ .map_err(|err| warn!("ip_echo_server request failed: {err}"));
let reachable_ports = Arc::new(RwLock::new(HashSet::new()));
// Spawn threads for each socket to check
@@ -300,10 +305,7 @@ pub(crate) async fn verify_all_reachable_udp(
}
let recv_result = socket.recv(&mut [0; 1]);
- debug!(
- "Waited for incoming datagram on udp/{}: {:?}",
- port, recv_result
- );
+ debug!("Waited for incoming datagram on udp/{port}: {recv_result:?}");
if recv_result.is_ok() {
reachable_ports.write().unwrap().insert(port);
@@ -327,18 +329,15 @@ pub(crate) async fn verify_all_reachable_udp(
.into_inner()
.expect("No threads should hold the lock");
info!(
- "checked udp ports: {:?}, reachable udp ports: {:?}",
- ports_to_check, reachable_ports
+ "checked udp ports: {ports_to_check:?}, reachable udp ports: \
+ {reachable_ports:?}"
);
if reachable_ports.len() == ports_to_check.len() {
continue 'outer; // starts checking next chunk of ports, if any
}
}
- error!(
- "Maximum retry count reached. Some ports for IP {} unreachable.",
- bind_ip
- );
+ error!("Maximum retry count reached. Some ports for IP {bind_ip} unreachable.");
return false;
}
}
diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs
index e877b30b1315d0..c662ff457229ee 100644
--- a/net-utils/src/ip_echo_server.rs
+++ b/net-utils/src/ip_echo_server.rs
@@ -68,7 +68,7 @@ async fn process_connection(
peer_addr: SocketAddr,
shred_version: Option,
) -> io::Result<()> {
- info!("connection from {:?}", peer_addr);
+ info!("connection from {peer_addr:?}");
let mut data = vec![0u8; ip_echo_server_request_length()];
@@ -104,7 +104,7 @@ async fn process_connection(
))
})?;
- trace!("request: {:?}", msg);
+ trace!("request: {msg:?}");
// Fire a datagram at each non-zero UDP port
match bind_to_unspecified() {
@@ -114,21 +114,21 @@ async fn process_connection(
let result =
udp_socket.send_to(&[0], SocketAddr::from((peer_addr.ip(), *udp_port)));
match result {
- Ok(_) => debug!("Successful send_to udp/{}", udp_port),
- Err(err) => info!("Failed to send_to udp/{}: {}", udp_port, err),
+ Ok(_) => debug!("Successful send_to udp/{udp_port}"),
+ Err(err) => info!("Failed to send_to udp/{udp_port}: {err}"),
}
}
}
}
Err(err) => {
- warn!("Failed to bind local udp socket: {}", err);
+ warn!("Failed to bind local udp socket: {err}");
}
}
// Try to connect to each non-zero TCP port
for tcp_port in &msg.tcp_ports {
if *tcp_port != 0 {
- debug!("Connecting to tcp/{}", tcp_port);
+ debug!("Connecting to tcp/{tcp_port}");
let mut tcp_stream = timeout(
IO_TIMEOUT,
@@ -148,7 +148,7 @@ async fn process_connection(
// conflict with the first four bytes of a valid HTTP response.
let mut bytes = vec![0u8; IP_ECHO_SERVER_RESPONSE_LENGTH];
bincode::serialize_into(&mut bytes[HEADER_LENGTH..], &response).unwrap();
- trace!("response: {:?}", bytes);
+ trace!("response: {bytes:?}");
writer.write_all(&bytes).await
}
@@ -163,11 +163,11 @@ async fn run_echo_server(tcp_listener: std::net::TcpListener, shred_version: Opt
Ok((socket, peer_addr)) => {
runtime::Handle::current().spawn(async move {
if let Err(err) = process_connection(socket, peer_addr, shred_version).await {
- info!("session failed: {:?}", err);
+ info!("session failed: {err:?}");
}
});
}
- Err(err) => warn!("listener accept failed: {:?}", err),
+ Err(err) => warn!("listener accept failed: {err:?}"),
}
}
}
diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs
index 5b5eb70c53eff4..1b223362a0f3e3 100644
--- a/net-utils/src/lib.rs
+++ b/net-utils/src/lib.rs
@@ -366,8 +366,8 @@ pub fn multi_bind_in_range_with_config(
if !PLATFORM_SUPPORTS_SOCKET_CONFIGS && num != 1 {
// See https://github.com/solana-labs/solana/issues/4607
warn!(
- "multi_bind_in_range_with_config() only supports 1 socket on this platform ({} requested)",
- num
+ "multi_bind_in_range_with_config() only supports 1 socket on this platform ({num} \
+ requested)"
);
num = 1;
}
@@ -464,7 +464,8 @@ pub fn bind_common_with_config(
#[deprecated(
since = "2.3.2",
- note = "Please avoid this function, in favor of sockets::bind_two_in_range_with_offset_and_config"
+ note = "Please avoid this function, in favor of \
+ sockets::bind_two_in_range_with_offset_and_config"
)]
#[allow(deprecated)]
pub fn bind_two_in_range_with_offset(
@@ -484,7 +485,8 @@ pub fn bind_two_in_range_with_offset(
#[deprecated(
since = "2.3.2",
- note = "Please avoid this function, in favor of sockets::bind_two_in_range_with_offset_and_config"
+ note = "Please avoid this function, in favor of \
+ sockets::bind_two_in_range_with_offset_and_config"
)]
#[allow(deprecated)]
pub fn bind_two_in_range_with_offset_and_config(
@@ -582,8 +584,7 @@ pub fn bind_more_with_config(
if !PLATFORM_SUPPORTS_SOCKET_CONFIGS {
if num > 1 {
warn!(
- "bind_more_with_config() only supports 1 socket on this platform ({} requested)",
- num
+ "bind_more_with_config() only supports 1 socket on this platform ({num} requested)"
);
}
Ok(vec![socket])
diff --git a/net-utils/src/sockets.rs b/net-utils/src/sockets.rs
index 20017a6f38a733..78b562574e0c7a 100644
--- a/net-utils/src/sockets.rs
+++ b/net-utils/src/sockets.rs
@@ -1,22 +1,21 @@
+#[cfg(feature = "dev-context-only-utils")]
+use tokio::net::UdpSocket as TokioUdpSocket;
use {
crate::PortRange,
log::warn,
socket2::{Domain, SockAddr, Socket, Type},
std::{
io,
- net::{IpAddr, SocketAddr, TcpListener, UdpSocket},
+ net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket},
+ ops::Range,
sync::atomic::{AtomicU16, Ordering},
},
};
-#[cfg(feature = "dev-context-only-utils")]
-use {std::net::Ipv4Addr, tokio::net::UdpSocket as TokioUdpSocket};
// base port for deconflicted allocations
const BASE_PORT: u16 = 5000;
// how much to allocate per individual process.
// we expect to have at most 64 concurrent tests in CI at any moment on a given host.
const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64;
-/// Retrieve a free 20-port slice for unit tests
-///
/// When running under nextest, this will try to provide
/// a unique slice of port numbers (assuming no other nextest processes
/// are running on the same host) based on NEXTEST_TEST_GLOBAL_SLOT variable
@@ -25,23 +24,46 @@ const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64;
/// When running without nextest, this will only bump an atomic and eventually
/// panic when it runs out of port numbers to assign.
#[allow(clippy::arithmetic_side_effects)]
-pub fn localhost_port_range_for_tests() -> (u16, u16) {
+pub fn unique_port_range_for_tests(size: u16) -> Range {
static SLICE: AtomicU16 = AtomicU16::new(0);
- let offset = SLICE.fetch_add(20, Ordering::Relaxed);
+ let offset = SLICE.fetch_add(size, Ordering::Relaxed);
let start = offset
+ match std::env::var("NEXTEST_TEST_GLOBAL_SLOT") {
Ok(slot) => {
let slot: u16 = slot.parse().unwrap();
assert!(
offset < SLICE_PER_PROCESS,
- "Overrunning into the port range of another test! Consider using fewer ports per test."
+ "Overrunning into the port range of another test! Consider using fewer ports \
+ per test."
);
BASE_PORT + slot * SLICE_PER_PROCESS
}
Err(_) => BASE_PORT,
};
- assert!(start < u16::MAX - 20, "ran out of port numbers!");
- (start, start + 20)
+ assert!(start < u16::MAX - size, "Ran out of port numbers!");
+ start..start + size
+}
+
+/// Retrieve a free 20-port slice for unit tests
+///
+/// When running under nextest, this will try to provide
+/// a unique slice of port numbers (assuming no other nextest processes
+/// are running on the same host) based on NEXTEST_TEST_GLOBAL_SLOT variable
+/// The port ranges will be reused following nextest logic.
+///
+/// When running without nextest, this will only bump an atomic and eventually
+/// panic when it runs out of port numbers to assign.
+pub fn localhost_port_range_for_tests() -> (u16, u16) {
+ let pr = unique_port_range_for_tests(20);
+ (pr.start, pr.end)
+}
+
+/// Bind a `UdpSocket` to a unique port.
+pub fn bind_to_localhost_unique() -> io::Result {
+ bind_to(
+ IpAddr::V4(Ipv4Addr::LOCALHOST),
+ unique_port_range_for_tests(1).start,
+ )
}
pub fn bind_gossip_port_in_range(
@@ -217,8 +239,8 @@ pub fn multi_bind_in_range_with_config(
if !PLATFORM_SUPPORTS_SOCKET_CONFIGS && num != 1 {
// See https://github.com/solana-labs/solana/issues/4607
warn!(
- "multi_bind_in_range_with_config() only supports 1 socket on this platform ({} requested)",
- num
+ "multi_bind_in_range_with_config() only supports 1 socket on this platform ({num} \
+ requested)"
);
num = 1;
}
@@ -320,8 +342,7 @@ pub fn bind_more_with_config(
if !PLATFORM_SUPPORTS_SOCKET_CONFIGS {
if num > 1 {
warn!(
- "bind_more_with_config() only supports 1 socket on this platform ({} requested)",
- num
+ "bind_more_with_config() only supports 1 socket on this platform ({num} requested)"
);
}
Ok(vec![socket])
diff --git a/net/net.sh b/net/net.sh
index 235d485555c5ea..b8f743bf6d566d 100755
--- a/net/net.sh
+++ b/net/net.sh
@@ -189,9 +189,8 @@ annotateBlockexplorerUrl() {
}
build() {
- supported=("22.04")
declare MAYBE_DOCKER=
- if [[ $(uname) != Linux || ! " ${supported[*]} " =~ $(lsb_release -sr) ]]; then
+ if [[ $(uname) != Linux ]]; then
# shellcheck source=ci/docker/env.sh
source "$SOLANA_ROOT"/ci/docker/env.sh
MAYBE_DOCKER="ci/docker-run.sh ${CI_DOCKER_IMAGE:?}"
diff --git a/net/scripts/gce-provider.sh b/net/scripts/gce-provider.sh
index 376febdb981d5b..46e2ad3c2510ed 100755
--- a/net/scripts/gce-provider.sh
+++ b/net/scripts/gce-provider.sh
@@ -170,7 +170,6 @@ cloud_CreateInstances() {
declare optionalBootDiskType="${10:-pd-ssd}"
declare optionalAdditionalDiskSize="${11}"
declare optionalPreemptible="${12}"
- #declare sshPrivateKey="${13}" # unused
if $enableGpu; then
# Custom Ubuntu 20.04 LTS image with CUDA 10.2 installed
@@ -185,7 +184,7 @@ cloud_CreateInstances() {
echo "Error: Not supported" >&2
exit 1
else
- imageName="ubuntu-2204-jammy-v20241119 --image-project ubuntu-os-cloud"
+ imageName="ubuntu-2404-noble-amd64-v20250709 --image-project ubuntu-os-cloud"
fi
declare -a nodes
diff --git a/net/scripts/install-earlyoom.sh b/net/scripts/install-earlyoom.sh
index 5605bc9cb58e28..bf5946672f0dd4 100755
--- a/net/scripts/install-earlyoom.sh
+++ b/net/scripts/install-earlyoom.sh
@@ -15,8 +15,7 @@ echo kernel.sysrq=1 >> /etc/sysctl.conf
if command -v earlyoom; then
systemctl status earlyoom
else
- wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.2-*_amd64.deb' -e robots=off -nd
- apt install --quiet --yes ./earlyoom_1.2-*_amd64.deb
+ apt-get install --quiet --yes earlyoom
cat > earlyoom < Vec {
.collect()
}
-fn do_bench_dedup_packets(bencher: &mut Bencher, mut batches: Vec) {
+fn do_bench_dedup_packets(b: &mut Bencher, mut batches: Vec) {
// verify packets
let mut rng = rand::thread_rng();
let mut deduper = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979);
- bencher.iter(|| {
+ b.iter(|| {
let _ans = deduper::dedup_packets_and_count_discards(&deduper, &mut batches);
deduper.maybe_reset(
&mut rng,
@@ -44,9 +41,7 @@ fn do_bench_dedup_packets(bencher: &mut Bencher, mut batches: Vec)
});
}
-#[bench]
-#[ignore]
-fn bench_dedup_same_small_packets(bencher: &mut Bencher) {
+fn bench_dedup_same_small_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let small_packet = test_packet_with_size(128, &mut rng);
@@ -55,12 +50,10 @@ fn bench_dedup_same_small_packets(bencher: &mut Bencher) {
128,
);
- do_bench_dedup_packets(bencher, batches);
+ do_bench_dedup_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_dedup_same_big_packets(bencher: &mut Bencher) {
+fn bench_dedup_same_big_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let big_packet = test_packet_with_size(1024, &mut rng);
@@ -69,12 +62,10 @@ fn bench_dedup_same_big_packets(bencher: &mut Bencher) {
128,
);
- do_bench_dedup_packets(bencher, batches);
+ do_bench_dedup_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_dedup_diff_small_packets(bencher: &mut Bencher) {
+fn bench_dedup_diff_small_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let batches = to_packet_batches(
@@ -84,12 +75,10 @@ fn bench_dedup_diff_small_packets(bencher: &mut Bencher) {
128,
);
- do_bench_dedup_packets(bencher, batches);
+ do_bench_dedup_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_dedup_diff_big_packets(bencher: &mut Bencher) {
+fn bench_dedup_diff_big_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let batches = to_packet_batches(
@@ -99,12 +88,10 @@ fn bench_dedup_diff_big_packets(bencher: &mut Bencher) {
128,
);
- do_bench_dedup_packets(bencher, batches);
+ do_bench_dedup_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_dedup_baseline(bencher: &mut Bencher) {
+fn bench_dedup_baseline(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let batches = to_packet_batches(
@@ -114,15 +101,13 @@ fn bench_dedup_baseline(bencher: &mut Bencher) {
128,
);
- do_bench_dedup_packets(bencher, batches);
+ do_bench_dedup_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_dedup_reset(bencher: &mut Bencher) {
+fn bench_dedup_reset(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let mut deduper = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979);
- bencher.iter(|| {
+ b.iter(|| {
deduper.maybe_reset(
&mut rng,
0.001, // false_positive_rate
@@ -130,3 +115,14 @@ fn bench_dedup_reset(bencher: &mut Bencher) {
);
});
}
+
+benchmark_group!(
+ benches,
+ bench_dedup_reset,
+ bench_dedup_baseline,
+ bench_dedup_diff_big_packets,
+ bench_dedup_diff_small_packets,
+ bench_dedup_same_big_packets,
+ bench_dedup_same_small_packets
+);
+benchmark_main!(benches);
diff --git a/perf/benches/discard.rs b/perf/benches/discard.rs
index c79484257e49fa..837907e257c84b 100644
--- a/perf/benches/discard.rs
+++ b/perf/benches/discard.rs
@@ -1,10 +1,6 @@
-#![feature(test)]
-
-extern crate test;
-
use {
+ bencher::{benchmark_group, benchmark_main, Bencher},
solana_perf::{discard::discard_batches_randomly, packet::to_packet_batches, test_tx::test_tx},
- test::Bencher,
};
#[cfg(not(any(target_env = "msvc", target_os = "freebsd")))]
@@ -13,8 +9,7 @@ static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
const NUM: usize = 1000;
-#[bench]
-fn bench_discard(bencher: &mut Bencher) {
+fn bench_discard(b: &mut Bencher) {
solana_logger::setup();
let tx = test_tx();
let num_packets = NUM;
@@ -25,9 +20,12 @@ fn bench_discard(bencher: &mut Bencher) {
10,
);
- bencher.iter(|| {
+ b.iter(|| {
let mut discarded = batches.clone();
discard_batches_randomly(&mut discarded, 100, NUM);
assert_eq!(discarded.len(), 10);
})
}
+
+benchmark_group!(benches, bench_discard);
+benchmark_main!(benches);
diff --git a/perf/benches/recycler.rs b/perf/benches/recycler.rs
index 0533e4a11eb3a2..1f996f1c3d9398 100644
--- a/perf/benches/recycler.rs
+++ b/perf/benches/recycler.rs
@@ -1,14 +1,9 @@
-#![feature(test)]
-
-extern crate test;
-
use {
+ bencher::{benchmark_group, benchmark_main, Bencher},
solana_perf::{packet::PacketBatchRecycler, recycler::Recycler},
- test::Bencher,
};
-#[bench]
-fn bench_recycler(bencher: &mut Bencher) {
+fn bench_recycler(b: &mut Bencher) {
solana_logger::setup();
let recycler: PacketBatchRecycler = Recycler::default();
@@ -17,7 +12,10 @@ fn bench_recycler(bencher: &mut Bencher) {
let _packet = recycler.allocate("");
}
- bencher.iter(move || {
+ b.iter(move || {
let _packet = recycler.allocate("");
});
}
+
+benchmark_group!(benches, bench_recycler);
+benchmark_main!(benches);
diff --git a/perf/benches/reset.rs b/perf/benches/reset.rs
index 18401dcd664a6a..72382d0c5f23ff 100644
--- a/perf/benches/reset.rs
+++ b/perf/benches/reset.rs
@@ -1,10 +1,9 @@
-#![feature(test)]
-
-extern crate test;
-
use {
- std::sync::atomic::{AtomicU64, Ordering},
- test::Bencher,
+ bencher::{benchmark_group, benchmark_main, Bencher},
+ std::{
+ hint::black_box,
+ sync::atomic::{AtomicU64, Ordering},
+ },
};
#[cfg(not(any(target_env = "msvc", target_os = "freebsd")))]
@@ -16,15 +15,14 @@ const N: usize = 1_000_000;
// test bench_reset1 ... bench: 436,240 ns/iter (+/- 176,714)
// test bench_reset2 ... bench: 274,007 ns/iter (+/- 129,552)
-#[bench]
-fn bench_reset1(bencher: &mut Bencher) {
+fn bench_reset1(b: &mut Bencher) {
solana_logger::setup();
let mut v = Vec::with_capacity(N);
v.resize_with(N, AtomicU64::default);
- bencher.iter(|| {
- test::black_box({
+ b.iter(|| {
+ black_box({
for i in &v {
i.store(0, Ordering::Relaxed);
}
@@ -33,18 +31,20 @@ fn bench_reset1(bencher: &mut Bencher) {
});
}
-#[bench]
-fn bench_reset2(bencher: &mut Bencher) {
+fn bench_reset2(b: &mut Bencher) {
solana_logger::setup();
let mut v = Vec::with_capacity(N);
v.resize_with(N, AtomicU64::default);
- bencher.iter(|| {
- test::black_box({
+ b.iter(|| {
+ black_box({
v.clear();
v.resize_with(N, AtomicU64::default);
0
});
});
}
+
+benchmark_group!(benches, bench_reset2, bench_reset1);
+benchmark_main!(benches);
diff --git a/perf/benches/shrink.rs b/perf/benches/shrink.rs
index 461e651466d2d2..2072e649d01371 100644
--- a/perf/benches/shrink.rs
+++ b/perf/benches/shrink.rs
@@ -1,16 +1,13 @@
#![allow(clippy::arithmetic_side_effects)]
-#![feature(test)]
-
-extern crate test;
use {
+ bencher::{benchmark_group, benchmark_main, Bencher},
rand::prelude::*,
solana_perf::{
packet::{to_packet_batches, PacketBatch, PACKETS_PER_BATCH},
sigverify,
},
std::iter,
- test::Bencher,
};
#[cfg(not(any(target_env = "msvc", target_os = "freebsd")))]
@@ -26,7 +23,7 @@ fn test_packet_with_size(size: usize, rng: &mut ThreadRng) -> Vec {
.collect()
}
-fn do_bench_shrink_packets(bencher: &mut Bencher, mut batches: Vec) {
+fn do_bench_shrink_packets(b: &mut Bencher, mut batches: Vec) {
let mut batches = iter::repeat_with(|| {
batches.iter_mut().for_each(|b| {
b.iter_mut()
@@ -40,16 +37,14 @@ fn do_bench_shrink_packets(bencher: &mut Bencher, mut batches: Vec)
.collect::>()
.into_iter()
.cycle();
- bencher.iter(|| {
+ b.iter(|| {
let batches = batches.next().unwrap();
// verify packets
sigverify::shrink_batches(batches);
});
}
-#[bench]
-#[ignore]
-fn bench_shrink_diff_small_packets(bencher: &mut Bencher) {
+fn bench_shrink_diff_small_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let batches = to_packet_batches(
@@ -59,12 +54,10 @@ fn bench_shrink_diff_small_packets(bencher: &mut Bencher) {
PACKETS_PER_BATCH,
);
- do_bench_shrink_packets(bencher, batches);
+ do_bench_shrink_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_shrink_diff_big_packets(bencher: &mut Bencher) {
+fn bench_shrink_diff_big_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let batches = to_packet_batches(
@@ -74,12 +67,10 @@ fn bench_shrink_diff_big_packets(bencher: &mut Bencher) {
PACKETS_PER_BATCH,
);
- do_bench_shrink_packets(bencher, batches);
+ do_bench_shrink_packets(b, batches);
}
-#[bench]
-#[ignore]
-fn bench_shrink_count_packets(bencher: &mut Bencher) {
+fn bench_shrink_count_packets(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let mut batches = to_packet_batches(
@@ -93,7 +84,15 @@ fn bench_shrink_count_packets(bencher: &mut Bencher) {
.for_each(|mut p| p.meta_mut().set_discard(thread_rng().gen()))
});
- bencher.iter(|| {
+ b.iter(|| {
let _ = sigverify::count_valid_packets(&batches);
});
}
+
+benchmark_group!(
+ benches,
+ bench_shrink_count_packets,
+ bench_shrink_diff_big_packets,
+ bench_shrink_diff_small_packets
+);
+benchmark_main!(benches);
diff --git a/perf/benches/sigverify.rs b/perf/benches/sigverify.rs
index 8a914e6f82a06c..fb9491f3460bb3 100644
--- a/perf/benches/sigverify.rs
+++ b/perf/benches/sigverify.rs
@@ -1,8 +1,7 @@
-#![feature(test)]
-
-extern crate test;
+#![allow(clippy::arithmetic_side_effects)]
use {
+ bencher::{benchmark_group, benchmark_main, Bencher},
log::*,
rand::{thread_rng, Rng},
solana_perf::{
@@ -11,7 +10,6 @@ use {
sigverify,
test_tx::{test_multisig_tx, test_tx},
},
- test::Bencher,
};
#[cfg(not(any(target_env = "msvc", target_os = "freebsd")))]
@@ -21,8 +19,7 @@ static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
const NUM: usize = 256;
const LARGE_BATCH_PACKET_COUNT: usize = 128;
-#[bench]
-fn bench_sigverify_simple(bencher: &mut Bencher) {
+fn bench_sigverify_simple(b: &mut Bencher) {
let tx = test_tx();
let num_packets = NUM;
@@ -35,7 +32,7 @@ fn bench_sigverify_simple(bencher: &mut Bencher) {
let recycler = Recycler::default();
let recycler_out = Recycler::default();
// verify packets
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
@@ -56,82 +53,68 @@ fn gen_batches(
}
}
-#[bench]
-#[ignore]
-fn bench_sigverify_low_packets_small_batch(bencher: &mut Bencher) {
+fn bench_sigverify_low_packets_small_batch(b: &mut Bencher) {
let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE - 1;
let mut batches = gen_batches(false, 1, num_packets);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-#[ignore]
-fn bench_sigverify_low_packets_large_batch(bencher: &mut Bencher) {
+fn bench_sigverify_low_packets_large_batch(b: &mut Bencher) {
let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE - 1;
let mut batches = gen_batches(false, LARGE_BATCH_PACKET_COUNT, num_packets);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-#[ignore]
-fn bench_sigverify_medium_packets_small_batch(bencher: &mut Bencher) {
+fn bench_sigverify_medium_packets_small_batch(b: &mut Bencher) {
let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 8;
let mut batches = gen_batches(false, 1, num_packets);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-#[ignore]
-fn bench_sigverify_medium_packets_large_batch(bencher: &mut Bencher) {
+fn bench_sigverify_medium_packets_large_batch(b: &mut Bencher) {
let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 8;
let mut batches = gen_batches(false, LARGE_BATCH_PACKET_COUNT, num_packets);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-#[ignore]
-fn bench_sigverify_high_packets_small_batch(bencher: &mut Bencher) {
+fn bench_sigverify_high_packets_small_batch(b: &mut Bencher) {
let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 32;
let mut batches = gen_batches(false, 1, num_packets);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-#[ignore]
-fn bench_sigverify_high_packets_large_batch(bencher: &mut Bencher) {
+fn bench_sigverify_high_packets_large_batch(b: &mut Bencher) {
let num_packets = sigverify::VERIFY_PACKET_CHUNK_SIZE * 32;
let mut batches = gen_batches(false, LARGE_BATCH_PACKET_COUNT, num_packets);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
// verify packets
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-#[ignore]
-fn bench_sigverify_uneven(bencher: &mut Bencher) {
+fn bench_sigverify_uneven(b: &mut Bencher) {
solana_logger::setup();
let simple_tx = test_tx();
let multi_tx = test_multisig_tx();
@@ -171,13 +154,12 @@ fn bench_sigverify_uneven(bencher: &mut Bencher) {
let recycler = Recycler::default();
let recycler_out = Recycler::default();
// verify packets
- bencher.iter(|| {
+ b.iter(|| {
sigverify::ed25519_verify(&mut batches, &recycler, &recycler_out, false, num_packets);
})
}
-#[bench]
-fn bench_get_offsets(bencher: &mut Bencher) {
+fn bench_get_offsets(b: &mut Bencher) {
let tx = test_tx();
// generate packet vector
@@ -185,7 +167,21 @@ fn bench_get_offsets(bencher: &mut Bencher) {
let recycler = Recycler::default();
// verify packets
- bencher.iter(|| {
+ b.iter(|| {
let _ans = sigverify::generate_offsets(&mut batches, &recycler, false);
})
}
+
+benchmark_group!(
+ benches,
+ bench_get_offsets,
+ bench_sigverify_uneven,
+ bench_sigverify_high_packets_large_batch,
+ bench_sigverify_high_packets_small_batch,
+ bench_sigverify_medium_packets_large_batch,
+ bench_sigverify_medium_packets_small_batch,
+ bench_sigverify_low_packets_large_batch,
+ bench_sigverify_low_packets_small_batch,
+ bench_sigverify_simple
+);
+benchmark_main!(benches);
diff --git a/perf/src/deduper.rs b/perf/src/deduper.rs
index a4853c3efdfc37..da4d5dfb9c5d66 100644
--- a/perf/src/deduper.rs
+++ b/perf/src/deduper.rs
@@ -171,7 +171,7 @@ mod tests {
let mut batches =
to_packet_batches(&(0..1000).map(|_| test_tx()).collect::>(), 128);
discard += dedup_packets_and_count_discards(&filter, &mut batches) as usize;
- trace!("{} {}", i, discard);
+ trace!("{i} {discard}");
if filter.popcount.load(Ordering::Relaxed) > capacity {
break;
}
diff --git a/perf/src/lib.rs b/perf/src/lib.rs
index f9b0b12772b680..864d541c8746f1 100644
--- a/perf/src/lib.rs
+++ b/perf/src/lib.rs
@@ -68,8 +68,9 @@ pub fn report_target_features() {
info!("AVX detected");
} else {
error!(
- "Incompatible CPU detected: missing AVX support. Please build from source on the target"
- );
+ "Incompatible CPU detected: missing AVX support. Please build from source on \
+ the target"
+ );
std::process::abort();
}
}
@@ -83,7 +84,8 @@ pub fn report_target_features() {
info!("AVX2 detected");
} else {
error!(
- "Incompatible CPU detected: missing AVX2 support. Please build from source on the target"
+ "Incompatible CPU detected: missing AVX2 support. Please build from source on \
+ the target"
);
std::process::abort();
}
diff --git a/perf/src/packet.rs b/perf/src/packet.rs
index 68277d46ec5a63..b7d9d31be13317 100644
--- a/perf/src/packet.rs
+++ b/perf/src/packet.rs
@@ -685,7 +685,7 @@ impl PinnedPacketBatch {
// TODO: This should never happen. Instead the caller should
// break the payload into smaller messages, and here any errors
// should be propagated.
- error!("Couldn't write to packet {:?}. Data skipped.", e);
+ error!("Couldn't write to packet {e:?}. Data skipped.");
packet.meta_mut().set_discard(true);
}
} else {
diff --git a/perf/src/perf_libs.rs b/perf/src/perf_libs.rs
index feedc6bc03b875..a9d336bfa255dd 100644
--- a/perf/src/perf_libs.rs
+++ b/perf/src/perf_libs.rs
@@ -84,10 +84,10 @@ pub struct Api<'a> {
static API: OnceLock> = OnceLock::new();
fn init(name: &OsStr) {
- info!("Loading {:?}", name);
+ info!("Loading {name:?}");
API.get_or_init(|| {
unsafe { Container::load(name) }.unwrap_or_else(|err| {
- error!("Unable to load {:?}: {}", name, err);
+ error!("Unable to load {name:?}: {err}");
std::process::exit(1);
})
});
@@ -97,10 +97,10 @@ pub fn locate_perf_libs() -> Option {
let exe = env::current_exe().expect("Unable to get executable path");
let perf_libs = exe.parent().unwrap().join("perf-libs");
if perf_libs.is_dir() {
- info!("perf-libs found at {:?}", perf_libs);
+ info!("perf-libs found at {perf_libs:?}");
return Some(perf_libs);
}
- warn!("{:?} does not exist", perf_libs);
+ warn!("{perf_libs:?} does not exist");
None
}
@@ -108,10 +108,10 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option {
if let Ok(cuda_home) = env::var("CUDA_HOME") {
let path = PathBuf::from(cuda_home);
if path.is_dir() {
- info!("Using CUDA_HOME: {:?}", path);
+ info!("Using CUDA_HOME: {path:?}");
return Some(path);
}
- warn!("Ignoring CUDA_HOME, not a path: {:?}", path);
+ warn!("Ignoring CUDA_HOME, not a path: {path:?}");
}
// Search /usr/local for a `cuda-` directory that matches a perf-libs subdirectory
@@ -130,7 +130,7 @@ fn find_cuda_home(perf_libs_path: &Path) -> Option {
continue;
}
- info!("CUDA installation found at {:?}", cuda_home);
+ info!("CUDA installation found at {cuda_home:?}");
return Some(cuda_home);
}
None
@@ -141,7 +141,7 @@ pub fn append_to_ld_library_path(mut ld_library_path: String) {
ld_library_path.push(':');
ld_library_path.push_str(&env_value);
}
- info!("setting ld_library_path to: {:?}", ld_library_path);
+ info!("setting ld_library_path to: {ld_library_path:?}");
env::set_var("LD_LIBRARY_PATH", ld_library_path);
}
@@ -154,7 +154,7 @@ pub fn init_cuda() {
// to ensure the correct CUDA version is used
append_to_ld_library_path(cuda_lib64_dir.to_str().unwrap_or("").to_string())
} else {
- warn!("CUDA lib64 directory does not exist: {:?}", cuda_lib64_dir);
+ warn!("CUDA lib64 directory does not exist: {cuda_lib64_dir:?}");
}
let libcuda_crypt = perf_libs_path
diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs
index 0a31df16bf2a49..0b1c2209860d15 100644
--- a/perf/src/recycler.rs
+++ b/perf/src/recycler.rs
@@ -47,7 +47,7 @@ pub struct RecyclerX {
impl Default for RecyclerX {
fn default() -> RecyclerX {
let id = thread_rng().gen_range(0..1000);
- trace!("new recycler..{}", id);
+ trace!("new recycler..{id}");
RecyclerX {
gc: Mutex::default(),
stats: RecyclerStats::default(),
diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs
index 261f8ea7dd9933..80a7f7e80dc862 100644
--- a/perf/src/sigverify.rs
+++ b/perf/src/sigverify.rs
@@ -510,7 +510,7 @@ pub fn shrink_batches(batches: Vec) -> Vec {
}
pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, packet_count: usize) {
- debug!("CPU ECDSA for {}", packet_count);
+ debug!("CPU ECDSA for {packet_count}");
PAR_THREAD_POOL.install(|| {
batches.par_iter_mut().flatten().for_each(|mut packet| {
if !packet.meta().discard() && !verify_packet(&mut packet, reject_non_vote) {
@@ -522,7 +522,7 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa
pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) {
let packet_count = count_packets_in_batches(batches);
- debug!("disabled ECDSA for {}", packet_count);
+ debug!("disabled ECDSA for {packet_count}");
PAR_THREAD_POOL.install(|| {
batches.par_iter_mut().flatten().for_each(|mut packet| {
packet.meta_mut().set_discard(false);
@@ -613,7 +613,7 @@ pub fn ed25519_verify(
let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, sig_lens) =
generate_offsets(batches, recycler, reject_non_vote);
- debug!("CUDA ECDSA for {}", valid_packet_count);
+ debug!("CUDA ECDSA for {valid_packet_count}");
debug!("allocating out..");
let mut out = recycler_out.allocate("out_buffer");
out.set_pinnable();
@@ -642,7 +642,7 @@ pub fn ed25519_verify(
num_packets = num_packets.saturating_add(batch.len());
}
out.resize(signature_offsets.len(), 0);
- trace!("Starting verify num packets: {}", num_packets);
+ trace!("Starting verify num packets: {num_packets}");
trace!("elem len: {}", elems.len() as u32);
trace!("packet sizeof: {}", size_of::() as u32);
trace!("len offset: {}", PACKET_DATA_SIZE as u32);
@@ -662,7 +662,7 @@ pub fn ed25519_verify(
USE_NON_DEFAULT_STREAM,
);
if res != 0 {
- trace!("RETURN!!!: {}", res);
+ trace!("RETURN!!!: {res}");
}
}
trace!("done verify");
@@ -879,7 +879,7 @@ mod tests {
let mut tx = Transaction::new_unsigned(message);
info!("message: {:?}", tx.message_data());
- info!("tx: {:?}", tx);
+ info!("tx: {tx:?}");
let sig = keypair1.try_sign_message(&tx.message_data()).unwrap();
tx.signatures = vec![sig; NUM_SIG];
@@ -1734,7 +1734,7 @@ mod tests {
let test_cases = set_discards.iter().zip(&expect_valids).enumerate();
for (i, (set_discard, (expect_batch_count, expect_valid_packets))) in test_cases {
- debug!("test_shrink case: {}", i);
+ debug!("test_shrink case: {i}");
let mut batches = to_packet_batches(
&(0..PACKET_COUNT).map(|_| test_tx()).collect::>(),
PACKETS_PER_BATCH,
@@ -1747,18 +1747,18 @@ mod tests {
.for_each(|(j, mut p)| p.meta_mut().set_discard(set_discard(i, j)))
});
assert_eq!(count_valid_packets(&batches), *expect_valid_packets);
- debug!("show valid packets for case {}", i);
+ debug!("show valid packets for case {i}");
batches.iter_mut().enumerate().for_each(|(i, b)| {
b.iter_mut().enumerate().for_each(|(j, p)| {
if !p.meta().discard() {
- trace!("{} {}", i, j)
+ trace!("{i} {j}")
}
})
});
- debug!("done show valid packets for case {}", i);
+ debug!("done show valid packets for case {i}");
let batches = shrink_batches(batches);
let shrunken_batch_count = batches.len();
- debug!("shrunk batch test {} count: {}", i, shrunken_batch_count);
+ debug!("shrunk batch test {i} count: {shrunken_batch_count}");
assert_eq!(shrunken_batch_count, *expect_batch_count);
assert_eq!(count_valid_packets(&batches), *expect_valid_packets);
}
diff --git a/perf/src/thread.rs b/perf/src/thread.rs
index 36cfde10826b14..7a101390ee5024 100644
--- a/perf/src/thread.rs
+++ b/perf/src/thread.rs
@@ -81,9 +81,8 @@ where
Ok(())
} else {
Err(String::from(
- "niceness adjustment supported only on Linux; negative adjustment \
- (priority increase) requires root or CAP_SYS_NICE (see `man 7 capabilities` \
- for details)",
+ "niceness adjustment supported only on Linux; negative adjustment (priority increase) \
+ requires root or CAP_SYS_NICE (see `man 7 capabilities` for details)",
))
}
}
diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml
index e95e7a22a02cb4..f3d62bd93cb850 100644
--- a/poh-bench/Cargo.toml
+++ b/poh-bench/Cargo.toml
@@ -15,11 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
clap = { version = "3.1.5", features = ["cargo"] }
log = { workspace = true }
+num_cpus = { workspace = true }
rayon = { workspace = true }
solana-entry = { workspace = true }
solana-logger = { workspace = true }
solana-measure = { workspace = true }
solana-perf = { workspace = true }
-solana-rayon-threadlimit = { workspace = true }
solana-sha256-hasher = { workspace = true }
solana-version = { workspace = true }
diff --git a/poh-bench/src/main.rs b/poh-bench/src/main.rs
index 6ac1ebaa4ed9f1..2d454904c6ba1f 100644
--- a/poh-bench/src/main.rs
+++ b/poh-bench/src/main.rs
@@ -7,7 +7,6 @@ use {
clap::{crate_description, crate_name, Arg, Command},
solana_measure::measure::Measure,
solana_perf::perf_libs,
- solana_rayon_threadlimit::get_max_thread_count,
solana_sha256_hasher::hash,
};
@@ -74,9 +73,7 @@ fn main() {
let start_hash = hash(&[1, 2, 3, 4]);
let ticks = create_ticks(max_num_entries, hashes_per_tick, start_hash);
let mut num_entries = start_num_entries as usize;
- let num_threads = matches
- .value_of_t("num_threads")
- .unwrap_or(get_max_thread_count());
+ let num_threads = matches.value_of_t("num_threads").unwrap_or(num_cpus::get());
let thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("solPohBench{i:02}"))
diff --git a/poh/Cargo.toml b/poh/Cargo.toml
index bea3d38dc76548..7af6cda5705323 100644
--- a/poh/Cargo.toml
+++ b/poh/Cargo.toml
@@ -42,6 +42,7 @@ assert_matches = { workspace = true }
bincode = { workspace = true }
criterion = { workspace = true }
rand = { workspace = true }
+solana-entry = { workspace = true, features = ["dev-context-only-utils"] }
solana-keypair = { workspace = true }
solana-logger = { workspace = true }
solana-perf = { workspace = true, features = ["dev-context-only-utils"] }
diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs
index 70a8f3de580225..c65d275b6746dc 100644
--- a/program-runtime/src/invoke_context.rs
+++ b/program-runtime/src/invoke_context.rs
@@ -12,7 +12,7 @@ use {
solana_clock::Slot,
solana_epoch_schedule::EpochSchedule,
solana_hash::Hash,
- solana_instruction::{error::InstructionError, AccountMeta},
+ solana_instruction::{error::InstructionError, AccountMeta, Instruction},
solana_log_collector::{ic_msg, LogCollector},
solana_measure::measure::Measure,
solana_pubkey::Pubkey,
@@ -26,7 +26,6 @@ use {
solana_sdk_ids::{
bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4, native_loader, sysvar,
},
- solana_stable_layout::stable_instruction::StableInstruction,
solana_svm_callback::InvokeContextCallback,
solana_svm_feature_set::SVMFeatureSet,
solana_timings::{ExecuteDetailsTimings, ExecuteTimings},
@@ -304,7 +303,7 @@ impl<'a> InvokeContext<'a> {
/// Entrypoint for a cross-program invocation from a builtin program
pub fn native_invoke(
&mut self,
- instruction: StableInstruction,
+ instruction: Instruction,
signers: &[Pubkey],
) -> Result<(), InstructionError> {
let (instruction_accounts, program_indices) =
@@ -324,16 +323,18 @@ impl<'a> InvokeContext<'a> {
#[allow(clippy::type_complexity)]
pub fn prepare_instruction(
&mut self,
- instruction: &StableInstruction,
+ instruction: &Instruction,
signers: &[Pubkey],
) -> Result<(Vec, Vec), InstructionError> {
- // Finds the index of each account in the instruction by its pubkey.
- // Then normalizes / unifies the privileges of duplicate accounts.
- // Note: This is an O(n^2) algorithm,
- // but performed on a very small slice and requires no heap allocations.
+ // We reference accounts by a u8 index, so we have a total of 256 accounts.
+ // This algorithm allocates the array on the stack for speed.
+ // On AArch64 in release mode, this function only consumes 640 bytes of stack.
+ let mut transaction_callee_map: [u8; 256] = [u8::MAX; 256];
+ let mut instruction_accounts: Vec =
+ Vec::with_capacity(instruction.accounts.len());
let instruction_context = self.transaction_context.get_current_instruction_context()?;
- let mut deduplicated_instruction_accounts: Vec = Vec::new();
- let mut duplicate_indicies = Vec::with_capacity(instruction.accounts.len() as usize);
+ debug_assert!(instruction.accounts.len() <= u8::MAX as usize);
+
for (instruction_account_index, account_meta) in instruction.accounts.iter().enumerate() {
let index_in_transaction = self
.transaction_context
@@ -346,21 +347,25 @@ impl<'a> InvokeContext<'a> {
);
InstructionError::MissingAccount
})?;
- if let Some(duplicate_index) =
- deduplicated_instruction_accounts
- .iter()
- .position(|instruction_account| {
- instruction_account.index_in_transaction == index_in_transaction
- })
- {
- duplicate_indicies.push(duplicate_index);
- let instruction_account = deduplicated_instruction_accounts
- .get_mut(duplicate_index)
- .ok_or(InstructionError::NotEnoughAccountKeys)?;
- instruction_account
- .set_is_signer(instruction_account.is_signer() || account_meta.is_signer);
- instruction_account
- .set_is_writable(instruction_account.is_writable() || account_meta.is_writable);
+
+ debug_assert!((index_in_transaction as usize) < transaction_callee_map.len());
+ let index_in_callee = transaction_callee_map
+ .get_mut(index_in_transaction as usize)
+ .unwrap();
+
+ if (*index_in_callee as usize) < instruction_accounts.len() {
+ let cloned_account = {
+ let instruction_account = instruction_accounts
+ .get_mut(*index_in_callee as usize)
+ .ok_or(InstructionError::NotEnoughAccountKeys)?;
+ instruction_account
+ .set_is_signer(instruction_account.is_signer() || account_meta.is_signer);
+ instruction_account.set_is_writable(
+ instruction_account.is_writable() || account_meta.is_writable,
+ );
+ instruction_account.clone()
+ };
+ instruction_accounts.push(cloned_account);
} else {
let index_in_caller = instruction_context
.find_index_of_instruction_account(
@@ -375,8 +380,8 @@ impl<'a> InvokeContext<'a> {
);
InstructionError::MissingAccount
})?;
- duplicate_indicies.push(deduplicated_instruction_accounts.len());
- deduplicated_instruction_accounts.push(InstructionAccount::new(
+ *index_in_callee = instruction_accounts.len() as u8;
+ instruction_accounts.push(InstructionAccount::new(
index_in_transaction,
index_in_caller,
instruction_account_index as IndexOfAccount,
@@ -385,7 +390,28 @@ impl<'a> InvokeContext<'a> {
));
}
}
- for instruction_account in deduplicated_instruction_accounts.iter() {
+
+ for current_index in 0..instruction_accounts.len() {
+ let instruction_account = instruction_accounts.get(current_index).unwrap();
+
+ if current_index != instruction_account.index_in_callee as usize {
+ let (is_signer, is_writable) = {
+ let reference_account = instruction_accounts
+ .get(instruction_account.index_in_callee as usize)
+ .ok_or(InstructionError::NotEnoughAccountKeys)?;
+ (
+ reference_account.is_signer(),
+ reference_account.is_writable(),
+ )
+ };
+
+ let current_account = instruction_accounts.get_mut(current_index).unwrap();
+ current_account.set_is_signer(current_account.is_signer() || is_signer);
+ current_account.set_is_writable(current_account.is_writable() || is_writable);
+ // This account is repeated, so there is no need to check for permissions
+ continue;
+ }
+
let borrowed_account = instruction_context.try_borrow_instruction_account(
self.transaction_context,
instruction_account.index_in_caller,
@@ -414,15 +440,6 @@ impl<'a> InvokeContext<'a> {
return Err(InstructionError::PrivilegeEscalation);
}
}
- let instruction_accounts = duplicate_indicies
- .into_iter()
- .map(|duplicate_index| {
- deduplicated_instruction_accounts
- .get(duplicate_index)
- .cloned()
- .ok_or(InstructionError::NotEnoughAccountKeys)
- })
- .collect::, InstructionError>>()?;
// Find and validate executables / program accounts
let callee_program_id = instruction.program_id;
@@ -1028,7 +1045,7 @@ mod tests {
assert_eq!(result, Err(InstructionError::UnbalancedInstruction));
result?;
invoke_context
- .native_invoke(inner_instruction.into(), &[])
+ .native_invoke(inner_instruction, &[])
.and(invoke_context.pop())?;
}
MockInstruction::UnbalancedPop => instruction_context
@@ -1186,7 +1203,7 @@ mod tests {
let inner_instruction =
Instruction::new_with_bincode(callee_program_id, &instruction, metas.clone());
let result = invoke_context
- .native_invoke(inner_instruction.into(), &[])
+ .native_invoke(inner_instruction, &[])
.and(invoke_context.pop());
assert_eq!(result, expected_result);
}
@@ -1250,7 +1267,6 @@ mod tests {
},
metas.clone(),
);
- let inner_instruction = StableInstruction::from(inner_instruction);
let (inner_instruction_accounts, program_indices) = invoke_context
.prepare_instruction(&inner_instruction, &[])
.unwrap();
diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs
index a97eb0d517ea62..ad3ba4ca88a732 100644
--- a/program-test/src/lib.rs
+++ b/program-test/src/lib.rs
@@ -42,7 +42,6 @@ use {
runtime_config::RuntimeConfig,
},
solana_signer::Signer,
- solana_stable_layout::stable_instruction::StableInstruction,
solana_sysvar::Sysvar,
solana_sysvar_id::SysvarId,
solana_timings::ExecuteTimings,
@@ -250,7 +249,6 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs {
account_infos: &[AccountInfo],
signers_seeds: &[&[&[u8]]],
) -> ProgramResult {
- let instruction = StableInstruction::from(instruction.clone());
let invoke_context = get_invoke_context();
let log_collector = invoke_context.get_log_collector();
let transaction_context = &invoke_context.transaction_context;
@@ -273,7 +271,7 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs {
.collect::>();
let (instruction_accounts, program_indices) = invoke_context
- .prepare_instruction(&instruction, &signers)
+ .prepare_instruction(instruction, &signers)
.unwrap();
// Copy caller's account_info modifications into invoke_context accounts
diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs
index 1651fef03fe023..0f5f989f3d5be8 100644
--- a/programs/bpf_loader/src/lib.rs
+++ b/programs/bpf_loader/src/lib.rs
@@ -659,7 +659,7 @@ fn process_loader_upgradeable_instruction(
.iter()
.map(|seeds| Pubkey::create_program_address(seeds, caller_program_id))
.collect::, solana_pubkey::PubkeyError>>()?;
- invoke_context.native_invoke(instruction.into(), signers.as_slice())?;
+ invoke_context.native_invoke(instruction, signers.as_slice())?;
// Load and verify the program bits
let transaction_context = &invoke_context.transaction_context;
@@ -1291,8 +1291,7 @@ fn process_loader_upgradeable_instruction(
&provided_authority_address,
program_len as u32,
&program_address,
- )
- .into(),
+ ),
&[],
)?;
@@ -1304,8 +1303,7 @@ fn process_loader_upgradeable_instruction(
0,
0,
program_len as u32,
- )
- .into(),
+ ),
&[],
)?;
@@ -1313,8 +1311,7 @@ fn process_loader_upgradeable_instruction(
solana_loader_v4_interface::instruction::deploy(
&program_address,
&provided_authority_address,
- )
- .into(),
+ ),
&[],
)?;
@@ -1324,8 +1321,7 @@ fn process_loader_upgradeable_instruction(
&program_address,
&provided_authority_address,
&program_address,
- )
- .into(),
+ ),
&[],
)?;
} else if migration_authority::check_id(&provided_authority_address) {
@@ -1334,8 +1330,7 @@ fn process_loader_upgradeable_instruction(
&program_address,
&provided_authority_address,
&upgrade_authority_address.unwrap(),
- )
- .into(),
+ ),
&[],
)?;
}
@@ -1496,7 +1491,7 @@ fn common_extend_program(
)?;
invoke_context.native_invoke(
- system_instruction::transfer(&payer_key, &programdata_key, required_payment).into(),
+ system_instruction::transfer(&payer_key, &programdata_key, required_payment),
&[],
)?;
}
diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs
index 6eb4e46a401c91..675702e7a2d579 100644
--- a/programs/bpf_loader/src/syscalls/cpi.rs
+++ b/programs/bpf_loader/src/syscalls/cpi.rs
@@ -1,6 +1,7 @@
use {
super::*,
crate::{translate_inner, translate_slice_inner, translate_type_inner},
+ solana_instruction::Instruction,
solana_loader_v3_interface::instruction as bpf_loader_upgradeable,
solana_measure::measure::Measure,
solana_program_runtime::{
@@ -326,7 +327,7 @@ trait SyscallInvokeSigned {
addr: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
- ) -> Result;
+ ) -> Result;
fn translate_accounts<'a>(
instruction_accounts: &[InstructionAccount],
account_infos_addr: u64,
@@ -373,7 +374,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust {
addr: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
- ) -> Result {
+ ) -> Result {
let ix = translate_type::(
memory_mapping,
addr,
@@ -419,9 +420,9 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust {
accounts.push(account_meta.clone());
}
- Ok(StableInstruction {
- accounts: accounts.into(),
- data: data.into(),
+ Ok(Instruction {
+ accounts,
+ data,
program_id: ix.program_id,
})
}
@@ -580,7 +581,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC {
addr: u64,
memory_mapping: &MemoryMapping,
invoke_context: &mut InvokeContext,
- ) -> Result {
+ ) -> Result {
let ix_c = translate_type::(
memory_mapping,
addr,
@@ -641,9 +642,9 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC {
});
}
- Ok(StableInstruction {
- accounts: accounts.into(),
- data: data.into(),
+ Ok(Instruction {
+ accounts,
+ data,
program_id: *program_id,
})
}
diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index 32b01375cc0f99..3ea76b5daa7ed7 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -357,7 +357,7 @@ pub fn create_program_runtime_environment_v1<'a>(
max_call_depth: compute_budget.max_call_depth,
stack_frame_size: compute_budget.stack_frame_size,
enable_address_translation: true,
- enable_stack_frame_gaps: !feature_set.bpf_account_data_direct_mapping,
+ enable_stack_frame_gaps: true,
instruction_meter_checkpoint_distance: 10000,
enable_instruction_meter: true,
enable_instruction_tracing: debugging_features,
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 7f0c79074b31bf..c08b474bd58879 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -993,9 +993,9 @@ dependencies = [
[[package]]
name = "bytemuck_derive"
-version = "1.9.3"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1"
+checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4"
dependencies = [
"proc-macro2",
"quote",
@@ -2940,9 +2940,9 @@ dependencies = [
[[package]]
name = "io-uring"
-version = "0.7.8"
+version = "0.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013"
+checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
dependencies = [
"bitflags 2.9.1",
"cfg-if 1.0.0",
@@ -5118,9 +5118,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.140"
+version = "1.0.141"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
+checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
dependencies = [
"itoa",
"memchr",
@@ -6089,6 +6089,7 @@ dependencies = [
"log",
"lru",
"min-max-heap",
+ "num_cpus",
"num_enum",
"prio-graph",
"qualifier_attr",
@@ -6319,6 +6320,7 @@ dependencies = [
"crossbeam-channel",
"dlopen2",
"log",
+ "num_cpus",
"rand 0.8.5",
"rayon",
"serde",
@@ -6328,7 +6330,6 @@ dependencies = [
"solana-metrics",
"solana-packet",
"solana-perf",
- "solana-rayon-threadlimit",
"solana-runtime-transaction",
"solana-sha256-hasher",
"solana-transaction",
@@ -7538,6 +7539,7 @@ dependencies = [
name = "solana-rayon-threadlimit"
version = "3.0.0"
dependencies = [
+ "log",
"num_cpus",
]
diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs
index b56f965f36fd4c..51a708ecf7a9aa 100644
--- a/programs/sbf/rust/invoke/src/lib.rs
+++ b/programs/sbf/rust/invoke/src/lib.rs
@@ -1348,7 +1348,7 @@ fn process_instruction<'a>(
let stack = unsafe {
slice::from_raw_parts_mut(
MM_STACK_START as *mut u8,
- MAX_CALL_DEPTH * STACK_FRAME_SIZE,
+ MAX_CALL_DEPTH * STACK_FRAME_SIZE * 2,
)
};
@@ -1361,7 +1361,7 @@ fn process_instruction<'a>(
// When we don't have dynamic stack frames, the stack grows from lower addresses
// to higher addresses, so we compare accordingly.
for i in 10..MAX_CALL_DEPTH {
- let stack = &mut stack[i * STACK_FRAME_SIZE..][..STACK_FRAME_SIZE];
+ let stack = &mut stack[i * STACK_FRAME_SIZE * 2..][..STACK_FRAME_SIZE];
assert!(stack == &ZEROS[..STACK_FRAME_SIZE], "stack not zeroed");
stack.fill(42);
}
diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs
index f23b781846e504..47ecfa5b68aaa0 100644
--- a/pubsub-client/src/nonblocking/pubsub_client.rs
+++ b/pubsub-client/src/nonblocking/pubsub_client.rs
@@ -302,11 +302,6 @@ impl PubsubClient {
self.ws.await.unwrap() // WS future should not be cancelled or panicked
}
- #[deprecated(since = "2.0.2", note = "PubsubClient::node_version is no longer used")]
- pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> {
- Ok(())
- }
-
async fn subscribe<'a, T>(&self, operation: &str, params: Value) -> SubscribeResult<'a, T>
where
T: DeserializeOwned + Send + 'a,
@@ -627,7 +622,7 @@ impl PubsubClient {
}
}
} else {
- error!("Unknown request id: {}", id);
+ error!("Unknown request id: {id}");
break;
}
continue;
diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs
index 0c7d789a022748..73c92305d57e3a 100644
--- a/pubsub-client/src/pubsub_client.rs
+++ b/pubsub-client/src/pubsub_client.rs
@@ -324,8 +324,8 @@ fn connect_with_retry(
connection_retries -= 1;
debug!(
- "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}",
- response, connection_retries, duration
+ "Too many requests: server responded with {response:?}, {connection_retries} \
+ retries left, pausing for {duration:?}"
);
sleep(duration);
@@ -785,7 +785,7 @@ impl PubsubClient {
let handler = move |message| match sender.send(message) {
Ok(_) => (),
Err(err) => {
- info!("receive error: {:?}", err);
+ info!("receive error: {err:?}");
}
};
Self::cleanup_with_handler(exit, socket, handler);
@@ -810,7 +810,7 @@ impl PubsubClient {
// Nothing useful, means we received a ping message
}
Err(err) => {
- info!("receive error: {:?}", err);
+ info!("receive error: {err:?}");
break;
}
}
diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs
index d5e748b8398660..930a0cc078fbc9 100644
--- a/quic-client/src/lib.rs
+++ b/quic-client/src/lib.rs
@@ -12,8 +12,11 @@ use {
QuicClient, QuicClientConnection as NonblockingQuicClientConnection,
QuicLazyInitializedEndpoint,
},
- quic_client::QuicClientConnection as BlockingQuicClientConnection,
+ quic_client::{
+ close_quic_connection, QuicClientConnection as BlockingQuicClientConnection,
+ },
},
+ log::debug,
quic_client::get_runtime,
quinn::{Endpoint, EndpointConfig, TokioRuntime},
solana_connection_cache::{
@@ -72,6 +75,19 @@ impl ConnectionPool for QuicPool {
}
}
+impl Drop for QuicPool {
+ fn drop(&mut self) {
+ debug!(
+ "Dropping QuicPool with {} connections",
+ self.connections.len()
+ );
+ for connection in self.connections.drain(..) {
+ // Explicitly close each connection to ensure resources are released
+ close_quic_connection(connection.0.clone());
+ }
+ }
+}
+
pub struct QuicConfig {
// Arc to prevent having to copy the struct
client_certificate: RwLock>,
diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs
index 7275b98398c82f..0dfc82f6a0b127 100644
--- a/quic-client/src/nonblocking/quic_client.rs
+++ b/quic-client/src/nonblocking/quic_client.rs
@@ -229,6 +229,26 @@ pub struct QuicClient {
stats: Arc,
}
+const CONNECTION_CLOSE_CODE_APPLICATION_CLOSE: u32 = 0u32;
+const CONNECTION_CLOSE_REASON_APPLICATION_CLOSE: &[u8] = b"dropped";
+
+impl QuicClient {
+ /// Explicitly close the connection. Must be called manually if cleanup is needed.
+ pub async fn close(&self) {
+ let mut conn_guard = self.connection.lock().await;
+ if let Some(conn) = conn_guard.take() {
+ debug!(
+ "Closing connection to {} connection_id: {:?}",
+ self.addr, conn.connection
+ );
+ conn.connection.close(
+ CONNECTION_CLOSE_CODE_APPLICATION_CLOSE.into(),
+ CONNECTION_CLOSE_REASON_APPLICATION_CLOSE,
+ );
+ }
+ }
+}
+
impl QuicClient {
pub fn new(endpoint: Arc, addr: SocketAddr) -> Self {
Self {
diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs
index e51370326ad4a3..4b1b10462dbcd5 100644
--- a/quic-client/src/quic_client.rs
+++ b/quic-client/src/quic_client.rs
@@ -180,3 +180,9 @@ impl ClientConnection for QuicClientConnection {
Ok(())
}
}
+
+pub(crate) fn close_quic_connection(connection: Arc) {
+ // Close the connection and release resources
+ trace!("Closing QUIC connection to {}", connection.server_addr());
+ RUNTIME.block_on(connection.close());
+}
diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs
index d6c8e23eb5cc2a..23234fd7631a6d 100644
--- a/quic-client/tests/quic_client.rs
+++ b/quic-client/tests/quic_client.rs
@@ -3,12 +3,14 @@ mod tests {
use {
crossbeam_channel::{unbounded, Receiver},
log::*,
- solana_connection_cache::connection_cache_stats::ConnectionCacheStats,
+ solana_connection_cache::{
+ client_connection::ClientStats, connection_cache_stats::ConnectionCacheStats,
+ },
solana_keypair::Keypair,
solana_net_utils::sockets::{bind_to, localhost_port_range_for_tests},
solana_packet::PACKET_DATA_SIZE,
solana_perf::packet::PacketBatch,
- solana_quic_client::nonblocking::quic_client::QuicLazyInitializedEndpoint,
+ solana_quic_client::nonblocking::quic_client::{QuicClient, QuicLazyInitializedEndpoint},
solana_streamer::{
quic::{QuicServerParams, SpawnServerResult},
streamer::StakedNodes,
@@ -310,4 +312,51 @@ mod tests {
response_recv_thread.join().unwrap();
info!("Response receiver exited!");
}
+
+ #[tokio::test]
+ async fn test_connection_close() {
+ solana_logger::setup();
+ let (sender, receiver) = unbounded();
+ let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
+ let (s, exit, keypair) = server_args();
+ let solana_streamer::nonblocking::quic::SpawnNonBlockingServerResult {
+ endpoints: _,
+ stats: _,
+ thread: t,
+ max_concurrent_connections: _,
+ } = solana_streamer::nonblocking::quic::spawn_server(
+ "quic_streamer_test",
+ s.try_clone().unwrap(),
+ &keypair,
+ sender,
+ exit.clone(),
+ staked_nodes,
+ QuicServerParams::default_for_tests(),
+ )
+ .unwrap();
+
+ let addr = s.local_addr().unwrap().ip();
+ let port = s.local_addr().unwrap().port();
+ let tpu_addr = SocketAddr::new(addr, port);
+ let connection_cache_stats = Arc::new(ConnectionCacheStats::default());
+ let client = QuicClient::new(Arc::new(QuicLazyInitializedEndpoint::default()), tpu_addr);
+
+ // Send a full size packet with single byte writes.
+ let num_bytes = PACKET_DATA_SIZE;
+ let num_expected_packets: usize = 3;
+ let packets = vec![vec![0u8; PACKET_DATA_SIZE]; num_expected_packets];
+ let client_stats = ClientStats::default();
+ for packet in packets {
+ let _ = client
+ .send_buffer(&packet, &client_stats, connection_cache_stats.clone())
+ .await;
+ }
+
+ nonblocking_check_packets(receiver, num_bytes, num_expected_packets).await;
+ exit.store(true, Ordering::Relaxed);
+
+ t.await.unwrap();
+ // We close the connection after the server is down; this should not block
+ client.close().await;
+ }
}
diff --git a/rayon-threadlimit/Cargo.toml b/rayon-threadlimit/Cargo.toml
index b2f433492e5679..44b36615950d5e 100644
--- a/rayon-threadlimit/Cargo.toml
+++ b/rayon-threadlimit/Cargo.toml
@@ -14,4 +14,5 @@ edition = { workspace = true }
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
+log = { workspace = true }
num_cpus = { workspace = true }
diff --git a/rayon-threadlimit/src/lib.rs b/rayon-threadlimit/src/lib.rs
index 912cb622aaa4b4..e12d3584b81d23 100644
--- a/rayon-threadlimit/src/lib.rs
+++ b/rayon-threadlimit/src/lib.rs
@@ -1,4 +1,4 @@
-use std::env;
+use {log::warn, std::env};
//TODO remove this hack when rayon fixes itself
// reduce the number of threads each pool is allowed to half the cpu core count, to avoid rayon
@@ -6,7 +6,13 @@ use std::env;
static MAX_RAYON_THREADS: std::sync::LazyLock = std::sync::LazyLock::new(|| {
env::var("SOLANA_RAYON_THREADS")
.ok()
- .and_then(|num_threads| num_threads.parse().ok())
+ .and_then(|num_threads| {
+ warn!(
+ "Use of SOLANA_RAYON_THREADS has been deprecated and will be removed soon. Use \
+ the individual agave-validator CLI flags to configure threadpool sizes"
+ );
+ num_threads.parse().ok()
+ })
.unwrap_or_else(|| num_cpus::get() / 2)
.max(1)
});
@@ -15,8 +21,11 @@ pub fn get_thread_count() -> usize {
*MAX_RAYON_THREADS
}
-// Only used in legacy code.
-// Use get_thread_count instead in all new code.
+#[deprecated(
+ since = "3.0.0",
+ note = "The solana-rayon-threadlimit crate will be removed, use num_cpus::get() or something \
+ similar instead"
+)]
pub fn get_max_thread_count() -> usize {
get_thread_count().saturating_mul(2)
}
diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml
index 0ba5f76a88aac7..b1e5692555e256 100644
--- a/rpc-client-api/Cargo.toml
+++ b/rpc-client-api/Cargo.toml
@@ -27,3 +27,6 @@ solana-signer = { workspace = true }
solana-transaction-error = { workspace = true }
solana-transaction-status-client-types = { workspace = true }
thiserror = { workspace = true }
+
+[dev-dependencies]
+test-case = { workspace = true }
diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs
index 57381f2676fea3..cd1449ae6c8664 100644
--- a/rpc-client-api/src/custom_error.rs
+++ b/rpc-client-api/src/custom_error.rs
@@ -92,11 +92,13 @@ pub struct MinContextSlotNotReachedErrorData {
pub context_slot: Slot,
}
+#[cfg_attr(test, derive(PartialEq))]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EpochRewardsPeriodActiveErrorData {
pub current_block_height: u64,
pub rewards_complete_block_height: u64,
+ pub slot: Option<Slot>,
}
impl From for RpcCustomError {
@@ -237,6 +239,7 @@ impl From<RpcCustomError> for Error {
data: Some(serde_json::json!(EpochRewardsPeriodActiveErrorData {
current_block_height,
rewards_complete_block_height,
+ slot: Some(slot),
})),
},
RpcCustomError::SlotNotEpochBoundary { slot } => Self {
@@ -245,7 +248,9 @@ impl From<RpcCustomError> for Error {
"Rewards cannot be found because slot {slot} is not the epoch boundary. This \
may be due to gap in the queried node's local ledger or long-term storage"
),
- data: None,
+ data: Some(serde_json::json!({
+ "slot": slot,
+ })),
},
RpcCustomError::LongTermStorageUnreachable => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_UNREACHABLE),
@@ -255,3 +260,42 @@ impl From<RpcCustomError> for Error {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::custom_error::EpochRewardsPeriodActiveErrorData, serde_json::Value,
+ test_case::test_case,
+ };
+
+ #[test_case(serde_json::json!({
+ "currentBlockHeight": 123,
+ "rewardsCompleteBlockHeight": 456
+ }); "Pre-3.0 schema")]
+ #[test_case(serde_json::json!({
+ "currentBlockHeight": 123,
+ "rewardsCompleteBlockHeight": 456,
+ "slot": 789
+ }); "3.0+ schema")]
+ fn test_deserialize_epoch_rewards_period_active_error_data(serialized_data: Value) {
+ let expected_current_block_height = serialized_data
+ .get("currentBlockHeight")
+ .map(|v| v.as_u64().unwrap())
+ .unwrap();
+ let expected_rewards_complete_block_height = serialized_data
+ .get("rewardsCompleteBlockHeight")
+ .map(|v| v.as_u64().unwrap())
+ .unwrap();
+ let expected_slot: Option<u64> = serialized_data.get("slot").map(|v| v.as_u64().unwrap());
+ let actual: EpochRewardsPeriodActiveErrorData =
+ serde_json::from_value(serialized_data).expect("Failed to deserialize test fixture");
+ assert_eq!(
+ actual,
+ EpochRewardsPeriodActiveErrorData {
+ current_block_height: expected_current_block_height,
+ rewards_complete_block_height: expected_rewards_complete_block_height,
+ slot: expected_slot,
+ }
+ );
+ }
+}
diff --git a/rpc-client/src/http_sender.rs b/rpc-client/src/http_sender.rs
index 4924ce5cea13fe..ee96431a5d7e68 100644
--- a/rpc-client/src/http_sender.rs
+++ b/rpc-client/src/http_sender.rs
@@ -174,9 +174,9 @@ impl RpcSender for HttpSender {
too_many_requests_retries -= 1;
debug!(
- "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}",
- response, too_many_requests_retries, duration
- );
+ "Too many requests: server responded with {response:?}, \
+ {too_many_requests_retries} retries left, pausing for {duration:?}"
+ );
sleep(duration).await;
stats_updater.add_rate_limited_time(duration);
@@ -194,7 +194,7 @@ impl RpcSender for HttpSender {
match serde_json::from_value::<RpcSimulateTransactionResult>(json["error"]["data"].clone()) {
Ok(data) => RpcResponseErrorData::SendTransactionPreflightFailure(data),
Err(err) => {
- debug!("Failed to deserialize RpcSimulateTransactionResult: {:?}", err);
+ debug!("Failed to deserialize RpcSimulateTransactionResult: {err:?}");
RpcResponseErrorData::Empty
}
}
diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs
index 8da0562052e1ab..e4e1396abcfb19 100644
--- a/rpc-client/src/nonblocking/rpc_client.rs
+++ b/rpc-client/src/nonblocking/rpc_client.rs
@@ -601,11 +601,6 @@ impl RpcClient {
self.sender.url()
}
- #[deprecated(since = "2.0.2", note = "RpcClient::node_version is no longer used")]
- pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> {
- Ok(())
- }
-
/// Get the configured default [commitment level][cl].
///
/// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
@@ -719,9 +714,8 @@ impl RpcClient {
}
Err(RpcError::ForUser(
- "unable to confirm transaction. \
- This can happen in situations such as transaction expiration \
- and insufficient fee-payer funds"
+ "unable to confirm transaction. This can happen in situations such as transaction \
+ expiration and insufficient fee-payer funds"
.to_string(),
)
.into())
@@ -989,7 +983,7 @@ impl RpcClient {
data,
}) = err.kind()
{
- debug!("{} {}", code, message);
+ debug!("{code} {message}");
if let RpcResponseErrorData::SendTransactionPreflightFailure(
RpcSimulateTransactionResult {
logs: Some(logs), ..
@@ -1204,9 +1198,8 @@ impl RpcClient {
}
} else {
return Err(RpcError::ForUser(
- "unable to confirm transaction. \
- This can happen in situations such as transaction expiration \
- and insufficient fee-payer funds"
+ "unable to confirm transaction. This can happen in situations such as transaction \
+ expiration and insufficient fee-payer funds"
.to_string(),
)
.into());
@@ -1237,11 +1230,12 @@ impl RpcClient {
.await
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
- return Err(
- RpcError::ForUser("transaction not finalized. \
- This can happen when a transaction lands in an abandoned fork. \
- Please retry.".to_string()).into(),
- );
+ return Err(RpcError::ForUser(
+ "transaction not finalized. This can happen when a transaction lands in an \
+ abandoned fork. Please retry."
+ .to_string(),
+ )
+ .into());
}
}
}
@@ -2316,8 +2310,7 @@ impl RpcClient {
}
info!(
- "Waiting for stake to drop below {} current: {:.1}",
- max_stake_percent, current_percent
+ "Waiting for stake to drop below {max_stake_percent} current: {current_percent:.1}"
);
sleep(Duration::from_secs(5)).await;
}
@@ -2945,7 +2938,7 @@ impl RpcClient {
}
let result = serde_json::from_value(result_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
- trace!("Response block timestamp {:?} {:?}", slot, result);
+ trace!("Response block timestamp {slot:?} {result:?}");
Ok(result)
})
.map_err(|err| err.into_with_request(request))?
@@ -3604,7 +3597,7 @@ impl RpcClient {
context,
value: rpc_account,
} = serde_json::from_value::>>(result_json)?;
- trace!("Response account {:?} {:?}", pubkey, rpc_account);
+ trace!("Response account {pubkey:?} {rpc_account:?}");
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
Ok(Response {
@@ -3891,11 +3884,7 @@ impl RpcClient {
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
- trace!(
- "Response minimum balance {:?} {:?}",
- data_len,
- minimum_balance
- );
+ trace!("Response minimum balance {data_len:?} {minimum_balance:?}");
Ok(minimum_balance)
}
@@ -4227,7 +4216,7 @@ impl RpcClient {
context,
value: rpc_account,
} = serde_json::from_value::>>(result_json)?;
- trace!("Response account {:?} {:?}", pubkey, rpc_account);
+ trace!("Response account {pubkey:?} {rpc_account:?}");
let response = {
if let Some(rpc_account) = rpc_account {
if let UiAccountData::Json(account_data) = rpc_account.data {
@@ -4450,8 +4439,7 @@ impl RpcClient {
})
.map_err(|_| {
RpcError::ForUser(
- "airdrop request failed. \
- This can happen when the rate limit is reached."
+ "airdrop request failed. This can happen when the rate limit is reached."
.to_string(),
)
.into()
@@ -4514,10 +4502,7 @@ impl RpcClient {
return balance_result;
}
trace!(
- "wait_for_balance_with_commitment [{}] {:?} {:?}",
- run,
- balance_result,
- expected_balance
+ "wait_for_balance_with_commitment [{run}] {balance_result:?} {expected_balance:?}"
);
if let (Some(expected_balance), Ok(balance_result)) = (expected_balance, balance_result)
{
@@ -4591,7 +4576,7 @@ impl RpcClient {
}
}
Err(err) => {
- debug!("check_confirmations request failed: {:?}", err);
+ debug!("check_confirmations request failed: {err:?}");
}
};
if now.elapsed().as_secs() > 20 {
@@ -4707,7 +4692,7 @@ impl RpcClient {
return Ok(new_blockhash);
}
}
- debug!("Got same blockhash ({:?}), will retry...", blockhash);
+ debug!("Got same blockhash ({blockhash:?}), will retry...");
// Retry ~twice during a slot
sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2)).await;
diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs
index 5c02280ce71fea..9cab007a90680b 100644
--- a/rpc-test/tests/rpc.rs
+++ b/rpc-test/tests/rpc.rs
@@ -83,7 +83,7 @@ fn test_rpc_send_tx() {
.parse()
.unwrap();
- info!("blockhash: {:?}", blockhash);
+ info!("blockhash: {blockhash:?}");
let tx = system_transaction::transfer(
&alice,
&bob_pubkey,
@@ -442,7 +442,7 @@ fn test_rpc_subscriptions() {
sleep(Duration::from_millis(100));
}
if mint_balance != expected_mint_balance {
- error!("mint-check timeout. mint_balance {:?}", mint_balance);
+ error!("mint-check timeout. mint_balance {mint_balance:?}");
}
// Wait for all signature subscriptions
diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs
index 568ddc31b49d2a..4430f6f1782698 100644
--- a/rpc/src/optimistically_confirmed_bank_tracker.rs
+++ b/rpc/src/optimistically_confirmed_bank_tracker.rs
@@ -165,10 +165,7 @@ impl OptimisticallyConfirmedBankTracker {
match sender.send(notification.clone()) {
Ok(_) => {}
Err(err) => {
- info!(
- "Failed to send notification {:?}, error: {:?}",
- notification, err
- );
+ info!("Failed to send notification {notification:?}, error: {err:?}");
}
}
}
@@ -250,10 +247,7 @@ impl OptimisticallyConfirmedBankTracker {
let root = roots[i];
if root > *newest_root_slot {
let parent = roots[i - 1];
- debug!(
- "Doing SlotNotification::Root for root {}, parent: {}",
- root, parent
- );
+ debug!("Doing SlotNotification::Root for root {root}, parent: {parent}");
Self::notify_slot_status(
slot_notification_subscribers,
SlotNotification::Root((root, parent)),
@@ -276,7 +270,7 @@ impl OptimisticallyConfirmedBankTracker {
slot_notification_subscribers: &Option>>>,
prioritization_fee_cache: &PrioritizationFeeCache,
) {
- debug!("received bank notification: {:?}", notification);
+ debug!("received bank notification: {notification:?}");
match notification {
BankNotification::OptimisticallyConfirmed(slot) => {
let bank = bank_forks.read().unwrap().get(slot);
@@ -344,8 +338,8 @@ impl OptimisticallyConfirmedBankTracker {
if pending_optimistically_confirmed_banks.remove(&bank.slot()) {
debug!(
- "Calling notify_gossip_subscribers to send deferred notification {:?}",
- frozen_slot
+ "Calling notify_gossip_subscribers to send deferred notification \
+ {frozen_slot:?}"
);
Self::notify_or_defer_confirmed_banks(
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 1df8307ebd9bc3..f04e5a2f1d2c64 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -344,7 +344,7 @@ impl JsonRpcRequestProcessor {
#[allow(deprecated)]
fn bank(&self, commitment: Option) -> Arc {
- debug!("RPC commitment_config: {:?}", commitment);
+ debug!("RPC commitment_config: {commitment:?}");
let commitment = commitment.unwrap_or_default();
if commitment.is_confirmed() {
@@ -366,10 +366,10 @@ impl JsonRpcRequestProcessor {
match commitment.commitment {
CommitmentLevel::Processed => {
- debug!("RPC using the heaviest slot: {:?}", slot);
+ debug!("RPC using the heaviest slot: {slot:?}");
}
CommitmentLevel::Finalized => {
- debug!("RPC using block: {:?}", slot);
+ debug!("RPC using block: {slot:?}");
}
CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated
};
@@ -1016,7 +1016,7 @@ impl JsonRpcRequestProcessor {
None => Err(Error::invalid_request()),
},
Err(err) => {
- warn!("slot_meta_iterator failed: {:?}", err);
+ warn!("slot_meta_iterator failed: {err:?}");
Err(Error::invalid_request())
}
}
@@ -1897,7 +1897,7 @@ impl JsonRpcRequestProcessor {
bigtable_before = None;
}
Err(err) => {
- warn!("Failed to query Bigtable: {:?}", err);
+ warn!("Failed to query Bigtable: {err:?}");
return Err(RpcCustomError::LongTermStorageUnreachable.into());
}
Ok(_) => {}
@@ -1929,7 +1929,7 @@ impl JsonRpcRequestProcessor {
}
Err(StorageError::SignatureNotFound) => {}
Err(err) => {
- warn!("Failed to query Bigtable: {:?}", err);
+ warn!("Failed to query Bigtable: {err:?}");
return Err(RpcCustomError::LongTermStorageUnreachable.into());
}
}
@@ -2538,7 +2538,10 @@ fn encode_account(
.unwrap_or(account.data().len())
> MAX_BASE58_BYTES
{
- let message = format!("Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please use Base64 encoding.");
+ let message = format!(
+ "Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please \
+ use Base64 encoding."
+ );
Err(error::Error {
code: error::ErrorCode::InvalidRequest,
message,
@@ -2591,8 +2594,7 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) ->
{
if let Some(incorrect_owner_len) = incorrect_owner_len {
info!(
- "Incorrect num bytes ({:?}) provided for spl_token_owner_filter",
- incorrect_owner_len
+ "Incorrect num bytes ({incorrect_owner_len:?}) provided for spl_token_owner_filter"
);
}
owner_key
@@ -2642,8 +2644,7 @@ fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) ->
{
if let Some(incorrect_mint_len) = incorrect_mint_len {
info!(
- "Incorrect num bytes ({:?}) provided for spl_token_mint_filter",
- incorrect_mint_len
+ "Incorrect num bytes ({incorrect_mint_len:?}) provided for spl_token_mint_filter"
);
}
mint
@@ -2703,7 +2704,7 @@ fn _send_transaction(
);
meta.transaction_sender
.send(transaction_info)
- .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err));
+ .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {err}"));
Ok(signature.to_string())
}
@@ -2792,7 +2793,7 @@ pub mod rpc_minimal {
pubkey_str: String,
config: Option,
) -> Result> {
- debug!("get_balance rpc request received: {:?}", pubkey_str);
+ debug!("get_balance rpc request received: {pubkey_str:?}");
let pubkey = verify_pubkey(&pubkey_str)?;
meta.get_balance(&pubkey, config.unwrap_or_default())
}
@@ -2927,7 +2928,7 @@ pub mod rpc_minimal {
let slot = slot.unwrap_or_else(|| bank.slot());
let epoch = bank.epoch_schedule().get_epoch(slot);
- debug!("get_leader_schedule rpc request received: {:?}", slot);
+ debug!("get_leader_schedule rpc request received: {slot:?}");
Ok(meta
.leader_schedule_cache
@@ -3008,10 +3009,7 @@ pub mod rpc_bank {
data_len: usize,
commitment: Option,
) -> Result {
- debug!(
- "get_minimum_balance_for_rent_exemption rpc request received: {:?}",
- data_len
- );
+ debug!("get_minimum_balance_for_rent_exemption rpc request received: {data_len:?}");
if data_len as u64 > solana_system_interface::MAX_PERMITTED_DATA_LENGTH {
return Err(Error::invalid_request());
}
@@ -3052,10 +3050,7 @@ pub mod rpc_bank {
start_slot: Slot,
limit: u64,
) -> Result> {
- debug!(
- "get_slot_leaders rpc request received (start: {} limit: {})",
- start_slot, limit
- );
+ debug!("get_slot_leaders rpc request received (start: {start_slot} limit: {limit})");
let limit = limit as usize;
if limit > MAX_GET_SLOT_LEADERS {
@@ -3223,7 +3218,7 @@ pub mod rpc_accounts {
pubkey_str: String,
config: Option,
) -> BoxFuture>>> {
- debug!("get_account_info rpc request received: {:?}", pubkey_str);
+ debug!("get_account_info rpc request received: {pubkey_str:?}");
async move {
let pubkey = verify_pubkey(&pubkey_str)?;
meta.get_account_info(pubkey, config).await
@@ -3275,10 +3270,7 @@ pub mod rpc_accounts {
pubkey_str: String,
commitment: Option,
) -> Result> {
- debug!(
- "get_token_account_balance rpc request received: {:?}",
- pubkey_str
- );
+ debug!("get_token_account_balance rpc request received: {pubkey_str:?}");
let pubkey = verify_pubkey(&pubkey_str)?;
meta.get_token_account_balance(&pubkey, commitment)
}
@@ -3289,7 +3281,7 @@ pub mod rpc_accounts {
mint_str: String,
commitment: Option,
) -> Result> {
- debug!("get_token_supply rpc request received: {:?}", mint_str);
+ debug!("get_token_supply rpc request received: {mint_str:?}");
let mint = verify_pubkey(&mint_str)?;
meta.get_token_supply(&mint, commitment)
}
@@ -3368,10 +3360,7 @@ pub mod rpc_accounts_scan {
program_id_str: String,
config: Option,
) -> BoxFuture>>> {
- debug!(
- "get_program_accounts rpc request received: {:?}",
- program_id_str
- );
+ debug!("get_program_accounts rpc request received: {program_id_str:?}");
async move {
let program_id = verify_pubkey(&program_id_str)?;
let (config, filters, with_context, sort_results) = if let Some(config) = config {
@@ -3415,10 +3404,7 @@ pub mod rpc_accounts_scan {
mint_str: String,
commitment: Option,
) -> BoxFuture>>> {
- debug!(
- "get_token_largest_accounts rpc request received: {:?}",
- mint_str
- );
+ debug!("get_token_largest_accounts rpc request received: {mint_str:?}");
async move {
let mint = verify_pubkey(&mint_str)?;
meta.get_token_largest_accounts(mint, commitment).await
@@ -3433,10 +3419,7 @@ pub mod rpc_accounts_scan {
token_account_filter: RpcTokenAccountsFilter,
config: Option,
) -> BoxFuture>>> {
- debug!(
- "get_token_accounts_by_owner rpc request received: {:?}",
- owner_str
- );
+ debug!("get_token_accounts_by_owner rpc request received: {owner_str:?}");
async move {
let owner = verify_pubkey(&owner_str)?;
let token_account_filter = verify_token_account_filter(token_account_filter)?;
@@ -3453,10 +3436,7 @@ pub mod rpc_accounts_scan {
token_account_filter: RpcTokenAccountsFilter,
config: Option,
) -> BoxFuture>>> {
- debug!(
- "get_token_accounts_by_delegate rpc request received: {:?}",
- delegate_str
- );
+ debug!("get_token_accounts_by_delegate rpc request received: {delegate_str:?}");
async move {
let delegate = verify_pubkey(&delegate_str)?;
let token_account_filter = verify_token_account_filter(token_account_filter)?;
@@ -3653,7 +3633,7 @@ pub mod rpc_full {
.blockstore
.get_recent_perf_samples(limit)
.map_err(|err| {
- warn!("get_recent_performance_samples failed: {:?}", err);
+ warn!("get_recent_performance_samples failed: {err:?}");
Error::invalid_request()
})?
.into_iter()
@@ -3794,13 +3774,13 @@ pub mod rpc_full {
let transaction =
request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash).map_err(
|err| {
- info!("request_airdrop_transaction failed: {:?}", err);
+ info!("request_airdrop_transaction failed: {err:?}");
Error::internal_error()
},
)?;
let wire_transaction = serialize(&transaction).map_err(|err| {
- info!("request_airdrop: serialize error: {:?}", err);
+ info!("request_airdrop: serialize error: {err:?}");
Error::internal_error()
})?;
@@ -4096,7 +4076,7 @@ pub mod rpc_full {
slot: Slot,
config: Option>,
) -> BoxFuture>> {
- debug!("get_block rpc request received: {:?}", slot);
+ debug!("get_block rpc request received: {slot:?}");
Box::pin(async move { meta.get_block(slot, config).await })
}
@@ -4109,10 +4089,7 @@ pub mod rpc_full {
) -> BoxFuture>> {
let (end_slot, maybe_config) =
wrapper.map(|wrapper| wrapper.unzip()).unwrap_or_default();
- debug!(
- "get_blocks rpc request received: {}-{:?}",
- start_slot, end_slot
- );
+ debug!("get_blocks rpc request received: {start_slot}-{end_slot:?}");
Box::pin(async move {
meta.get_blocks(start_slot, end_slot, config.or(maybe_config))
.await
@@ -4126,10 +4103,7 @@ pub mod rpc_full {
limit: usize,
config: Option,
) -> BoxFuture>> {
- debug!(
- "get_blocks_with_limit rpc request received: {}-{}",
- start_slot, limit,
- );
+ debug!("get_blocks_with_limit rpc request received: {start_slot}-{limit}",);
Box::pin(async move { meta.get_blocks_with_limit(start_slot, limit, config).await })
}
@@ -4147,7 +4121,7 @@ pub mod rpc_full {
signature_str: String,
config: Option>,
) -> BoxFuture>> {
- debug!("get_transaction rpc request received: {:?}", signature_str);
+ debug!("get_transaction rpc request received: {signature_str:?}");
let signature = verify_signature(&signature_str);
if let Err(err) = signature {
return Box::pin(future::err(err));
@@ -4648,7 +4622,8 @@ pub mod tests {
if let Some(account) = bank.get_account(key) {
assert!(
*account.owner() != bpf_loader_upgradeable::id(),
- "LoaderV3 is not supported; to add it, parse the program account and add its programdata size.",
+ "LoaderV3 is not supported; to add it, parse the program account and add its \
+ programdata size.",
);
loaded_accounts_data_size +=
(account.data().len() + TRANSACTION_ACCOUNT_BASE_SIZE) as u32;
@@ -4695,8 +4670,7 @@ pub mod tests {
lamports,
space,
&owner_pubkey,
- )
- .into(),
+ ),
&[],
)?;
@@ -6468,11 +6442,10 @@ pub mod tests {
"id":1,
"method":"simulateTransaction",
"params":[
- "{}",
+ "{tx_serialized_encoded}",
{{ "encoding": "base64" }}
]
}}"#,
- tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
@@ -6512,11 +6485,10 @@ pub mod tests {
"id":1,
"method":"simulateTransaction",
"params":[
- "{}",
+ "{tx_serialized_encoded}",
{{ "innerInstructions": false, "encoding": "base64" }}
]
}}"#,
- tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
@@ -6556,11 +6528,10 @@ pub mod tests {
"id":1,
"method":"simulateTransaction",
"params":[
- "{}",
+ "{tx_serialized_encoded}",
{{ "innerInstructions": true, "encoding": "base64" }}
]
}}"#,
- tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
@@ -7236,9 +7207,9 @@ pub mod tests {
let expected = (
JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION,
String::from(
- "Transaction version (0) is not supported by the requesting client. \
- Please try the request again with the following configuration parameter: \
- \"maxSupportedTransactionVersion\": 0",
+ "Transaction version (0) is not supported by the requesting client. Please try \
+ the request again with the following configuration parameter: \
+ \"maxSupportedTransactionVersion\": 0",
),
);
assert_eq!(response, expected);
@@ -7265,7 +7236,8 @@ pub mod tests {
{
assert_eq!(
version, None,
- "requests which don't set max_supported_transaction_version shouldn't receive a version"
+ "requests which don't set max_supported_transaction_version shouldn't receive a \
+ version"
);
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == confirmed_block_signatures[0].to_string() {
@@ -7309,7 +7281,8 @@ pub mod tests {
{
assert_eq!(
version, None,
- "requests which don't set max_supported_transaction_version shouldn't receive a version"
+ "requests which don't set max_supported_transaction_version shouldn't receive a \
+ version"
);
if let EncodedTransaction::LegacyBinary(transaction) = transaction {
let decoded_transaction: Transaction =
@@ -8939,9 +8912,10 @@ pub mod tests {
decode_and_deserialize::(tx58, TransactionBinaryEncoding::Base58)
.unwrap_err(),
Error::invalid_params(format!(
- "base58 encoded solana_transaction::Transaction too large: {tx58_len} bytes (max: encoded/raw {MAX_BASE58_SIZE}/{PACKET_DATA_SIZE})",
- )
- ));
+ "base58 encoded solana_transaction::Transaction too large: {tx58_len} bytes (max: \
+ encoded/raw {MAX_BASE58_SIZE}/{PACKET_DATA_SIZE})",
+ ))
+ );
let tx64 = BASE64_STANDARD.encode(&tx_ser);
let tx64_len = tx64.len();
@@ -8949,9 +8923,10 @@ pub mod tests {
decode_and_deserialize::(tx64, TransactionBinaryEncoding::Base64)
.unwrap_err(),
Error::invalid_params(format!(
- "base64 encoded solana_transaction::Transaction too large: {tx64_len} bytes (max: encoded/raw {MAX_BASE64_SIZE}/{PACKET_DATA_SIZE})",
- )
- ));
+ "base64 encoded solana_transaction::Transaction too large: {tx64_len} bytes (max: \
+ encoded/raw {MAX_BASE64_SIZE}/{PACKET_DATA_SIZE})",
+ ))
+ );
let too_big = PACKET_DATA_SIZE + 1;
let tx_ser = vec![0x00u8; too_big];
@@ -8960,7 +8935,8 @@ pub mod tests {
decode_and_deserialize::(tx58, TransactionBinaryEncoding::Base58)
.unwrap_err(),
Error::invalid_params(format!(
- "decoded solana_transaction::Transaction too large: {too_big} bytes (max: {PACKET_DATA_SIZE} bytes)"
+ "decoded solana_transaction::Transaction too large: {too_big} bytes (max: \
+ {PACKET_DATA_SIZE} bytes)"
))
);
@@ -8969,7 +8945,8 @@ pub mod tests {
decode_and_deserialize::(tx64, TransactionBinaryEncoding::Base64)
.unwrap_err(),
Error::invalid_params(format!(
- "decoded solana_transaction::Transaction too large: {too_big} bytes (max: {PACKET_DATA_SIZE} bytes)"
+ "decoded solana_transaction::Transaction too large: {too_big} bytes (max: \
+ {PACKET_DATA_SIZE} bytes)"
))
);
@@ -8979,8 +8956,8 @@ pub mod tests {
decode_and_deserialize::(tx64.clone(), TransactionBinaryEncoding::Base64)
.unwrap_err(),
Error::invalid_params(
- "failed to deserialize solana_transaction::Transaction: invalid value: \
- continue signal on byte-three, expected a terminal signal on or before byte-three"
+ "failed to deserialize solana_transaction::Transaction: invalid value: continue \
+ signal on byte-three, expected a terminal signal on or before byte-three"
.to_string()
)
);
@@ -8997,8 +8974,8 @@ pub mod tests {
decode_and_deserialize::(tx58.clone(), TransactionBinaryEncoding::Base58)
.unwrap_err(),
Error::invalid_params(
- "failed to deserialize solana_transaction::Transaction: invalid value: \
- continue signal on byte-three, expected a terminal signal on or before byte-three"
+ "failed to deserialize solana_transaction::Transaction: invalid value: continue \
+ signal on byte-three, expected a terminal signal on or before byte-three"
.to_string()
)
);
diff --git a/rpc/src/rpc_health.rs b/rpc/src/rpc_health.rs
index 38ec51a6173597..56684d8edbd802 100644
--- a/rpc/src/rpc_health.rs
+++ b/rpc/src/rpc_health.rs
@@ -104,9 +104,9 @@ impl RpcHealth {
let num_slots = cluster_latest_optimistically_confirmed_slot
.saturating_sub(my_latest_optimistically_confirmed_slot);
warn!(
- "health check: behind by {num_slots} \
- slots: me={my_latest_optimistically_confirmed_slot}, \
- latest cluster={cluster_latest_optimistically_confirmed_slot}",
+ "health check: behind by {num_slots} slots: \
+ me={my_latest_optimistically_confirmed_slot}, latest \
+ cluster={cluster_latest_optimistically_confirmed_slot}",
);
RpcHealthStatus::Behind { num_slots }
}
diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs
index 0cb602982e707c..5532558a0683fc 100644
--- a/rpc/src/rpc_pubsub_service.rs
+++ b/rpc/src/rpc_pubsub_service.rs
@@ -83,11 +83,11 @@ pub struct PubSubService {
impl PubSubService {
pub fn new(
pubsub_config: PubSubConfig,
- subscriptions: &Arc