Merge remote-tracking branch 'upstream/master'
Lijun Wang committed Mar 17, 2024
2 parents 564e79b + b27c80a commit 83a84c9
Showing 25 changed files with 310 additions and 230 deletions.
7 changes: 0 additions & 7 deletions Cargo.lock

4 changes: 2 additions & 2 deletions accounts-db/src/tiered_storage/hot.rs
@@ -58,7 +58,7 @@ const MAX_HOT_OWNER_OFFSET: OwnerOffset = OwnerOffset((1 << 29) - 1);
/// bytes in HotAccountOffset.
pub(crate) const HOT_ACCOUNT_ALIGNMENT: usize = 8;

-/// The alignemnt for the blocks inside a hot accounts file. A hot accounts
+/// The alignment for the blocks inside a hot accounts file. A hot accounts
/// file consists of accounts block, index block, owners block, and footer.
/// This requirement allows the offset of each block properly aligned so
/// that they can be readable under mmap.
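For context, a block offset can be rounded up to this alignment with the usual power-of-two trick; a minimal sketch, where align_up is a hypothetical helper and not part of this diff:

// Round `offset` up to the next multiple of `align` (a power of two,
// e.g. HOT_ACCOUNT_ALIGNMENT = 8).
fn align_up(offset: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (offset + align - 1) & !(align - 1)
}
// align_up(83, 8) == 88: a block ending at byte 83 is padded so the
// next block starts at byte 88.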
@@ -190,7 +190,7 @@ impl TieredAccountMeta for HotAccountMeta {
/// A builder function that initializes the account data size.
fn with_account_data_size(self, _account_data_size: u64) -> Self {
// Hot meta does not store its data size as it derives its data length
-// by comparing the offets of two consecutive account meta entries.
+// by comparing the offsets of two consecutive account meta entries.
self
}

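The comment above captures a key property of the hot format: an account's data length is not stored but derived from the gap between consecutive meta offsets. A minimal sketch under assumed names (HotEntry, META_SIZE, and data_len are illustrative, not the crate's API):

// With fixed-size metas packed back to back, the data length of
// entry i is the distance to the next entry's offset minus the meta
// size itself; the last entry would instead be bounded by the start
// of the index block (omitted here).
struct HotEntry {
    offset: usize, // byte offset of this account's meta in the file
}

const META_SIZE: usize = 8; // assumed fixed meta size, for illustration

fn data_len(entries: &[HotEntry], i: usize) -> usize {
    entries[i + 1].offset - entries[i].offset - META_SIZE
}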
1 change: 0 additions & 1 deletion core/Cargo.toml
@@ -85,7 +85,6 @@ trees = { workspace = true }
[dev-dependencies]
assert_matches = { workspace = true }
fs_extra = { workspace = true }
-raptorq = { workspace = true }
serde_json = { workspace = true }
serial_test = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
58 changes: 2 additions & 56 deletions core/benches/shredder.rs
@@ -4,26 +4,17 @@
extern crate test;

use {
-rand::{seq::SliceRandom, Rng},
-raptorq::{Decoder, Encoder},
+rand::Rng,
solana_entry::entry::{create_ticks, Entry},
solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, ReedSolomonCache,
Shred, ShredFlags, Shredder, DATA_SHREDS_PER_FEC_BLOCK, LEGACY_SHRED_DATA_CAPACITY,
},
solana_perf::test_tx,
-solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
+solana_sdk::{hash::Hash, signature::Keypair},
test::Bencher,
};

-// Copied these values here to avoid exposing shreds
-// internals only for the sake of benchmarks.
-
-// size of nonce: 4
-// size of common shred header: 83
-// size of coding shred header: 6
-const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE - 4 - 83 - 6;

fn make_test_entry(txs_per_entry: u64) -> Entry {
Entry {
num_hashes: 100_000,
@@ -61,17 +52,6 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
data_shreds
}

-fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
-let data_shreds = make_shreds(num_shreds);
-let mut data: Vec<u8> = vec![0; num_shreds * VALID_SHRED_DATA_LEN];
-for (i, shred) in (data_shreds[0..num_shreds]).iter().enumerate() {
-data[i * VALID_SHRED_DATA_LEN..(i + 1) * VALID_SHRED_DATA_LEN]
-.copy_from_slice(&shred.payload()[..VALID_SHRED_DATA_LEN]);
-}
-
-data
-}

#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Keypair::new();
@@ -197,37 +177,3 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
Shredder::try_recovery(coding_shreds[..].to_vec(), &reed_solomon_cache).unwrap();
})
}

-#[bench]
-fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
-let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-let data = make_concatenated_shreds(symbol_count);
-bencher.iter(|| {
-let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-encoder.get_encoded_packets(symbol_count as u32);
-})
-}
-
-#[bench]
-fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
-let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-let data = make_concatenated_shreds(symbol_count);
-let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-let mut packets = encoder.get_encoded_packets(symbol_count as u32);
-packets.shuffle(&mut rand::thread_rng());
-
-// Here we simulate losing 1 less than 50% of the packets randomly
-packets.truncate(packets.len() - packets.len() / 2 + 1);
-
-bencher.iter(|| {
-let mut decoder = Decoder::new(encoder.get_config());
-let mut result = None;
-for packet in &packets {
-result = decoder.decode(packet.clone());
-if result.is_some() {
-break;
-}
-}
-assert_eq!(result.unwrap(), data);
-})
-}
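For reference, the removed constant pinned each benchmark symbol to a fixed payload size; assuming Solana's PACKET_DATA_SIZE of 1232 bytes (a 1280-byte IPv6 MTU minus 40 bytes of IPv6 header and 8 bytes of UDP header), the arithmetic was:

const PACKET_DATA_SIZE: usize = 1232; // 1280 - 40 - 8
// nonce (4) + common shred header (83) + coding shred header (6) = 93
const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE - 4 - 83 - 6; // = 1139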
4 changes: 2 additions & 2 deletions core/src/repair/repair_service.rs
@@ -1326,7 +1326,7 @@ mod test {
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;

-let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
+let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot, 0);
for (mut slot_shreds, _) in shreds.into_iter() {
slot_shreds.remove(0);
blockstore.insert_shreds(slot_shreds, None, false).unwrap();
@@ -1621,7 +1621,7 @@ mod test {
let slots: Vec<u64> = vec![2, 3, 5, 7];
let num_entries_per_slot = max_ticks_per_n_shreds(3, None) + 1;

-let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
+let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot, 0);
for (i, (mut slot_shreds, _)) in shreds.into_iter().enumerate() {
slot_shreds.remove(i);
blockstore.insert_shreds(slot_shreds, None, false).unwrap();
2 changes: 1 addition & 1 deletion core/src/repair/repair_weight.rs
@@ -2377,7 +2377,7 @@ mod test {
assert_eq!(repairs[2].slot(), 5);

// Simulate repair on 6 and 5
-for (shreds, _) in make_chaining_slot_entries(&[5, 6], 100) {
+for (shreds, _) in make_chaining_slot_entries(&[5, 6], 100, 0) {
blockstore.insert_shreds(shreds, None, true).unwrap();
}

13 changes: 7 additions & 6 deletions ledger/src/blockstore.rs
@@ -4658,12 +4658,13 @@ pub fn make_many_slot_shreds(
pub fn make_chaining_slot_entries(
chain: &[u64],
entries_per_slot: u64,
+first_parent: u64,
) -> Vec<(Vec<Shred>, Vec<Entry>)> {
let mut slots_shreds_and_entries = vec![];
for (i, slot) in chain.iter().enumerate() {
let parent_slot = {
if *slot == 0 || i == 0 {
-0
+first_parent
} else {
chain[i - 1]
}
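A quick usage sketch of the extended helper, matching how the call sites updated in this commit pass the new argument:

// As before: chain 2 -> 5 -> 10, with the first slot chaining to parent 0.
let shreds = make_chaining_slot_entries(&[2, 5, 10], 10, 0);

// New: the first slot in the chain can descend from an arbitrary parent,
// e.g. slot 8 claiming slot 3 as its parent.
let shreds = make_chaining_slot_entries(&[8, 9], 10, 3);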
@@ -5609,7 +5610,7 @@

let entries_per_slot = 10;
let slots = [2, 5, 10];
-let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
+let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot, 0);

// Get the shreds for slot 10, chaining to slot 5
let (mut orphan_child, _) = all_shreds.remove(2);
@@ -5654,7 +5655,7 @@

let entries_per_slot = 10;
let mut slots = vec![2, 5, 10];
-let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot);
+let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot, 0);
let disconnected_slot = 4;

let (shreds0, _) = all_shreds.remove(0);
@@ -7428,7 +7429,7 @@ pub mod tests {
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
let shreds_per_slot = 10;
let slots = vec![2, 4, 8, 12];
-let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot);
+let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot, 0);
let slot_8_shreds = all_shreds[2].0.clone();
for (slot_shreds, _) in all_shreds {
blockstore.insert_shreds(slot_shreds, None, false).unwrap();
@@ -9963,7 +9964,7 @@ pub mod tests {
let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot];

// Insert into slot 9, mark it as dead
-let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1)
+let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1, 0)
.into_iter()
.flat_map(|x| x.0)
.collect();
@@ -10005,7 +10006,7 @@ pub mod tests {
let unconfirmed_slot = 8;
let slots = vec![confirmed_slot, unconfirmed_slot];

-let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1)
+let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1, 0)
.into_iter()
.flat_map(|x| x.0)
.collect();
2 changes: 1 addition & 1 deletion local-cluster/tests/local_cluster.rs
@@ -3114,7 +3114,7 @@ fn test_optimistic_confirmation_violation_without_tower() {
// |
// -> S4 (C) -> S5
//
-// Step 5:
+// Step 4:
// Without the persisted tower:
// `A` would choose to vote on the fork with `S4 -> S5`.
//
7 changes: 0 additions & 7 deletions programs/stake/src/stake_instruction.rs
@@ -468,12 +468,6 @@ mod tests {
feature_set
}

-fn feature_set_without_require_rent_exempt_split_destination() -> Arc<FeatureSet> {
-let mut feature_set = FeatureSet::all_enabled();
-feature_set.deactivate(&feature_set::require_rent_exempt_split_destination::id());
-Arc::new(feature_set)
-}

fn create_default_account() -> AccountSharedData {
AccountSharedData::new(0, 0, &Pubkey::new_unique())
}
@@ -6013,7 +6007,6 @@ mod tests {
}
}

-#[test_case(feature_set_without_require_rent_exempt_split_destination(), Ok(()); "without_require_rent_exempt_split_destination")]
#[test_case(feature_set_all_enabled(), Err(InstructionError::InsufficientFunds); "all_enabled")]
fn test_split_require_rent_exempt_destination(
feature_set: Arc<FeatureSet>,
15 changes: 3 additions & 12 deletions programs/stake/src/stake_state.rs
@@ -417,15 +417,10 @@ pub fn split(
StakeStateV2::Stake(meta, mut stake, stake_flags) => {
meta.authorized.check(signers, StakeAuthorize::Staker)?;
let minimum_delegation = crate::get_minimum_delegation(&invoke_context.feature_set);
-let is_active = if invoke_context
-.feature_set
-.is_active(&feature_set::require_rent_exempt_split_destination::id())
-{
+let is_active = {
let clock = invoke_context.get_sysvar_cache().get_clock()?;
let status = get_stake_status(invoke_context, &stake, &clock)?;
status.effective > 0
-} else {
-false
};
let validated_split_info = validate_split_amount(
invoke_context,
@@ -990,14 +985,10 @@ fn validate_split_amount(
let rent = invoke_context.get_sysvar_cache().get_rent()?;
let destination_rent_exempt_reserve = rent.minimum_balance(destination_data_len);

-// As of feature `require_rent_exempt_split_destination`, if the source is active stake, one of
-// these criteria must be met:
+// If the source is active stake, one of these criteria must be met:
// 1. the destination account must be prefunded with at least the rent-exempt reserve, or
// 2. the split must consume 100% of the source
-if invoke_context
-.feature_set
-.is_active(&feature_set::require_rent_exempt_split_destination::id())
-&& source_is_active
+if source_is_active
&& source_remaining_balance != 0
&& destination_lamports < destination_rent_exempt_reserve
{
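Distilled from the hunk above, the now-unconditional rule reads as a plain predicate; split_allowed is an illustrative stand-in, not the stake program's API:

// An active source may only split if the destination is already funded
// to its rent-exempt reserve, or the split drains the source entirely.
fn split_allowed(
    source_is_active: bool,
    source_remaining_balance: u64,
    destination_lamports: u64,
    destination_rent_exempt_reserve: u64,
) -> bool {
    !(source_is_active
        && source_remaining_balance != 0
        && destination_lamports < destination_rent_exempt_reserve)
}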
2 changes: 1 addition & 1 deletion programs/vote/benches/process_vote.rs
@@ -48,7 +48,7 @@ fn create_accounts() -> (Slot, SlotHashes, Vec<TransactionAccount>, Vec<AccountMeta>) {
);

for next_vote_slot in 0..num_initial_votes {
-vote_state.process_next_vote_slot(next_vote_slot, 0, 0);
+vote_state.process_next_vote_slot(next_vote_slot, 0, 0, true, true);
}
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
let versioned = VoteStateVersions::new_current(vote_state);