forked from solana-labs/solana
-
Notifications
You must be signed in to change notification settings - Fork 1k
Fix turbine to sign last fec set if last entry in slot #6887
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
maheshr
merged 6 commits into
anza-xyz:master
from
maheshr:fix_turbine_to_sign_last_fec_set_if_last_slot_in_entry
Aug 29, 2025
Merged
Changes from all commits
Commits
Show all changes
6 commits
Select commit
Hold shift + click to select a range
2bac121
unit tests pass. local validator not working yet
maheshr 7551ec2
Works locally. Tweaked algorithm on splitting out signed FEC set.
maheshr ca4f477
Incorporated Greg's feedback. All unit-tests in solana-ledger pass.
maheshr e8d7bd9
Fixed failing test_multi_fec_block_coding. Used DATA_SHREDS_PER_FEC_BLOCK
maheshr b7dab58
Removed design comment as per Alex's feedback
maheshr 2866349
Updated comments. Put function behind feature gate.
maheshr File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1026,7 +1026,7 @@ pub(crate) fn make_shreds_from_data( | |
| keypair: &Keypair, | ||
| // The Merkle root of the previous erasure batch if chained. | ||
| chained_merkle_root: Option<Hash>, | ||
| mut data: &[u8], // Serialized &[Entry] | ||
| data: &[u8], // Serialized &[Entry] | ||
| slot: Slot, | ||
| parent_slot: Slot, | ||
| shred_version: u16, | ||
|
|
@@ -1039,18 +1039,31 @@ pub(crate) fn make_shreds_from_data( | |
| ) -> Result<Vec<Shred>, Error> { | ||
| let now = Instant::now(); | ||
| let chained = chained_merkle_root.is_some(); | ||
| let resigned = chained && is_last_in_slot; | ||
|
|
||
| // only sign if last fec set in slot and is chained | ||
| let sign_last_fec_set = chained && is_last_in_slot; | ||
| let proof_size = PROOF_ENTRIES_FOR_32_32_BATCH; | ||
| let data_buffer_per_shred_size = ShredData::capacity(proof_size, chained, resigned)?; | ||
|
|
||
| // unsigned data_buffer size | ||
| let data_buffer_per_shred_size = ShredData::capacity(proof_size, chained, false)?; | ||
| let data_buffer_total_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_per_shred_size; | ||
|
|
||
| // signed data_buffer size | ||
| let data_buffer_per_shred_size_signed = if sign_last_fec_set { | ||
| ShredData::capacity(proof_size, chained, true)? | ||
| } else { | ||
| 0 | ||
| }; | ||
| let data_buffer_total_size_signed = | ||
| DATA_SHREDS_PER_FEC_BLOCK * data_buffer_per_shred_size_signed; | ||
|
|
||
| // Common header for the data shreds. | ||
| let mut common_header_data = ShredCommonHeader { | ||
| signature: Signature::default(), | ||
| shred_variant: ShredVariant::MerkleData { | ||
| proof_size, | ||
| chained, | ||
| resigned, | ||
| resigned: false, | ||
| }, | ||
| slot, | ||
| index: next_shred_index, | ||
|
|
@@ -1063,7 +1076,7 @@ pub(crate) fn make_shreds_from_data( | |
| shred_variant: ShredVariant::MerkleCode { | ||
| proof_size, | ||
| chained, | ||
| resigned, | ||
| resigned: false, | ||
| }, | ||
| index: next_code_index, | ||
| ..common_header_data | ||
|
|
@@ -1083,18 +1096,34 @@ pub(crate) fn make_shreds_from_data( | |
| } | ||
| }; | ||
|
|
||
| // Pre-allocate shreds to avoid reallocations. | ||
| let mut shreds = { | ||
| let number_of_batches = data.len().div_ceil(data_buffer_total_size); | ||
| let total_num_shreds = SHREDS_PER_FEC_BLOCK * number_of_batches; | ||
| Vec::<Shred>::with_capacity(total_num_shreds) | ||
| let (mut unsigned_data, signed_data) = if sign_last_fec_set { | ||
| // Reserve at least one signed batch (may be empty) at the end. | ||
| if data.len() > data_buffer_total_size_signed { | ||
| // sign everything except the last batch | ||
| let split_at = data.len() - data_buffer_total_size_signed; | ||
| data.split_at(split_at) | ||
| } else { | ||
| // only enough data for one fec set, sign the whole thing | ||
| (&[][..], data) | ||
| } | ||
| } else { | ||
| // not last fec set, so don't sign | ||
| (data, &[][..]) | ||
| }; | ||
| stats.data_bytes += data.len(); | ||
| stats.data_bytes += unsigned_data.len() + signed_data.len(); | ||
|
|
||
| let unsigned_sets = unsigned_data.len().div_ceil(data_buffer_total_size); | ||
| let number_of_fec_sets = if sign_last_fec_set { | ||
| unsigned_sets + 1 | ||
| } else { | ||
| unsigned_sets | ||
| }; | ||
| let mut shreds = Vec::<Shred>::with_capacity(SHREDS_PER_FEC_BLOCK * number_of_fec_sets); | ||
|
|
||
| // Split the data into full erasure batches and initialize data and coding | ||
| // shreds for each batch. | ||
| while data.len() >= data_buffer_total_size { | ||
| let (current_batch_data_chunk, rest) = data.split_at(data_buffer_total_size); | ||
| while unsigned_data.len() >= data_buffer_total_size { | ||
| let (current_batch_data_chunk, rest) = unsigned_data.split_at(data_buffer_total_size); | ||
| debug_assert_eq!( | ||
| current_batch_data_chunk.len(), | ||
| DATA_SHREDS_PER_FEC_BLOCK * data_buffer_per_shred_size | ||
|
|
@@ -1110,7 +1139,7 @@ pub(crate) fn make_shreds_from_data( | |
| .map(Shred::ShredData), | ||
| ); | ||
| shreds.extend(make_shreds_code_header_only(&mut common_header_code).map(Shred::ShredCode)); | ||
| data = rest; | ||
| unsigned_data = rest; | ||
| } | ||
|
|
||
| // Two possibilities for taking this conditional: | ||
|
|
@@ -1120,29 +1149,33 @@ pub(crate) fn make_shreds_from_data( | |
| // 2.) Shreds is_empty, which only happens when we entered w/ zero data. | ||
| // | ||
| // In either case, we want to generate empty data shreds. | ||
| if !data.is_empty() || shreds.is_empty() { | ||
| stats.padding_bytes += data_buffer_total_size - data.len(); | ||
| common_header_data.shred_variant = ShredVariant::MerkleData { | ||
| if !unsigned_data.is_empty() || (shreds.is_empty() && !sign_last_fec_set) { | ||
| stats.padding_bytes += data_buffer_total_size - unsigned_data.len(); | ||
| shred_leftover_data( | ||
| proof_size, | ||
| chained, | ||
| resigned, | ||
| }; | ||
| common_header_code.shred_variant = ShredVariant::MerkleCode { | ||
| false, | ||
| unsigned_data, | ||
| data_buffer_per_shred_size, | ||
| &mut common_header_data, | ||
| &mut common_header_code, | ||
| data_header, | ||
| &mut shreds, | ||
| ); | ||
| } | ||
| if !signed_data.is_empty() || (shreds.is_empty() && sign_last_fec_set) { | ||
| stats.padding_bytes += data_buffer_total_size_signed - signed_data.len(); | ||
| shred_leftover_data( | ||
| proof_size, | ||
| chained, | ||
| resigned, | ||
| }; | ||
| common_header_data.fec_set_index = common_header_data.index; | ||
| common_header_code.fec_set_index = common_header_data.fec_set_index; | ||
| shreds.extend({ | ||
| // Create data chunks out of remaining data + padding. | ||
| let chunks = data | ||
| .chunks(data_buffer_per_shred_size) | ||
| .chain(std::iter::repeat(&[][..])) // possible padding | ||
| .take(DATA_SHREDS_PER_FEC_BLOCK); | ||
| make_shreds_data(&mut common_header_data, data_header, chunks).map(Shred::ShredData) | ||
| }); | ||
| shreds.extend(make_shreds_code_header_only(&mut common_header_code).map(Shred::ShredCode)); | ||
| true, | ||
| signed_data, | ||
| data_buffer_per_shred_size_signed, | ||
| &mut common_header_data, | ||
| &mut common_header_code, | ||
| data_header, | ||
| &mut shreds, | ||
| ); | ||
| } | ||
|
|
||
| // Adjust flags for the very last data shred. | ||
|
|
@@ -1212,6 +1245,41 @@ pub(crate) fn make_shreds_from_data( | |
| Ok(shreds) | ||
| } | ||
|
|
||
| #[allow(clippy::too_many_arguments)] | ||
| fn shred_leftover_data( | ||
| proof_size: u8, | ||
| chained: bool, | ||
| resigned: bool, | ||
| data: &[u8], | ||
| data_buffer_per_shred_size: usize, | ||
| common_header_data: &mut ShredCommonHeader, | ||
| common_header_code: &mut ShredCommonHeader, | ||
| data_header: DataShredHeader, | ||
| shreds: &mut Vec<Shred>, | ||
| ) { | ||
| common_header_data.shred_variant = ShredVariant::MerkleData { | ||
| proof_size, | ||
| chained, | ||
| resigned, | ||
| }; | ||
| common_header_code.shred_variant = ShredVariant::MerkleCode { | ||
| proof_size, | ||
| chained, | ||
| resigned, | ||
| }; | ||
| common_header_data.fec_set_index = common_header_data.index; | ||
| common_header_code.fec_set_index = common_header_data.fec_set_index; | ||
| shreds.extend({ | ||
| // Create data chunks out of remaining data + padding. | ||
| let chunks = data | ||
| .chunks(data_buffer_per_shred_size) | ||
| .chain(std::iter::repeat(&[][..])) // possible padding | ||
| .take(DATA_SHREDS_PER_FEC_BLOCK); | ||
| make_shreds_data(common_header_data, data_header, chunks).map(Shred::ShredData) | ||
| }); | ||
| shreds.extend(make_shreds_code_header_only(common_header_code).map(Shred::ShredCode)); | ||
| } | ||
|
|
||
| // Given shreds of the same erasure batch: | ||
| // - Writes common and {data,coding} headers into shreds' payload. | ||
| // - Fills in erasure code buffers in the coding shreds. | ||
|
|
@@ -1687,7 +1755,11 @@ mod test { | |
| let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); | ||
| let keypair = Keypair::new(); | ||
| let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); | ||
| let resigned = chained && is_last_in_slot; | ||
|
|
||
| // only sign last batch if it is chained and is the last in slot | ||
| // let resigned = chained && is_last_in_slot; | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. nit: looks like this was left in. can be deleted: ^ you can do this in a follow-up PR |
||
| let sign_last_fec_set = chained && is_last_in_slot; | ||
|
|
||
| let slot = 149_745_689; | ||
| let parent_slot = slot - rng.gen_range(1..65536); | ||
| let shred_version = rng.gen(); | ||
|
|
@@ -1720,14 +1792,12 @@ mod test { | |
| }) | ||
| .collect(); | ||
| // Assert that the input data can be recovered from data shreds. | ||
| assert_eq!( | ||
| data, | ||
| data_shreds | ||
| .iter() | ||
| .flat_map(|shred| shred.data().unwrap()) | ||
| .copied() | ||
| .collect::<Vec<_>>() | ||
| ); | ||
| let data2 = data_shreds | ||
| .iter() | ||
| .flat_map(|shred| shred.data().unwrap()) | ||
| .copied() | ||
| .collect::<Vec<_>>(); | ||
| assert_eq!(data, data2); | ||
| // Assert that shreds sanitize and verify. | ||
| let pubkey = keypair.pubkey(); | ||
| for shred in &shreds { | ||
|
|
@@ -1775,8 +1845,10 @@ mod test { | |
| // Verify common, data and coding headers. | ||
| let mut num_data_shreds = 0; | ||
| let mut num_coding_shreds = 0; | ||
| for shred in &shreds { | ||
| for (index, shred) in shreds.iter().enumerate() { | ||
| let common_header = shred.common_header(); | ||
| let resigned = sign_last_fec_set && index >= shreds.len() - 64; | ||
|
|
||
| assert_eq!(common_header.slot, slot); | ||
| assert_eq!(common_header.version, shred_version); | ||
| let proof_size = shred.proof_size().unwrap(); | ||
|
|
||
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.