diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 504776db1e1a25..fdbc1894e804bd 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -169,6 +169,11 @@ fn run_check_duplicate( shred_slot, &root_bank, ); + let chained_merkle_conflict_duplicate_proofs = cluster_nodes::check_feature_activation( + &feature_set::chained_merkle_conflict_duplicate_proofs::id(), + shred_slot, + &root_bank, + ); let (shred1, shred2) = match shred { PossibleDuplicateShred::LastIndexConflict(shred, conflict) | PossibleDuplicateShred::ErasureConflict(shred, conflict) => { @@ -196,6 +201,24 @@ fn run_check_duplicate( return Ok(()); } } + PossibleDuplicateShred::ChainedMerkleRootConflict(shred, conflict) => { + if chained_merkle_conflict_duplicate_proofs { + // Although this proof can be immediately stored on detection, we wait until + // here in order to check the feature flag, as storage in blockstore can + // preclude the detection of other duplicate proofs in this slot + if blockstore.has_duplicate_shreds_in_slot(shred_slot) { + return Ok(()); + } + blockstore.store_duplicate_slot( + shred_slot, + conflict.clone(), + shred.clone().into_payload(), + )?; + (shred, conflict) + } else { + return Ok(()); + } + } PossibleDuplicateShred::Exists(shred) => { // Unlike the other cases we have to wait until here to decide to handle the duplicate and store // in blockstore. 
This is because the duplicate could have been part of the same insert batch, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index cda801bb296e45..948b42800ae6a8 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -146,6 +146,7 @@ pub enum PossibleDuplicateShred { LastIndexConflict(/* original */ Shred, /* conflict */ Vec), // The index of this shred conflicts with `slot_meta.last_index` ErasureConflict(/* original */ Shred, /* conflict */ Vec), // The coding shred has a conflict in the erasure_meta MerkleRootConflict(/* original */ Shred, /* conflict */ Vec), // Merkle root conflict in the same fec set + ChainedMerkleRootConflict(/* original */ Shred, /* conflict */ Vec), // Merkle root chaining conflict with previous fec set } impl PossibleDuplicateShred { @@ -155,6 +156,7 @@ impl PossibleDuplicateShred { Self::LastIndexConflict(shred, _) => shred.slot(), Self::ErasureConflict(shred, _) => shred.slot(), Self::MerkleRootConflict(shred, _) => shred.slot(), + Self::ChainedMerkleRootConflict(shred, _) => shred.slot(), } } } @@ -1283,6 +1285,18 @@ impl Blockstore { return false; } } + + // Check that the chaining between our current shred, the previous fec_set + // and the next fec_set + if !self.check_chained_merkle_root_consistency( + just_received_shreds, + &erasure_set, + merkle_root_metas, + &shred, + duplicate_shreds, + ) { + return false; + } } let erasure_meta_entry = erasure_metas.entry(erasure_set).or_insert_with(|| { @@ -1517,6 +1531,18 @@ impl Blockstore { return Err(InsertDataShredError::InvalidShred); } } + + // Check that the chaining between our current shred, the previous fec_set + // and the next fec_set + if !self.check_chained_merkle_root_consistency( + just_inserted_shreds, + &erasure_set, + merkle_root_metas, + &shred, + duplicate_shreds, + ) { + return Err(InsertDataShredError::InvalidShred); + } } let newly_completed_data_sets = self.insert_data_shred( @@ -1648,6 +1674,113 @@ impl Blockstore { false } + /// 
Returns true if there is no chaining conflict between + /// the `shred` and `merkle_root_meta` of the next or previous + /// FEC set, or if shreds from the next or previous set are + /// yet to be received. + /// + /// Otherwise return false and add duplicate proof to + /// `duplicate_shreds`. + fn check_chained_merkle_root_consistency( + &self, + just_inserted_shreds: &HashMap, + erasure_set: &ErasureSetId, + merkle_root_metas: &HashMap>, + shred: &Shred, + duplicate_shreds: &mut Vec, + ) -> bool { + let (slot, fec_set_index) = erasure_set.store_key(); + + let next_erasure_set = ErasureSetId::new(slot, fec_set_index + 1); + if let Some(next_merkle_root_meta) = + merkle_root_metas.get(&next_erasure_set).map(AsRef::as_ref) + { + let next_shred_id = ShredId::new( + slot, + next_merkle_root_meta.first_received_shred_index(), + next_merkle_root_meta.first_received_shred_type(), + ); + let next_shred = + Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id) + .expect("Shred indicated by merkle root meta must exist") + .into_owned(); + let next_shred = Shred::new_from_serialized_shred(next_shred) + .expect("Shred indicated by merkle root meta should deserialize"); + + if !self.check_chaining(shred, &next_shred, duplicate_shreds) { + return false; + } + } + + if fec_set_index == 0 { + // Although the first fec set chains to the last fec set of the parent block, + // if this chain is incorrect we do not know which block is the duplicate until votes + // are received. We instead delay this check until the block reaches duplicate + // confirmation. 
+ return true; + } + let prev_erasure_set = ErasureSetId::new(slot, fec_set_index - 1); + if let Some(prev_merkle_root_meta) = + merkle_root_metas.get(&prev_erasure_set).map(AsRef::as_ref) + { + let prev_shred_id = ShredId::new( + slot, + prev_merkle_root_meta.first_received_shred_index(), + prev_merkle_root_meta.first_received_shred_type(), + ); + let prev_shred = + Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, prev_shred_id) + .expect("Shred indicated by merkle root meta must exist") + .into_owned(); + let prev_shred = Shred::new_from_serialized_shred(prev_shred) + .expect("Shred indicated by merkle root meta should deserialize"); + if !self.check_chaining(&prev_shred, shred, duplicate_shreds) { + return false; + } + } + + true + } + + /// Checks if the chained merkle root of `next_shred` == `prev_shred`'s merkle root. + /// + /// Returns true if no conflict, otherwise updates duplicate_shreds + fn check_chaining( + &self, + prev_shred: &Shred, + next_shred: &Shred, + duplicate_shreds: &mut Vec, + ) -> bool { + let Ok(chained_merkle_root) = next_shred.chained_merkle_root() else { + // Chained merkle roots have not been enabled yet + return true; + }; + let merkle_root = prev_shred.merkle_root().ok(); + if merkle_root == Some(chained_merkle_root) { + return true; + } + warn!( + "Received conflicting chained merkle roots for slot: {}, + shred {:?} type {:?} has merkle root {:?}, however + next shred {:?} type {:?} chains to merkle root {:?}. 
Reporting as duplicate", + prev_shred.slot(), + prev_shred.erasure_set(), + prev_shred.shred_type(), + merkle_root, + next_shred.erasure_set(), + next_shred.shred_type(), + chained_merkle_root, + ); + + if !self.has_duplicate_shreds_in_slot(prev_shred.slot()) { + duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict( + prev_shred.clone(), + next_shred.payload().clone(), + )); + } + false + } + fn should_insert_data_shred( &self, shred: &Shred, diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index e3c896f71befa8..a1526bb502b1b5 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -286,6 +286,10 @@ impl ShredId { pub(crate) struct ErasureSetId(Slot, /*fec_set_index:*/ u32); impl ErasureSetId { + pub(crate) fn new(slot: Slot, fec_set_index: u32) -> Self { + Self(slot, fec_set_index) + } + pub(crate) fn slot(&self) -> Slot { self.0 } @@ -342,6 +346,7 @@ impl Shred { dispatch!(pub(crate) fn erasure_shard_index(&self) -> Result); dispatch!(pub fn into_payload(self) -> Vec); + dispatch!(pub fn chained_merkle_root(&self) -> Result); dispatch!(pub fn merkle_root(&self) -> Result); dispatch!(pub fn payload(&self) -> &Vec); dispatch!(pub fn sanitize(&self) -> Result<(), Error>); diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index ebc4a711b8c774..a5a50b5b2912cd 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -181,6 +181,14 @@ impl ShredData { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?) 
} + pub(super) fn chained_merkle_root(&self) -> Result { + let offset = self.chained_merkle_root_offset()?; + self.payload + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .map(Hash::new) + .ok_or(Error::InvalidPayloadSize(self.payload.len())) + } + fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { let offset = self.chained_merkle_root_offset()?; let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { @@ -328,7 +336,7 @@ impl ShredCode { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?) } - fn chained_merkle_root(&self) -> Result { + pub(super) fn chained_merkle_root(&self) -> Result { let offset = self.chained_merkle_root_offset()?; self.payload .get(offset..offset + SIZE_OF_MERKLE_ROOT) diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 0ad97a0f729a77..067d7edaf437eb 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -47,6 +47,13 @@ impl ShredCode { } } + pub(super) fn chained_merkle_root(&self) -> Result { + match self { + Self::Legacy(_) => Err(Error::InvalidShredType), + Self::Merkle(shred) => shred.chained_merkle_root(), + } + } + pub(super) fn merkle_root(&self) -> Result { match self { Self::Legacy(_) => Err(Error::InvalidShredType), diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index 5b9965afd787c8..976d5a884d5638 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -41,6 +41,13 @@ impl ShredData { } } + pub(super) fn chained_merkle_root(&self) -> Result { + match self { + Self::Legacy(_) => Err(Error::InvalidShredType), + Self::Merkle(shred) => shred.chained_merkle_root(), + } + } + pub(super) fn merkle_root(&self) -> Result { match self { Self::Legacy(_) => Err(Error::InvalidShredType), diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index abecf4fafb6b1d..b483a301a2f0ae 100644 --- a/sdk/src/feature_set.rs +++ 
b/sdk/src/feature_set.rs @@ -776,6 +776,10 @@ pub mod enable_gossip_duplicate_proof_ingestion { solana_sdk::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); } +pub mod chained_merkle_conflict_duplicate_proofs { + solana_sdk::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8"); +} + pub mod enable_chained_merkle_shreds { solana_sdk::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); } @@ -975,6 +979,7 @@ lazy_static! { (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"), (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"), (remove_rounding_in_fee_calculation::id(), "Removing unwanted rounding in fee calculation #34982"), + (chained_merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for chained merkle root conflicts #35316"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter()