From f0f1a832973c2f03661fa3a1ce4c93cca28ffa2c Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Sat, 14 Mar 2026 11:24:18 -0400 Subject: [PATCH 01/17] fix(aztec-nr): return Option from decode functions and fix event commitment capacity (backport #21264) (#21360) --- .../aztec/src/event/event_interface.nr | 9 +- .../src/messages/discovery/partial_notes.nr | 50 +++++---- .../src/messages/discovery/private_events.nr | 35 +++--- .../src/messages/discovery/private_notes.nr | 38 ++++--- .../src/messages/discovery/process_message.nr | 72 +++++++------ .../aztec-nr/aztec/src/messages/encoding.nr | 102 +++++++++++------- .../aztec-nr/aztec/src/messages/logs/event.nr | 59 ++++++---- .../aztec-nr/aztec/src/messages/logs/note.nr | 65 ++++++----- .../aztec/src/messages/logs/partial_note.nr | 82 ++++++++------ 9 files changed, 311 insertions(+), 201 deletions(-) diff --git a/noir-projects/aztec-nr/aztec/src/event/event_interface.nr b/noir-projects/aztec-nr/aztec/src/event/event_interface.nr index 38ba43d6e471..2c5ccd0d40ed 100644 --- a/noir-projects/aztec-nr/aztec/src/event/event_interface.nr +++ b/noir-projects/aztec-nr/aztec/src/event/event_interface.nr @@ -36,7 +36,7 @@ pub unconstrained fn compute_private_serialized_event_commitment( event_type_id: Field, ) -> Field { let mut commitment_preimage = - BoundedVec::<_, 1 + MAX_EVENT_SERIALIZED_LEN>::from_array([randomness, event_type_id]); + BoundedVec::<_, 2 + MAX_EVENT_SERIALIZED_LEN>::from_array([randomness, event_type_id]); commitment_preimage.extend_from_bounded_vec(serialized_event); poseidon2_hash_with_separator_bounded_vec(commitment_preimage, DOM_SEP__EVENT_COMMITMENT) @@ -46,12 +46,19 @@ mod test { use crate::event::event_interface::{ compute_private_event_commitment, compute_private_serialized_event_commitment, EventInterface, }; + use crate::messages::logs::event::MAX_EVENT_SERIALIZED_LEN; use crate::protocol::traits::{Serialize, ToField}; use crate::test::mocks::mock_event::MockEvent; global VALUE: Field = 7; global RANDOMNESS: Field = 10; + #[test] + unconstrained fn max_size_serialized_event_commitment() { + let serialized_event = BoundedVec::from_array([0; MAX_EVENT_SERIALIZED_LEN]); + let _ = compute_private_serialized_event_commitment(serialized_event, 0, 0); + } + #[test] unconstrained fn event_commitment_equivalence() { let event = MockEvent::new(VALUE).build_event(); diff --git a/noir-projects/aztec-nr/aztec/src/messages/discovery/partial_notes.nr b/noir-projects/aztec-nr/aztec/src/messages/discovery/partial_notes.nr index df29197a0020..77af6845e427 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/discovery/partial_notes.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/discovery/partial_notes.nr @@ -42,27 +42,37 @@ pub unconstrained fn process_partial_note_private_msg( recipient: AztecAddress, msg_metadata: u64, msg_content: BoundedVec, + tx_hash: Field, ) { - // We store the information of the partial note we found in a persistent capsule in PXE, so that we can later - // search for the public log that will complete it. - let (owner, storage_slot, randomness, note_completion_log_tag, note_type_id, packed_private_note_content) = - decode_partial_note_private_message(msg_metadata, msg_content); - - let pending = DeliveredPendingPartialNote { - owner, - storage_slot, - randomness, - note_completion_log_tag, - note_type_id, - packed_private_note_content, - recipient, - }; - - CapsuleArray::at( - contract_address, - DELIVERED_PENDING_PARTIAL_NOTE_ARRAY_LENGTH_CAPSULES_SLOT, - ) - .push(pending); + let decoded = decode_partial_note_private_message(msg_metadata, msg_content); + + if decoded.is_some() { + // We store the information of the partial note we found in a persistent capsule in PXE, so that we can later + // search for the public log that will complete it. + let (owner, storage_slot, randomness, note_completion_log_tag, note_type_id, packed_private_note_content) = + decoded.unwrap(); + + let pending = DeliveredPendingPartialNote { + owner, + storage_slot, + randomness, + note_completion_log_tag, + note_type_id, + packed_private_note_content, + recipient, + }; + + CapsuleArray::at( + contract_address, + DELIVERED_PENDING_PARTIAL_NOTE_ARRAY_LENGTH_CAPSULES_SLOT, + ) + .push(pending); + } else { + debug_log_format( + "Could not decode partial note private message from tx {0}, ignoring", + [tx_hash], + ); + } } /// Searches for logs that would result in the completion of pending partial notes, ultimately resulting in the notes diff --git a/noir-projects/aztec-nr/aztec/src/messages/discovery/private_events.nr b/noir-projects/aztec-nr/aztec/src/messages/discovery/private_events.nr index 697761f7c959..4ead09f40119 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/discovery/private_events.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/discovery/private_events.nr @@ -5,7 +5,7 @@ use crate::{ processing::enqueue_event_for_validation, }, }; -use crate::protocol::{address::AztecAddress, traits::ToField}; +use crate::protocol::{address::AztecAddress, logging::debug_log_format, traits::ToField}; pub unconstrained fn process_private_event_msg( contract_address: AztecAddress, @@ -14,18 +14,27 @@ pub unconstrained fn process_private_event_msg( msg_content: BoundedVec, tx_hash: Field, ) { - let (event_type_id, randomness, serialized_event) = decode_private_event_message(msg_metadata, msg_content); + let decoded = decode_private_event_message(msg_metadata, msg_content); - let event_commitment = - compute_private_serialized_event_commitment(serialized_event, randomness, event_type_id.to_field()); + if decoded.is_some() { + let (event_type_id, randomness, serialized_event) = decoded.unwrap(); - enqueue_event_for_validation( - contract_address, - event_type_id, - randomness, - serialized_event, - event_commitment, - tx_hash, - recipient, - ); + let event_commitment = + compute_private_serialized_event_commitment(serialized_event, randomness, event_type_id.to_field()); + + enqueue_event_for_validation( + contract_address, + event_type_id, + randomness, + serialized_event, + event_commitment, + tx_hash, + recipient, + ); + } else { + debug_log_format( + "Could not decode private event message from tx {0}, ignoring", + [tx_hash], + ); + } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/discovery/private_notes.nr b/noir-projects/aztec-nr/aztec/src/messages/discovery/private_notes.nr index 6366ec9a5543..3ad8567170de 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/discovery/private_notes.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/discovery/private_notes.nr @@ -16,22 +16,30 @@ pub unconstrained fn process_private_note_msg( msg_metadata: u64, msg_content: BoundedVec, ) { - let (note_type_id, owner, storage_slot, randomness, packed_note) = - decode_private_note_message(msg_metadata, msg_content); + let decoded = decode_private_note_message(msg_metadata, msg_content); - attempt_note_discovery( - contract_address, - tx_hash, - unique_note_hashes_in_tx, - first_nullifier_in_tx, - recipient, - compute_note_hash_and_nullifier, - owner, - storage_slot, - randomness, - note_type_id, - packed_note, - ); + if decoded.is_some() { + let (note_type_id, owner, storage_slot, randomness, packed_note) = decoded.unwrap(); + + attempt_note_discovery( + contract_address, + tx_hash, + unique_note_hashes_in_tx, + first_nullifier_in_tx, + recipient, + compute_note_hash_and_nullifier, + owner, + storage_slot, + randomness, + note_type_id, + packed_note, + ); + } else { + debug_log_format( + "Could not decode private note message from tx {0}, ignoring", + [tx_hash], + ); + } } /// Attempts discovery of a note given information about its contents and the transaction in which it is suspected the diff --git a/noir-projects/aztec-nr/aztec/src/messages/discovery/process_message.nr b/noir-projects/aztec-nr/aztec/src/messages/discovery/process_message.nr index 965d7224c8a5..2ac488d053cd 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/discovery/process_message.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/discovery/process_message.nr @@ -56,41 +56,51 @@ pub unconstrained fn process_message_plaintext( // have 3 message types: private notes, partial notes and events. // We decode the message to obtain the message type id, metadata and content. - let (msg_type_id, msg_metadata, msg_content) = decode_message(message_plaintext); + let decoded = decode_message(message_plaintext); - if msg_type_id == PRIVATE_NOTE_MSG_TYPE_ID { - debug_log("Processing private note msg"); + if decoded.is_some() { + let (msg_type_id, msg_metadata, msg_content) = decoded.unwrap(); - process_private_note_msg( - contract_address, - message_context.tx_hash, - message_context.unique_note_hashes_in_tx, - message_context.first_nullifier_in_tx, - message_context.recipient, - compute_note_hash_and_nullifier, - msg_metadata, - msg_content, - ); - } else if msg_type_id == PARTIAL_NOTE_PRIVATE_MSG_TYPE_ID { - debug_log("Processing partial note private msg"); + if msg_type_id == PRIVATE_NOTE_MSG_TYPE_ID { + debug_log("Processing private note msg"); - process_partial_note_private_msg( - contract_address, - message_context.recipient, - msg_metadata, - msg_content, - ); - } else if msg_type_id == PRIVATE_EVENT_MSG_TYPE_ID { - debug_log("Processing private event msg"); + process_private_note_msg( + contract_address, + message_context.tx_hash, + message_context.unique_note_hashes_in_tx, + message_context.first_nullifier_in_tx, + message_context.recipient, + compute_note_hash_and_nullifier, + msg_metadata, + msg_content, + ); + } else if msg_type_id == PARTIAL_NOTE_PRIVATE_MSG_TYPE_ID { + debug_log("Processing partial note private msg"); - process_private_event_msg( - contract_address, - message_context.recipient, - msg_metadata, - msg_content, - message_context.tx_hash, - ); + process_partial_note_private_msg( + contract_address, + message_context.recipient, + msg_metadata, + msg_content, + message_context.tx_hash, + ); + } else if msg_type_id == PRIVATE_EVENT_MSG_TYPE_ID { + debug_log("Processing private event msg"); + + process_private_event_msg( + contract_address, + message_context.recipient, + msg_metadata, + msg_content, + message_context.tx_hash, + ); + } else { + debug_log_format("Unknown msg type id {0}", [msg_type_id as Field]); + } } else { - debug_log_format("Unknown msg type id {0}", [msg_type_id as Field]); + debug_log_format( + "Could not decode message plaintext from tx {0}, ignoring", + [message_context.tx_hash], + ); } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/encoding.nr b/noir-projects/aztec-nr/aztec/src/messages/encoding.nr index dc484086cf8a..322d5dd78103 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/encoding.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/encoding.nr @@ -90,29 +90,34 @@ pub fn encode_message( /// Decodes a standard aztec-nr message, i.e. one created via `encode_message`, returning the original encoded values. /// +/// Returns `None` if the message is empty or has invalid (>128 bit) expanded metadata. +/// /// Note that `encode_message` returns a fixed size array while this function takes a `BoundedVec`: this is because /// prior to decoding the message type is unknown, and consequentially not known at compile time. If working with /// fixed-size messages, consider using `BoundedVec::from_array` to convert them. pub unconstrained fn decode_message( message: BoundedVec, -) -> (u64, u64, BoundedVec) { - assert( - message.len() >= MESSAGE_EXPANDED_METADATA_LEN, - f"Invalid message: it must have at least {MESSAGE_EXPANDED_METADATA_LEN} fields", - ); - - // If MESSAGE_EXPANDED_METADATA_LEN is changed, causing the assertion below to fail, then the destructuring of the - // message encoding below must be updated as well. - std::static_assert( - MESSAGE_EXPANDED_METADATA_LEN == 1, - "unexpected value for MESSAGE_EXPANDED_METADATA_LEN", - ); - - let msg_expanded_metadata = message.get(0); - let (msg_type_id, msg_metadata) = from_expanded_metadata(msg_expanded_metadata); - let msg_content = array::subbvec(message, MESSAGE_EXPANDED_METADATA_LEN); - - (msg_type_id, msg_metadata, msg_content) +) -> Option<(u64, u64, BoundedVec)> { + Option::some(message) + .and_then(|message| { + // If MESSAGE_EXPANDED_METADATA_LEN is changed, causing the assertion below to fail, then the destructuring + // of the + // message encoding below must be updated as well. + std::static_assert( + MESSAGE_EXPANDED_METADATA_LEN == 1, + "unexpected value for MESSAGE_EXPANDED_METADATA_LEN", + ); + if message.len() < MESSAGE_EXPANDED_METADATA_LEN { + Option::none() + } else { + Option::some(message.get(0)) + } + }) + .and_then(|msg_expanded_metadata| from_expanded_metadata(msg_expanded_metadata)) + .map(|(msg_type_id, msg_metadata)| { + let msg_content = array::subbvec(message, MESSAGE_EXPANDED_METADATA_LEN); + (msg_type_id, msg_metadata, msg_content) + }) } global U64_SHIFT_MULTIPLIER: Field = 2.pow_32(64); @@ -126,17 +131,26 @@ fn to_expanded_metadata(msg_type: u64, msg_metadata: u64) -> Field { type_field + msg_metadata_field } -fn from_expanded_metadata(input: Field) -> (u64, u64) { - input.assert_max_bit_size::<128>(); - let msg_metadata = (input as u64); - let msg_type = ((input - (msg_metadata as Field)) / U64_SHIFT_MULTIPLIER) as u64; - // Use division instead of bit shift since bit shifts are expensive in circuits - (msg_type, msg_metadata) +global TWO_POW_128: Field = 2.pow_32(128); + +/// Unpacks expanded metadata into (msg_type, msg_metadata). Returns `None` if `input >= 2^128`. +fn from_expanded_metadata(input: Field) -> Option<(u64, u64)> { + if input.lt(TWO_POW_128) { + let msg_metadata = (input as u64); + let msg_type = ((input - (msg_metadata as Field)) / U64_SHIFT_MULTIPLIER) as u64; + // Use division instead of bit shift since bit shifts are expensive in circuits + Option::some((msg_type, msg_metadata)) + } else { + Option::none() + } } mod tests { use crate::utils::array::subarray::subarray; - use super::{decode_message, encode_message, from_expanded_metadata, MAX_MESSAGE_CONTENT_LEN, to_expanded_metadata}; + use super::{ + decode_message, encode_message, from_expanded_metadata, MAX_MESSAGE_CONTENT_LEN, to_expanded_metadata, + TWO_POW_128, + }; global U64_MAX: u64 = (2.pow_32(64) - 1) as u64; global U128_MAX: Field = (2.pow_32(128) - 1); @@ -145,7 +159,7 @@ mod tests { unconstrained fn encode_decode_empty_message(msg_type: u64, msg_metadata: u64) { let encoded = encode_message(msg_type, msg_metadata, []); let (decoded_msg_type, decoded_msg_metadata, decoded_msg_content) = - decode_message(BoundedVec::from_array(encoded)); + decode_message(BoundedVec::from_array(encoded)).unwrap(); assert_eq(decoded_msg_type, msg_type); assert_eq(decoded_msg_metadata, msg_metadata); @@ -160,7 +174,7 @@ mod tests { ) { let encoded = encode_message(msg_type, msg_metadata, msg_content); let (decoded_msg_type, decoded_msg_metadata, decoded_msg_content) = - decode_message(BoundedVec::from_array(encoded)); + decode_message(BoundedVec::from_array(encoded)).unwrap(); assert_eq(decoded_msg_type, msg_type); assert_eq(decoded_msg_metadata, msg_metadata); @@ -176,7 +190,7 @@ mod tests { ) { let encoded = encode_message(msg_type, msg_metadata, msg_content); let (decoded_msg_type, decoded_msg_metadata, decoded_msg_content) = - decode_message(BoundedVec::from_array(encoded)); + decode_message(BoundedVec::from_array(encoded)).unwrap(); assert_eq(decoded_msg_type, msg_type); assert_eq(decoded_msg_metadata, msg_metadata); @@ -188,25 +202,25 @@ mod tests { unconstrained fn to_expanded_metadata_packing() { // Test case 1: All bits set let packed = to_expanded_metadata(U64_MAX, U64_MAX); - let (msg_type, msg_metadata) = from_expanded_metadata(packed); + let (msg_type, msg_metadata) = from_expanded_metadata(packed).unwrap(); assert_eq(msg_type, U64_MAX); assert_eq(msg_metadata, U64_MAX); // Test case 2: Only log type bits set let packed = to_expanded_metadata(U64_MAX, 0); - let (msg_type, msg_metadata) = from_expanded_metadata(packed); + let (msg_type, msg_metadata) = from_expanded_metadata(packed).unwrap(); assert_eq(msg_type, U64_MAX); assert_eq(msg_metadata, 0); // Test case 3: Only msg_metadata bits set let packed = to_expanded_metadata(0, U64_MAX); - let (msg_type, msg_metadata) = from_expanded_metadata(packed); + let (msg_type, msg_metadata) = from_expanded_metadata(packed).unwrap(); assert_eq(msg_type, 0); assert_eq(msg_metadata, U64_MAX); // Test case 4: No bits set let packed = to_expanded_metadata(0, 0); - let (msg_type, msg_metadata) = from_expanded_metadata(packed); + let (msg_type, msg_metadata) = from_expanded_metadata(packed).unwrap(); assert_eq(msg_type, 0); assert_eq(msg_metadata, 0); } @@ -215,25 +229,25 @@ mod tests { unconstrained fn from_expanded_metadata_packing() { // Test case 1: All bits set let input = U128_MAX as Field; - let (msg_type, msg_metadata) = from_expanded_metadata(input); + let (msg_type, msg_metadata) = from_expanded_metadata(input).unwrap(); assert_eq(msg_type, U64_MAX); assert_eq(msg_metadata, U64_MAX); // Test case 2: Only log type bits set let input = (U128_MAX - U64_MAX as Field); - let (msg_type, msg_metadata) = from_expanded_metadata(input); + let (msg_type, msg_metadata) = from_expanded_metadata(input).unwrap(); assert_eq(msg_type, U64_MAX); assert_eq(msg_metadata, 0); // Test case 3: Only msg_metadata bits set let input = U64_MAX as Field; - let (msg_type, msg_metadata) = from_expanded_metadata(input); + let (msg_type, msg_metadata) = from_expanded_metadata(input).unwrap(); assert_eq(msg_type, 0); assert_eq(msg_metadata, U64_MAX); // Test case 4: No bits set let input = 0; - let (msg_type, msg_metadata) = from_expanded_metadata(input); + let (msg_type, msg_metadata) = from_expanded_metadata(input).unwrap(); assert_eq(msg_type, 0); assert_eq(msg_metadata, 0); } @@ -241,7 +255,7 @@ mod tests { #[test] unconstrained fn to_from_expanded_metadata(original_msg_type: u64, original_msg_metadata: u64) { let packed = to_expanded_metadata(original_msg_type, original_msg_metadata); - let (unpacked_msg_type, unpacked_msg_metadata) = from_expanded_metadata(packed); + let (unpacked_msg_type, unpacked_msg_metadata) = from_expanded_metadata(packed).unwrap(); assert_eq(original_msg_type, unpacked_msg_type); assert_eq(original_msg_metadata, unpacked_msg_metadata); @@ -257,7 +271,8 @@ mod tests { } let encoded = encode_message(msg_type_id, msg_metadata, msg_content); - let (decoded_type_id, decoded_metadata, decoded_content) = decode_message(BoundedVec::from_array(encoded)); + let (decoded_type_id, decoded_metadata, decoded_content) = + decode_message(BoundedVec::from_array(encoded)).unwrap(); assert_eq(decoded_type_id, msg_type_id); assert_eq(decoded_metadata, msg_metadata); @@ -269,4 +284,15 @@ mod tests { let msg_content = [0; MAX_MESSAGE_CONTENT_LEN + 1]; let _ = encode_message(0, 0, msg_content); } + + #[test] + unconstrained fn decode_empty_message_returns_none() { + assert(decode_message(BoundedVec::new()).is_none()); + } + + #[test] + unconstrained fn decode_message_with_oversized_metadata_returns_none() { + let message = BoundedVec::from_array([TWO_POW_128]); + assert(decode_message(message).is_none()); + } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/logs/event.nr b/noir-projects/aztec-nr/aztec/src/messages/logs/event.nr index fbe759248efa..101bff9a3f68 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/logs/event.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/logs/event.nr @@ -57,7 +57,8 @@ where /// Decodes the plaintext from a private event message (i.e. one of type [`PRIVATE_EVENT_MSG_TYPE_ID`]). /// -/// This plaintext is meant to have originated from [`encode_private_event_message`]. +/// Returns `None` if `msg_content` has too few fields. This plaintext is meant to have originated +/// from [`encode_private_event_message`]. /// /// Note that while [`encode_private_event_message`] returns a fixed-size array, this function takes a [`BoundedVec`] /// instead. This is because when decoding we're typically processing runtime-sized plaintexts, more specifically, @@ -65,26 +66,24 @@ where pub(crate) unconstrained fn decode_private_event_message( msg_metadata: u64, msg_content: BoundedVec, -) -> (EventSelector, Field, BoundedVec) { - // Private event messages contain the event type id in the metadata - let event_type_id = EventSelector::from_field(msg_metadata as Field); - - assert( - msg_content.len() > PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN, - f"Invalid private event message: all private event messages must have at least {PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN} fields", - ); - - // If PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN is changed, causing the assertion below to fail, then the - // destructuring of the private event message encoding below must be updated as well. - std::static_assert( - PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN == 1, - "unexpected value for PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN", - ); - - let randomness = msg_content.get(PRIVATE_EVENT_MSG_PLAINTEXT_RANDOMNESS_INDEX); - let serialized_event = array::subbvec(msg_content, PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN); - - (event_type_id, randomness, serialized_event) +) -> Option<(EventSelector, Field, BoundedVec)> { + if msg_content.len() <= PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN { + Option::none() + } else { + let event_type_id = EventSelector::from_field(msg_metadata as Field); + + // If PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN is changed, causing the assertion below to fail, then the + // destructuring of the private event message encoding below must be updated as well. + std::static_assert( + PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN == 1, + "unexpected value for PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN", + ); + + let randomness = msg_content.get(PRIVATE_EVENT_MSG_PLAINTEXT_RANDOMNESS_INDEX); + let serialized_event = array::subbvec(msg_content, PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN); + + Option::some((event_type_id, randomness, serialized_event)) + } } mod test { @@ -108,14 +107,28 @@ mod test { let message_plaintext = encode_private_event_message(event, RANDOMNESS); - let (msg_type_id, msg_metadata, msg_content) = decode_message(BoundedVec::from_array(message_plaintext)); + let (msg_type_id, msg_metadata, msg_content) = + decode_message(BoundedVec::from_array(message_plaintext)).unwrap(); assert_eq(msg_type_id, PRIVATE_EVENT_MSG_TYPE_ID); - let (event_type_id, randomness, serialized_event) = decode_private_event_message(msg_metadata, msg_content); + let (event_type_id, randomness, serialized_event) = + decode_private_event_message(msg_metadata, msg_content).unwrap(); assert_eq(event_type_id, MockEvent::get_event_type_id()); assert_eq(randomness, RANDOMNESS); assert_eq(serialized_event, BoundedVec::from_array(event.serialize())); } + + #[test] + unconstrained fn decode_empty_content_returns_none() { + let empty = BoundedVec::new(); + assert(decode_private_event_message(0, empty).is_none()); + } + + #[test] + unconstrained fn decode_with_only_reserved_fields_returns_none() { + let content = BoundedVec::from_array([0]); + assert(decode_private_event_message(0, content).is_none()); + } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr b/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr index 84a72a48e534..b78a17b09102 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr @@ -54,7 +54,8 @@ where /// Decodes the plaintext from a private note message (i.e. one of type [`PRIVATE_NOTE_MSG_TYPE_ID`]). /// -/// This plaintext is meant to have originated from [`encode_private_note_message`]. +/// Returns `None` if `msg_content` has too few fields. This plaintext is meant to have originated +/// from [`encode_private_note_message`]. /// /// Note that while [`encode_private_note_message`] returns a fixed-size array, this function takes a [`BoundedVec`] /// instead. This is because when decoding we're typically processing runtime-sized plaintexts, more specifically, @@ -62,27 +63,26 @@ where pub(crate) unconstrained fn decode_private_note_message( msg_metadata: u64, msg_content: BoundedVec, -) -> (Field, AztecAddress, Field, Field, BoundedVec) { - let note_type_id = msg_metadata as Field; // TODO: make note type id not be a full field - - assert( - msg_content.len() > PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN, - f"Invalid private note message: all private note messages must have at least {PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN} fields", - ); - - // If PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN is changed, causing the assertion below to fail, then the - // decoding below must be updated as well. - std::static_assert( - PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN == 3, - "unexpected value for PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN", - ); - - let owner = AztecAddress::from_field(msg_content.get(PRIVATE_NOTE_MSG_PLAINTEXT_OWNER_INDEX)); - let storage_slot = msg_content.get(PRIVATE_NOTE_MSG_PLAINTEXT_STORAGE_SLOT_INDEX); - let randomness = msg_content.get(PRIVATE_NOTE_MSG_PLAINTEXT_RANDOMNESS_INDEX); - let packed_note = array::subbvec(msg_content, PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN); - - (note_type_id, owner, storage_slot, randomness, packed_note) +) -> Option<(Field, AztecAddress, Field, Field, BoundedVec)> { + if msg_content.len() <= PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN { + Option::none() + } else { + let note_type_id = msg_metadata as Field; // TODO: make note type id not be a full field + + // If PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN is changed, causing the assertion below to fail, then the + // decoding below must be updated as well. + std::static_assert( + PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN == 3, + "unexpected value for PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN", + ); + + let owner = AztecAddress::from_field(msg_content.get(PRIVATE_NOTE_MSG_PLAINTEXT_OWNER_INDEX)); + let storage_slot = msg_content.get(PRIVATE_NOTE_MSG_PLAINTEXT_STORAGE_SLOT_INDEX); + let randomness = msg_content.get(PRIVATE_NOTE_MSG_PLAINTEXT_RANDOMNESS_INDEX); + let packed_note = array::subbvec(msg_content, PRIVATE_NOTE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN); + + Option::some((note_type_id, owner, storage_slot, randomness, packed_note)) + } } mod test { @@ -108,12 +108,13 @@ mod test { let message_plaintext = encode_private_note_message(note, OWNER, STORAGE_SLOT, RANDOMNESS); - let (msg_type_id, msg_metadata, msg_content) = decode_message(BoundedVec::from_array(message_plaintext)); + let (msg_type_id, msg_metadata, msg_content) = + decode_message(BoundedVec::from_array(message_plaintext)).unwrap(); assert_eq(msg_type_id, PRIVATE_NOTE_MSG_TYPE_ID); let (note_type_id, owner, storage_slot, randomness, packed_note) = - decode_private_note_message(msg_metadata, msg_content); + decode_private_note_message(msg_metadata, msg_content).unwrap(); assert_eq(note_type_id, MockNote::get_id()); assert_eq(owner, OWNER); @@ -142,12 +143,12 @@ mod test { let note = MaxSizeNote { data }; let encoded = encode_private_note_message(note, OWNER, STORAGE_SLOT, RANDOMNESS); - let (msg_type_id, msg_metadata, msg_content) = decode_message(BoundedVec::from_array(encoded)); + let (msg_type_id, msg_metadata, msg_content) = decode_message(BoundedVec::from_array(encoded)).unwrap(); assert_eq(msg_type_id, PRIVATE_NOTE_MSG_TYPE_ID); let (note_type_id, owner, storage_slot, randomness, packed_note) = - decode_private_note_message(msg_metadata, msg_content); + decode_private_note_message(msg_metadata, msg_content).unwrap(); assert_eq(note_type_id, MaxSizeNote::get_id()); assert_eq(owner, OWNER); @@ -172,4 +173,16 @@ mod test { let note = OversizedNote { data: [0; MAX_NOTE_PACKED_LEN + 1] }; let _ = encode_private_note_message(note, OWNER, STORAGE_SLOT, RANDOMNESS); } + + #[test] + unconstrained fn decode_empty_content_returns_none() { + let empty = BoundedVec::new(); + assert(decode_private_note_message(0, empty).is_none()); + } + + #[test] + unconstrained fn decode_with_only_reserved_fields_returns_none() { + let content = BoundedVec::from_array([0, 0, 0]); + assert(decode_private_note_message(0, content).is_none()); + } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/logs/partial_note.nr b/noir-projects/aztec-nr/aztec/src/messages/logs/partial_note.nr index b39e0a809fbc..63a19ee0cd15 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/logs/partial_note.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/logs/partial_note.nr @@ -91,9 +91,11 @@ where ) } -/// Decodes the plaintext from a private note message (i.e. one of type [`PARTIAL_NOTE_PRIVATE_MSG_TYPE_ID`]). +/// Decodes the plaintext from a partial note private message (i.e. one of type +/// [`PARTIAL_NOTE_PRIVATE_MSG_TYPE_ID`]). /// -/// This plaintext is meant to have originated from [`encode_partial_note_private_message`]. +/// Returns `None` if `msg_content` has too few fields. This plaintext is meant to have originated +/// from [`encode_partial_note_private_message`]. /// /// Note that while [`encode_partial_note_private_message`] returns a fixed-size array, this function takes a /// [`BoundedVec`] instead. This is because when decoding we're typically processing runtime-sized plaintexts, more @@ -102,39 +104,37 @@ where pub(crate) unconstrained fn decode_partial_note_private_message( msg_metadata: u64, msg_content: BoundedVec, -) -> (AztecAddress, Field, Field, Field, Field, BoundedVec) { - let note_type_id = msg_metadata as Field; // TODO: make note type id not be a full field - - // The following ensures that the message content contains at least the minimum number of fields required for a - // valid partial note private message. (Refer to the description of - // PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NON_NOTE_FIELDS_LEN for more information about these fields.) - assert( - msg_content.len() >= PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN, - f"Invalid private note message: all partial note private messages must have at least {PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN} fields", - ); - - // If PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NON_NOTE_FIELDS_LEN is changed, causing the assertion below to fail, then - // the destructuring of the partial note private message encoding below must be updated as well. - std::static_assert( - PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN == 4, - "unexpected value for PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NON_NOTE_FIELDS_LEN", - ); +) -> Option<(AztecAddress, Field, Field, Field, Field, BoundedVec)> { + if msg_content.len() < PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN { + Option::none() + } else { + let note_type_id: Field = msg_metadata as Field; // TODO: make note type id not be a full field + + // If PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NON_NOTE_FIELDS_LEN is changed, causing the assertion below to fail, + // then the destructuring of the partial note private message encoding below must be updated as well. + std::static_assert( + PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN == 4, + "unexpected value for PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NON_NOTE_FIELDS_LEN", + ); - // We currently have four fields that are not the partial note's packed representation, which are the owner, the - // storage slot, the randomness, and the note completion log tag. - let owner = AztecAddress::from_field( - msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_OWNER_INDEX), - ); - let storage_slot = msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_STORAGE_SLOT_INDEX); - let randomness = msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RANDOMNESS_INDEX); - let note_completion_log_tag = msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NOTE_COMPLETION_LOG_TAG_INDEX); + // We currently have four fields that are not the partial note's packed representation, which are the owner, + // the storage slot, the randomness, and the note completion log tag. + let owner = AztecAddress::from_field( + msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_OWNER_INDEX), + ); + let storage_slot = msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_STORAGE_SLOT_INDEX); + let randomness = msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RANDOMNESS_INDEX); + let note_completion_log_tag = msg_content.get(PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_NOTE_COMPLETION_LOG_TAG_INDEX); - let packed_private_note_content: BoundedVec = array::subbvec( - msg_content, - PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN, - ); + let packed_private_note_content: BoundedVec = array::subbvec( + msg_content, + PARTIAL_NOTE_PRIVATE_MSG_PLAINTEXT_RESERVED_FIELDS_LEN, + ); - (owner, storage_slot, randomness, note_completion_log_tag, note_type_id, packed_private_note_content) + Option::some(( + owner, storage_slot, randomness, note_completion_log_tag, note_type_id, packed_private_note_content, + )) + } } mod test { @@ -168,12 +168,13 @@ mod test { NOTE_COMPLETION_LOG_TAG, ); - let (msg_type_id, msg_metadata, msg_content) = decode_message(BoundedVec::from_array(message_plaintext)); + let (msg_type_id, msg_metadata, msg_content) = + decode_message(BoundedVec::from_array(message_plaintext)).unwrap(); assert_eq(msg_type_id, PARTIAL_NOTE_PRIVATE_MSG_TYPE_ID); let (owner, storage_slot, randomness, note_completion_log_tag, note_type_id, packed_note) = - decode_partial_note_private_message(msg_metadata, msg_content); + decode_partial_note_private_message(msg_metadata, msg_content).unwrap(); assert_eq(note_type_id, MockNote::get_id()); assert_eq(owner, OWNER); @@ -182,4 +183,17 @@ mod test { assert_eq(note_completion_log_tag, NOTE_COMPLETION_LOG_TAG); assert_eq(packed_note, BoundedVec::from_array(note.pack())); } + + #[test] + unconstrained fn decode_empty_content_returns_none() { + let empty = BoundedVec::new(); + assert(decode_partial_note_private_message(0, empty).is_none()); + } + + #[test] + unconstrained fn decode_succeeds_with_only_reserved_fields() { + let content = BoundedVec::from_array([0, 0, 0, 0]); + let (_, _, _, _, _, packed_note) = decode_partial_note_private_message(0, content).unwrap(); + assert_eq(packed_note.len(), 0); + } } From 48c65bb9f6ce07c8b2f88e3efc67c18e08caef24 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Sat, 14 Mar 2026 11:59:20 -0400 Subject: [PATCH 02/17] =?UTF-8?q?fix:=20backport=20#21271=20=E2=80=94=20ha?= =?UTF-8?q?ndle=20bad=20note=20lengths=20on=20compute=5Fnote=5Fhash=5Fand?= =?UTF-8?q?=5Fnullifier=20(#21364)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../aztec-nr/aztec/src/macros/aztec.nr | 68 ++++++++++++------- .../aztec/src/messages/discovery/mod.nr | 34 ++++++---- noir-projects/noir-contracts/Nargo.toml | 1 + .../Nargo.toml | 8 +++ .../src/main.nr | 37 ++++++++++ .../src/test.nr | 65 ++++++++++++++++++ .../src/test_note.nr | 48 +++++++++++++ 7 files changed, 220 insertions(+), 41 deletions(-) create mode 100644 noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/Nargo.toml create mode 100644 noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/main.nr create mode 100644 noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test.nr create mode 100644 noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test_note.nr diff --git a/noir-projects/aztec-nr/aztec/src/macros/aztec.nr b/noir-projects/aztec-nr/aztec/src/macros/aztec.nr index b7b27b9c3d16..7f76e04954dd 100644 --- a/noir-projects/aztec-nr/aztec/src/macros/aztec.nr +++ b/noir-projects/aztec-nr/aztec/src/macros/aztec.nr @@ -183,34 +183,50 @@ comptime fn generate_contract_library_method_compute_note_hash_and_nullifier() - // unpack function on it. let expected_len = <$typ as $crate::protocol::traits::Packable>::N; let actual_len = packed_note.len(); - assert( - actual_len == expected_len, - f"Expected packed note of length {expected_len} but got {actual_len} for note type id {note_type_id}" - ); - - let note = $unpack(aztec::utils::array::subarray(packed_note.storage(), 0)); - - let note_hash = $compute_note_hash(note, owner, storage_slot, randomness); - - // The message discovery process finds settled notes, that is, notes that were created in prior transactions and are therefore already part of the note hash tree. We therefore compute the nullification note hash by treating the note as a settled note with the provided note nonce. - let note_hash_for_nullification = aztec::note::utils::compute_note_hash_for_nullification( - aztec::note::HintedNote{ + if actual_len != expected_len { + aztec::protocol::logging::warn_log_format( + "[aztec-nr] Packed note length mismatch for note type id {2}: expected {0} fields, got {1}. Skipping note.", + [expected_len as Field, actual_len as Field, note_type_id], + ); + Option::none() + } else { + let note = $unpack(aztec::utils::array::subarray(packed_note.storage(), 0)); + + let note_hash = $compute_note_hash(note, owner, storage_slot, randomness); + + // The message discovery process finds settled notes, that is, notes that were created in + // prior transactions and are therefore already part of the note hash tree. We therefore + // compute the nullification note hash by treating the note as a settled note with the + // provided note nonce. + let note_hash_for_nullification = + aztec::note::utils::compute_note_hash_for_nullification( + aztec::note::HintedNote { + note, + contract_address, + owner, + randomness, + storage_slot, + metadata: + aztec::note::note_metadata::SettledNoteMetadata::new( + note_nonce, + ) + .into(), + }, + ); + + let inner_nullifier = $compute_nullifier_unconstrained( note, - contract_address, owner, - randomness, - storage_slot, - metadata: aztec::note::note_metadata::SettledNoteMetadata::new(note_nonce).into() - } - ); - - let inner_nullifier = $compute_nullifier_unconstrained(note, owner, note_hash_for_nullification); - - Option::some( - aztec::messages::discovery::NoteHashAndNullifier { - note_hash, inner_nullifier - } - ) + note_hash_for_nullification, + ); + + Option::some( + aztec::messages::discovery::NoteHashAndNullifier { + note_hash, + inner_nullifier, + }, + ) + } } }, ); diff --git a/noir-projects/aztec-nr/aztec/src/messages/discovery/mod.nr b/noir-projects/aztec-nr/aztec/src/messages/discovery/mod.nr index 312327417e48..bd5abdf3e311 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/discovery/mod.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/discovery/mod.nr @@ -36,25 +36,29 @@ pub struct NoteHashAndNullifier { /// ``` /// |packed_note, owner, storage_slot, note_type_id, contract_address, randomness, note_nonce| { /// if note_type_id == MyNoteType::get_id() { -/// assert(packed_note.len() == MY_NOTE_TYPE_SERIALIZATION_LENGTH); +/// if packed_note.len() != MY_NOTE_TYPE_SERIALIZATION_LENGTH { +/// Option::none() +/// } else { +/// let note = MyNoteType::unpack(aztec::utils::array::subarray(packed_note.storage(), 0)); /// -/// let note = MyNoteType::unpack(aztec::utils::array::subarray(packed_note.storage(), 0)); +/// let note_hash = note.compute_note_hash(owner, storage_slot, randomness); +/// let note_hash_for_nullification = aztec::note::utils::compute_note_hash_for_nullification( +/// HintedNote { +/// note, contract_address, owner, randomness, storage_slot, +/// metadata: SettledNoteMetadata::new(note_nonce).into(), +/// }, +/// ); /// -/// let note_hash = note.compute_note_hash(owner, storage_slot, randomness); -/// let note_hash_for_nullification = aztec::note::utils::compute_note_hash_for_nullification( -/// HintedNote{ note, contract_address, metadata: SettledNoteMetadata::new(note_nonce).into() }, -/// storage_slot -/// ); +/// let inner_nullifier = note.compute_nullifier_unconstrained(owner, note_hash_for_nullification); /// -/// let inner_nullifier = note.compute_nullifier_unconstrained(owner, note_hash_for_nullification); -/// -/// Option::some( -/// aztec::messages::discovery::NoteHashAndNullifier { -/// note_hash, inner_nullifier -/// } -/// ) +/// Option::some( +/// aztec::messages::discovery::NoteHashAndNullifier { +/// note_hash, inner_nullifier +/// } +/// ) +/// } /// } else if note_type_id == MyOtherNoteType::get_id() { -/// ... // Similar to above but calling MyOtherNoteType::unpack_content +/// ... // Similar to above but calling MyOtherNoteType::unpack /// } else { /// Option::none() // Unknown note type ID /// }; diff --git a/noir-projects/noir-contracts/Nargo.toml b/noir-projects/noir-contracts/Nargo.toml index 7b7c76bf8bad..cb61b6c33977 100644 --- a/noir-projects/noir-contracts/Nargo.toml +++ b/noir-projects/noir-contracts/Nargo.toml @@ -48,6 +48,7 @@ members = [ "contracts/test/import_test_contract", "contracts/test/invalid_account_contract", "contracts/test/no_constructor_contract", + "contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract", "contracts/test/note_getter_contract", "contracts/test/offchain_effect_contract", "contracts/test/only_self_contract", diff --git a/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/Nargo.toml b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/Nargo.toml new file mode 100644 index 000000000000..3f96bf14515a --- /dev/null +++ b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/Nargo.toml @@ -0,0 +1,8 @@ +[package] +name = "note_hash_and_nullifier_contract" +authors = [""] +compiler_version = ">=0.25.0" +type = "contract" + +[dependencies] +aztec = { path = "../../../../../aztec-nr/aztec" } diff --git a/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/main.nr b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/main.nr new file mode 100644 index 000000000000..f077b11497ae --- /dev/null +++ b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/main.nr @@ -0,0 +1,37 @@ +pub mod test_note; +mod test; + +use aztec::macros::aztec; + +/// A minimal contract used to test the macro-generated `_compute_note_hash_and_nullifier` function. +#[aztec] +pub contract NoteHashAndNullifier { + use aztec::{ + messages::{ + discovery::NoteHashAndNullifier as NoteHashAndNullifierResult, + logs::note::MAX_NOTE_PACKED_LEN, + }, + protocol::address::AztecAddress, + }; + + #[contract_library_method] + pub unconstrained fn test_compute_note_hash_and_nullifier( + packed_note: BoundedVec, + owner: AztecAddress, + storage_slot: Field, + note_type_id: Field, + contract_address: AztecAddress, + randomness: Field, + note_nonce: Field, + ) -> Option { + _compute_note_hash_and_nullifier( + packed_note, + owner, + storage_slot, + note_type_id, + contract_address, + randomness, + note_nonce, + ) + } +} diff --git a/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test.nr b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test.nr new file mode 100644 index 000000000000..c20909854926 --- /dev/null +++ b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test.nr @@ -0,0 +1,65 @@ +use crate::{NoteHashAndNullifier, test_note::{TEST_NOTE_NULLIFIER, TestNote}}; +use aztec::note::note_interface::{NoteHash, NoteType}; +use aztec::protocol::address::AztecAddress; + +#[test] +unconstrained fn returns_none_for_bad_note_length() { + // TestNote has Packable N=1, but we provide 2 fields + let packed_note = BoundedVec::from_array([42, 99]); + + let result = NoteHashAndNullifier::test_compute_note_hash_and_nullifier( + packed_note, + AztecAddress::zero(), + 0, + TestNote::get_id(), + AztecAddress::zero(), + 0, + 0, + ); + + assert(result.is_none()); +} + +#[test] +unconstrained fn returns_correct_note_hash_and_nullifier() { + // TestNote has Packable N=1 + let packed_note = BoundedVec::from_array([42]); + + let owner = AztecAddress::zero(); + let storage_slot = 0; + let randomness = 0; + + let result = NoteHashAndNullifier::test_compute_note_hash_and_nullifier( + packed_note, + owner, + storage_slot, + TestNote::get_id(), + AztecAddress::zero(), + randomness, + 1, + ); + + let note_hash_and_nullifier = result.unwrap(); + let note = TestNote { value: 42 }; + let expected_note_hash = note.compute_note_hash(owner, storage_slot, randomness); + assert_eq(note_hash_and_nullifier.note_hash, expected_note_hash); + + assert_eq(note_hash_and_nullifier.inner_nullifier.unwrap(), TEST_NOTE_NULLIFIER); +} + +#[test] +unconstrained fn returns_none_for_empty_packed_note() { + let packed_note = BoundedVec::new(); + + let result = NoteHashAndNullifier::test_compute_note_hash_and_nullifier( + packed_note, + AztecAddress::zero(), + 0, + TestNote::get_id(), + AztecAddress::zero(), + 0, + 0, + ); + + assert(result.is_none()); +} diff --git a/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test_note.nr b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test_note.nr new file mode 100644 index 000000000000..5ca3704ceab7 --- /dev/null +++ b/noir-projects/noir-contracts/contracts/test/note_hash_and_nullifier/note_hash_and_nullifier_contract/src/test_note.nr @@ -0,0 +1,48 @@ +use aztec::{ + context::PrivateContext, + macros::notes::custom_note, + note::note_interface::NoteHash, + protocol::{ + address::AztecAddress, constants::DOM_SEP__NOTE_HASH, hash::poseidon2_hash_with_separator, + traits::Packable, + }, +}; + +#[derive(Eq, Packable)] +#[custom_note] +pub struct TestNote { + pub value: Field, +} + +pub global TEST_NOTE_NULLIFIER: Field = 2; + +impl NoteHash for TestNote { + fn compute_note_hash( + self, + _owner: AztecAddress, + storage_slot: Field, + randomness: Field, + ) -> Field { + let inputs = self.pack().concat([storage_slot, randomness]); + poseidon2_hash_with_separator(inputs, DOM_SEP__NOTE_HASH) + } + + fn compute_nullifier( + _self: Self, + _context: &mut PrivateContext, + _owner: AztecAddress, + _note_hash_for_nullification: Field, + ) -> Field { + // Not used in any meaningful way + 0 + } + + unconstrained fn compute_nullifier_unconstrained( + _self: Self, + _owner: AztecAddress, + _note_hash_for_nullification: Field, + ) -> Option { + // Returns a hardcoded value so we can verify that `_compute_note_hash_and_nullifier` propagates it correctly. + Option::some(TEST_NOTE_NULLIFIER) + } +} From 9f4cd6a97864a16d93e778826266b4bc63a3fb6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Bene=C5=A1?= Date: Wed, 11 Mar 2026 16:20:30 +0700 Subject: [PATCH 03/17] fix: not reusing tags of partially reverted txs (#20817) --- .../execution_tagging_index_cache.ts | 27 +- .../oracle/private_execution.ts | 4 +- .../oracle/private_execution_oracle.ts | 8 +- yarn-project/pxe/src/pxe.ts | 12 +- yarn-project/pxe/src/storage/metadata.ts | 2 +- .../sender_tagging_store.test.ts | 332 ++++++++++++------ .../tagging_store/sender_tagging_store.ts | 317 ++++++++++------- yarn-project/pxe/src/tagging/index.ts | 2 +- .../sync_sender_tagging_indexes.test.ts | 68 +++- .../sync_sender_tagging_indexes.ts | 20 +- .../get_status_change_of_pending.test.ts | 42 ++- .../utils/get_status_change_of_pending.ts | 37 +- ...load_and_store_new_tagging_indexes.test.ts | 164 ++++----- .../load_and_store_new_tagging_indexes.ts | 28 +- yarn-project/stdlib/src/logs/index.ts | 1 + .../stdlib/src/logs/tagging_index_range.ts | 24 ++ .../stdlib/src/tx/private_execution_result.ts | 10 +- 17 files changed, 709 insertions(+), 389 deletions(-) create mode 100644 yarn-project/stdlib/src/logs/tagging_index_range.ts diff --git a/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts b/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts index 37ffc83016d9..27612bf8ceaf 100644 --- a/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts +++ b/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts @@ -1,32 +1,37 @@ -import { ExtendedDirectionalAppTaggingSecret, type PreTag } from '@aztec/stdlib/logs'; +import { ExtendedDirectionalAppTaggingSecret, type TaggingIndexRange } from '@aztec/stdlib/logs'; /** - * A map that stores the tagging index for a given extended directional app tagging secret. + * A map that stores the tagging index range for a given extended directional app tagging secret. * Note: The directional app tagging secret is unique for a (sender, recipient, contract) tuple while the direction * of sender -> recipient matters. */ export class ExecutionTaggingIndexCache { - private taggingIndexMap: Map = new Map(); + private taggingIndexMap: Map = new Map(); public getLastUsedIndex(secret: ExtendedDirectionalAppTaggingSecret): number | undefined { - return this.taggingIndexMap.get(secret.toString()); + return this.taggingIndexMap.get(secret.toString())?.highestIndex; } public setLastUsedIndex(secret: ExtendedDirectionalAppTaggingSecret, index: number) { const currentValue = this.taggingIndexMap.get(secret.toString()); - if (currentValue !== undefined && currentValue !== index - 1) { - throw new Error(`Invalid tagging index update. Current value: ${currentValue}, new value: ${index}`); + if (currentValue !== undefined && currentValue.highestIndex !== index - 1) { + throw new Error(`Invalid tagging index update. Current value: ${currentValue.highestIndex}, new value: ${index}`); + } + if (currentValue !== undefined) { + currentValue.highestIndex = index; + } else { + this.taggingIndexMap.set(secret.toString(), { lowestIndex: index, highestIndex: index }); } - this.taggingIndexMap.set(secret.toString(), index); } /** - * Returns the pre-tags that were used in this execution (and that need to be stored in the db). + * Returns the tagging index ranges that were used in this execution (and that need to be stored in the db). */ - public getUsedPreTags(): PreTag[] { - return Array.from(this.taggingIndexMap.entries()).map(([secret, index]) => ({ + public getUsedTaggingIndexRanges(): TaggingIndexRange[] { + return Array.from(this.taggingIndexMap.entries()).map(([secret, { lowestIndex, highestIndex }]) => ({ extendedSecret: ExtendedDirectionalAppTaggingSecret.fromString(secret), - index, + lowestIndex, + highestIndex, })); } } diff --git a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts index e25c99930f3f..e9eeabca6d20 100644 --- a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts +++ b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts @@ -81,7 +81,7 @@ export async function executePrivateFunction( const newNotes = privateExecutionOracle.getNewNotes(); const noteHashNullifierCounterMap = privateExecutionOracle.getNoteHashNullifierCounterMap(); const offchainEffects = privateExecutionOracle.getOffchainEffects(); - const preTags = privateExecutionOracle.getUsedPreTags(); + const taggingIndexRanges = privateExecutionOracle.getUsedTaggingIndexRanges(); const nestedExecutionResults = privateExecutionOracle.getNestedExecutionResults(); let timerSubtractionList = nestedExecutionResults; @@ -104,7 +104,7 @@ export async function executePrivateFunction( noteHashNullifierCounterMap, rawReturnValues, offchainEffects, - preTags, + taggingIndexRanges, nestedExecutionResults, contractClassLogs, { diff --git a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts index a1b2ada7881e..348dbc7ab593 100644 --- a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts +++ b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts @@ -14,7 +14,7 @@ import { import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { siloNullifier } from '@aztec/stdlib/hash'; import { PrivateContextInputs } from '@aztec/stdlib/kernel'; -import { type ContractClassLog, ExtendedDirectionalAppTaggingSecret, type PreTag } from '@aztec/stdlib/logs'; +import { type ContractClassLog, ExtendedDirectionalAppTaggingSecret, type TaggingIndexRange } from '@aztec/stdlib/logs'; import { Tag } from '@aztec/stdlib/logs'; import { Note, type NoteStatus } from '@aztec/stdlib/note'; import { @@ -166,10 +166,10 @@ export class PrivateExecutionOracle extends UtilityExecutionOracle implements IP } /** - * Returns the pre-tags that were used in this execution (and that need to be stored in the db). + * Returns the tagging index ranges that were used in this execution (and that need to be stored in the db). */ - public getUsedPreTags(): PreTag[] { - return this.taggingIndexCache.getUsedPreTags(); + public getUsedTaggingIndexRanges(): TaggingIndexRange[] { + return this.taggingIndexCache.getUsedTaggingIndexRanges(); } /** diff --git a/yarn-project/pxe/src/pxe.ts b/yarn-project/pxe/src/pxe.ts index 9b7e5cc3ed98..5ff941da05db 100644 --- a/yarn-project/pxe/src/pxe.ts +++ b/yarn-project/pxe/src/pxe.ts @@ -764,17 +764,17 @@ export class PXE { // transaction before this one is included in a block from this PXE, and that transaction contains a log with // a tag derived from the same secret, we would reuse the tag and the transactions would be linked. Hence // storing the tags here prevents linkage of txs sent from the same PXE. - const preTagsUsedInTheTx = privateExecutionResult.entrypoint.preTags; - if (preTagsUsedInTheTx.length > 0) { + const taggingIndexRangesUsedInTheTx = privateExecutionResult.entrypoint.taggingIndexRanges; + if (taggingIndexRangesUsedInTheTx.length > 0) { // TODO(benesjan): The following is an expensive operation. Figure out a way to avoid it. const txHash = (await txProvingResult.toTx()).txHash; - await this.senderTaggingStore.storePendingIndexes(preTagsUsedInTheTx, txHash, jobId); - this.log.debug(`Stored used pre-tags as sender for the tx`, { - preTagsUsedInTheTx, + await this.senderTaggingStore.storePendingIndexes(taggingIndexRangesUsedInTheTx, txHash, jobId); + this.log.debug(`Stored used tagging index ranges as sender for the tx`, { + taggingIndexRangesUsedInTheTx, }); } else { - this.log.debug(`No pre-tags used in the tx`); + this.log.debug(`No tagging index ranges used in the tx`); } return txProvingResult; diff --git a/yarn-project/pxe/src/storage/metadata.ts b/yarn-project/pxe/src/storage/metadata.ts index cb1dee391377..826f90735b91 100644 --- a/yarn-project/pxe/src/storage/metadata.ts +++ b/yarn-project/pxe/src/storage/metadata.ts @@ -1 +1 @@ -export const PXE_DATA_SCHEMA_VERSION = 3; +export const PXE_DATA_SCHEMA_VERSION = 4; diff --git a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts index 986f1daef6fc..b2800582f02d 100644 --- a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts +++ b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts @@ -1,11 +1,19 @@ +import { Fr } from '@aztec/foundation/curves/bn254'; import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; -import type { ExtendedDirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; +import { RevertCode } from '@aztec/stdlib/avm'; +import type { ExtendedDirectionalAppTaggingSecret, TaggingIndexRange } from '@aztec/stdlib/logs'; +import { PrivateLog, SiloedTag } from '@aztec/stdlib/logs'; import { randomExtendedDirectionalAppTaggingSecret } from '@aztec/stdlib/testing'; -import { TxHash } from '@aztec/stdlib/tx'; +import { TxEffect, TxHash } from '@aztec/stdlib/tx'; import { UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN } from '../../tagging/constants.js'; import { SenderTaggingStore } from './sender_tagging_store.js'; +/** Helper to create a single-index range (lowestIndex === highestIndex). */ +function range(secret: ExtendedDirectionalAppTaggingSecret, lowest: number, highest?: number): TaggingIndexRange { + return { extendedSecret: secret, lowestIndex: lowest, highestIndex: highest ?? lowest }; +} + describe('SenderTaggingStore', () => { let taggingStore: SenderTaggingStore; let secret1: ExtendedDirectionalAppTaggingSecret; @@ -18,25 +26,20 @@ describe('SenderTaggingStore', () => { }); describe('storePendingIndexes', () => { - it('stores a single pending index', async () => { + it('stores a single pending index range', async () => { const txHash = TxHash.random(); - const preTag: PreTag = { extendedSecret: secret1, index: 5 }; - await taggingStore.storePendingIndexes([preTag], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes).toHaveLength(1); expect(txHashes[0]).toEqual(txHash); }); - it('stores multiple pending indexes for different secrets', async () => { + it('stores multiple pending index ranges for different secrets', async () => { const txHash = TxHash.random(); - const preTags: PreTag[] = [ - { extendedSecret: secret1, index: 3 }, - { extendedSecret: secret2, index: 7 }, - ]; - await taggingStore.storePendingIndexes(preTags, txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3), range(secret2, 7)], txHash, 'test'); const txHashes1 = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes1).toHaveLength(1); @@ -47,12 +50,12 @@ describe('SenderTaggingStore', () => { expect(txHashes2[0]).toEqual(txHash); }); - it('stores multiple pending indexes for the same secret from different txs', async () => { + it('stores multiple pending index ranges for the same secret from different txs', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes).toHaveLength(2); @@ -60,68 +63,71 @@ describe('SenderTaggingStore', () => { expect(txHashes).toContainEqual(txHash2); }); - it('ignores duplicate preTag + txHash combination', async () => { + it('ignores duplicate range + txHash combination', async () => { const txHash = TxHash.random(); - const preTag: PreTag = { extendedSecret: secret1, index: 5 }; - await taggingStore.storePendingIndexes([preTag], txHash, 'test'); - await taggingStore.storePendingIndexes([preTag], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes).toHaveLength(1); expect(txHashes[0]).toEqual(txHash); }); - it('throws when storing duplicate secrets in the same call', async () => { + it('stores a range spanning multiple indexes', async () => { const txHash = TxHash.random(); - const preTags: PreTag[] = [ - { extendedSecret: secret1, index: 3 }, - { extendedSecret: secret1, index: 7 }, - ]; - await expect(taggingStore.storePendingIndexes(preTags, txHash, 'test')).rejects.toThrow( - 'Duplicate secrets found when storing pending indexes', - ); + await taggingStore.storePendingIndexes([range(secret1, 3, 7)], txHash, 'test'); + + // By design the txs are filtered based on the highestIndex (7) in getTxHashesOfPendingIndexes so we shouldn't + // receive the tx only in the second query. + const txHashesNotContainingHighest = await taggingStore.getTxHashesOfPendingIndexes(secret1, 3, 4, 'test'); + expect(txHashesNotContainingHighest).toHaveLength(0); + + const txHashesContainingHighest = await taggingStore.getTxHashesOfPendingIndexes(secret1, 7, 8, 'test'); + expect(txHashesContainingHighest).toHaveLength(1); + expect(txHashesContainingHighest[0]).toEqual(txHash); + + expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(7); }); - it('throws when storing a different index for an existing secret + txHash pair', async () => { + it('throws when storing a different range for an existing secret + txHash pair', async () => { const txHash = TxHash.random(); - // First store an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); - // Try to store a different index for the same secret + txHash pair - await expect( - taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash, 'test'), - ).rejects.toThrow(/Cannot store index 7.*a different index 5 already exists/); + // Storing a different range for the same secret + txHash should throw + await expect(taggingStore.storePendingIndexes([range(secret1, 7)], txHash, 'test')).rejects.toThrow( + /Conflicting range/, + ); }); - it('throws when storing a pending index lower than the last finalized index', async () => { + it('throws when storing a pending index range lower than the last finalized index', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); // First store and finalize an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 10)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Try to store a pending index lower than the finalized index - await expect( - taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'), - ).rejects.toThrow(/Cannot store pending index 5.*lower than or equal to the last finalized index 10/); + await expect(taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test')).rejects.toThrow( + /lowestIndex is lower than or equal to the last finalized index 10/, + ); }); - it('throws when storing a pending index equal to the last finalized index', async () => { + it('throws when storing a pending index range equal to the last finalized index', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); // First store and finalize an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 10)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Try to store a pending index equal to the finalized index - await expect( - taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash2, 'test'), - ).rejects.toThrow(/Cannot store pending index 10.*lower than or equal to the last finalized index 10/); + await expect(taggingStore.storePendingIndexes([range(secret1, 10)], txHash2, 'test')).rejects.toThrow( + /lowestIndex is lower than or equal to the last finalized index 10/, + ); }); it('allows storing a pending index higher than the last finalized index', async () => { @@ -129,13 +135,11 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // First store and finalize an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 10)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Store a pending index higher than the finalized index - should succeed - await expect( - taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 15 }], txHash2, 'test'), - ).resolves.not.toThrow(); + await expect(taggingStore.storePendingIndexes([range(secret1, 15)], txHash2, 'test')).resolves.not.toThrow(); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 20, 'test'); expect(txHashes).toHaveLength(1); @@ -150,12 +154,12 @@ describe('SenderTaggingStore', () => { const indexBeyondWindow = finalizedIndex + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN + 1; // First store and finalize an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: finalizedIndex }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, finalizedIndex)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Try to store an index beyond the window await expect( - taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: indexBeyondWindow }], txHash2, 'test'), + taggingStore.storePendingIndexes([range(secret1, indexBeyondWindow)], txHash2, 'test'), ).rejects.toThrow( `Highest used index ${indexBeyondWindow} is further than window length from the highest finalized index ${finalizedIndex}`, ); @@ -168,12 +172,12 @@ describe('SenderTaggingStore', () => { const indexAtBoundary = finalizedIndex + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN; // First store and finalize an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: finalizedIndex }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, finalizedIndex)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Store an index at the boundary, but check is >, so it should succeed await expect( - taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: indexAtBoundary }], txHash2, 'test'), + taggingStore.storePendingIndexes([range(secret1, indexAtBoundary)], txHash2, 'test'), ).resolves.not.toThrow(); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, indexAtBoundary + 5, 'test'); @@ -194,9 +198,9 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 8 }], txHash3, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 8)], txHash3, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 4, 9, 'test'); expect(txHashes).toHaveLength(2); @@ -209,8 +213,8 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 10)], txHash2, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 5, 10, 'test'); expect(txHashes).toHaveLength(1); @@ -223,16 +227,16 @@ describe('SenderTaggingStore', () => { const txHash3 = TxHash.random(); const txHash4 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); // We store different secret with txHash1 to check we correctly don't return it in the result - await taggingStore.storePendingIndexes([{ extendedSecret: secret2, index: 7 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret2, 7)], txHash1, 'test'); // Store "parallel" index for secret1 with a different tx (can happen when sending logs from multiple PXEs) - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash4, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash4, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); - // Should have 3 unique tx hashes for secret1 + // Should have 4 unique tx hashes for secret1 expect(txHashes).toEqual(expect.arrayContaining([txHash1, txHash2, txHash3, txHash4])); }); }); @@ -245,7 +249,7 @@ describe('SenderTaggingStore', () => { it('returns the last finalized index after finalizePendingIndexes', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); const lastFinalized = await taggingStore.getLastFinalizedIndex(secret1, 'test'); @@ -261,7 +265,7 @@ describe('SenderTaggingStore', () => { it('returns the last finalized index when no pending indexes exist', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); const lastUsed = await taggingStore.getLastUsedIndex(secret1, 'test'); @@ -273,11 +277,11 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // First, finalize an index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Then add a higher pending index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); const lastUsed = await taggingStore.getLastUsedIndex(secret1, 'test'); expect(lastUsed).toBe(7); @@ -288,9 +292,9 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash3, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash3, 'test'); const lastUsed = await taggingStore.getLastUsedIndex(secret1, 'test'); expect(lastUsed).toBe(7); @@ -302,9 +306,9 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret2, index: 5 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret2, 5)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); await taggingStore.dropPendingIndexes([txHash1], 'test'); @@ -322,7 +326,7 @@ describe('SenderTaggingStore', () => { describe('finalizePendingIndexes', () => { it('moves pending index to finalized for a given tx hash', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); @@ -338,10 +342,10 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); await taggingStore.finalizePendingIndexes([txHash2], 'test'); const lastFinalized = await taggingStore.getLastFinalizedIndex(secret1, 'test'); @@ -353,8 +357,8 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // Store both pending indexes first - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash2, 'test'); // Finalize the higher index first await taggingStore.finalizePendingIndexes([txHash1], 'test'); @@ -366,14 +370,14 @@ describe('SenderTaggingStore', () => { expect(lastFinalized).toBe(7); // Should remain at 7 }); - it('prunes pending indexes with lower or equal index than finalized', async () => { + it('prunes pending indexes with lower or equal highestIndex than finalized', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, 'test'); // Finalize txHash2 (index 5) await taggingStore.finalizePendingIndexes([txHash2], 'test'); @@ -387,14 +391,7 @@ describe('SenderTaggingStore', () => { it('handles multiple secrets in the same tx', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes( - [ - { extendedSecret: secret1, index: 3 }, - { extendedSecret: secret2, index: 7 }, - ], - txHash, - 'test', - ); + await taggingStore.storePendingIndexes([range(secret1, 3), range(secret2, 7)], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); @@ -405,9 +402,19 @@ describe('SenderTaggingStore', () => { expect(lastFinalized2).toBe(7); }); + it('finalizes the highestIndex of a range', async () => { + const txHash = TxHash.random(); + await taggingStore.storePendingIndexes([range(secret1, 3, 7)], txHash, 'test'); + + await taggingStore.finalizePendingIndexes([txHash], 'test'); + + const lastFinalized = await taggingStore.getLastFinalizedIndex(secret1, 'test'); + expect(lastFinalized).toBe(7); + }); + it('does nothing when tx hash does not exist', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash, 'test'); await taggingStore.finalizePendingIndexes([TxHash.random()], 'test'); @@ -427,7 +434,7 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // Step 1: Add pending index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(3); expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBeUndefined(); @@ -437,7 +444,7 @@ describe('SenderTaggingStore', () => { expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); // Step 3: Add a new higher pending index - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(7); expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); @@ -451,8 +458,8 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(5); @@ -468,14 +475,14 @@ describe('SenderTaggingStore', () => { const txHash3 = TxHash.random(); // Secret1: pending -> finalized - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Secret2: pending (not finalized) - await taggingStore.storePendingIndexes([{ extendedSecret: secret2, index: 5 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([range(secret2, 5)], txHash2, 'test'); // Secret1: new pending - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, 'test'); expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(7); @@ -484,18 +491,135 @@ describe('SenderTaggingStore', () => { }); }); + describe('finalizePendingIndexesOfAPartiallyRevertedTx', () => { + function makeTxEffect(txHash: TxHash, siloedTags: SiloedTag[]): TxEffect { + return new TxEffect( + RevertCode.APP_LOGIC_REVERTED, + txHash, + Fr.ZERO, + [Fr.random()], // noteHashes (at least 1 nullifier required below, not here) + [Fr.random()], // nullifiers (at least 1 required) + [], // l2ToL1Msgs + [], // publicDataWrites + siloedTags.map(tag => PrivateLog.random(tag.value)), // privateLogs with surviving tags + [], // publicLogs + [], // contractClassLogs + ); + } + + it('finalizes only the indexes whose tags appear in TxEffect', async () => { + const txHash = TxHash.random(); + + // Store a range [3, 5] for secret1 in the same tx + await taggingStore.storePendingIndexes([range(secret1, 3, 5)], txHash, 'test'); + + // Compute the siloed tag for index 3 (the one that survives) + const survivingTag = await SiloedTag.compute({ extendedSecret: secret1, index: 3 }); + const txEffect = makeTxEffect(txHash, [survivingTag]); + + await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); + + // Index 3 should be finalized (it was onchain) + expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); + // All pending indexes for this tx should be removed + const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); + expect(txHashes).toHaveLength(0); + }); + + it('drops all indexes when no tags survive onchain', async () => { + const txHash = TxHash.random(); + + await taggingStore.storePendingIndexes([range(secret1, 3, 5)], txHash, 'test'); + + // TxEffect with no matching private logs (empty) + const txEffect = makeTxEffect(txHash, []); + + await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); + + // No finalized index should be set + expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBeUndefined(); + // All pending indexes for this tx should be removed + const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); + expect(txHashes).toHaveLength(0); + }); + + it('handles multiple secrets affected by the same partially reverted tx', async () => { + const txHash = TxHash.random(); + + // Store pending index ranges for both secrets in the same tx + await taggingStore.storePendingIndexes([range(secret1, 3, 5), range(secret2, 7)], txHash, 'test'); + + // Only index 3 for secret1 survives onchain; other indexes for secret1 and secret2 are dropped + const survivingTag = await SiloedTag.compute({ extendedSecret: secret1, index: 3 }); + const txEffect = makeTxEffect(txHash, [survivingTag]); + + await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); + + // secret1: index 3 should be finalized + expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); + expect(await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test')).toHaveLength(0); + + // secret2: no finalized index, all pending removed + expect(await taggingStore.getLastFinalizedIndex(secret2, 'test')).toBeUndefined(); + expect(await taggingStore.getTxHashesOfPendingIndexes(secret2, 0, 10, 'test')).toHaveLength(0); + }); + + it('preserves pending indexes from other txs', async () => { + const revertedTxHash = TxHash.random(); + const otherTxHash = TxHash.random(); + + // Store pending indexes: one from reverted tx, one from another tx + await taggingStore.storePendingIndexes([range(secret1, 3)], revertedTxHash, 'test'); + await taggingStore.storePendingIndexes([range(secret1, 7)], otherTxHash, 'test'); + + // TxEffect with no surviving tags for the reverted tx + const txEffect = makeTxEffect(revertedTxHash, []); + + await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); + + // No finalized index (nothing survived from the reverted tx) + expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBeUndefined(); + // The other tx's pending index should still be there + const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(otherTxHash); + }); + + it('correctly updates finalized index when there is an existing finalized index', async () => { + const txHash1 = TxHash.random(); + const revertedTxHash = TxHash.random(); + + // Store and finalize index 2 + await taggingStore.storePendingIndexes([range(secret1, 2)], txHash1, 'test'); + await taggingStore.finalizePendingIndexes([txHash1], 'test'); + + // Store a pending range [4, 6] for a partially reverted tx + await taggingStore.storePendingIndexes([range(secret1, 4, 6)], revertedTxHash, 'test'); + + // Only index 4 survives + const survivingTag = await SiloedTag.compute({ extendedSecret: secret1, index: 4 }); + const txEffect = makeTxEffect(revertedTxHash, [survivingTag]); + + await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); + + // Finalized index should be updated to 4 (higher than previous 2) + expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(4); + expect(await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test')).toHaveLength(0); + }); + }); + describe('staged writes', () => { it('writes of uncommitted jobs are not visible outside the job that makes them', async () => { const committedTxHash = TxHash.random(); { const commitJobId: string = 'commit-job'; - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], committedTxHash, commitJobId); + await taggingStore.storePendingIndexes([range(secret1, 3)], committedTxHash, commitJobId); await taggingStore.commit(commitJobId); } const stagedTxHash = TxHash.random(); const stagingJobId: string = 'staging-job'; - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], stagedTxHash, stagingJobId); + await taggingStore.storePendingIndexes([range(secret1, 5)], stagedTxHash, stagingJobId); // For a job without any staged data we should only get committed data const txHashesWithoutJobId = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'no-data-job'); @@ -513,7 +637,7 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); { const commitJobId: string = 'commit-job'; - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, commitJobId); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, commitJobId); await taggingStore.finalizePendingIndexes([txHash1], commitJobId); await taggingStore.commit(commitJobId); } @@ -522,7 +646,7 @@ describe('SenderTaggingStore', () => { const stagingJobId: string = 'staging-job'; // Stage a higher finalized index (not committed) - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, stagingJobId); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, stagingJobId); await taggingStore.finalizePendingIndexes([txHash2], stagingJobId); // With a different jobId, should get the committed finalized index @@ -537,8 +661,8 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); const commitJobId: string = 'commit-job'; - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 2 }], txHash1, commitJobId); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash2, commitJobId); + await taggingStore.storePendingIndexes([range(secret1, 2)], txHash1, commitJobId); + await taggingStore.storePendingIndexes([range(secret1, 3)], txHash2, commitJobId); await taggingStore.finalizePendingIndexes([txHash1], commitJobId); await taggingStore.commit(commitJobId); } @@ -546,7 +670,7 @@ describe('SenderTaggingStore', () => { const stagingJobId: string = 'staging-job'; { const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, stagingJobId); + await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, stagingJobId); await taggingStore.finalizePendingIndexes([txHash3], stagingJobId); await taggingStore.discardStaged(stagingJobId); } diff --git a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts index 1b15bbbb207a..05f79be89b88 100644 --- a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts +++ b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts @@ -1,10 +1,13 @@ import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; -import type { ExtendedDirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; -import { TxHash } from '@aztec/stdlib/tx'; +import { ExtendedDirectionalAppTaggingSecret, SiloedTag, type TaggingIndexRange } from '@aztec/stdlib/logs'; +import { TxEffect, TxHash } from '@aztec/stdlib/tx'; import type { StagedStore } from '../../job_coordinator/job_coordinator.js'; import { UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN } from '../../tagging/constants.js'; +/** Internal representation of a pending index range entry. */ +type PendingIndexesEntry = { lowestIndex: number; highestIndex: number; txHash: string }; + /** * Data provider of tagging data used when syncing the sender tagging indexes. The recipient counterpart of this class * is called RecipientTaggingStore. We have the data stores separate for sender and recipient because @@ -15,20 +18,19 @@ export class SenderTaggingStore implements StagedStore { #store: AztecAsyncKVStore; - // Stores the pending indexes for each directional app tagging secret. Pending here means that the tx that contained - // the private logs with tags corresponding to these indexes has not been finalized yet. - // - // We don't store just the highest index because if their transaction is dropped we'd then need the information about - // the lower pending indexes. For each secret-tx pair however we only store the largest index used in that tx, since - // the smaller ones are irrelevant due to tx atomicity. + // Stores the pending index ranges for each directional app tagging secret. Pending here means that the tx that + // contained the private logs with tags corresponding to these indexes has not been finalized yet. // - // TODO(#17615): This assumes no logs are used in the non-revertible phase. + // We store the full range (lowestIndex, highestIndex) for each secret-tx pair because transactions can partially + // revert, in which case only some logs (from the non-revertible phase) survive onchain. By storing the range, + // we can expand it and check each individual siloed tag against the TxEffect to determine which indexes made it + // onchain. // - // directional app tagging secret => { pending index, txHash }[] - #pendingIndexes: AztecAsyncMap; + // directional app tagging secret => { lowestIndex, highestIndex, txHash }[] + #pendingIndexes: AztecAsyncMap; - // jobId => directional app tagging secret => { pending index, txHash }[] - #pendingIndexesForJob: Map>; + // jobId => directional app tagging secret => { lowestIndex, highestIndex, txHash }[] + #pendingIndexesForJob: Map>; // Stores the last (highest) finalized index for each directional app tagging secret. We care only about the last // index because unlike the pending indexes, it will never happen that a finalized index would be removed and hence @@ -50,7 +52,7 @@ export class SenderTaggingStore implements StagedStore { this.#lastFinalizedIndexesForJob = new Map(); } - #getPendingIndexesForJob(jobId: string): Map { + #getPendingIndexesForJob(jobId: string): Map { let pendingIndexesForJob = this.#pendingIndexesForJob.get(jobId); if (!pendingIndexesForJob) { pendingIndexesForJob = new Map(); @@ -68,7 +70,7 @@ export class SenderTaggingStore implements StagedStore { return jobStagedLastFinalizedIndexes; } - async #readPendingIndexes(jobId: string, secret: string): Promise<{ index: number; txHash: string }[]> { + async #readPendingIndexes(jobId: string, secret: string): Promise { // Always issue DB read to keep IndexedDB transaction alive (they auto-commit when a new micro-task starts and there // are no pending read requests). The staged value still takes precedence if it exists. const dbValue = await this.#pendingIndexes.getAsync(secret); @@ -76,7 +78,7 @@ export class SenderTaggingStore implements StagedStore { return staged !== undefined ? staged : (dbValue ?? []); } - #writePendingIndexes(jobId: string, secret: string, pendingIndexes: { index: number; txHash: string }[]) { + #writePendingIndexes(jobId: string, secret: string, pendingIndexes: PendingIndexesEntry[]) { this.#getPendingIndexesForJob(jobId).set(secret, pendingIndexes); } @@ -126,57 +128,37 @@ export class SenderTaggingStore implements StagedStore { } /** - * Stores pending indexes. - * @remarks Ignores the index if the same preTag + txHash combination already exists in the db with the same index. - * This is expected to happen because whenever we start sync we start from the last finalized index and we can have - * pending indexes already stored from previous syncs. - * @param preTags - The pre-tags containing the directional app tagging secrets and the indexes that are to be - * stored in the db. - * @param txHash - The tx in which the pretags were used in private logs. + * Stores pending index ranges. + * @remarks If the same (secret, txHash) pair already exists in the db with an equal range, it's a no-op. This is + * expected to happen because whenever we start sync we start from the last finalized index and we can have pending + * ranges already stored from previous syncs. If the ranges differ, it throws an error as that indicates a bug. + * @param ranges - The tagging index ranges containing the directional app tagging secrets and the index ranges that are + * to be stored in the db. + * @param txHash - The tx in which the tagging indexes were used in private logs. * @param jobId - job context for staged writes to this store. See `JobCoordinator` for more details. - * @throws If any two pre-tags contain the same directional app tagging secret. This is enforced because we care - * only about the highest index for a given secret that was used in the tx. Hence this check is a good way to catch - * bugs. - * @throws If the newly stored pending index is further than window length from the highest finalized index for the - * same secret. This is enforced in order to give a guarantee to a recipient that he doesn't need to look further than - * window length ahead of the highest finalized index. - * @throws If a secret + txHash pair already exists in the db with a different index value. It should never happen - * that we would attempt to store a different index for a given secret-txHash pair because we always store just the - * highest index for a given secret-txHash pair. Hence this is a good way to catch bugs. - * @throws If the newly stored pending index is lower than or equal to the last finalized index for the same secret. - * This is enforced because this should never happen if the syncing is done correctly as we look for logs from higher - * indexes than finalized ones. + * @throws If the highestIndex is further than window length from the highest finalized index for the same secret. + * @throws If the lowestIndex is lower than or equal to the last finalized index for the same secret. + * @throws If a different range already exists for the same (secret, txHash) pair. */ - storePendingIndexes(preTags: PreTag[], txHash: TxHash, jobId: string): Promise { - if (preTags.length === 0) { + storePendingIndexes(ranges: TaggingIndexRange[], txHash: TxHash, jobId: string): Promise { + if (ranges.length === 0) { return Promise.resolve(); } - // The secrets in pre-tags should be unique because we always store just the highest index per given secret-txHash - // pair. Below we check that this is the case. - const secretsSet = new Set(preTags.map(preTag => preTag.extendedSecret.toString())); - if (secretsSet.size !== preTags.length) { - return Promise.reject(new Error(`Duplicate secrets found when storing pending indexes`)); - } - const txHashStr = txHash.toString(); return this.#store.transactionAsync(async () => { // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive - const preTagReadPromises = preTags.map(({ extendedSecret, index }) => { - const secretStr = extendedSecret.toString(); - return { - extendedSecret, - secretStr, - index, - pending: this.#readPendingIndexes(jobId, secretStr), - finalized: this.#readLastFinalizedIndex(jobId, secretStr), - }; - }); + const rangeReadPromises = ranges.map(range => ({ + range, + secretStr: range.extendedSecret.toString(), + pending: this.#readPendingIndexes(jobId, range.extendedSecret.toString()), + finalized: this.#readLastFinalizedIndex(jobId, range.extendedSecret.toString()), + })); // Await all reads together - const preTagData = await Promise.all( - preTagReadPromises.map(async item => ({ + const rangeData = await Promise.all( + rangeReadPromises.map(async item => ({ ...item, pendingData: await item.pending, finalizedIndex: await item.finalized, @@ -184,48 +166,51 @@ export class SenderTaggingStore implements StagedStore { ); // Process in memory and validate - for (const { secretStr, index, pendingData, finalizedIndex } of preTagData) { - // First we check that for any secret the highest used index in tx is not further than window length from - // the highest finalized index. - if (index > (finalizedIndex ?? 0) + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN) { + for (const { range, secretStr, pendingData, finalizedIndex } of rangeData) { + // Check that the highest index is not further than window length from the highest finalized index. + if (range.highestIndex > (finalizedIndex ?? 0) + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN) { throw new Error( - `Highest used index ${index} is further than window length from the highest finalized index ${finalizedIndex ?? 0}. + `Highest used index ${range.highestIndex} is further than window length from the highest finalized index ${finalizedIndex ?? 0}. Tagging window length ${UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN} is configured too low. Contact the Aztec team to increase it!`, ); } - // Throw if the new pending index is lower than or equal to the last finalized index - if (finalizedIndex !== undefined && index <= finalizedIndex) { + // Throw if the lowest index is lower than or equal to the last finalized index + if (finalizedIndex !== undefined && range.lowestIndex <= finalizedIndex) { throw new Error( - `Cannot store pending index ${index} for secret ${secretStr}: ` + - `it is lower than or equal to the last finalized index ${finalizedIndex}`, + `Cannot store pending index range [${range.lowestIndex}, ${range.highestIndex}] for secret ${secretStr}: ` + + `lowestIndex is lower than or equal to the last finalized index ${finalizedIndex}`, ); } - // Check if this secret + txHash combination already exists - const existingForSecretAndTx = pendingData.find(entry => entry.txHash === txHashStr); + // Check if an entry with the same txHash already exists + const existingEntry = pendingData.find(entry => entry.txHash === txHashStr); - if (existingForSecretAndTx) { - // If it exists with a different index, throw an error - if (existingForSecretAndTx.index !== index) { + if (existingEntry) { + // Assert that the ranges are equal — different ranges for the same (secret, txHash) indicates a bug + if (existingEntry.lowestIndex !== range.lowestIndex || existingEntry.highestIndex !== range.highestIndex) { throw new Error( - `Cannot store index ${index} for secret ${secretStr} and txHash ${txHashStr}: ` + - `a different index ${existingForSecretAndTx.index} already exists for this secret-txHash pair`, + `Conflicting range for secret ${secretStr} and txHash ${txHashStr}: ` + + `existing [${existingEntry.lowestIndex}, ${existingEntry.highestIndex}] vs ` + + `new [${range.lowestIndex}, ${range.highestIndex}]`, ); } - // If it exists with the same index, ignore the update (no-op) + // Exact duplicate — skip } else { - // If it doesn't exist, add it - this.#writePendingIndexes(jobId, secretStr, [...pendingData, { index, txHash: txHashStr }]); + this.#writePendingIndexes(jobId, secretStr, [ + ...pendingData, + { lowestIndex: range.lowestIndex, highestIndex: range.highestIndex, txHash: txHashStr }, + ]); } } }); } /** - * Returns the transaction hashes of all pending transactions that contain indexes within a specified range - * for a given directional app tagging secret. + * Returns the transaction hashes of all pending transactions that contain highest indexes within a specified range + * for a given directional app tagging secret. We check based on the highest indexes only as that is the relevant + * information for the caller of this function. * @param secret - The directional app tagging secret to query pending indexes for. * @param startIndex - The lower bound of the index range (inclusive). * @param endIndex - The upper bound of the index range (exclusive). @@ -241,7 +226,7 @@ export class SenderTaggingStore implements StagedStore { return this.#store.transactionAsync(async () => { const existing = await this.#readPendingIndexes(jobId, secret.toString()); const txHashes = existing - .filter(entry => entry.index >= startIndex && entry.index < endIndex) + .filter(entry => entry.highestIndex >= startIndex && entry.highestIndex < endIndex) .map(entry => entry.txHash); return Array.from(new Set(txHashes)).map(TxHash.fromString); }); @@ -269,16 +254,15 @@ export class SenderTaggingStore implements StagedStore { const pendingPromise = this.#readPendingIndexes(jobId, secretStr); const finalizedPromise = this.#readLastFinalizedIndex(jobId, secretStr); - const [pendingTxScopedIndexes, lastFinalized] = await Promise.all([pendingPromise, finalizedPromise]); - const pendingIndexes = pendingTxScopedIndexes.map(entry => entry.index); + const [pendingEntries, lastFinalized] = await Promise.all([pendingPromise, finalizedPromise]); - if (pendingTxScopedIndexes.length === 0) { + if (pendingEntries.length === 0) { return lastFinalized; } - // As the last used index we return the highest one from the pending indexes. Note that this value will be always - // higher than the last finalized index because we prune lower pending indexes when a tx is finalized. - return Math.max(...pendingIndexes); + // As the last used index we return the highest one from the pending index ranges. Note that this value will be + // always higher than the last finalized index because we prune lower pending indexes when a tx is finalized. + return Math.max(...pendingEntries.map(entry => entry.highestIndex)); }); } @@ -294,7 +278,7 @@ export class SenderTaggingStore implements StagedStore { return this.#store.transactionAsync(async () => { // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive - const secretReadPromises: Map> = new Map(); + const secretReadPromises: Map> = new Map(); for await (const secret of this.#pendingIndexes.keysAsync()) { secretReadPromises.set(secret, this.#readPendingIndexes(jobId, secret)); @@ -330,22 +314,15 @@ export class SenderTaggingStore implements StagedStore { }); } - /** - * Updates pending indexes corresponding to the given transaction hashes to be finalized and prunes any lower pending - * indexes. - */ - finalizePendingIndexes(txHashes: TxHash[], jobId: string): Promise { - if (txHashes.length === 0) { - return Promise.resolve(); - } - - const txHashStrings = new Set(txHashes.map(tx => tx.toString())); - + /** Prefetches all pending and finalized index data for every secret (from both DB and staged writes). */ + #getSecretsWithPendingData( + jobId: string, + ): Promise<{ secret: string; pendingData: PendingIndexesEntry[]; lastFinalized: number | undefined }[]> { return this.#store.transactionAsync(async () => { // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive const secretDataPromises: Map< string, - { pending: Promise<{ index: number; txHash: string }[]>; finalized: Promise } + { pending: Promise; finalized: Promise } > = new Map(); for await (const secret of this.#pendingIndexes.keysAsync()) { @@ -375,55 +352,125 @@ export class SenderTaggingStore implements StagedStore { })), ); - // Process all txHashes for each secret in memory - for (const { secret, pendingData, lastFinalized } of dataResults) { - if (!pendingData || pendingData.length === 0) { + return dataResults.filter(r => r.pendingData.length > 0); + }); + } + + /** + * Updates pending indexes corresponding to the given transaction hashes to be finalized and prunes any lower pending + * indexes. + */ + async finalizePendingIndexes(txHashes: TxHash[], jobId: string): Promise { + if (txHashes.length === 0) { + return; + } + + const txHashStrings = new Set(txHashes.map(tx => tx.toString())); + const secretsWithData = await this.#getSecretsWithPendingData(jobId); + + for (const { secret, pendingData, lastFinalized } of secretsWithData) { + let currentPending = pendingData; + let currentFinalized = lastFinalized; + + // Process all txHashes for this secret + for (const txHashStr of txHashStrings) { + const matchingEntries = currentPending.filter(item => item.txHash === txHashStr); + if (matchingEntries.length === 0) { + // This is expected as a higher index might have already been finalized which would lead to pruning of + // pending entries. continue; } - let currentPending = pendingData; - let currentFinalized = lastFinalized; + if (matchingEntries.length > 1) { + // We should always just store the highest pending index for a given tx hash and secret because the lower + // values are irrelevant. + throw new Error(`Multiple pending entries found for tx hash ${txHashStr} and secret ${secret}`); + } - // Process all txHashes for this secret - for (const txHashStr of txHashStrings) { - const matchingIndexes = currentPending.filter(item => item.txHash === txHashStr).map(item => item.index); - if (matchingIndexes.length === 0) { - continue; - } + const newFinalized = matchingEntries[0].highestIndex; - if (matchingIndexes.length > 1) { - // We should always just store the highest pending index for a given tx hash and secret because the lower - // values are irrelevant. - throw new Error(`Multiple pending indexes found for tx hash ${txHashStr} and secret ${secret}`); - } + if (newFinalized < (currentFinalized ?? 0)) { + // This should never happen because when last finalized index was finalized we should have pruned the lower + // pending indexes. + throw new Error( + `New finalized index ${newFinalized} is smaller than the current last finalized index ${currentFinalized}`, + ); + } - const newFinalized = matchingIndexes[0]; + currentFinalized = newFinalized; - if (newFinalized < (currentFinalized ?? 0)) { - // This should never happen because when last finalized index was finalized we should have pruned the lower - // pending indexes. - throw new Error( - `New finalized index ${newFinalized} is smaller than the current last finalized index ${currentFinalized}`, - ); - } + // When we add pending indexes, we ensure they are higher than the last finalized index. However, because we + // cannot control the order in which transactions are finalized, there may be pending indexes that are now + // obsolete because they are lower than the most recently finalized index. For this reason, we prune these + // outdated pending indexes. + currentPending = currentPending.filter(item => item.highestIndex > currentFinalized!); + } - currentFinalized = newFinalized; + // Write final state if changed + if (currentFinalized !== lastFinalized) { + this.#writeLastFinalizedIndex(jobId, secret, currentFinalized!); + } + if (currentPending !== pendingData) { + this.#writePendingIndexes(jobId, secret, currentPending); + } + } + } - // When we add pending indexes, we ensure they are higher than the last finalized index. However, because we - // cannot control the order in which transactions are finalized, there may be pending indexes that are now - // obsolete because they are lower than the most recently finalized index. For this reason, we prune these - // outdated pending indexes. - currentPending = currentPending.filter(item => item.index > currentFinalized!); - } + /** + * Handles finalization of pending indexes for a transaction whose execution was partially reverted. + * Recomputes the siloed tags for each pending index of the given tx and checks which ones appear in the + * TxEffect's private logs (i.e., which ones made it onchain). Those that survived are finalized; those that + * didn't are dropped. + * @param txEffect - The tx effect of the partially reverted transaction. + * @param jobId - job context for staged writes to this store. See `JobCoordinator` for more details. + */ + async finalizePendingIndexesOfAPartiallyRevertedTx(txEffect: TxEffect, jobId: string): Promise { + const txHashStr = txEffect.txHash.toString(); - // Write final state if changed - if (currentFinalized !== lastFinalized) { - this.#writeLastFinalizedIndex(jobId, secret, currentFinalized!); - } - if (currentPending !== pendingData) { - this.#writePendingIndexes(jobId, secret, currentPending); + // Build a set of all siloed tag values that made it onchain (first field of each private log). + const onChainTags = new Set(txEffect.privateLogs.map(log => log.fields[0].toString())); + + const secretsWithData = await this.#getSecretsWithPendingData(jobId); + + for (const { secret, pendingData, lastFinalized } of secretsWithData) { + const matchingEntries = pendingData.filter(item => item.txHash === txHashStr); + if (matchingEntries.length === 0) { + // This is expected as a higher index might have already been finalized which would lead to pruning of + // pending entries. + continue; + } + + if (matchingEntries.length > 1) { + // We should always just store the highest pending index for a given tx hash and secret because the lower + // values are irrelevant. + throw new Error(`Multiple pending entries found for tx hash ${txHashStr} and secret ${secret}`); + } + + const pendingEntry = matchingEntries[0]; + + // Expand each matching entry's range and recompute siloed tags for each index. + const extendedSecret = ExtendedDirectionalAppTaggingSecret.fromString(secret); + let highestSurvivingIndex: number | undefined; + + for (let index = pendingEntry.lowestIndex; index <= pendingEntry.highestIndex; index++) { + const siloedTag = await SiloedTag.compute({ extendedSecret, index }); + if (onChainTags.has(siloedTag.value.toString())) { + highestSurvivingIndex = highestSurvivingIndex !== undefined ? Math.max(highestSurvivingIndex, index) : index; } } - }); + + // Remove all entries for this txHash from pending (both surviving and non-surviving). + let currentPending = pendingData.filter(item => item.txHash !== txHashStr); + + if (highestSurvivingIndex !== undefined) { + const newFinalized = Math.max(lastFinalized ?? 0, highestSurvivingIndex); + this.#writeLastFinalizedIndex(jobId, secret, newFinalized); + + // Prune pending indexes that are now <= the finalized index. + currentPending = currentPending.filter(item => item.highestIndex > newFinalized); + } + + this.#writePendingIndexes(jobId, secret, currentPending); + } } } diff --git a/yarn-project/pxe/src/tagging/index.ts b/yarn-project/pxe/src/tagging/index.ts index ea8c6f80f613..6b812a8f0a47 100644 --- a/yarn-project/pxe/src/tagging/index.ts +++ b/yarn-project/pxe/src/tagging/index.ts @@ -16,4 +16,4 @@ export { getAllPrivateLogsByTags, getAllPublicLogsByTagsFromContract } from './g // Re-export tagging-related types from stdlib export { ExtendedDirectionalAppTaggingSecret, Tag, SiloedTag } from '@aztec/stdlib/logs'; -export { type PreTag } from '@aztec/stdlib/logs'; +export { type PreTag, type TaggingIndexRange } from '@aztec/stdlib/logs'; diff --git a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts index d214b6e50120..dedfacbf5dda 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts @@ -1,10 +1,12 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { Fr } from '@aztec/foundation/curves/bn254'; import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; +import { RevertCode } from '@aztec/stdlib/avm'; import { BlockHash } from '@aztec/stdlib/block'; import type { AztecNode } from '@aztec/stdlib/interfaces/client'; +import { PrivateLog } from '@aztec/stdlib/logs'; import { randomExtendedDirectionalAppTaggingSecret, randomTxScopedPrivateL2Log } from '@aztec/stdlib/testing'; -import { TxExecutionResult, TxHash, TxReceipt, TxStatus } from '@aztec/stdlib/tx'; +import { type IndexedTxEffect, TxEffect, TxExecutionResult, TxHash, TxReceipt, TxStatus } from '@aztec/stdlib/tx'; import { type MockProxy, mock } from 'jest-mock-extended'; @@ -275,4 +277,68 @@ describe('syncSenderTaggingIndexes', () => { expect(await taggingStore.getLastFinalizedIndex(secret, 'test')).toBe(pendingAndFinalizedIndex); expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(pendingAndFinalizedIndex); }); + + it('handles a partially reverted transaction', async () => { + await setUp(); + + const revertedTxHash = TxHash.random(); + + // Create logs at indexes 4 and 6 for the same (reverted) tx + const tag4 = await computeSiloedTagForIndex(4); + const tag6 = await computeSiloedTagForIndex(6); + + aztecNode.getPrivateLogsByTags.mockImplementation((tags: SiloedTag[]) => { + return Promise.resolve( + tags.map((tag: SiloedTag) => { + if (tag.equals(tag4)) { + return [makeLog(revertedTxHash, tag4.value)]; + } else if (tag.equals(tag6)) { + return [makeLog(revertedTxHash, tag6.value)]; + } + return []; + }), + ); + }); + + // Mock getTxReceipt to return FINALIZED with APP_LOGIC_REVERTED + aztecNode.getTxReceipt.mockResolvedValue( + new TxReceipt( + revertedTxHash, + TxStatus.FINALIZED, + TxExecutionResult.APP_LOGIC_REVERTED, + undefined, + undefined, + undefined, + BlockNumber(14), + ), + ); + + // Mock getTxEffect to return a TxEffect where only the tag at index 4 survived (non-revertible phase) + const txEffect = new TxEffect( + RevertCode.APP_LOGIC_REVERTED, + revertedTxHash, + Fr.ZERO, + [Fr.random()], // noteHashes + [Fr.random()], // nullifiers + [], // l2ToL1Msgs + [], // publicDataWrites + [PrivateLog.random(tag4.value)], // only the tag at index 4 survived + [], // publicLogs + [], // contractClassLogs + ); + + aztecNode.getTxEffect.mockResolvedValue({ + data: txEffect, + l2BlockNumber: BlockNumber(14), + l2BlockHash: MOCK_ANCHOR_BLOCK_HASH, + txIndexInBlock: 0, + } as IndexedTxEffect); + + await syncSenderTaggingIndexes(secret, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); + + // Index 4 should be finalized (it survived the partial revert) + expect(await taggingStore.getLastFinalizedIndex(secret, 'test')).toBe(4); + // No pending indexes should remain for this secret + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(4); + }); }); diff --git a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts index 87d56d6a46e7..516dc00483ef 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts @@ -62,11 +62,29 @@ export async function syncSenderTaggingIndexes( break; } - const { txHashesToFinalize, txHashesToDrop } = await getStatusChangeOfPending(pendingTxHashes, aztecNode); + const { txHashesToFinalize, txHashesToDrop, txHashesWithExecutionReverted } = await getStatusChangeOfPending( + pendingTxHashes, + aztecNode, + ); await taggingStore.dropPendingIndexes(txHashesToDrop, jobId); await taggingStore.finalizePendingIndexes(txHashesToFinalize, jobId); + if (txHashesWithExecutionReverted.length > 0) { + const indexedTxEffects = await Promise.all( + txHashesWithExecutionReverted.map(txHash => aztecNode.getTxEffect(txHash)), + ); + for (const indexedTxEffect of indexedTxEffects) { + if (indexedTxEffect === undefined) { + throw new Error( + 'TxEffect not found for execution-reverted tx. This is either a bug or a reorg has occurred.', + ); + } + + await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(indexedTxEffect.data, jobId); + } + } + // We check if the finalized index has been updated. newFinalizedIndex = await taggingStore.getLastFinalizedIndex(secret, jobId); if (previousFinalizedIndex !== newFinalizedIndex) { diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts index 7fd0fc92e3f3..676b491d8910 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts @@ -51,11 +51,41 @@ describe('getStatusChangeOfPending', () => { ), ); } else if (hash.equals(appLogicRevertedTxHash)) { - return Promise.resolve(new TxReceipt(hash, TxStatus.PROPOSED, TxExecutionResult.APP_LOGIC_REVERTED, undefined)); + return Promise.resolve( + new TxReceipt( + hash, + TxStatus.FINALIZED, + TxExecutionResult.APP_LOGIC_REVERTED, + undefined, + undefined, + undefined, + BlockNumber(10), + ), + ); } else if (hash.equals(teardownRevertedTxHash)) { - return Promise.resolve(new TxReceipt(hash, TxStatus.PROPOSED, TxExecutionResult.TEARDOWN_REVERTED, undefined)); + return Promise.resolve( + new TxReceipt( + hash, + TxStatus.FINALIZED, + TxExecutionResult.TEARDOWN_REVERTED, + undefined, + undefined, + undefined, + BlockNumber(10), + ), + ); } else if (hash.equals(bothRevertedTxHash)) { - return Promise.resolve(new TxReceipt(hash, TxStatus.PROPOSED, TxExecutionResult.BOTH_REVERTED, undefined)); + return Promise.resolve( + new TxReceipt( + hash, + TxStatus.FINALIZED, + TxExecutionResult.BOTH_REVERTED, + undefined, + undefined, + undefined, + BlockNumber(10), + ), + ); } else { throw new Error(`Unexpected tx hash: ${hash.toString()}`); } @@ -74,8 +104,8 @@ describe('getStatusChangeOfPending', () => { ); expect(result.txHashesToFinalize).toEqual([finalizedTxHash]); - expect(result.txHashesToDrop).toEqual([ - droppedTxHash, + expect(result.txHashesToDrop).toEqual([droppedTxHash]); + expect(result.txHashesWithExecutionReverted).toEqual([ appLogicRevertedTxHash, teardownRevertedTxHash, bothRevertedTxHash, @@ -101,6 +131,7 @@ describe('getStatusChangeOfPending', () => { expect(result.txHashesToFinalize).toEqual([txHash]); expect(result.txHashesToDrop).toEqual([]); + expect(result.txHashesWithExecutionReverted).toEqual([]); }); it('does not finalize tx that is only proven', async () => { @@ -123,5 +154,6 @@ describe('getStatusChangeOfPending', () => { // Not finalized yet, so stays pending expect(result.txHashesToFinalize).toEqual([]); expect(result.txHashesToDrop).toEqual([]); + expect(result.txHashesWithExecutionReverted).toEqual([]); }); }); diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts index 8400b16237f3..1fc434d10c35 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts @@ -2,35 +2,50 @@ import type { AztecNode } from '@aztec/stdlib/interfaces/server'; import { TxHash, TxStatus } from '@aztec/stdlib/tx'; /** - * Based on receipts obtained from `aztecNode` returns which pending transactions changed their status to finalized or - * dropped. + * Based on receipts obtained from `aztecNode` returns which pending transactions changed their status to finalized, + * dropped, or execution-reverted (but mined). */ export async function getStatusChangeOfPending( pending: TxHash[], aztecNode: AztecNode, -): Promise<{ txHashesToFinalize: TxHash[]; txHashesToDrop: TxHash[] }> { +): Promise<{ + txHashesToFinalize: TxHash[]; + txHashesToDrop: TxHash[]; + txHashesWithExecutionReverted: TxHash[]; +}> { // Get receipts for all pending tx hashes. const receipts = await Promise.all(pending.map(pendingTxHash => aztecNode.getTxReceipt(pendingTxHash))); const txHashesToFinalize: TxHash[] = []; const txHashesToDrop: TxHash[] = []; + const txHashesWithExecutionReverted: TxHash[] = []; for (let i = 0; i < receipts.length; i++) { const receipt = receipts[i]; const txHash = pending[i]; - if (receipt.status === TxStatus.FINALIZED && receipt.hasExecutionSucceeded()) { - // Tx has been included in a block and the corresponding block is finalized --> we mark the indexes as - // finalized. - txHashesToFinalize.push(txHash); - } else if (receipt.isDropped() || receipt.hasExecutionReverted()) { - // Tx was dropped or reverted --> we drop the corresponding pending indexes. - // TODO(#17615): Don't drop pending indexes corresponding to non-revertible phases. + if (receipt.status === TxStatus.FINALIZED) { + // Tx has been included in a block and the corresponding block is finalized + if (receipt.hasExecutionSucceeded()) { + // No part of execution reverted - we just finalize all the indexes. + txHashesToFinalize.push(txHash); + } else if (receipt.hasExecutionReverted()) { + // Tx was mined but execution reverted (app logic, teardown, or both). Some logs from the non-revertible + // phase may still be onchain. We check which tags made it onchain and finalize those; drop the rest. + txHashesWithExecutionReverted.push(txHash); + } else { + // Defensive check - this branch should never be triggered + throw new Error( + 'Both hasExecutionSucceeded and hasExecutionReverted on the receipt returned false. This should never happen and it implies a bug. Please open an issue.', + ); + } + } else if (receipt.isDropped()) { + // Tx was dropped from the mempool --> we drop the corresponding pending indexes. txHashesToDrop.push(txHash); } else { // Tx is still pending, not yet finalized, or was mined successfully but not yet finalized --> we don't do anything. } } - return { txHashesToFinalize, txHashesToDrop }; + return { txHashesToFinalize, txHashesToDrop, txHashesWithExecutionReverted }; } diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts index 789c67c79f8f..572ef56fb88e 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts @@ -1,5 +1,4 @@ import type { Fr } from '@aztec/foundation/curves/bn254'; -import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; import { BlockHash } from '@aztec/stdlib/block'; import type { AztecNode } from '@aztec/stdlib/interfaces/server'; import { type ExtendedDirectionalAppTaggingSecret, SiloedTag } from '@aztec/stdlib/logs'; @@ -8,17 +7,15 @@ import { TxHash } from '@aztec/stdlib/tx'; import { type MockProxy, mock } from 'jest-mock-extended'; -import { SenderTaggingStore } from '../../../storage/tagging_store/sender_tagging_store.js'; +import type { SenderTaggingStore } from '../../../storage/tagging_store/sender_tagging_store.js'; import { loadAndStoreNewTaggingIndexes } from './load_and_store_new_tagging_indexes.js'; const MOCK_ANCHOR_BLOCK_HASH = BlockHash.random(); describe('loadAndStoreNewTaggingIndexes', () => { - // Secret to be used on the input of the loadAndStoreNewTaggingIndexes function. let secret: ExtendedDirectionalAppTaggingSecret; - let aztecNode: MockProxy; - let taggingStore: SenderTaggingStore; + let taggingStore: MockProxy; function computeSiloedTagForIndex(index: number) { return SiloedTag.compute({ extendedSecret: secret, index }); @@ -30,30 +27,21 @@ describe('loadAndStoreNewTaggingIndexes', () => { beforeAll(async () => { secret = await randomExtendedDirectionalAppTaggingSecret(); - aztecNode = mock(); }); - // Unlike for secret, app address and aztecNode we need a fresh instance of the tagging data provider for each test. - beforeEach(async () => { - aztecNode.getPrivateLogsByTags.mockReset(); - taggingStore = new SenderTaggingStore(await openTmpStore('test')); + beforeEach(() => { + aztecNode = mock(); + taggingStore = mock(); }); it('no logs found for the given window', async () => { aztecNode.getPrivateLogsByTags.mockImplementation((tags: SiloedTag[]) => { - // No log found for any tag - return Promise.resolve(tags.map((_tag: SiloedTag) => [])); + return Promise.resolve(tags.map(() => [])); }); await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify that no pending indexes were stored - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBeUndefined(); - expect(await taggingStore.getLastFinalizedIndex(secret, 'test')).toBeUndefined(); - - // Verify the entire window has no pending tx hashes - const txHashesInWindow = await taggingStore.getTxHashesOfPendingIndexes(secret, 0, 10, 'test'); - expect(txHashesInWindow).toHaveLength(0); + expect(taggingStore.storePendingIndexes).not.toHaveBeenCalled(); }); it('single log found at a specific index', async () => { @@ -67,16 +55,15 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify that the pending index was stored for this txHash - const txHashesInRange = await taggingStore.getTxHashesOfPendingIndexes(secret, index, index + 1, 'test'); - expect(txHashesInRange).toHaveLength(1); - expect(txHashesInRange[0].equals(txHash)).toBe(true); - - // Verify the last used index is correct - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(1); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: index, highestIndex: index }], + txHash, + 'test', + ); }); - it('for multiple logs with same txHash stores the highest index', async () => { + it('for multiple logs with same txHash stores full index range', async () => { const txHash = TxHash.random(); const index1 = 3; const index2 = 7; @@ -98,17 +85,12 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify that only the highest index (7) was stored for this txHash and secret - const txHashesAtIndex2 = await taggingStore.getTxHashesOfPendingIndexes(secret, index2, index2 + 1, 'test'); - expect(txHashesAtIndex2).toHaveLength(1); - expect(txHashesAtIndex2[0].equals(txHash)).toBe(true); - - // Verify the lower index is not stored separately - const txHashesAtIndex1 = await taggingStore.getTxHashesOfPendingIndexes(secret, index1, index1 + 1, 'test'); - expect(txHashesAtIndex1).toHaveLength(0); - - // Verify the last used index is the highest - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index2); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(1); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: index1, highestIndex: index2 }], + txHash, + 'test', + ); }); it('multiple logs with different txHashes', async () => { @@ -134,17 +116,17 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify that both txHashes have their respective indexes stored - const txHashesAtIndex1 = await taggingStore.getTxHashesOfPendingIndexes(secret, index1, index1 + 1, 'test'); - expect(txHashesAtIndex1).toHaveLength(1); - expect(txHashesAtIndex1[0].equals(txHash1)).toBe(true); - - const txHashesAtIndex2 = await taggingStore.getTxHashesOfPendingIndexes(secret, index2, index2 + 1, 'test'); - expect(txHashesAtIndex2).toHaveLength(1); - expect(txHashesAtIndex2[0].equals(txHash2)).toBe(true); - - // Verify the last used index is the highest - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index2); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(2); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: index1, highestIndex: index1 }], + txHash1, + 'test', + ); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: index2, highestIndex: index2 }], + txHash2, + 'test', + ); }); // Expected to happen if sending logs from multiple PXEs at a similar time. @@ -162,15 +144,17 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify that both txHashes have the same index stored - const txHashesAtIndex = await taggingStore.getTxHashesOfPendingIndexes(secret, index, index + 1, 'test'); - expect(txHashesAtIndex).toHaveLength(2); - const txHashStrings = txHashesAtIndex.map(h => h.toString()); - expect(txHashStrings).toContain(txHash1.toString()); - expect(txHashStrings).toContain(txHash2.toString()); - - // Verify the last used index is correct - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(2); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: index, highestIndex: index }], + txHash1, + 'test', + ); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: index, highestIndex: index }], + txHash2, + 'test', + ); }); it('complex scenario: multiple txHashes with multiple indexes', async () => { @@ -178,10 +162,11 @@ describe('loadAndStoreNewTaggingIndexes', () => { const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - // txHash1 has logs at index 1 and 8 (should store 8) - // txHash2 has logs at index 3 and 5 (should store 5) - // txHash3 has a log at index 9 (should store 9) + // txHash1 has logs at index 1, 2 and 8 → range [1, 8] + // txHash2 has logs at index 3 and 5 → range [3, 5] + // txHash3 has a log at index 9 → range [9, 9] const tag1 = await computeSiloedTagForIndex(1); + const tag2 = await computeSiloedTagForIndex(2); const tag3 = await computeSiloedTagForIndex(3); const tag5 = await computeSiloedTagForIndex(5); const tag8 = await computeSiloedTagForIndex(8); @@ -192,6 +177,8 @@ describe('loadAndStoreNewTaggingIndexes', () => { tags.map((t: SiloedTag) => { if (t.equals(tag1)) { return [makeLog(txHash1, tag1.value)]; + } else if (t.equals(tag2)) { + return [makeLog(txHash1, tag1.value)]; } else if (t.equals(tag3)) { return [makeLog(txHash2, tag3.value)]; } else if (t.equals(tag5)) { @@ -208,27 +195,22 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify txHash1 has highest index 8 (should not be at index 1) - const txHashesAtIndex1 = await taggingStore.getTxHashesOfPendingIndexes(secret, 1, 2, 'test'); - expect(txHashesAtIndex1).toHaveLength(0); - const txHashesAtIndex8 = await taggingStore.getTxHashesOfPendingIndexes(secret, 8, 9, 'test'); - expect(txHashesAtIndex8).toHaveLength(1); - expect(txHashesAtIndex8[0].equals(txHash1)).toBe(true); - - // Verify txHash2 has highest index 5 (should not be at index 3) - const txHashesAtIndex3 = await taggingStore.getTxHashesOfPendingIndexes(secret, 3, 4, 'test'); - expect(txHashesAtIndex3).toHaveLength(0); - const txHashesAtIndex5 = await taggingStore.getTxHashesOfPendingIndexes(secret, 5, 6, 'test'); - expect(txHashesAtIndex5).toHaveLength(1); - expect(txHashesAtIndex5[0].equals(txHash2)).toBe(true); - - // Verify txHash3 has index 9 - const txHashesAtIndex9 = await taggingStore.getTxHashesOfPendingIndexes(secret, 9, 10, 'test'); - expect(txHashesAtIndex9).toHaveLength(1); - expect(txHashesAtIndex9[0].equals(txHash3)).toBe(true); - - // Verify the last used index is the highest - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(9); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(3); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: 1, highestIndex: 8 }], + txHash1, + 'test', + ); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: 3, highestIndex: 5 }], + txHash2, + 'test', + ); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: 9, highestIndex: 9 }], + txHash3, + 'test', + ); }); it('start is inclusive and end is exclusive', async () => { @@ -256,16 +238,12 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, start, end, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Verify that the log at start (inclusive) was processed - const txHashesAtStart = await taggingStore.getTxHashesOfPendingIndexes(secret, start, start + 1, 'test'); - expect(txHashesAtStart).toHaveLength(1); - expect(txHashesAtStart[0].equals(txHashAtStart)).toBe(true); - - // Verify that the log at end (exclusive) was NOT processed - const txHashesAtEnd = await taggingStore.getTxHashesOfPendingIndexes(secret, end, end + 1, 'test'); - expect(txHashesAtEnd).toHaveLength(0); - - // Verify the last used index is the start index (since end was not processed) - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(start); + // Only the log at start should be stored; end is exclusive + expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(1); + expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( + [{ extendedSecret: secret, lowestIndex: start, highestIndex: start }], + txHashAtStart, + 'test', + ); }); }); diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts index 5558c1097cba..3979f5007189 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts @@ -16,6 +16,7 @@ import { getAllPrivateLogsByTags } from '../../get_all_logs_by_tags.js'; * @param end - The ending index (exclusive) of the window to process. * @param aztecNode - The Aztec node instance to query for logs. * @param taggingStore - The data provider to store pending indexes. + * @param anchorBlockHash - Hash of a block to use as reference block when querying node. * @param jobId - Job identifier, used to keep writes in-memory until they can be persisted in a data integrity * preserving way. */ @@ -34,12 +35,13 @@ export async function loadAndStoreNewTaggingIndexes( ); const txsForTags = await getTxsContainingTags(siloedTagsForWindow, aztecNode, anchorBlockHash); - const highestIndexMap = getTxHighestIndexMap(txsForTags, start, siloedTagsForWindow.length); + const txIndexesMap = getTxIndexesMap(txsForTags, start, siloedTagsForWindow.length); - // Now we iterate over the map, reconstruct the preTags and tx hash and store them in the db. - for (const [txHashStr, highestIndex] of highestIndexMap.entries()) { + // Now we iterate over the map, construct the tagging index ranges and store them in the db. + for (const [txHashStr, indexes] of txIndexesMap.entries()) { const txHash = TxHash.fromString(txHashStr); - await taggingStore.storePendingIndexes([{ extendedSecret, index: highestIndex }], txHash, jobId); + const ranges = [{ extendedSecret, lowestIndex: Math.min(...indexes), highestIndex: Math.max(...indexes) }]; + await taggingStore.storePendingIndexes(ranges, txHash, jobId); } } @@ -56,20 +58,28 @@ async function getTxsContainingTags( return allLogs.map(logs => logs.map(log => log.txHash)); } -// Returns a map of txHash to the highest index for that txHash. -function getTxHighestIndexMap(txHashesForTags: TxHash[][], start: number, count: number): Map { +// Returns a map of txHash to all indexes for that txHash. +function getTxIndexesMap(txHashesForTags: TxHash[][], start: number, count: number): Map { if (txHashesForTags.length !== count) { throw new Error(`Number of tx hashes arrays does not match number of tags. ${txHashesForTags.length} !== ${count}`); } - const highestIndexMap = new Map(); + const indexesMap = new Map(); + // Iterate over indexes for (let i = 0; i < txHashesForTags.length; i++) { const taggingIndex = start + i; const txHashesForTag = txHashesForTags[i]; + // iterate over tx hashes that used that index (tag) for (const txHash of txHashesForTag) { const key = txHash.toString(); - highestIndexMap.set(key, Math.max(highestIndexMap.get(key) ?? 0, taggingIndex)); + const existing = indexesMap.get(key); + // Add the index to the tx's indexes + if (existing) { + existing.push(taggingIndex); + } else { + indexesMap.set(key, [taggingIndex]); + } } } - return highestIndexMap; + return indexesMap; } diff --git a/yarn-project/stdlib/src/logs/index.ts b/yarn-project/stdlib/src/logs/index.ts index 2e25c40da7c3..540d1fe99698 100644 --- a/yarn-project/stdlib/src/logs/index.ts +++ b/yarn-project/stdlib/src/logs/index.ts @@ -1,5 +1,6 @@ export * from './extended_directional_app_tagging_secret.js'; export * from './pre_tag.js'; +export * from './tagging_index_range.js'; export * from './contract_class_log.js'; export * from './public_log.js'; export * from './private_log.js'; diff --git a/yarn-project/stdlib/src/logs/tagging_index_range.ts b/yarn-project/stdlib/src/logs/tagging_index_range.ts new file mode 100644 index 000000000000..6392ac8fd26a --- /dev/null +++ b/yarn-project/stdlib/src/logs/tagging_index_range.ts @@ -0,0 +1,24 @@ +import { schemas } from '@aztec/foundation/schemas'; + +import { z } from 'zod'; + +import { + type ExtendedDirectionalAppTaggingSecret, + ExtendedDirectionalAppTaggingSecretSchema, +} from './extended_directional_app_tagging_secret.js'; + +/** + * Represents a range of tagging indexes for a given extended directional app tagging secret. Used to track the lowest + * and highest indexes used in a transaction for a given (sender, recipient, app/contract) tuple. + */ +export type TaggingIndexRange = { + extendedSecret: ExtendedDirectionalAppTaggingSecret; + lowestIndex: number; + highestIndex: number; +}; + +export const TaggingIndexRangeSchema = z.object({ + extendedSecret: ExtendedDirectionalAppTaggingSecretSchema, + lowestIndex: schemas.Integer, + highestIndex: schemas.Integer, +}); diff --git a/yarn-project/stdlib/src/tx/private_execution_result.ts b/yarn-project/stdlib/src/tx/private_execution_result.ts index 4ddd06352e08..4432901b8901 100644 --- a/yarn-project/stdlib/src/tx/private_execution_result.ts +++ b/yarn-project/stdlib/src/tx/private_execution_result.ts @@ -11,7 +11,7 @@ import { PrivateCircuitPublicInputs } from '../kernel/private_circuit_public_inp import type { IsEmpty } from '../kernel/utils/interfaces.js'; import { sortByCounter } from '../kernel/utils/order_and_comparison.js'; import { ContractClassLog, ContractClassLogFields } from '../logs/contract_class_log.js'; -import { type PreTag, PreTagSchema } from '../logs/pre_tag.js'; +import { type TaggingIndexRange, TaggingIndexRangeSchema } from '../logs/tagging_index_range.js'; import { Note } from '../note/note.js'; import { type ZodFor, mapSchema, schemas } from '../schemas/index.js'; import { HashedValues } from './hashed_values.js'; @@ -137,8 +137,8 @@ export class PrivateCallExecutionResult { public returnValues: Fr[], /** The offchain effects emitted during execution of this function call via the `emit_offchain_effect` oracle. */ public offchainEffects: { data: Fr[] }[], - /** The pre-tags used in this tx to compute tags for private logs */ - public preTags: PreTag[], + /** The tagging index ranges used in this tx to compute tags for private logs */ + public taggingIndexRanges: TaggingIndexRange[], /** The nested executions. */ public nestedExecutionResults: PrivateCallExecutionResult[], /** @@ -161,7 +161,7 @@ export class PrivateCallExecutionResult { noteHashNullifierCounterMap: mapSchema(z.coerce.number(), z.number()), returnValues: z.array(schemas.Fr), offchainEffects: z.array(z.object({ data: z.array(schemas.Fr) })), - preTags: z.array(PreTagSchema), + taggingIndexRanges: z.array(TaggingIndexRangeSchema), nestedExecutionResults: z.array(z.lazy(() => PrivateCallExecutionResult.schema)), contractClassLogs: z.array(CountedContractClassLog.schema), }) @@ -178,7 +178,7 @@ export class PrivateCallExecutionResult { fields.noteHashNullifierCounterMap, fields.returnValues, fields.offchainEffects, - fields.preTags, + fields.taggingIndexRanges, fields.nestedExecutionResults, fields.contractClassLogs, ); From d176c9e00de4d08d7f98599c6d6872e8c4f03911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Bene=C5=A1?= Date: Mon, 16 Mar 2026 18:08:34 +0700 Subject: [PATCH 04/17] chore: revert accidental backport of #20817 (#21583) --- .../execution_tagging_index_cache.ts | 27 +- .../oracle/private_execution.ts | 4 +- .../oracle/private_execution_oracle.ts | 8 +- yarn-project/pxe/src/pxe.ts | 12 +- yarn-project/pxe/src/storage/metadata.ts | 2 +- .../sender_tagging_store.test.ts | 332 ++++++------------ .../tagging_store/sender_tagging_store.ts | 317 +++++++---------- yarn-project/pxe/src/tagging/index.ts | 2 +- .../sync_sender_tagging_indexes.test.ts | 68 +--- .../sync_sender_tagging_indexes.ts | 20 +- .../get_status_change_of_pending.test.ts | 42 +-- .../utils/get_status_change_of_pending.ts | 37 +- ...load_and_store_new_tagging_indexes.test.ts | 164 +++++---- .../load_and_store_new_tagging_indexes.ts | 28 +- yarn-project/stdlib/src/logs/index.ts | 1 - .../stdlib/src/logs/tagging_index_range.ts | 24 -- .../stdlib/src/tx/private_execution_result.ts | 10 +- 17 files changed, 389 insertions(+), 709 deletions(-) delete mode 100644 yarn-project/stdlib/src/logs/tagging_index_range.ts diff --git a/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts b/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts index 27612bf8ceaf..37ffc83016d9 100644 --- a/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts +++ b/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts @@ -1,37 +1,32 @@ -import { ExtendedDirectionalAppTaggingSecret, type TaggingIndexRange } from '@aztec/stdlib/logs'; +import { ExtendedDirectionalAppTaggingSecret, type PreTag } from '@aztec/stdlib/logs'; /** - * A map that stores the tagging index range for a given extended directional app tagging secret. + * A map that stores the tagging index for a given extended directional app tagging secret. * Note: The directional app tagging secret is unique for a (sender, recipient, contract) tuple while the direction * of sender -> recipient matters. */ export class ExecutionTaggingIndexCache { - private taggingIndexMap: Map = new Map(); + private taggingIndexMap: Map = new Map(); public getLastUsedIndex(secret: ExtendedDirectionalAppTaggingSecret): number | undefined { - return this.taggingIndexMap.get(secret.toString())?.highestIndex; + return this.taggingIndexMap.get(secret.toString()); } public setLastUsedIndex(secret: ExtendedDirectionalAppTaggingSecret, index: number) { const currentValue = this.taggingIndexMap.get(secret.toString()); - if (currentValue !== undefined && currentValue.highestIndex !== index - 1) { - throw new Error(`Invalid tagging index update. Current value: ${currentValue.highestIndex}, new value: ${index}`); - } - if (currentValue !== undefined) { - currentValue.highestIndex = index; - } else { - this.taggingIndexMap.set(secret.toString(), { lowestIndex: index, highestIndex: index }); + if (currentValue !== undefined && currentValue !== index - 1) { + throw new Error(`Invalid tagging index update. Current value: ${currentValue}, new value: ${index}`); } + this.taggingIndexMap.set(secret.toString(), index); } /** - * Returns the tagging index ranges that were used in this execution (and that need to be stored in the db). + * Returns the pre-tags that were used in this execution (and that need to be stored in the db). */ - public getUsedTaggingIndexRanges(): TaggingIndexRange[] { - return Array.from(this.taggingIndexMap.entries()).map(([secret, { lowestIndex, highestIndex }]) => ({ + public getUsedPreTags(): PreTag[] { + return Array.from(this.taggingIndexMap.entries()).map(([secret, index]) => ({ extendedSecret: ExtendedDirectionalAppTaggingSecret.fromString(secret), - lowestIndex, - highestIndex, + index, })); } } diff --git a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts index e9eeabca6d20..e25c99930f3f 100644 --- a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts +++ b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.ts @@ -81,7 +81,7 @@ export async function executePrivateFunction( const newNotes = privateExecutionOracle.getNewNotes(); const noteHashNullifierCounterMap = privateExecutionOracle.getNoteHashNullifierCounterMap(); const offchainEffects = privateExecutionOracle.getOffchainEffects(); - const taggingIndexRanges = privateExecutionOracle.getUsedTaggingIndexRanges(); + const preTags = privateExecutionOracle.getUsedPreTags(); const nestedExecutionResults = privateExecutionOracle.getNestedExecutionResults(); let timerSubtractionList = nestedExecutionResults; @@ -104,7 +104,7 @@ export async function executePrivateFunction( noteHashNullifierCounterMap, rawReturnValues, offchainEffects, - taggingIndexRanges, + preTags, nestedExecutionResults, contractClassLogs, { diff --git a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts index 348dbc7ab593..a1b2ada7881e 100644 --- a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts +++ b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts @@ -14,7 +14,7 @@ import { import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { siloNullifier } from '@aztec/stdlib/hash'; import { PrivateContextInputs } from '@aztec/stdlib/kernel'; -import { type ContractClassLog, ExtendedDirectionalAppTaggingSecret, type TaggingIndexRange } from '@aztec/stdlib/logs'; +import { type ContractClassLog, ExtendedDirectionalAppTaggingSecret, type PreTag } from '@aztec/stdlib/logs'; import { Tag } from '@aztec/stdlib/logs'; import { Note, type NoteStatus } from '@aztec/stdlib/note'; import { @@ -166,10 +166,10 @@ export class PrivateExecutionOracle extends UtilityExecutionOracle implements IP } /** - * Returns the tagging index ranges that were used in this execution (and that need to be stored in the db). + * Returns the pre-tags that were used in this execution (and that need to be stored in the db). */ - public getUsedTaggingIndexRanges(): TaggingIndexRange[] { - return this.taggingIndexCache.getUsedTaggingIndexRanges(); + public getUsedPreTags(): PreTag[] { + return this.taggingIndexCache.getUsedPreTags(); } /** diff --git a/yarn-project/pxe/src/pxe.ts b/yarn-project/pxe/src/pxe.ts index 5ff941da05db..9b7e5cc3ed98 100644 --- a/yarn-project/pxe/src/pxe.ts +++ b/yarn-project/pxe/src/pxe.ts @@ -764,17 +764,17 @@ export class PXE { // transaction before this one is included in a block from this PXE, and that transaction contains a log with // a tag derived from the same secret, we would reuse the tag and the transactions would be linked. Hence // storing the tags here prevents linkage of txs sent from the same PXE. - const taggingIndexRangesUsedInTheTx = privateExecutionResult.entrypoint.taggingIndexRanges; - if (taggingIndexRangesUsedInTheTx.length > 0) { + const preTagsUsedInTheTx = privateExecutionResult.entrypoint.preTags; + if (preTagsUsedInTheTx.length > 0) { // TODO(benesjan): The following is an expensive operation. Figure out a way to avoid it. const txHash = (await txProvingResult.toTx()).txHash; - await this.senderTaggingStore.storePendingIndexes(taggingIndexRangesUsedInTheTx, txHash, jobId); - this.log.debug(`Stored used tagging index ranges as sender for the tx`, { - taggingIndexRangesUsedInTheTx, + await this.senderTaggingStore.storePendingIndexes(preTagsUsedInTheTx, txHash, jobId); + this.log.debug(`Stored used pre-tags as sender for the tx`, { + preTagsUsedInTheTx, }); } else { - this.log.debug(`No tagging index ranges used in the tx`); + this.log.debug(`No pre-tags used in the tx`); } return txProvingResult; diff --git a/yarn-project/pxe/src/storage/metadata.ts b/yarn-project/pxe/src/storage/metadata.ts index 826f90735b91..cb1dee391377 100644 --- a/yarn-project/pxe/src/storage/metadata.ts +++ b/yarn-project/pxe/src/storage/metadata.ts @@ -1 +1 @@ -export const PXE_DATA_SCHEMA_VERSION = 4; +export const PXE_DATA_SCHEMA_VERSION = 3; diff --git a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts index b2800582f02d..986f1daef6fc 100644 --- a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts +++ b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.test.ts @@ -1,19 +1,11 @@ -import { Fr } from '@aztec/foundation/curves/bn254'; import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; -import { RevertCode } from '@aztec/stdlib/avm'; -import type { ExtendedDirectionalAppTaggingSecret, TaggingIndexRange } from '@aztec/stdlib/logs'; -import { PrivateLog, SiloedTag } from '@aztec/stdlib/logs'; +import type { ExtendedDirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; import { randomExtendedDirectionalAppTaggingSecret } from '@aztec/stdlib/testing'; -import { TxEffect, TxHash } from '@aztec/stdlib/tx'; +import { TxHash } from '@aztec/stdlib/tx'; import { UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN } from '../../tagging/constants.js'; import { SenderTaggingStore } from './sender_tagging_store.js'; -/** Helper to create a single-index range (lowestIndex === highestIndex). */ -function range(secret: ExtendedDirectionalAppTaggingSecret, lowest: number, highest?: number): TaggingIndexRange { - return { extendedSecret: secret, lowestIndex: lowest, highestIndex: highest ?? lowest }; -} - describe('SenderTaggingStore', () => { let taggingStore: SenderTaggingStore; let secret1: ExtendedDirectionalAppTaggingSecret; @@ -26,20 +18,25 @@ describe('SenderTaggingStore', () => { }); describe('storePendingIndexes', () => { - it('stores a single pending index range', async () => { + it('stores a single pending index', async () => { const txHash = TxHash.random(); + const preTag: PreTag = { extendedSecret: secret1, index: 5 }; - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + await taggingStore.storePendingIndexes([preTag], txHash, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes).toHaveLength(1); expect(txHashes[0]).toEqual(txHash); }); - it('stores multiple pending index ranges for different secrets', async () => { + it('stores multiple pending indexes for different secrets', async () => { const txHash = TxHash.random(); + const preTags: PreTag[] = [ + { extendedSecret: secret1, index: 3 }, + { extendedSecret: secret2, index: 7 }, + ]; - await taggingStore.storePendingIndexes([range(secret1, 3), range(secret2, 7)], txHash, 'test'); + await taggingStore.storePendingIndexes(preTags, txHash, 'test'); const txHashes1 = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes1).toHaveLength(1); @@ -50,12 +47,12 @@ describe('SenderTaggingStore', () => { expect(txHashes2[0]).toEqual(txHash); }); - it('stores multiple pending index ranges for the same secret from different txs', async () => { + it('stores multiple pending indexes for the same secret from different txs', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes).toHaveLength(2); @@ -63,71 +60,68 @@ describe('SenderTaggingStore', () => { expect(txHashes).toContainEqual(txHash2); }); - it('ignores duplicate range + txHash combination', async () => { + it('ignores duplicate preTag + txHash combination', async () => { const txHash = TxHash.random(); + const preTag: PreTag = { extendedSecret: secret1, index: 5 }; - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + await taggingStore.storePendingIndexes([preTag], txHash, 'test'); + await taggingStore.storePendingIndexes([preTag], txHash, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); expect(txHashes).toHaveLength(1); expect(txHashes[0]).toEqual(txHash); }); - it('stores a range spanning multiple indexes', async () => { + it('throws when storing duplicate secrets in the same call', async () => { const txHash = TxHash.random(); + const preTags: PreTag[] = [ + { extendedSecret: secret1, index: 3 }, + { extendedSecret: secret1, index: 7 }, + ]; - await taggingStore.storePendingIndexes([range(secret1, 3, 7)], txHash, 'test'); - - // By design the txs are filtered based on the highestIndex (7) in getTxHashesOfPendingIndexes so we shouldn't - // receive the tx only in the second query. - const txHashesNotContainingHighest = await taggingStore.getTxHashesOfPendingIndexes(secret1, 3, 4, 'test'); - expect(txHashesNotContainingHighest).toHaveLength(0); - - const txHashesContainingHighest = await taggingStore.getTxHashesOfPendingIndexes(secret1, 7, 8, 'test'); - expect(txHashesContainingHighest).toHaveLength(1); - expect(txHashesContainingHighest[0]).toEqual(txHash); - - expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(7); + await expect(taggingStore.storePendingIndexes(preTags, txHash, 'test')).rejects.toThrow( + 'Duplicate secrets found when storing pending indexes', + ); }); - it('throws when storing a different range for an existing secret + txHash pair', async () => { + it('throws when storing a different index for an existing secret + txHash pair', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + // First store an index + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); - // Storing a different range for the same secret + txHash should throw - await expect(taggingStore.storePendingIndexes([range(secret1, 7)], txHash, 'test')).rejects.toThrow( - /Conflicting range/, - ); + // Try to store a different index for the same secret + txHash pair + await expect( + taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash, 'test'), + ).rejects.toThrow(/Cannot store index 7.*a different index 5 already exists/); }); - it('throws when storing a pending index range lower than the last finalized index', async () => { + it('throws when storing a pending index lower than the last finalized index', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); // First store and finalize an index - await taggingStore.storePendingIndexes([range(secret1, 10)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Try to store a pending index lower than the finalized index - await expect(taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test')).rejects.toThrow( - /lowestIndex is lower than or equal to the last finalized index 10/, - ); + await expect( + taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'), + ).rejects.toThrow(/Cannot store pending index 5.*lower than or equal to the last finalized index 10/); }); - it('throws when storing a pending index range equal to the last finalized index', async () => { + it('throws when storing a pending index equal to the last finalized index', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); // First store and finalize an index - await taggingStore.storePendingIndexes([range(secret1, 10)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Try to store a pending index equal to the finalized index - await expect(taggingStore.storePendingIndexes([range(secret1, 10)], txHash2, 'test')).rejects.toThrow( - /lowestIndex is lower than or equal to the last finalized index 10/, - ); + await expect( + taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash2, 'test'), + ).rejects.toThrow(/Cannot store pending index 10.*lower than or equal to the last finalized index 10/); }); it('allows storing a pending index higher than the last finalized index', async () => { @@ -135,11 +129,13 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // First store and finalize an index - await taggingStore.storePendingIndexes([range(secret1, 10)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Store a pending index higher than the finalized index - should succeed - await expect(taggingStore.storePendingIndexes([range(secret1, 15)], txHash2, 'test')).resolves.not.toThrow(); + await expect( + taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 15 }], txHash2, 'test'), + ).resolves.not.toThrow(); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 20, 'test'); expect(txHashes).toHaveLength(1); @@ -154,12 +150,12 @@ describe('SenderTaggingStore', () => { const indexBeyondWindow = finalizedIndex + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN + 1; // First store and finalize an index - await taggingStore.storePendingIndexes([range(secret1, finalizedIndex)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: finalizedIndex }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Try to store an index beyond the window await expect( - taggingStore.storePendingIndexes([range(secret1, indexBeyondWindow)], txHash2, 'test'), + taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: indexBeyondWindow }], txHash2, 'test'), ).rejects.toThrow( `Highest used index ${indexBeyondWindow} is further than window length from the highest finalized index ${finalizedIndex}`, ); @@ -172,12 +168,12 @@ describe('SenderTaggingStore', () => { const indexAtBoundary = finalizedIndex + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN; // First store and finalize an index - await taggingStore.storePendingIndexes([range(secret1, finalizedIndex)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: finalizedIndex }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Store an index at the boundary, but check is >, so it should succeed await expect( - taggingStore.storePendingIndexes([range(secret1, indexAtBoundary)], txHash2, 'test'), + taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: indexAtBoundary }], txHash2, 'test'), ).resolves.not.toThrow(); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, indexAtBoundary + 5, 'test'); @@ -198,9 +194,9 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 8)], txHash3, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 8 }], txHash3, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 4, 9, 'test'); expect(txHashes).toHaveLength(2); @@ -213,8 +209,8 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 10)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 10 }], txHash2, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 5, 10, 'test'); expect(txHashes).toHaveLength(1); @@ -227,16 +223,16 @@ describe('SenderTaggingStore', () => { const txHash3 = TxHash.random(); const txHash4 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); // We store different secret with txHash1 to check we correctly don't return it in the result - await taggingStore.storePendingIndexes([range(secret2, 7)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret2, index: 7 }], txHash1, 'test'); // Store "parallel" index for secret1 with a different tx (can happen when sending logs from multiple PXEs) - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash4, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash4, 'test'); const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); - // Should have 4 unique tx hashes for secret1 + // Should have 3 unique tx hashes for secret1 expect(txHashes).toEqual(expect.arrayContaining([txHash1, txHash2, txHash3, txHash4])); }); }); @@ -249,7 +245,7 @@ describe('SenderTaggingStore', () => { it('returns the last finalized index after finalizePendingIndexes', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); const lastFinalized = await taggingStore.getLastFinalizedIndex(secret1, 'test'); @@ -265,7 +261,7 @@ describe('SenderTaggingStore', () => { it('returns the last finalized index when no pending indexes exist', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); const lastUsed = await taggingStore.getLastUsedIndex(secret1, 'test'); @@ -277,11 +273,11 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // First, finalize an index - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Then add a higher pending index - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); const lastUsed = await taggingStore.getLastUsedIndex(secret1, 'test'); expect(lastUsed).toBe(7); @@ -292,9 +288,9 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash3, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash3, 'test'); const lastUsed = await taggingStore.getLastUsedIndex(secret1, 'test'); expect(lastUsed).toBe(7); @@ -306,9 +302,9 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret2, 5)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret2, index: 5 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); await taggingStore.dropPendingIndexes([txHash1], 'test'); @@ -326,7 +322,7 @@ describe('SenderTaggingStore', () => { describe('finalizePendingIndexes', () => { it('moves pending index to finalized for a given tx hash', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash, 'test'); await taggingStore.finalizePendingIndexes([txHash], 'test'); @@ -342,10 +338,10 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); await taggingStore.finalizePendingIndexes([txHash2], 'test'); const lastFinalized = await taggingStore.getLastFinalizedIndex(secret1, 'test'); @@ -357,8 +353,8 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // Store both pending indexes first - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash2, 'test'); // Finalize the higher index first await taggingStore.finalizePendingIndexes([txHash1], 'test'); @@ -370,14 +366,14 @@ describe('SenderTaggingStore', () => { expect(lastFinalized).toBe(7); // Should remain at 7 }); - it('prunes pending indexes with lower or equal highestIndex than finalized', async () => { + it('prunes pending indexes with lower or equal index than finalized', async () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, 'test'); // Finalize txHash2 (index 5) await taggingStore.finalizePendingIndexes([txHash2], 'test'); @@ -391,7 +387,14 @@ describe('SenderTaggingStore', () => { it('handles multiple secrets in the same tx', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3), range(secret2, 7)], txHash, 'test'); + await taggingStore.storePendingIndexes( + [ + { extendedSecret: secret1, index: 3 }, + { extendedSecret: secret2, index: 7 }, + ], + txHash, + 'test', + ); await taggingStore.finalizePendingIndexes([txHash], 'test'); @@ -402,19 +405,9 @@ describe('SenderTaggingStore', () => { expect(lastFinalized2).toBe(7); }); - it('finalizes the highestIndex of a range', async () => { - const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3, 7)], txHash, 'test'); - - await taggingStore.finalizePendingIndexes([txHash], 'test'); - - const lastFinalized = await taggingStore.getLastFinalizedIndex(secret1, 'test'); - expect(lastFinalized).toBe(7); - }); - it('does nothing when tx hash does not exist', async () => { const txHash = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash, 'test'); await taggingStore.finalizePendingIndexes([TxHash.random()], 'test'); @@ -434,7 +427,7 @@ describe('SenderTaggingStore', () => { const txHash2 = TxHash.random(); // Step 1: Add pending index - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(3); expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBeUndefined(); @@ -444,7 +437,7 @@ describe('SenderTaggingStore', () => { expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); // Step 3: Add a new higher pending index - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, 'test'); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(7); expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); @@ -458,8 +451,8 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 5)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], txHash2, 'test'); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(5); @@ -475,14 +468,14 @@ describe('SenderTaggingStore', () => { const txHash3 = TxHash.random(); // Secret1: pending -> finalized - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, 'test'); await taggingStore.finalizePendingIndexes([txHash1], 'test'); // Secret2: pending (not finalized) - await taggingStore.storePendingIndexes([range(secret2, 5)], txHash2, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret2, index: 5 }], txHash2, 'test'); // Secret1: new pending - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, 'test'); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, 'test'); expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); expect(await taggingStore.getLastUsedIndex(secret1, 'test')).toBe(7); @@ -491,135 +484,18 @@ describe('SenderTaggingStore', () => { }); }); - describe('finalizePendingIndexesOfAPartiallyRevertedTx', () => { - function makeTxEffect(txHash: TxHash, siloedTags: SiloedTag[]): TxEffect { - return new TxEffect( - RevertCode.APP_LOGIC_REVERTED, - txHash, - Fr.ZERO, - [Fr.random()], // noteHashes (at least 1 nullifier required below, not here) - [Fr.random()], // nullifiers (at least 1 required) - [], // l2ToL1Msgs - [], // publicDataWrites - siloedTags.map(tag => PrivateLog.random(tag.value)), // privateLogs with surviving tags - [], // publicLogs - [], // contractClassLogs - ); - } - - it('finalizes only the indexes whose tags appear in TxEffect', async () => { - const txHash = TxHash.random(); - - // Store a range [3, 5] for secret1 in the same tx - await taggingStore.storePendingIndexes([range(secret1, 3, 5)], txHash, 'test'); - - // Compute the siloed tag for index 3 (the one that survives) - const survivingTag = await SiloedTag.compute({ extendedSecret: secret1, index: 3 }); - const txEffect = makeTxEffect(txHash, [survivingTag]); - - await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); - - // Index 3 should be finalized (it was onchain) - expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); - // All pending indexes for this tx should be removed - const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); - expect(txHashes).toHaveLength(0); - }); - - it('drops all indexes when no tags survive onchain', async () => { - const txHash = TxHash.random(); - - await taggingStore.storePendingIndexes([range(secret1, 3, 5)], txHash, 'test'); - - // TxEffect with no matching private logs (empty) - const txEffect = makeTxEffect(txHash, []); - - await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); - - // No finalized index should be set - expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBeUndefined(); - // All pending indexes for this tx should be removed - const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); - expect(txHashes).toHaveLength(0); - }); - - it('handles multiple secrets affected by the same partially reverted tx', async () => { - const txHash = TxHash.random(); - - // Store pending index ranges for both secrets in the same tx - await taggingStore.storePendingIndexes([range(secret1, 3, 5), range(secret2, 7)], txHash, 'test'); - - // Only index 3 for secret1 survives onchain; other indexes for secret1 and secret2 are dropped - const survivingTag = await SiloedTag.compute({ extendedSecret: secret1, index: 3 }); - const txEffect = makeTxEffect(txHash, [survivingTag]); - - await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); - - // secret1: index 3 should be finalized - expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(3); - expect(await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test')).toHaveLength(0); - - // secret2: no finalized index, all pending removed - expect(await taggingStore.getLastFinalizedIndex(secret2, 'test')).toBeUndefined(); - expect(await taggingStore.getTxHashesOfPendingIndexes(secret2, 0, 10, 'test')).toHaveLength(0); - }); - - it('preserves pending indexes from other txs', async () => { - const revertedTxHash = TxHash.random(); - const otherTxHash = TxHash.random(); - - // Store pending indexes: one from reverted tx, one from another tx - await taggingStore.storePendingIndexes([range(secret1, 3)], revertedTxHash, 'test'); - await taggingStore.storePendingIndexes([range(secret1, 7)], otherTxHash, 'test'); - - // TxEffect with no surviving tags for the reverted tx - const txEffect = makeTxEffect(revertedTxHash, []); - - await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); - - // No finalized index (nothing survived from the reverted tx) - expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBeUndefined(); - // The other tx's pending index should still be there - const txHashes = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test'); - expect(txHashes).toHaveLength(1); - expect(txHashes[0]).toEqual(otherTxHash); - }); - - it('correctly updates finalized index when there is an existing finalized index', async () => { - const txHash1 = TxHash.random(); - const revertedTxHash = TxHash.random(); - - // Store and finalize index 2 - await taggingStore.storePendingIndexes([range(secret1, 2)], txHash1, 'test'); - await taggingStore.finalizePendingIndexes([txHash1], 'test'); - - // Store a pending range [4, 6] for a partially reverted tx - await taggingStore.storePendingIndexes([range(secret1, 4, 6)], revertedTxHash, 'test'); - - // Only index 4 survives - const survivingTag = await SiloedTag.compute({ extendedSecret: secret1, index: 4 }); - const txEffect = makeTxEffect(revertedTxHash, [survivingTag]); - - await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(txEffect, 'test'); - - // Finalized index should be updated to 4 (higher than previous 2) - expect(await taggingStore.getLastFinalizedIndex(secret1, 'test')).toBe(4); - expect(await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'test')).toHaveLength(0); - }); - }); - describe('staged writes', () => { it('writes of uncommitted jobs are not visible outside the job that makes them', async () => { const committedTxHash = TxHash.random(); { const commitJobId: string = 'commit-job'; - await taggingStore.storePendingIndexes([range(secret1, 3)], committedTxHash, commitJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], committedTxHash, commitJobId); await taggingStore.commit(commitJobId); } const stagedTxHash = TxHash.random(); const stagingJobId: string = 'staging-job'; - await taggingStore.storePendingIndexes([range(secret1, 5)], stagedTxHash, stagingJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 5 }], stagedTxHash, stagingJobId); // For a job without any staged data we should only get committed data const txHashesWithoutJobId = await taggingStore.getTxHashesOfPendingIndexes(secret1, 0, 10, 'no-data-job'); @@ -637,7 +513,7 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); { const commitJobId: string = 'commit-job'; - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash1, commitJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash1, commitJobId); await taggingStore.finalizePendingIndexes([txHash1], commitJobId); await taggingStore.commit(commitJobId); } @@ -646,7 +522,7 @@ describe('SenderTaggingStore', () => { const stagingJobId: string = 'staging-job'; // Stage a higher finalized index (not committed) - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash2, stagingJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash2, stagingJobId); await taggingStore.finalizePendingIndexes([txHash2], stagingJobId); // With a different jobId, should get the committed finalized index @@ -661,8 +537,8 @@ describe('SenderTaggingStore', () => { const txHash1 = TxHash.random(); const txHash2 = TxHash.random(); const commitJobId: string = 'commit-job'; - await taggingStore.storePendingIndexes([range(secret1, 2)], txHash1, commitJobId); - await taggingStore.storePendingIndexes([range(secret1, 3)], txHash2, commitJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 2 }], txHash1, commitJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 3 }], txHash2, commitJobId); await taggingStore.finalizePendingIndexes([txHash1], commitJobId); await taggingStore.commit(commitJobId); } @@ -670,7 +546,7 @@ describe('SenderTaggingStore', () => { const stagingJobId: string = 'staging-job'; { const txHash3 = TxHash.random(); - await taggingStore.storePendingIndexes([range(secret1, 7)], txHash3, stagingJobId); + await taggingStore.storePendingIndexes([{ extendedSecret: secret1, index: 7 }], txHash3, stagingJobId); await taggingStore.finalizePendingIndexes([txHash3], stagingJobId); await taggingStore.discardStaged(stagingJobId); } diff --git a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts index 05f79be89b88..1b15bbbb207a 100644 --- a/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts +++ b/yarn-project/pxe/src/storage/tagging_store/sender_tagging_store.ts @@ -1,13 +1,10 @@ import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; -import { ExtendedDirectionalAppTaggingSecret, SiloedTag, type TaggingIndexRange } from '@aztec/stdlib/logs'; -import { TxEffect, TxHash } from '@aztec/stdlib/tx'; +import type { ExtendedDirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; +import { TxHash } from '@aztec/stdlib/tx'; import type { StagedStore } from '../../job_coordinator/job_coordinator.js'; import { UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN } from '../../tagging/constants.js'; -/** Internal representation of a pending index range entry. */ -type PendingIndexesEntry = { lowestIndex: number; highestIndex: number; txHash: string }; - /** * Data provider of tagging data used when syncing the sender tagging indexes. The recipient counterpart of this class * is called RecipientTaggingStore. We have the data stores separate for sender and recipient because @@ -18,19 +15,20 @@ export class SenderTaggingStore implements StagedStore { #store: AztecAsyncKVStore; - // Stores the pending index ranges for each directional app tagging secret. Pending here means that the tx that - // contained the private logs with tags corresponding to these indexes has not been finalized yet. + // Stores the pending indexes for each directional app tagging secret. Pending here means that the tx that contained + // the private logs with tags corresponding to these indexes has not been finalized yet. + // + // We don't store just the highest index because if their transaction is dropped we'd then need the information about + // the lower pending indexes. For each secret-tx pair however we only store the largest index used in that tx, since + // the smaller ones are irrelevant due to tx atomicity. // - // We store the full range (lowestIndex, highestIndex) for each secret-tx pair because transactions can partially - // revert, in which case only some logs (from the non-revertible phase) survive onchain. By storing the range, - // we can expand it and check each individual siloed tag against the TxEffect to determine which indexes made it - // onchain. + // TODO(#17615): This assumes no logs are used in the non-revertible phase. // - // directional app tagging secret => { lowestIndex, highestIndex, txHash }[] - #pendingIndexes: AztecAsyncMap; + // directional app tagging secret => { pending index, txHash }[] + #pendingIndexes: AztecAsyncMap; - // jobId => directional app tagging secret => { lowestIndex, highestIndex, txHash }[] - #pendingIndexesForJob: Map>; + // jobId => directional app tagging secret => { pending index, txHash }[] + #pendingIndexesForJob: Map>; // Stores the last (highest) finalized index for each directional app tagging secret. We care only about the last // index because unlike the pending indexes, it will never happen that a finalized index would be removed and hence @@ -52,7 +50,7 @@ export class SenderTaggingStore implements StagedStore { this.#lastFinalizedIndexesForJob = new Map(); } - #getPendingIndexesForJob(jobId: string): Map { + #getPendingIndexesForJob(jobId: string): Map { let pendingIndexesForJob = this.#pendingIndexesForJob.get(jobId); if (!pendingIndexesForJob) { pendingIndexesForJob = new Map(); @@ -70,7 +68,7 @@ export class SenderTaggingStore implements StagedStore { return jobStagedLastFinalizedIndexes; } - async #readPendingIndexes(jobId: string, secret: string): Promise { + async #readPendingIndexes(jobId: string, secret: string): Promise<{ index: number; txHash: string }[]> { // Always issue DB read to keep IndexedDB transaction alive (they auto-commit when a new micro-task starts and there // are no pending read requests). The staged value still takes precedence if it exists. const dbValue = await this.#pendingIndexes.getAsync(secret); @@ -78,7 +76,7 @@ export class SenderTaggingStore implements StagedStore { return staged !== undefined ? staged : (dbValue ?? []); } - #writePendingIndexes(jobId: string, secret: string, pendingIndexes: PendingIndexesEntry[]) { + #writePendingIndexes(jobId: string, secret: string, pendingIndexes: { index: number; txHash: string }[]) { this.#getPendingIndexesForJob(jobId).set(secret, pendingIndexes); } @@ -128,37 +126,57 @@ export class SenderTaggingStore implements StagedStore { } /** - * Stores pending index ranges. - * @remarks If the same (secret, txHash) pair already exists in the db with an equal range, it's a no-op. This is - * expected to happen because whenever we start sync we start from the last finalized index and we can have pending - * ranges already stored from previous syncs. If the ranges differ, it throws an error as that indicates a bug. - * @param ranges - The tagging index ranges containing the directional app tagging secrets and the index ranges that are - * to be stored in the db. - * @param txHash - The tx in which the tagging indexes were used in private logs. + * Stores pending indexes. + * @remarks Ignores the index if the same preTag + txHash combination already exists in the db with the same index. + * This is expected to happen because whenever we start sync we start from the last finalized index and we can have + * pending indexes already stored from previous syncs. + * @param preTags - The pre-tags containing the directional app tagging secrets and the indexes that are to be + * stored in the db. + * @param txHash - The tx in which the pretags were used in private logs. * @param jobId - job context for staged writes to this store. See `JobCoordinator` for more details. - * @throws If the highestIndex is further than window length from the highest finalized index for the same secret. - * @throws If the lowestIndex is lower than or equal to the last finalized index for the same secret. - * @throws If a different range already exists for the same (secret, txHash) pair. + * @throws If any two pre-tags contain the same directional app tagging secret. This is enforced because we care + * only about the highest index for a given secret that was used in the tx. Hence this check is a good way to catch + * bugs. + * @throws If the newly stored pending index is further than window length from the highest finalized index for the + * same secret. This is enforced in order to give a guarantee to a recipient that he doesn't need to look further than + * window length ahead of the highest finalized index. + * @throws If a secret + txHash pair already exists in the db with a different index value. It should never happen + * that we would attempt to store a different index for a given secret-txHash pair because we always store just the + * highest index for a given secret-txHash pair. Hence this is a good way to catch bugs. + * @throws If the newly stored pending index is lower than or equal to the last finalized index for the same secret. + * This is enforced because this should never happen if the syncing is done correctly as we look for logs from higher + * indexes than finalized ones. */ - storePendingIndexes(ranges: TaggingIndexRange[], txHash: TxHash, jobId: string): Promise { - if (ranges.length === 0) { + storePendingIndexes(preTags: PreTag[], txHash: TxHash, jobId: string): Promise { + if (preTags.length === 0) { return Promise.resolve(); } + // The secrets in pre-tags should be unique because we always store just the highest index per given secret-txHash + // pair. Below we check that this is the case. + const secretsSet = new Set(preTags.map(preTag => preTag.extendedSecret.toString())); + if (secretsSet.size !== preTags.length) { + return Promise.reject(new Error(`Duplicate secrets found when storing pending indexes`)); + } + const txHashStr = txHash.toString(); return this.#store.transactionAsync(async () => { // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive - const rangeReadPromises = ranges.map(range => ({ - range, - secretStr: range.extendedSecret.toString(), - pending: this.#readPendingIndexes(jobId, range.extendedSecret.toString()), - finalized: this.#readLastFinalizedIndex(jobId, range.extendedSecret.toString()), - })); + const preTagReadPromises = preTags.map(({ extendedSecret, index }) => { + const secretStr = extendedSecret.toString(); + return { + extendedSecret, + secretStr, + index, + pending: this.#readPendingIndexes(jobId, secretStr), + finalized: this.#readLastFinalizedIndex(jobId, secretStr), + }; + }); // Await all reads together - const rangeData = await Promise.all( - rangeReadPromises.map(async item => ({ + const preTagData = await Promise.all( + preTagReadPromises.map(async item => ({ ...item, pendingData: await item.pending, finalizedIndex: await item.finalized, @@ -166,51 +184,48 @@ export class SenderTaggingStore implements StagedStore { ); // Process in memory and validate - for (const { range, secretStr, pendingData, finalizedIndex } of rangeData) { - // Check that the highest index is not further than window length from the highest finalized index. - if (range.highestIndex > (finalizedIndex ?? 0) + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN) { + for (const { secretStr, index, pendingData, finalizedIndex } of preTagData) { + // First we check that for any secret the highest used index in tx is not further than window length from + // the highest finalized index. + if (index > (finalizedIndex ?? 0) + UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN) { throw new Error( - `Highest used index ${range.highestIndex} is further than window length from the highest finalized index ${finalizedIndex ?? 0}. + `Highest used index ${index} is further than window length from the highest finalized index ${finalizedIndex ?? 0}. Tagging window length ${UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN} is configured too low. Contact the Aztec team to increase it!`, ); } - // Throw if the lowest index is lower than or equal to the last finalized index - if (finalizedIndex !== undefined && range.lowestIndex <= finalizedIndex) { + // Throw if the new pending index is lower than or equal to the last finalized index + if (finalizedIndex !== undefined && index <= finalizedIndex) { throw new Error( - `Cannot store pending index range [${range.lowestIndex}, ${range.highestIndex}] for secret ${secretStr}: ` + - `lowestIndex is lower than or equal to the last finalized index ${finalizedIndex}`, + `Cannot store pending index ${index} for secret ${secretStr}: ` + + `it is lower than or equal to the last finalized index ${finalizedIndex}`, ); } - // Check if an entry with the same txHash already exists - const existingEntry = pendingData.find(entry => entry.txHash === txHashStr); + // Check if this secret + txHash combination already exists + const existingForSecretAndTx = pendingData.find(entry => entry.txHash === txHashStr); - if (existingEntry) { - // Assert that the ranges are equal — different ranges for the same (secret, txHash) indicates a bug - if (existingEntry.lowestIndex !== range.lowestIndex || existingEntry.highestIndex !== range.highestIndex) { + if (existingForSecretAndTx) { + // If it exists with a different index, throw an error + if (existingForSecretAndTx.index !== index) { throw new Error( - `Conflicting range for secret ${secretStr} and txHash ${txHashStr}: ` + - `existing [${existingEntry.lowestIndex}, ${existingEntry.highestIndex}] vs ` + - `new [${range.lowestIndex}, ${range.highestIndex}]`, + `Cannot store index ${index} for secret ${secretStr} and txHash ${txHashStr}: ` + + `a different index ${existingForSecretAndTx.index} already exists for this secret-txHash pair`, ); } - // Exact duplicate — skip + // If it exists with the same index, ignore the update (no-op) } else { - this.#writePendingIndexes(jobId, secretStr, [ - ...pendingData, - { lowestIndex: range.lowestIndex, highestIndex: range.highestIndex, txHash: txHashStr }, - ]); + // If it doesn't exist, add it + this.#writePendingIndexes(jobId, secretStr, [...pendingData, { index, txHash: txHashStr }]); } } }); } /** - * Returns the transaction hashes of all pending transactions that contain highest indexes within a specified range - * for a given directional app tagging secret. We check based on the highest indexes only as that is the relevant - * information for the caller of this function. + * Returns the transaction hashes of all pending transactions that contain indexes within a specified range + * for a given directional app tagging secret. * @param secret - The directional app tagging secret to query pending indexes for. * @param startIndex - The lower bound of the index range (inclusive). * @param endIndex - The upper bound of the index range (exclusive). @@ -226,7 +241,7 @@ export class SenderTaggingStore implements StagedStore { return this.#store.transactionAsync(async () => { const existing = await this.#readPendingIndexes(jobId, secret.toString()); const txHashes = existing - .filter(entry => entry.highestIndex >= startIndex && entry.highestIndex < endIndex) + .filter(entry => entry.index >= startIndex && entry.index < endIndex) .map(entry => entry.txHash); return Array.from(new Set(txHashes)).map(TxHash.fromString); }); @@ -254,15 +269,16 @@ export class SenderTaggingStore implements StagedStore { const pendingPromise = this.#readPendingIndexes(jobId, secretStr); const finalizedPromise = this.#readLastFinalizedIndex(jobId, secretStr); - const [pendingEntries, lastFinalized] = await Promise.all([pendingPromise, finalizedPromise]); + const [pendingTxScopedIndexes, lastFinalized] = await Promise.all([pendingPromise, finalizedPromise]); + const pendingIndexes = pendingTxScopedIndexes.map(entry => entry.index); - if (pendingEntries.length === 0) { + if (pendingTxScopedIndexes.length === 0) { return lastFinalized; } - // As the last used index we return the highest one from the pending index ranges. Note that this value will be - // always higher than the last finalized index because we prune lower pending indexes when a tx is finalized. - return Math.max(...pendingEntries.map(entry => entry.highestIndex)); + // As the last used index we return the highest one from the pending indexes. Note that this value will be always + // higher than the last finalized index because we prune lower pending indexes when a tx is finalized. + return Math.max(...pendingIndexes); }); } @@ -278,7 +294,7 @@ export class SenderTaggingStore implements StagedStore { return this.#store.transactionAsync(async () => { // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive - const secretReadPromises: Map> = new Map(); + const secretReadPromises: Map> = new Map(); for await (const secret of this.#pendingIndexes.keysAsync()) { secretReadPromises.set(secret, this.#readPendingIndexes(jobId, secret)); @@ -314,15 +330,22 @@ export class SenderTaggingStore implements StagedStore { }); } - /** Prefetches all pending and finalized index data for every secret (from both DB and staged writes). */ - #getSecretsWithPendingData( - jobId: string, - ): Promise<{ secret: string; pendingData: PendingIndexesEntry[]; lastFinalized: number | undefined }[]> { + /** + * Updates pending indexes corresponding to the given transaction hashes to be finalized and prunes any lower pending + * indexes. + */ + finalizePendingIndexes(txHashes: TxHash[], jobId: string): Promise { + if (txHashes.length === 0) { + return Promise.resolve(); + } + + const txHashStrings = new Set(txHashes.map(tx => tx.toString())); + return this.#store.transactionAsync(async () => { // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive const secretDataPromises: Map< string, - { pending: Promise; finalized: Promise } + { pending: Promise<{ index: number; txHash: string }[]>; finalized: Promise } > = new Map(); for await (const secret of this.#pendingIndexes.keysAsync()) { @@ -352,125 +375,55 @@ export class SenderTaggingStore implements StagedStore { })), ); - return dataResults.filter(r => r.pendingData.length > 0); - }); - } - - /** - * Updates pending indexes corresponding to the given transaction hashes to be finalized and prunes any lower pending - * indexes. - */ - async finalizePendingIndexes(txHashes: TxHash[], jobId: string): Promise { - if (txHashes.length === 0) { - return; - } - - const txHashStrings = new Set(txHashes.map(tx => tx.toString())); - const secretsWithData = await this.#getSecretsWithPendingData(jobId); - - for (const { secret, pendingData, lastFinalized } of secretsWithData) { - let currentPending = pendingData; - let currentFinalized = lastFinalized; - - // Process all txHashes for this secret - for (const txHashStr of txHashStrings) { - const matchingEntries = currentPending.filter(item => item.txHash === txHashStr); - if (matchingEntries.length === 0) { - // This is expected as a higher index might have already been finalized which would lead to pruning of - // pending entries. + // Process all txHashes for each secret in memory + for (const { secret, pendingData, lastFinalized } of dataResults) { + if (!pendingData || pendingData.length === 0) { continue; } - if (matchingEntries.length > 1) { - // We should always just store the highest pending index for a given tx hash and secret because the lower - // values are irrelevant. - throw new Error(`Multiple pending entries found for tx hash ${txHashStr} and secret ${secret}`); - } - - const newFinalized = matchingEntries[0].highestIndex; - - if (newFinalized < (currentFinalized ?? 0)) { - // This should never happen because when last finalized index was finalized we should have pruned the lower - // pending indexes. - throw new Error( - `New finalized index ${newFinalized} is smaller than the current last finalized index ${currentFinalized}`, - ); - } - - currentFinalized = newFinalized; - - // When we add pending indexes, we ensure they are higher than the last finalized index. However, because we - // cannot control the order in which transactions are finalized, there may be pending indexes that are now - // obsolete because they are lower than the most recently finalized index. For this reason, we prune these - // outdated pending indexes. - currentPending = currentPending.filter(item => item.highestIndex > currentFinalized!); - } - - // Write final state if changed - if (currentFinalized !== lastFinalized) { - this.#writeLastFinalizedIndex(jobId, secret, currentFinalized!); - } - if (currentPending !== pendingData) { - this.#writePendingIndexes(jobId, secret, currentPending); - } - } - } - - /** - * Handles finalization of pending indexes for a transaction whose execution was partially reverted. - * Recomputes the siloed tags for each pending index of the given tx and checks which ones appear in the - * TxEffect's private logs (i.e., which ones made it onchain). Those that survived are finalized; those that - * didn't are dropped. - * @param txEffect - The tx effect of the partially reverted transaction. - * @param jobId - job context for staged writes to this store. See `JobCoordinator` for more details. - */ - async finalizePendingIndexesOfAPartiallyRevertedTx(txEffect: TxEffect, jobId: string): Promise { - const txHashStr = txEffect.txHash.toString(); - - // Build a set of all siloed tag values that made it onchain (first field of each private log). - const onChainTags = new Set(txEffect.privateLogs.map(log => log.fields[0].toString())); + let currentPending = pendingData; + let currentFinalized = lastFinalized; - const secretsWithData = await this.#getSecretsWithPendingData(jobId); + // Process all txHashes for this secret + for (const txHashStr of txHashStrings) { + const matchingIndexes = currentPending.filter(item => item.txHash === txHashStr).map(item => item.index); + if (matchingIndexes.length === 0) { + continue; + } - for (const { secret, pendingData, lastFinalized } of secretsWithData) { - const matchingEntries = pendingData.filter(item => item.txHash === txHashStr); - if (matchingEntries.length === 0) { - // This is expected as a higher index might have already been finalized which would lead to pruning of - // pending entries. - continue; - } + if (matchingIndexes.length > 1) { + // We should always just store the highest pending index for a given tx hash and secret because the lower + // values are irrelevant. + throw new Error(`Multiple pending indexes found for tx hash ${txHashStr} and secret ${secret}`); + } - if (matchingEntries.length > 1) { - // We should always just store the highest pending index for a given tx hash and secret because the lower - // values are irrelevant. - throw new Error(`Multiple pending entries found for tx hash ${txHashStr} and secret ${secret}`); - } + const newFinalized = matchingIndexes[0]; - const pendingEntry = matchingEntries[0]; + if (newFinalized < (currentFinalized ?? 0)) { + // This should never happen because when last finalized index was finalized we should have pruned the lower + // pending indexes. + throw new Error( + `New finalized index ${newFinalized} is smaller than the current last finalized index ${currentFinalized}`, + ); + } - // Expand each matching entry's range and recompute siloed tags for each index. - const extendedSecret = ExtendedDirectionalAppTaggingSecret.fromString(secret); - let highestSurvivingIndex: number | undefined; + currentFinalized = newFinalized; - for (let index = pendingEntry.lowestIndex; index <= pendingEntry.highestIndex; index++) { - const siloedTag = await SiloedTag.compute({ extendedSecret, index }); - if (onChainTags.has(siloedTag.value.toString())) { - highestSurvivingIndex = highestSurvivingIndex !== undefined ? Math.max(highestSurvivingIndex, index) : index; + // When we add pending indexes, we ensure they are higher than the last finalized index. However, because we + // cannot control the order in which transactions are finalized, there may be pending indexes that are now + // obsolete because they are lower than the most recently finalized index. For this reason, we prune these + // outdated pending indexes. + currentPending = currentPending.filter(item => item.index > currentFinalized!); } - } - // Remove all entries for this txHash from pending (both surviving and non-surviving). - let currentPending = pendingData.filter(item => item.txHash !== txHashStr); - - if (highestSurvivingIndex !== undefined) { - const newFinalized = Math.max(lastFinalized ?? 0, highestSurvivingIndex); - this.#writeLastFinalizedIndex(jobId, secret, newFinalized); - - // Prune pending indexes that are now <= the finalized index. - currentPending = currentPending.filter(item => item.highestIndex > newFinalized); + // Write final state if changed + if (currentFinalized !== lastFinalized) { + this.#writeLastFinalizedIndex(jobId, secret, currentFinalized!); + } + if (currentPending !== pendingData) { + this.#writePendingIndexes(jobId, secret, currentPending); + } } - - this.#writePendingIndexes(jobId, secret, currentPending); - } + }); } } diff --git a/yarn-project/pxe/src/tagging/index.ts b/yarn-project/pxe/src/tagging/index.ts index 6b812a8f0a47..ea8c6f80f613 100644 --- a/yarn-project/pxe/src/tagging/index.ts +++ b/yarn-project/pxe/src/tagging/index.ts @@ -16,4 +16,4 @@ export { getAllPrivateLogsByTags, getAllPublicLogsByTagsFromContract } from './g // Re-export tagging-related types from stdlib export { ExtendedDirectionalAppTaggingSecret, Tag, SiloedTag } from '@aztec/stdlib/logs'; -export { type PreTag, type TaggingIndexRange } from '@aztec/stdlib/logs'; +export { type PreTag } from '@aztec/stdlib/logs'; diff --git a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts index dedfacbf5dda..d214b6e50120 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.test.ts @@ -1,12 +1,10 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { Fr } from '@aztec/foundation/curves/bn254'; import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; -import { RevertCode } from '@aztec/stdlib/avm'; import { BlockHash } from '@aztec/stdlib/block'; import type { AztecNode } from '@aztec/stdlib/interfaces/client'; -import { PrivateLog } from '@aztec/stdlib/logs'; import { randomExtendedDirectionalAppTaggingSecret, randomTxScopedPrivateL2Log } from '@aztec/stdlib/testing'; -import { type IndexedTxEffect, TxEffect, TxExecutionResult, TxHash, TxReceipt, TxStatus } from '@aztec/stdlib/tx'; +import { TxExecutionResult, TxHash, TxReceipt, TxStatus } from '@aztec/stdlib/tx'; import { type MockProxy, mock } from 'jest-mock-extended'; @@ -277,68 +275,4 @@ describe('syncSenderTaggingIndexes', () => { expect(await taggingStore.getLastFinalizedIndex(secret, 'test')).toBe(pendingAndFinalizedIndex); expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(pendingAndFinalizedIndex); }); - - it('handles a partially reverted transaction', async () => { - await setUp(); - - const revertedTxHash = TxHash.random(); - - // Create logs at indexes 4 and 6 for the same (reverted) tx - const tag4 = await computeSiloedTagForIndex(4); - const tag6 = await computeSiloedTagForIndex(6); - - aztecNode.getPrivateLogsByTags.mockImplementation((tags: SiloedTag[]) => { - return Promise.resolve( - tags.map((tag: SiloedTag) => { - if (tag.equals(tag4)) { - return [makeLog(revertedTxHash, tag4.value)]; - } else if (tag.equals(tag6)) { - return [makeLog(revertedTxHash, tag6.value)]; - } - return []; - }), - ); - }); - - // Mock getTxReceipt to return FINALIZED with APP_LOGIC_REVERTED - aztecNode.getTxReceipt.mockResolvedValue( - new TxReceipt( - revertedTxHash, - TxStatus.FINALIZED, - TxExecutionResult.APP_LOGIC_REVERTED, - undefined, - undefined, - undefined, - BlockNumber(14), - ), - ); - - // Mock getTxEffect to return a TxEffect where only the tag at index 4 survived (non-revertible phase) - const txEffect = new TxEffect( - RevertCode.APP_LOGIC_REVERTED, - revertedTxHash, - Fr.ZERO, - [Fr.random()], // noteHashes - [Fr.random()], // nullifiers - [], // l2ToL1Msgs - [], // publicDataWrites - [PrivateLog.random(tag4.value)], // only the tag at index 4 survived - [], // publicLogs - [], // contractClassLogs - ); - - aztecNode.getTxEffect.mockResolvedValue({ - data: txEffect, - l2BlockNumber: BlockNumber(14), - l2BlockHash: MOCK_ANCHOR_BLOCK_HASH, - txIndexInBlock: 0, - } as IndexedTxEffect); - - await syncSenderTaggingIndexes(secret, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - - // Index 4 should be finalized (it survived the partial revert) - expect(await taggingStore.getLastFinalizedIndex(secret, 'test')).toBe(4); - // No pending indexes should remain for this secret - expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(4); - }); }); diff --git a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts index 516dc00483ef..87d56d6a46e7 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/sync_sender_tagging_indexes.ts @@ -62,29 +62,11 @@ export async function syncSenderTaggingIndexes( break; } - const { txHashesToFinalize, txHashesToDrop, txHashesWithExecutionReverted } = await getStatusChangeOfPending( - pendingTxHashes, - aztecNode, - ); + const { txHashesToFinalize, txHashesToDrop } = await getStatusChangeOfPending(pendingTxHashes, aztecNode); await taggingStore.dropPendingIndexes(txHashesToDrop, jobId); await taggingStore.finalizePendingIndexes(txHashesToFinalize, jobId); - if (txHashesWithExecutionReverted.length > 0) { - const indexedTxEffects = await Promise.all( - txHashesWithExecutionReverted.map(txHash => aztecNode.getTxEffect(txHash)), - ); - for (const indexedTxEffect of indexedTxEffects) { - if (indexedTxEffect === undefined) { - throw new Error( - 'TxEffect not found for execution-reverted tx. This is either a bug or a reorg has occurred.', - ); - } - - await taggingStore.finalizePendingIndexesOfAPartiallyRevertedTx(indexedTxEffect.data, jobId); - } - } - // We check if the finalized index has been updated. newFinalizedIndex = await taggingStore.getLastFinalizedIndex(secret, jobId); if (previousFinalizedIndex !== newFinalizedIndex) { diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts index 676b491d8910..7fd0fc92e3f3 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.test.ts @@ -51,41 +51,11 @@ describe('getStatusChangeOfPending', () => { ), ); } else if (hash.equals(appLogicRevertedTxHash)) { - return Promise.resolve( - new TxReceipt( - hash, - TxStatus.FINALIZED, - TxExecutionResult.APP_LOGIC_REVERTED, - undefined, - undefined, - undefined, - BlockNumber(10), - ), - ); + return Promise.resolve(new TxReceipt(hash, TxStatus.PROPOSED, TxExecutionResult.APP_LOGIC_REVERTED, undefined)); } else if (hash.equals(teardownRevertedTxHash)) { - return Promise.resolve( - new TxReceipt( - hash, - TxStatus.FINALIZED, - TxExecutionResult.TEARDOWN_REVERTED, - undefined, - undefined, - undefined, - BlockNumber(10), - ), - ); + return Promise.resolve(new TxReceipt(hash, TxStatus.PROPOSED, TxExecutionResult.TEARDOWN_REVERTED, undefined)); } else if (hash.equals(bothRevertedTxHash)) { - return Promise.resolve( - new TxReceipt( - hash, - TxStatus.FINALIZED, - TxExecutionResult.BOTH_REVERTED, - undefined, - undefined, - undefined, - BlockNumber(10), - ), - ); + return Promise.resolve(new TxReceipt(hash, TxStatus.PROPOSED, TxExecutionResult.BOTH_REVERTED, undefined)); } else { throw new Error(`Unexpected tx hash: ${hash.toString()}`); } @@ -104,8 +74,8 @@ describe('getStatusChangeOfPending', () => { ); expect(result.txHashesToFinalize).toEqual([finalizedTxHash]); - expect(result.txHashesToDrop).toEqual([droppedTxHash]); - expect(result.txHashesWithExecutionReverted).toEqual([ + expect(result.txHashesToDrop).toEqual([ + droppedTxHash, appLogicRevertedTxHash, teardownRevertedTxHash, bothRevertedTxHash, @@ -131,7 +101,6 @@ describe('getStatusChangeOfPending', () => { expect(result.txHashesToFinalize).toEqual([txHash]); expect(result.txHashesToDrop).toEqual([]); - expect(result.txHashesWithExecutionReverted).toEqual([]); }); it('does not finalize tx that is only proven', async () => { @@ -154,6 +123,5 @@ describe('getStatusChangeOfPending', () => { // Not finalized yet, so stays pending expect(result.txHashesToFinalize).toEqual([]); expect(result.txHashesToDrop).toEqual([]); - expect(result.txHashesWithExecutionReverted).toEqual([]); }); }); diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts index 1fc434d10c35..8400b16237f3 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/get_status_change_of_pending.ts @@ -2,50 +2,35 @@ import type { AztecNode } from '@aztec/stdlib/interfaces/server'; import { TxHash, TxStatus } from '@aztec/stdlib/tx'; /** - * Based on receipts obtained from `aztecNode` returns which pending transactions changed their status to finalized, - * dropped, or execution-reverted (but mined). + * Based on receipts obtained from `aztecNode` returns which pending transactions changed their status to finalized or + * dropped. */ export async function getStatusChangeOfPending( pending: TxHash[], aztecNode: AztecNode, -): Promise<{ - txHashesToFinalize: TxHash[]; - txHashesToDrop: TxHash[]; - txHashesWithExecutionReverted: TxHash[]; -}> { +): Promise<{ txHashesToFinalize: TxHash[]; txHashesToDrop: TxHash[] }> { // Get receipts for all pending tx hashes. const receipts = await Promise.all(pending.map(pendingTxHash => aztecNode.getTxReceipt(pendingTxHash))); const txHashesToFinalize: TxHash[] = []; const txHashesToDrop: TxHash[] = []; - const txHashesWithExecutionReverted: TxHash[] = []; for (let i = 0; i < receipts.length; i++) { const receipt = receipts[i]; const txHash = pending[i]; - if (receipt.status === TxStatus.FINALIZED) { - // Tx has been included in a block and the corresponding block is finalized - if (receipt.hasExecutionSucceeded()) { - // No part of execution reverted - we just finalize all the indexes. - txHashesToFinalize.push(txHash); - } else if (receipt.hasExecutionReverted()) { - // Tx was mined but execution reverted (app logic, teardown, or both). Some logs from the non-revertible - // phase may still be onchain. We check which tags made it onchain and finalize those; drop the rest. - txHashesWithExecutionReverted.push(txHash); - } else { - // Defensive check - this branch should never be triggered - throw new Error( - 'Both hasExecutionSucceeded and hasExecutionReverted on the receipt returned false. This should never happen and it implies a bug. Please open an issue.', - ); - } - } else if (receipt.isDropped()) { - // Tx was dropped from the mempool --> we drop the corresponding pending indexes. + if (receipt.status === TxStatus.FINALIZED && receipt.hasExecutionSucceeded()) { + // Tx has been included in a block and the corresponding block is finalized --> we mark the indexes as + // finalized. + txHashesToFinalize.push(txHash); + } else if (receipt.isDropped() || receipt.hasExecutionReverted()) { + // Tx was dropped or reverted --> we drop the corresponding pending indexes. + // TODO(#17615): Don't drop pending indexes corresponding to non-revertible phases. txHashesToDrop.push(txHash); } else { // Tx is still pending, not yet finalized, or was mined successfully but not yet finalized --> we don't do anything. } } - return { txHashesToFinalize, txHashesToDrop, txHashesWithExecutionReverted }; + return { txHashesToFinalize, txHashesToDrop }; } diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts index 572ef56fb88e..789c67c79f8f 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.test.ts @@ -1,4 +1,5 @@ import type { Fr } from '@aztec/foundation/curves/bn254'; +import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; import { BlockHash } from '@aztec/stdlib/block'; import type { AztecNode } from '@aztec/stdlib/interfaces/server'; import { type ExtendedDirectionalAppTaggingSecret, SiloedTag } from '@aztec/stdlib/logs'; @@ -7,15 +8,17 @@ import { TxHash } from '@aztec/stdlib/tx'; import { type MockProxy, mock } from 'jest-mock-extended'; -import type { SenderTaggingStore } from '../../../storage/tagging_store/sender_tagging_store.js'; +import { SenderTaggingStore } from '../../../storage/tagging_store/sender_tagging_store.js'; import { loadAndStoreNewTaggingIndexes } from './load_and_store_new_tagging_indexes.js'; const MOCK_ANCHOR_BLOCK_HASH = BlockHash.random(); describe('loadAndStoreNewTaggingIndexes', () => { + // Secret to be used on the input of the loadAndStoreNewTaggingIndexes function. let secret: ExtendedDirectionalAppTaggingSecret; + let aztecNode: MockProxy; - let taggingStore: MockProxy; + let taggingStore: SenderTaggingStore; function computeSiloedTagForIndex(index: number) { return SiloedTag.compute({ extendedSecret: secret, index }); @@ -27,21 +30,30 @@ describe('loadAndStoreNewTaggingIndexes', () => { beforeAll(async () => { secret = await randomExtendedDirectionalAppTaggingSecret(); + aztecNode = mock(); }); - beforeEach(() => { - aztecNode = mock(); - taggingStore = mock(); + // Unlike for secret, app address and aztecNode we need a fresh instance of the tagging data provider for each test. + beforeEach(async () => { + aztecNode.getPrivateLogsByTags.mockReset(); + taggingStore = new SenderTaggingStore(await openTmpStore('test')); }); it('no logs found for the given window', async () => { aztecNode.getPrivateLogsByTags.mockImplementation((tags: SiloedTag[]) => { - return Promise.resolve(tags.map(() => [])); + // No log found for any tag + return Promise.resolve(tags.map((_tag: SiloedTag) => [])); }); await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - expect(taggingStore.storePendingIndexes).not.toHaveBeenCalled(); + // Verify that no pending indexes were stored + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBeUndefined(); + expect(await taggingStore.getLastFinalizedIndex(secret, 'test')).toBeUndefined(); + + // Verify the entire window has no pending tx hashes + const txHashesInWindow = await taggingStore.getTxHashesOfPendingIndexes(secret, 0, 10, 'test'); + expect(txHashesInWindow).toHaveLength(0); }); it('single log found at a specific index', async () => { @@ -55,15 +67,16 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(1); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: index, highestIndex: index }], - txHash, - 'test', - ); + // Verify that the pending index was stored for this txHash + const txHashesInRange = await taggingStore.getTxHashesOfPendingIndexes(secret, index, index + 1, 'test'); + expect(txHashesInRange).toHaveLength(1); + expect(txHashesInRange[0].equals(txHash)).toBe(true); + + // Verify the last used index is correct + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index); }); - it('for multiple logs with same txHash stores full index range', async () => { + it('for multiple logs with same txHash stores the highest index', async () => { const txHash = TxHash.random(); const index1 = 3; const index2 = 7; @@ -85,12 +98,17 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(1); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: index1, highestIndex: index2 }], - txHash, - 'test', - ); + // Verify that only the highest index (7) was stored for this txHash and secret + const txHashesAtIndex2 = await taggingStore.getTxHashesOfPendingIndexes(secret, index2, index2 + 1, 'test'); + expect(txHashesAtIndex2).toHaveLength(1); + expect(txHashesAtIndex2[0].equals(txHash)).toBe(true); + + // Verify the lower index is not stored separately + const txHashesAtIndex1 = await taggingStore.getTxHashesOfPendingIndexes(secret, index1, index1 + 1, 'test'); + expect(txHashesAtIndex1).toHaveLength(0); + + // Verify the last used index is the highest + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index2); }); it('multiple logs with different txHashes', async () => { @@ -116,17 +134,17 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(2); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: index1, highestIndex: index1 }], - txHash1, - 'test', - ); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: index2, highestIndex: index2 }], - txHash2, - 'test', - ); + // Verify that both txHashes have their respective indexes stored + const txHashesAtIndex1 = await taggingStore.getTxHashesOfPendingIndexes(secret, index1, index1 + 1, 'test'); + expect(txHashesAtIndex1).toHaveLength(1); + expect(txHashesAtIndex1[0].equals(txHash1)).toBe(true); + + const txHashesAtIndex2 = await taggingStore.getTxHashesOfPendingIndexes(secret, index2, index2 + 1, 'test'); + expect(txHashesAtIndex2).toHaveLength(1); + expect(txHashesAtIndex2[0].equals(txHash2)).toBe(true); + + // Verify the last used index is the highest + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index2); }); // Expected to happen if sending logs from multiple PXEs at a similar time. @@ -144,17 +162,15 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(2); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: index, highestIndex: index }], - txHash1, - 'test', - ); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: index, highestIndex: index }], - txHash2, - 'test', - ); + // Verify that both txHashes have the same index stored + const txHashesAtIndex = await taggingStore.getTxHashesOfPendingIndexes(secret, index, index + 1, 'test'); + expect(txHashesAtIndex).toHaveLength(2); + const txHashStrings = txHashesAtIndex.map(h => h.toString()); + expect(txHashStrings).toContain(txHash1.toString()); + expect(txHashStrings).toContain(txHash2.toString()); + + // Verify the last used index is correct + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(index); }); it('complex scenario: multiple txHashes with multiple indexes', async () => { @@ -162,11 +178,10 @@ describe('loadAndStoreNewTaggingIndexes', () => { const txHash2 = TxHash.random(); const txHash3 = TxHash.random(); - // txHash1 has logs at index 1, 2 and 8 → range [1, 8] - // txHash2 has logs at index 3 and 5 → range [3, 5] - // txHash3 has a log at index 9 → range [9, 9] + // txHash1 has logs at index 1 and 8 (should store 8) + // txHash2 has logs at index 3 and 5 (should store 5) + // txHash3 has a log at index 9 (should store 9) const tag1 = await computeSiloedTagForIndex(1); - const tag2 = await computeSiloedTagForIndex(2); const tag3 = await computeSiloedTagForIndex(3); const tag5 = await computeSiloedTagForIndex(5); const tag8 = await computeSiloedTagForIndex(8); @@ -177,8 +192,6 @@ describe('loadAndStoreNewTaggingIndexes', () => { tags.map((t: SiloedTag) => { if (t.equals(tag1)) { return [makeLog(txHash1, tag1.value)]; - } else if (t.equals(tag2)) { - return [makeLog(txHash1, tag1.value)]; } else if (t.equals(tag3)) { return [makeLog(txHash2, tag3.value)]; } else if (t.equals(tag5)) { @@ -195,22 +208,27 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, 0, 10, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(3); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: 1, highestIndex: 8 }], - txHash1, - 'test', - ); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: 3, highestIndex: 5 }], - txHash2, - 'test', - ); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: 9, highestIndex: 9 }], - txHash3, - 'test', - ); + // Verify txHash1 has highest index 8 (should not be at index 1) + const txHashesAtIndex1 = await taggingStore.getTxHashesOfPendingIndexes(secret, 1, 2, 'test'); + expect(txHashesAtIndex1).toHaveLength(0); + const txHashesAtIndex8 = await taggingStore.getTxHashesOfPendingIndexes(secret, 8, 9, 'test'); + expect(txHashesAtIndex8).toHaveLength(1); + expect(txHashesAtIndex8[0].equals(txHash1)).toBe(true); + + // Verify txHash2 has highest index 5 (should not be at index 3) + const txHashesAtIndex3 = await taggingStore.getTxHashesOfPendingIndexes(secret, 3, 4, 'test'); + expect(txHashesAtIndex3).toHaveLength(0); + const txHashesAtIndex5 = await taggingStore.getTxHashesOfPendingIndexes(secret, 5, 6, 'test'); + expect(txHashesAtIndex5).toHaveLength(1); + expect(txHashesAtIndex5[0].equals(txHash2)).toBe(true); + + // Verify txHash3 has index 9 + const txHashesAtIndex9 = await taggingStore.getTxHashesOfPendingIndexes(secret, 9, 10, 'test'); + expect(txHashesAtIndex9).toHaveLength(1); + expect(txHashesAtIndex9[0].equals(txHash3)).toBe(true); + + // Verify the last used index is the highest + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(9); }); it('start is inclusive and end is exclusive', async () => { @@ -238,12 +256,16 @@ describe('loadAndStoreNewTaggingIndexes', () => { await loadAndStoreNewTaggingIndexes(secret, start, end, aztecNode, taggingStore, MOCK_ANCHOR_BLOCK_HASH, 'test'); - // Only the log at start should be stored; end is exclusive - expect(taggingStore.storePendingIndexes).toHaveBeenCalledTimes(1); - expect(taggingStore.storePendingIndexes).toHaveBeenCalledWith( - [{ extendedSecret: secret, lowestIndex: start, highestIndex: start }], - txHashAtStart, - 'test', - ); + // Verify that the log at start (inclusive) was processed + const txHashesAtStart = await taggingStore.getTxHashesOfPendingIndexes(secret, start, start + 1, 'test'); + expect(txHashesAtStart).toHaveLength(1); + expect(txHashesAtStart[0].equals(txHashAtStart)).toBe(true); + + // Verify that the log at end (exclusive) was NOT processed + const txHashesAtEnd = await taggingStore.getTxHashesOfPendingIndexes(secret, end, end + 1, 'test'); + expect(txHashesAtEnd).toHaveLength(0); + + // Verify the last used index is the start index (since end was not processed) + expect(await taggingStore.getLastUsedIndex(secret, 'test')).toBe(start); }); }); diff --git a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts index 3979f5007189..5558c1097cba 100644 --- a/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts +++ b/yarn-project/pxe/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts @@ -16,7 +16,6 @@ import { getAllPrivateLogsByTags } from '../../get_all_logs_by_tags.js'; * @param end - The ending index (exclusive) of the window to process. * @param aztecNode - The Aztec node instance to query for logs. * @param taggingStore - The data provider to store pending indexes. - * @param anchorBlockHash - Hash of a block to use as reference block when querying node. * @param jobId - Job identifier, used to keep writes in-memory until they can be persisted in a data integrity * preserving way. */ @@ -35,13 +34,12 @@ export async function loadAndStoreNewTaggingIndexes( ); const txsForTags = await getTxsContainingTags(siloedTagsForWindow, aztecNode, anchorBlockHash); - const txIndexesMap = getTxIndexesMap(txsForTags, start, siloedTagsForWindow.length); + const highestIndexMap = getTxHighestIndexMap(txsForTags, start, siloedTagsForWindow.length); - // Now we iterate over the map, construct the tagging index ranges and store them in the db. - for (const [txHashStr, indexes] of txIndexesMap.entries()) { + // Now we iterate over the map, reconstruct the preTags and tx hash and store them in the db. + for (const [txHashStr, highestIndex] of highestIndexMap.entries()) { const txHash = TxHash.fromString(txHashStr); - const ranges = [{ extendedSecret, lowestIndex: Math.min(...indexes), highestIndex: Math.max(...indexes) }]; - await taggingStore.storePendingIndexes(ranges, txHash, jobId); + await taggingStore.storePendingIndexes([{ extendedSecret, index: highestIndex }], txHash, jobId); } } @@ -58,28 +56,20 @@ async function getTxsContainingTags( return allLogs.map(logs => logs.map(log => log.txHash)); } -// Returns a map of txHash to all indexes for that txHash. -function getTxIndexesMap(txHashesForTags: TxHash[][], start: number, count: number): Map { +// Returns a map of txHash to the highest index for that txHash. +function getTxHighestIndexMap(txHashesForTags: TxHash[][], start: number, count: number): Map { if (txHashesForTags.length !== count) { throw new Error(`Number of tx hashes arrays does not match number of tags. ${txHashesForTags.length} !== ${count}`); } - const indexesMap = new Map(); - // Iterate over indexes + const highestIndexMap = new Map(); for (let i = 0; i < txHashesForTags.length; i++) { const taggingIndex = start + i; const txHashesForTag = txHashesForTags[i]; - // iterate over tx hashes that used that index (tag) for (const txHash of txHashesForTag) { const key = txHash.toString(); - const existing = indexesMap.get(key); - // Add the index to the tx's indexes - if (existing) { - existing.push(taggingIndex); - } else { - indexesMap.set(key, [taggingIndex]); - } + highestIndexMap.set(key, Math.max(highestIndexMap.get(key) ?? 0, taggingIndex)); } } - return indexesMap; + return highestIndexMap; } diff --git a/yarn-project/stdlib/src/logs/index.ts b/yarn-project/stdlib/src/logs/index.ts index 540d1fe99698..2e25c40da7c3 100644 --- a/yarn-project/stdlib/src/logs/index.ts +++ b/yarn-project/stdlib/src/logs/index.ts @@ -1,6 +1,5 @@ export * from './extended_directional_app_tagging_secret.js'; export * from './pre_tag.js'; -export * from './tagging_index_range.js'; export * from './contract_class_log.js'; export * from './public_log.js'; export * from './private_log.js'; diff --git a/yarn-project/stdlib/src/logs/tagging_index_range.ts b/yarn-project/stdlib/src/logs/tagging_index_range.ts deleted file mode 100644 index 6392ac8fd26a..000000000000 --- a/yarn-project/stdlib/src/logs/tagging_index_range.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { schemas } from '@aztec/foundation/schemas'; - -import { z } from 'zod'; - -import { - type ExtendedDirectionalAppTaggingSecret, - ExtendedDirectionalAppTaggingSecretSchema, -} from './extended_directional_app_tagging_secret.js'; - -/** - * Represents a range of tagging indexes for a given extended directional app tagging secret. Used to track the lowest - * and highest indexes used in a transaction for a given (sender, recipient, app/contract) tuple. - */ -export type TaggingIndexRange = { - extendedSecret: ExtendedDirectionalAppTaggingSecret; - lowestIndex: number; - highestIndex: number; -}; - -export const TaggingIndexRangeSchema = z.object({ - extendedSecret: ExtendedDirectionalAppTaggingSecretSchema, - lowestIndex: schemas.Integer, - highestIndex: schemas.Integer, -}); diff --git a/yarn-project/stdlib/src/tx/private_execution_result.ts b/yarn-project/stdlib/src/tx/private_execution_result.ts index 4432901b8901..4ddd06352e08 100644 --- a/yarn-project/stdlib/src/tx/private_execution_result.ts +++ b/yarn-project/stdlib/src/tx/private_execution_result.ts @@ -11,7 +11,7 @@ import { PrivateCircuitPublicInputs } from '../kernel/private_circuit_public_inp import type { IsEmpty } from '../kernel/utils/interfaces.js'; import { sortByCounter } from '../kernel/utils/order_and_comparison.js'; import { ContractClassLog, ContractClassLogFields } from '../logs/contract_class_log.js'; -import { type TaggingIndexRange, TaggingIndexRangeSchema } from '../logs/tagging_index_range.js'; +import { type PreTag, PreTagSchema } from '../logs/pre_tag.js'; import { Note } from '../note/note.js'; import { type ZodFor, mapSchema, schemas } from '../schemas/index.js'; import { HashedValues } from './hashed_values.js'; @@ -137,8 +137,8 @@ export class PrivateCallExecutionResult { public returnValues: Fr[], /** The offchain effects emitted during execution of this function call via the `emit_offchain_effect` oracle. */ public offchainEffects: { data: Fr[] }[], - /** The tagging index ranges used in this tx to compute tags for private logs */ - public taggingIndexRanges: TaggingIndexRange[], + /** The pre-tags used in this tx to compute tags for private logs */ + public preTags: PreTag[], /** The nested executions. */ public nestedExecutionResults: PrivateCallExecutionResult[], /** @@ -161,7 +161,7 @@ export class PrivateCallExecutionResult { noteHashNullifierCounterMap: mapSchema(z.coerce.number(), z.number()), returnValues: z.array(schemas.Fr), offchainEffects: z.array(z.object({ data: z.array(schemas.Fr) })), - taggingIndexRanges: z.array(TaggingIndexRangeSchema), + preTags: z.array(PreTagSchema), nestedExecutionResults: z.array(z.lazy(() => PrivateCallExecutionResult.schema)), contractClassLogs: z.array(CountedContractClassLog.schema), }) @@ -178,7 +178,7 @@ export class PrivateCallExecutionResult { fields.noteHashNullifierCounterMap, fields.returnValues, fields.offchainEffects, - fields.taggingIndexRanges, + fields.preTags, fields.nestedExecutionResults, fields.contractClassLogs, ); From 1d95cd0ced3eb63eec21683299a2e02ba5b2e608 Mon Sep 17 00:00:00 2001 From: PhilWindle <60546371+PhilWindle@users.noreply.github.com> Date: Mon, 16 Mar 2026 11:15:42 +0000 Subject: [PATCH 05/17] feat: Implement commit all and revert all for world state checkpoints (#21532) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Adds depth-aware `commitAllCheckpointsTo(depth)` and `revertAllCheckpointsTo(depth)` to the world state checkpoint system. These revert/commit all checkpoints at or above the given depth (inclusive), preserving any checkpoints created by callers below that depth. - `createCheckpoint()` now returns the depth of the newly created checkpoint, threading it through the full C++ async callback chain (cache → tree store → append-only tree → world state → NAPI → TypeScript). - `ForkCheckpoint` stores its depth and exposes `revertToCheckpoint()` which encapsulates the revert-to-depth pattern, replacing the previous `revertAllCheckpoints()` + `markCompleted()` two-step. - The public processor uses `revertToCheckpoint()` on tx timeout/panic, so per-tx reverts no longer destroy checkpoints created by callers (e.g., `CheckpointBuilder`). ## Changes **C++ (barretenberg)** - `ContentAddressedCache`: `checkpoint()` returns depth, new `commit_to_depth()`/`revert_to_depth()` methods - `CachedContentAddressedTreeStore`: passes through depth-aware operations - `ContentAddressedAppendOnlyTree`: `CheckpointCallback` now receives `TypedResponse` with depth - `WorldState`: `checkpoint()` returns depth, `commit_all_checkpoints_to`/`revert_all_checkpoints_to` take required depth - NAPI layer: new `ForkIdWithDepthRequest`/`CheckpointDepthResponse` message types **TypeScript** - `MerkleTreeCheckpointOperations` interface: `createCheckpoint()` returns `Promise`, depth is required on `commitAllCheckpointsTo`/`revertAllCheckpointsTo` - `MerkleTreesFacade`: passes depth through native message channel - `ForkCheckpoint`: stores depth, new `revertToCheckpoint()` method - `PublicProcessor`: uses `checkpoint.revertToCheckpoint()` on error paths **Tests** - C++ cache tests: depth return, `commit_to_depth`, `revert_to_depth`, edge cases - C++ append-only tree tests: depth return, commit/revert to depth - TypeScript native world state tests: depth return, commit/revert to depth, backward compat - TypeScript fork checkpoint unit tests - TypeScript public processor tests: verifies depth passed on revert ## Test plan - C++ cache tests pass (`crypto_content_addressed_cache_tests`) - C++ append-only tree tests pass (`crypto_content_addressed_append_only_tree_tests`) - TypeScript `native_world_state.test.ts` passes - TypeScript `fork_checkpoint.test.ts` passes - TypeScript `public_processor.test.ts` passes - TypeScript `timeout_race.test.ts` passes --- .../content_addressed_append_only_tree.hpp | 48 +++- ...ontent_addressed_append_only_tree.test.cpp | 96 +++++++- .../cached_content_addressed_tree_store.hpp | 35 ++- .../node_store/content_addressed_cache.hpp | 33 ++- .../content_addressed_cache.test.cpp | 207 ++++++++++++++++++ .../crypto/merkle_tree/response.hpp | 11 + .../crypto/merkle_tree/test_fixtures.hpp | 30 ++- .../nodejs_module/world_state/world_state.cpp | 22 +- .../nodejs_module/world_state/world_state.hpp | 4 +- .../world_state/world_state_message.hpp | 11 + .../barretenberg/world_state/world_state.cpp | 26 ++- .../barretenberg/world_state/world_state.hpp | 6 +- .../src/public/hinting_db_sources.ts | 14 +- .../apps_tests/timeout_race.test.ts | 11 +- .../public_processor/guarded_merkle_tree.ts | 10 +- .../public_processor/public_processor.test.ts | 41 +++- .../public_processor/public_processor.ts | 18 +- .../src/interfaces/merkle_tree_operations.ts | 26 +-- .../src/native/fork_checkpoint.test.ts | 71 ++++++ .../world-state/src/native/fork_checkpoint.ts | 22 +- .../src/native/merkle_trees_facade.ts | 19 +- .../world-state/src/native/message.ts | 16 +- .../src/native/native_world_state.test.ts | 169 +++++++++++++- 23 files changed, 829 insertions(+), 117 deletions(-) create mode 100644 yarn-project/world-state/src/native/fork_checkpoint.test.ts diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp index 0a7eaf4d4905..121244e39dd7 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.hpp @@ -60,7 +60,7 @@ template class ContentAddressedAppendOn using UnwindBlockCallback = std::function&)>; using FinalizeBlockCallback = EmptyResponseCallback; using GetBlockForIndexCallback = std::function&)>; - using CheckpointCallback = EmptyResponseCallback; + using CheckpointCallback = std::function&)>; using CheckpointCommitCallback = EmptyResponseCallback; using CheckpointRevertCallback = EmptyResponseCallback; @@ -254,8 +254,11 @@ template class ContentAddressedAppendOn void checkpoint(const CheckpointCallback& on_completion); void commit_checkpoint(const CheckpointCommitCallback& on_completion); void revert_checkpoint(const CheckpointRevertCallback& on_completion); - void commit_all_checkpoints(const CheckpointCommitCallback& on_completion); - void revert_all_checkpoints(const CheckpointRevertCallback& on_completion); + void commit_all_checkpoints_to(const CheckpointCommitCallback& on_completion); + void revert_all_checkpoints_to(const CheckpointRevertCallback& on_completion); + void commit_to_depth(uint32_t target_depth, const CheckpointCommitCallback& on_completion); + void revert_to_depth(uint32_t target_depth, const CheckpointRevertCallback& on_completion); + uint32_t checkpoint_depth() const; protected: using ReadTransaction = typename Store::ReadTransaction; @@ -1002,7 +1005,11 @@ void ContentAddressedAppendOnlyTree::rollback(const Rollba template void ContentAddressedAppendOnlyTree::checkpoint(const CheckpointCallback& on_completion) { - auto job = [=, this]() { execute_and_report([=, this]() { store_->checkpoint(); }, on_completion); }; + auto job = [=, this]() { + execute_and_report( + [=, this](TypedResponse& response) { response.inner.depth = store_->checkpoint(); }, + on_completion); + }; workers_->enqueue(job); } @@ -1023,21 +1030,46 @@ void ContentAddressedAppendOnlyTree::revert_checkpoint( } template -void ContentAddressedAppendOnlyTree::commit_all_checkpoints( +void ContentAddressedAppendOnlyTree::commit_all_checkpoints_to( const CheckpointCommitCallback& on_completion) { - auto job = [=, this]() { execute_and_report([=, this]() { store_->commit_all_checkpoints(); }, on_completion); }; + auto job = [=, this]() { execute_and_report([=, this]() { store_->commit_all_checkpoints_to(); }, on_completion); }; workers_->enqueue(job); } template -void ContentAddressedAppendOnlyTree::revert_all_checkpoints( +void ContentAddressedAppendOnlyTree::revert_all_checkpoints_to( const CheckpointRevertCallback& on_completion) { - auto job = [=, this]() { execute_and_report([=, this]() { store_->revert_all_checkpoints(); }, on_completion); }; + auto job = [=, this]() { execute_and_report([=, this]() { store_->revert_all_checkpoints_to(); }, on_completion); }; + workers_->enqueue(job); +} + +template +void ContentAddressedAppendOnlyTree::commit_to_depth( + uint32_t target_depth, const CheckpointCommitCallback& on_completion) +{ + auto job = [=, this]() { + execute_and_report([=, this]() { store_->commit_to_depth(target_depth); }, on_completion); + }; + workers_->enqueue(job); +} + +template +void ContentAddressedAppendOnlyTree::revert_to_depth( + uint32_t target_depth, const CheckpointRevertCallback& on_completion) +{ + auto job = [=, this]() { + execute_and_report([=, this]() { store_->revert_to_depth(target_depth); }, on_completion); + }; workers_->enqueue(job); } +template +uint32_t ContentAddressedAppendOnlyTree::checkpoint_depth() const +{ + return store_->checkpoint_depth(); +} template void ContentAddressedAppendOnlyTree::remove_historic_block( const block_number_t& blockNumber, const RemoveHistoricBlockCallback& on_completion) diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index cecff513bb46..1518b2d2bfa5 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -2171,7 +2171,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_checkpoint_and_revert_fo commit_checkpoint_tree(tree, false); } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_commit_all_checkpoints) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_commit_all_checkpoints_to) { constexpr size_t depth = 10; uint32_t blockSize = 16; @@ -2223,7 +2223,7 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_commit_all_checkpoints) commit_checkpoint_tree(tree, false); } -TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_revert_all_checkpoints) +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_revert_all_checkpoints_to) { constexpr size_t depth = 10; uint32_t blockSize = 16; @@ -2274,3 +2274,95 @@ TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_revert_all_checkpoints) revert_checkpoint_tree(tree, false); commit_checkpoint_tree(tree, false); } + +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_commit_to_depth) +{ + constexpr size_t depth = 10; + uint32_t blockSize = 16; + std::string name = random_string(); + ThreadPoolPtr pool = make_thread_pool(1); + LMDBTreeStore::SharedPtr db = std::make_shared(_directory, name, _mapSize, _maxReaders); + + { + std::unique_ptr store = std::make_unique(name, depth, db); + TreeType tree(std::move(store), pool); + std::vector values = create_values(blockSize); + add_values(tree, values); + commit_tree(tree); + } + + std::unique_ptr store = std::make_unique(name, depth, db); + TreeType tree(std::move(store), pool); + + // Capture initial state + fr_sibling_path initial_path = get_sibling_path(tree, 0); + + // Depth 1 + checkpoint_tree(tree); + add_values(tree, create_values(blockSize)); + fr_sibling_path after_depth1_path = get_sibling_path(tree, 0); + + // Depth 2 + checkpoint_tree(tree); + add_values(tree, create_values(blockSize)); + + // Depth 3 + checkpoint_tree(tree); + add_values(tree, create_values(blockSize)); + fr_sibling_path after_depth3_path = get_sibling_path(tree, 0); + + // Commit depths 3 and 2 into depth 1, leaving depth at 1 + commit_tree_to_depth(tree, 1); + + // Data from all depths should be present + check_sibling_path(tree, 0, after_depth3_path); + + // Revert depth 1 — should go back to initial state + revert_checkpoint_tree(tree); + check_sibling_path(tree, 0, initial_path); +} + +TEST_F(PersistedContentAddressedAppendOnlyTreeTest, can_revert_to_depth) +{ + constexpr size_t depth = 10; + uint32_t blockSize = 16; + std::string name = random_string(); + ThreadPoolPtr pool = make_thread_pool(1); + LMDBTreeStore::SharedPtr db = std::make_shared(_directory, name, _mapSize, _maxReaders); + + { + std::unique_ptr store = std::make_unique(name, depth, db); + TreeType tree(std::move(store), pool); + std::vector values = create_values(blockSize); + add_values(tree, values); + commit_tree(tree); + } + + std::unique_ptr store = std::make_unique(name, depth, db); + TreeType tree(std::move(store), pool); + + // Depth 1 + checkpoint_tree(tree); + add_values(tree, create_values(blockSize)); + fr_sibling_path after_depth1_path = get_sibling_path(tree, 0); + + // Depth 2 + checkpoint_tree(tree); + add_values(tree, create_values(blockSize)); + + // Depth 3 + checkpoint_tree(tree); + add_values(tree, create_values(blockSize)); + + // Revert depths 3 and 2, leaving depth at 1 + revert_tree_to_depth(tree, 1); + + // Should be back to after depth 1 state + check_sibling_path(tree, 0, after_depth1_path); + + // Depth 1 still active — commit it + commit_checkpoint_tree(tree); + + // Should still have depth 1 data + check_sibling_path(tree, 0, after_depth1_path); +} diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp index d72d8686698f..01888967c45b 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp @@ -191,11 +191,14 @@ template class ContentAddressedCachedTreeStore { std::optional find_block_for_index(const index_t& index, ReadTransaction& tx) const; - void checkpoint(); + uint32_t checkpoint(); void revert_checkpoint(); void commit_checkpoint(); - void revert_all_checkpoints(); - void commit_all_checkpoints(); + void revert_all_checkpoints_to(); + void commit_all_checkpoints_to(); + void commit_to_depth(uint32_t depth); + void revert_to_depth(uint32_t depth); + uint32_t checkpoint_depth() const; private: using Cache = ContentAddressedCache; @@ -276,10 +279,10 @@ ContentAddressedCachedTreeStore::ContentAddressedCachedTreeStore( // These checkpoint apis modify the cache's internal state. // They acquire the mutex to prevent races with concurrent read/write operations (e.g., when C++ AVM simulation // runs on a worker thread while TypeScript calls revert_checkpoint from a timeout handler). -template void ContentAddressedCachedTreeStore::checkpoint() +template uint32_t ContentAddressedCachedTreeStore::checkpoint() { std::unique_lock lock(mtx_); - cache_.checkpoint(); + return cache_.checkpoint(); } template void ContentAddressedCachedTreeStore::revert_checkpoint() @@ -294,18 +297,36 @@ template void ContentAddressedCachedTreeStore void ContentAddressedCachedTreeStore::revert_all_checkpoints() +template void ContentAddressedCachedTreeStore::revert_all_checkpoints_to() { std::unique_lock lock(mtx_); cache_.revert_all(); } -template void ContentAddressedCachedTreeStore::commit_all_checkpoints() +template void ContentAddressedCachedTreeStore::commit_all_checkpoints_to() { std::unique_lock lock(mtx_); cache_.commit_all(); } +template void ContentAddressedCachedTreeStore::commit_to_depth(uint32_t depth) +{ + std::unique_lock lock(mtx_); + cache_.commit_to_depth(depth); +} + +template void ContentAddressedCachedTreeStore::revert_to_depth(uint32_t depth) +{ + std::unique_lock lock(mtx_); + cache_.revert_to_depth(depth); +} + +template uint32_t ContentAddressedCachedTreeStore::checkpoint_depth() const +{ + std::unique_lock lock(mtx_); + return cache_.depth(); +} + template index_t ContentAddressedCachedTreeStore::constrain_tree_size_to_only_committed( const RequestContext& requestContext, ReadTransaction& tx) const diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp index 31fb0a37ae17..530d82a211a4 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.hpp @@ -47,11 +47,14 @@ template class ContentAddressedCache { ContentAddressedCache& operator=(ContentAddressedCache&& other) noexcept = default; bool operator==(const ContentAddressedCache& other) const = default; - void checkpoint(); + uint32_t checkpoint(); void revert(); void commit(); void revert_all(); void commit_all(); + void commit_to_depth(uint32_t depth); + void revert_to_depth(uint32_t depth); + uint32_t depth() const; void reset(uint32_t depth); std::pair find_low_value(const uint256_t& new_leaf_key, @@ -126,9 +129,10 @@ template ContentAddressedCache::ContentA reset(depth); } -template void ContentAddressedCache::checkpoint() +template uint32_t ContentAddressedCache::checkpoint() { journals_.emplace_back(Journal(meta_)); + return static_cast(journals_.size()); } template void ContentAddressedCache::revert() @@ -240,6 +244,31 @@ template void ContentAddressedCache::rev revert(); } } +template uint32_t ContentAddressedCache::depth() const +{ + return static_cast(journals_.size()); +} + +template void ContentAddressedCache::commit_to_depth(uint32_t target_depth) +{ + if (target_depth >= journals_.size()) { + throw std::runtime_error("Invalid depth for commit_to_depth"); + } + while (journals_.size() > target_depth) { + commit(); + } +} + +template void ContentAddressedCache::revert_to_depth(uint32_t target_depth) +{ + if (target_depth >= journals_.size()) { + throw std::runtime_error("Invalid depth for revert_to_depth"); + } + while (journals_.size() > target_depth) { + revert(); + } +} + template void ContentAddressedCache::reset(uint32_t depth) { nodes_ = std::unordered_map(); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp index 5e6325244a40..e690308530a0 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/content_addressed_cache.test.cpp @@ -590,3 +590,210 @@ TEST_F(ContentAddressedCacheTest, reverts_remove_all_deeper_commits_2) reverts_remove_all_deeper_commits_2(max_index, depth, num_levels); } } + +TEST_F(ContentAddressedCacheTest, checkpoint_returns_depth) +{ + CacheType cache = create_cache(40); + EXPECT_EQ(cache.depth(), 0u); + EXPECT_EQ(cache.checkpoint(), 1u); + EXPECT_EQ(cache.checkpoint(), 2u); + EXPECT_EQ(cache.checkpoint(), 3u); + EXPECT_EQ(cache.depth(), 3u); +} + +TEST_F(ContentAddressedCacheTest, depth_reports_journal_count) +{ + CacheType cache = create_cache(40); + EXPECT_EQ(cache.depth(), 0u); + cache.checkpoint(); + EXPECT_EQ(cache.depth(), 1u); + cache.checkpoint(); + EXPECT_EQ(cache.depth(), 2u); + cache.commit(); + EXPECT_EQ(cache.depth(), 1u); + cache.revert(); + EXPECT_EQ(cache.depth(), 0u); +} + +TEST_F(ContentAddressedCacheTest, commit_to_depth_partial) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + CacheType original_cache = cache; + + // Depth 1: base checkpoint + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + + // Depth 2 + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + + // Depth 3 + cache.checkpoint(); + add_to_cache(cache, 300, 100, 1000); + + CacheType final_cache = cache; + + // Commit down to depth 1 (commits depths 3 and 2), preserve depth 1 + cache.commit_to_depth(1); + EXPECT_EQ(cache.depth(), 1u); + + // Data from depth 2+3 is merged into depth 1's scope + EXPECT_TRUE(final_cache.is_equivalent_to(cache)); + + // Now revert depth 1 — should go back to original + cache.revert(); + EXPECT_EQ(cache.depth(), 0u); + EXPECT_TRUE(original_cache.is_equivalent_to(cache)); +} + +TEST_F(ContentAddressedCacheTest, revert_to_depth_partial) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + + // Depth 1: base checkpoint + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + CacheType after_depth1_cache = cache; + + // Depth 2 + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + + // Depth 3 + cache.checkpoint(); + add_to_cache(cache, 300, 100, 1000); + + // Revert down to depth 1 (reverts depths 3 and 2), preserve depth 1 + cache.revert_to_depth(1); + EXPECT_EQ(cache.depth(), 1u); + + // Data from depth 2+3 is gone, state matches after depth 1 changes + EXPECT_TRUE(after_depth1_cache.is_equivalent_to(cache)); +} + +TEST_F(ContentAddressedCacheTest, commit_to_depth_0_is_commit_all) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 300, 100, 1000); + CacheType final_cache = cache; + + cache.commit_to_depth(0); + EXPECT_EQ(cache.depth(), 0u); + EXPECT_TRUE(final_cache.is_equivalent_to(cache)); + + // No more operations possible + EXPECT_THROW(cache.commit(), std::runtime_error); + EXPECT_THROW(cache.revert(), std::runtime_error); +} + +TEST_F(ContentAddressedCacheTest, revert_to_depth_0_is_revert_all) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + CacheType original_cache = cache; + + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 300, 100, 1000); + + cache.revert_to_depth(0); + EXPECT_EQ(cache.depth(), 0u); + EXPECT_TRUE(original_cache.is_equivalent_to(cache)); + + EXPECT_THROW(cache.commit(), std::runtime_error); + EXPECT_THROW(cache.revert(), std::runtime_error); +} + +TEST_F(ContentAddressedCacheTest, commit_to_depth_at_current_is_single_commit) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 300, 100, 1000); + CacheType final_cache = cache; + + // Commit only the top checkpoint (depth 3), leaving depth at 2 + EXPECT_EQ(cache.depth(), 3u); + cache.commit_to_depth(2); + EXPECT_EQ(cache.depth(), 2u); + EXPECT_TRUE(final_cache.is_equivalent_to(cache)); +} + +TEST_F(ContentAddressedCacheTest, revert_to_depth_at_current_is_single_revert) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + CacheType after_depth2_cache = cache; + + cache.checkpoint(); + add_to_cache(cache, 300, 100, 1000); + + // Revert only the top checkpoint (depth 3), leaving depth at 2 + EXPECT_EQ(cache.depth(), 3u); + cache.revert_to_depth(2); + EXPECT_EQ(cache.depth(), 2u); + EXPECT_TRUE(after_depth2_cache.is_equivalent_to(cache)); +} + +TEST_F(ContentAddressedCacheTest, revert_to_depth_preserves_lower_data) +{ + CacheType cache = create_cache(40); + add_to_cache(cache, 0, 100, 1000); + CacheType original_cache = cache; + + // Depth 1 + cache.checkpoint(); + add_to_cache(cache, 100, 100, 1000); + CacheType after_depth1_cache = cache; + + // Depth 2 + cache.checkpoint(); + add_to_cache(cache, 200, 100, 1000); + + // Revert depth 2 only, leaving depth at 1 + EXPECT_EQ(cache.depth(), 2u); + cache.revert_to_depth(1); + EXPECT_EQ(cache.depth(), 1u); + EXPECT_TRUE(after_depth1_cache.is_equivalent_to(cache)); + + // Commit depth 1 — depth 1 data persists + cache.commit(); + EXPECT_EQ(cache.depth(), 0u); + EXPECT_TRUE(after_depth1_cache.is_equivalent_to(cache)); +} + +TEST_F(ContentAddressedCacheTest, commit_to_depth_invalid_depth_throws) +{ + CacheType cache = create_cache(40); + cache.checkpoint(); + cache.checkpoint(); + EXPECT_EQ(cache.depth(), 2u); + + // target_depth >= current depth is invalid + EXPECT_THROW(cache.commit_to_depth(2), std::runtime_error); + EXPECT_THROW(cache.commit_to_depth(3), std::runtime_error); + EXPECT_THROW(cache.revert_to_depth(2), std::runtime_error); + EXPECT_THROW(cache.revert_to_depth(3), std::runtime_error); +} diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp index 8aba60bfa249..43d619161162 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/response.hpp @@ -32,6 +32,17 @@ struct TreeMetaResponse { TreeMetaResponse& operator=(TreeMetaResponse&& other) noexcept = default; }; +struct CheckpointResponse { + uint32_t depth; + + CheckpointResponse() = default; + ~CheckpointResponse() = default; + CheckpointResponse(const CheckpointResponse& other) = default; + CheckpointResponse(CheckpointResponse&& other) noexcept = default; + CheckpointResponse& operator=(const CheckpointResponse& other) = default; + CheckpointResponse& operator=(CheckpointResponse&& other) noexcept = default; +}; + struct AddDataResponse { index_t size; fr root; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp index ad736e292900..e7a56a52848f 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/test_fixtures.hpp @@ -257,10 +257,18 @@ template void rollback_tree(TreeType& tree) call_operation(completion); } -template void checkpoint_tree(TreeType& tree) +template uint32_t checkpoint_tree(TreeType& tree) { - auto completion = [&](auto completion) { tree.checkpoint(completion); }; - call_operation(completion); + Signal signal; + uint32_t depth = 0; + auto completion = [&](const TypedResponse& response) -> void { + EXPECT_EQ(response.success, true); + depth = response.inner.depth; + signal.signal_level(); + }; + tree.checkpoint(completion); + signal.wait_for_level(); + return depth; } template void commit_checkpoint_tree(TreeType& tree, bool expected_success = true) @@ -279,13 +287,25 @@ template void revert_checkpoint_tree(TreeType& tree, bool ex template void commit_all_tree_checkpoints(TreeType& tree, bool expected_success = true) { - auto completion = [&](auto completion) { tree.commit_all_checkpoints(completion); }; + auto completion = [&](auto completion) { tree.commit_all_checkpoints_to(completion); }; call_operation(completion, expected_success); } template void revert_all_tree_checkpoints(TreeType& tree, bool expected_success = true) { - auto completion = [&](auto completion) { tree.revert_all_checkpoints(completion); }; + auto completion = [&](auto completion) { tree.revert_all_checkpoints_to(completion); }; + call_operation(completion, expected_success); +} + +template void commit_tree_to_depth(TreeType& tree, uint32_t depth, bool expected_success = true) +{ + auto completion = [&](auto completion) { tree.commit_to_depth(depth, completion); }; + call_operation(completion, expected_success); +} + +template void revert_tree_to_depth(TreeType& tree, uint32_t depth, bool expected_success = true) +{ + auto completion = [&](auto completion) { tree.revert_to_depth(depth, completion); }; call_operation(completion, expected_success); } } // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp index 57604598396d..2386799b19a0 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp @@ -265,11 +265,11 @@ WorldStateWrapper::WorldStateWrapper(const Napi::CallbackInfo& info) _dispatcher.register_target( WorldStateMessageType::COMMIT_ALL_CHECKPOINTS, - [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return commit_all_checkpoints(obj, buffer); }); + [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return commit_all_checkpoints_to(obj, buffer); }); _dispatcher.register_target( WorldStateMessageType::REVERT_ALL_CHECKPOINTS, - [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return revert_all_checkpoints(obj, buffer); }); + [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return revert_all_checkpoints_to(obj, buffer); }); _dispatcher.register_target( WorldStateMessageType::COPY_STORES, @@ -843,10 +843,12 @@ bool WorldStateWrapper::checkpoint(msgpack::object& obj, msgpack::sbuffer& buffe TypedMessage request; obj.convert(request); - _ws->checkpoint(request.value.forkId); + uint32_t depth = _ws->checkpoint(request.value.forkId); MsgHeader header(request.header.messageId); - messaging::TypedMessage resp_msg(WorldStateMessageType::CREATE_CHECKPOINT, header, {}); + CheckpointDepthResponse resp_value{ depth }; + messaging::TypedMessage resp_msg( + WorldStateMessageType::CREATE_CHECKPOINT, header, resp_value); msgpack::pack(buffer, resp_msg); return true; @@ -880,12 +882,12 @@ bool WorldStateWrapper::revert_checkpoint(msgpack::object& obj, msgpack::sbuffer return true; } -bool WorldStateWrapper::commit_all_checkpoints(msgpack::object& obj, msgpack::sbuffer& buffer) +bool WorldStateWrapper::commit_all_checkpoints_to(msgpack::object& obj, msgpack::sbuffer& buffer) { - TypedMessage request; + TypedMessage request; obj.convert(request); - _ws->commit_all_checkpoints(request.value.forkId); + _ws->commit_all_checkpoints_to(request.value.forkId, request.value.depth); MsgHeader header(request.header.messageId); messaging::TypedMessage resp_msg(WorldStateMessageType::COMMIT_ALL_CHECKPOINTS, header, {}); @@ -894,12 +896,12 @@ bool WorldStateWrapper::commit_all_checkpoints(msgpack::object& obj, msgpack::sb return true; } -bool WorldStateWrapper::revert_all_checkpoints(msgpack::object& obj, msgpack::sbuffer& buffer) +bool WorldStateWrapper::revert_all_checkpoints_to(msgpack::object& obj, msgpack::sbuffer& buffer) { - TypedMessage request; + TypedMessage request; obj.convert(request); - _ws->revert_all_checkpoints(request.value.forkId); + _ws->revert_all_checkpoints_to(request.value.forkId, request.value.depth); MsgHeader header(request.header.messageId); messaging::TypedMessage resp_msg(WorldStateMessageType::REVERT_ALL_CHECKPOINTS, header, {}); diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp index 02945f8899a9..cd4f0d02e8e1 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp @@ -75,8 +75,8 @@ class WorldStateWrapper : public Napi::ObjectWrap { bool checkpoint(msgpack::object& obj, msgpack::sbuffer& buffer); bool commit_checkpoint(msgpack::object& obj, msgpack::sbuffer& buffer); bool revert_checkpoint(msgpack::object& obj, msgpack::sbuffer& buffer); - bool commit_all_checkpoints(msgpack::object& obj, msgpack::sbuffer& buffer); - bool revert_all_checkpoints(msgpack::object& obj, msgpack::sbuffer& buffer); + bool commit_all_checkpoints_to(msgpack::object& obj, msgpack::sbuffer& buffer); + bool revert_all_checkpoints_to(msgpack::object& obj, msgpack::sbuffer& buffer); bool copy_stores(msgpack::object& obj, msgpack::sbuffer& buffer); }; diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp index 388cdc13f0bb..7c19e6010233 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp @@ -88,6 +88,17 @@ struct ForkIdOnlyRequest { MSGPACK_FIELDS(forkId); }; +struct ForkIdWithDepthRequest { + uint64_t forkId; + uint32_t depth; + SERIALIZATION_FIELDS(forkId, depth); +}; + +struct CheckpointDepthResponse { + uint32_t depth; + SERIALIZATION_FIELDS(depth); +}; + struct TreeIdAndRevisionRequest { MerkleTreeId treeId; WorldStateRevision revision; diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index 93ab14689978..f221e93fcf6f 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -1062,16 +1062,16 @@ bool WorldState::determine_if_synched(std::array& metaRespo return true; } -void WorldState::checkpoint(const uint64_t& forkId) +uint32_t WorldState::checkpoint(const uint64_t& forkId) { Fork::SharedPtr fork = retrieve_fork(forkId); Signal signal(static_cast(fork->_trees.size())); - std::array local; + std::array, NUM_TREES> local; std::mutex mtx; for (auto& [id, tree] : fork->_trees) { std::visit( [&signal, &local, id, &mtx](auto&& wrapper) { - wrapper.tree->checkpoint([&signal, &local, &mtx, id](Response& resp) { + wrapper.tree->checkpoint([&signal, &local, &mtx, id](TypedResponse& resp) { { std::lock_guard lock(mtx); local[id] = std::move(resp); @@ -1087,6 +1087,8 @@ void WorldState::checkpoint(const uint64_t& forkId) throw std::runtime_error(m.message); } } + // All trees have the same checkpoint depth; return it from the first tree's response + return local[0].inner.depth; } void WorldState::commit_checkpoint(const uint64_t& forkId) @@ -1143,7 +1145,7 @@ void WorldState::revert_checkpoint(const uint64_t& forkId) } } -void WorldState::commit_all_checkpoints(const uint64_t& forkId) +void WorldState::commit_all_checkpoints_to(const uint64_t& forkId, uint32_t depth) { Fork::SharedPtr fork = retrieve_fork(forkId); Signal signal(static_cast(fork->_trees.size())); @@ -1151,14 +1153,15 @@ void WorldState::commit_all_checkpoints(const uint64_t& forkId) std::mutex mtx; for (auto& [id, tree] : fork->_trees) { std::visit( - [&signal, &local, id, &mtx](auto&& wrapper) { - wrapper.tree->commit_all_checkpoints([&signal, &local, &mtx, id](Response& resp) { + [&signal, &local, id, &mtx, depth](auto&& wrapper) { + auto callback = [&signal, &local, &mtx, id](Response& resp) { { std::lock_guard lock(mtx); local[id] = std::move(resp); } signal.signal_decrement(); - }); + }; + wrapper.tree->commit_to_depth(depth, callback); }, tree); } @@ -1170,7 +1173,7 @@ void WorldState::commit_all_checkpoints(const uint64_t& forkId) } } -void WorldState::revert_all_checkpoints(const uint64_t& forkId) +void WorldState::revert_all_checkpoints_to(const uint64_t& forkId, uint32_t depth) { Fork::SharedPtr fork = retrieve_fork(forkId); Signal signal(static_cast(fork->_trees.size())); @@ -1178,14 +1181,15 @@ void WorldState::revert_all_checkpoints(const uint64_t& forkId) std::mutex mtx; for (auto& [id, tree] : fork->_trees) { std::visit( - [&signal, &local, id, &mtx](auto&& wrapper) { - wrapper.tree->revert_all_checkpoints([&signal, &local, &mtx, id](Response& resp) { + [&signal, &local, id, &mtx, depth](auto&& wrapper) { + auto callback = [&signal, &local, &mtx, id](Response& resp) { { std::lock_guard lock(mtx); local[id] = std::move(resp); } signal.signal_decrement(); - }); + }; + wrapper.tree->revert_to_depth(depth, callback); }, tree); } diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp index bae021ab163f..66d045cb75e8 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp @@ -287,11 +287,11 @@ class WorldState { const std::vector& nullifiers, const std::vector& public_writes); - void checkpoint(const uint64_t& forkId); + uint32_t checkpoint(const uint64_t& forkId); void commit_checkpoint(const uint64_t& forkId); void revert_checkpoint(const uint64_t& forkId); - void commit_all_checkpoints(const uint64_t& forkId); - void revert_all_checkpoints(const uint64_t& forkId); + void commit_all_checkpoints_to(const uint64_t& forkId, uint32_t depth); + void revert_all_checkpoints_to(const uint64_t& forkId, uint32_t depth); private: std::shared_ptr _workers; diff --git a/yarn-project/simulator/src/public/hinting_db_sources.ts b/yarn-project/simulator/src/public/hinting_db_sources.ts index 79044c631e64..85f8ab422ccf 100644 --- a/yarn-project/simulator/src/public/hinting_db_sources.ts +++ b/yarn-project/simulator/src/public/hinting_db_sources.ts @@ -410,12 +410,12 @@ export class HintingMerkleWriteOperations implements MerkleTreeWriteOperations { } } - public async createCheckpoint(): Promise { + public async createCheckpoint(): Promise { const actionCounter = this.checkpointActionCounter++; const oldCheckpointId = this.getCurrentCheckpointId(); const treesStateHash = await this.getTreesStateHash(); - await this.db.createCheckpoint(); + const depth = await this.db.createCheckpoint(); this.checkpointStack.push(this.nextCheckpointId++); const newCheckpointId = this.getCurrentCheckpointId(); @@ -424,14 +424,16 @@ export class HintingMerkleWriteOperations implements MerkleTreeWriteOperations { HintingMerkleWriteOperations.log.trace( `[createCheckpoint:${actionCounter}] Checkpoint evolved ${oldCheckpointId} -> ${newCheckpointId} at trees state ${treesStateHash}.`, ); + + return depth; } - public commitAllCheckpoints(): Promise { - throw new Error('commitAllCheckpoints is not supported in HintingMerkleWriteOperations.'); + public commitAllCheckpointsTo(_depth: number): Promise { + throw new Error('commitAllCheckpointsTo is not supported in HintingMerkleWriteOperations.'); } - public revertAllCheckpoints(): Promise { - throw new Error('revertAllCheckpoints is not supported in HintingMerkleWriteOperations.'); + public revertAllCheckpointsTo(_depth: number): Promise { + throw new Error('revertAllCheckpointsTo is not supported in HintingMerkleWriteOperations.'); } public async commitCheckpoint(): Promise { diff --git a/yarn-project/simulator/src/public/public_processor/apps_tests/timeout_race.test.ts b/yarn-project/simulator/src/public/public_processor/apps_tests/timeout_race.test.ts index 3d06b2323916..2d16e26e602f 100644 --- a/yarn-project/simulator/src/public/public_processor/apps_tests/timeout_race.test.ts +++ b/yarn-project/simulator/src/public/public_processor/apps_tests/timeout_race.test.ts @@ -20,7 +20,7 @@ import { GasFees } from '@aztec/stdlib/gas'; import { MerkleTreeId, merkleTreeIds } from '@aztec/stdlib/trees'; import { GlobalVariables } from '@aztec/stdlib/tx'; import { getTelemetryClient } from '@aztec/telemetry-client'; -import { NativeWorldStateService } from '@aztec/world-state'; +import { ForkCheckpoint, NativeWorldStateService } from '@aztec/world-state'; import { jest } from '@jest/globals'; @@ -115,7 +115,7 @@ describe('PublicProcessor C++ Timeout Race Condition', () => { } // Create checkpoint BEFORE simulation (like PublicProcessor does) - await merkleTrees.createCheckpoint(); + const forkCheckpoint = await ForkCheckpoint.new(merkleTrees); // Create transaction that calls the spammer contract const tx = await tester.createTx(admin, [], [{ address: contractAddress, args: callArgs }]); @@ -136,11 +136,8 @@ describe('PublicProcessor C++ Timeout Race Condition', () => { } // BUG - No cancel, C++ continues running during reverts below - // Revert checkpoint - await merkleTrees.revertCheckpoint(); - - // Clean up - await merkleTrees.revertAllCheckpoints(); + // Clean up - revert all changes + await forkCheckpoint.revertToCheckpoint(); // Wait for simulation promise for cleanup await Promise.race([simulationPromise.catch(() => {}), sleep(100)]); diff --git a/yarn-project/simulator/src/public/public_processor/guarded_merkle_tree.ts b/yarn-project/simulator/src/public/public_processor/guarded_merkle_tree.ts index bcbd818a03f0..71133c4a2ebf 100644 --- a/yarn-project/simulator/src/public/public_processor/guarded_merkle_tree.ts +++ b/yarn-project/simulator/src/public/public_processor/guarded_merkle_tree.ts @@ -134,7 +134,7 @@ export class GuardedMerkleTreeOperations implements MerkleTreeWriteOperations { ): Promise<(BlockNumber | undefined)[]> { return this.guardAndPush(() => this.target.getBlockNumbersForLeafIndices(treeId, leafIndices)); } - createCheckpoint(): Promise { + createCheckpoint(): Promise { return this.guardAndPush(() => this.target.createCheckpoint()); } commitCheckpoint(): Promise { @@ -143,11 +143,11 @@ export class GuardedMerkleTreeOperations implements MerkleTreeWriteOperations { revertCheckpoint(): Promise { return this.guardAndPush(() => this.target.revertCheckpoint()); } - commitAllCheckpoints(): Promise { - return this.guardAndPush(() => this.target.commitAllCheckpoints()); + commitAllCheckpointsTo(depth: number): Promise { + return this.guardAndPush(() => this.target.commitAllCheckpointsTo(depth)); } - revertAllCheckpoints(): Promise { - return this.guardAndPush(() => this.target.revertAllCheckpoints()); + revertAllCheckpointsTo(depth: number): Promise { + return this.guardAndPush(() => this.target.revertAllCheckpointsTo(depth)); } findSiblingPaths( treeId: ID, diff --git a/yarn-project/simulator/src/public/public_processor/public_processor.test.ts b/yarn-project/simulator/src/public/public_processor/public_processor.test.ts index 907ee1f907c6..23a019bb6080 100644 --- a/yarn-project/simulator/src/public/public_processor/public_processor.test.ts +++ b/yarn-project/simulator/src/public/public_processor/public_processor.test.ts @@ -91,6 +91,7 @@ describe('public_processor', () => { new PublicDataTreeLeafPreimage(new PublicDataTreeLeaf(Fr.ZERO, Fr.ZERO), /*nextKey=*/ Fr.ZERO, /*nextIndex=*/ 0n), ); merkleTree.getStateReference.mockResolvedValue(stateReference); + merkleTree.createCheckpoint.mockResolvedValue(1); publicTxSimulator.simulate.mockImplementation(() => { return Promise.resolve(mockedEnqueuedCallsResult); @@ -158,7 +159,7 @@ describe('public_processor', () => { expect(failed[0].error).toEqual(new Error(`Failed`)); expect(merkleTree.commitCheckpoint).toHaveBeenCalledTimes(0); - expect(merkleTree.revertCheckpoint).toHaveBeenCalledTimes(1); + expect(merkleTree.revertAllCheckpointsTo).toHaveBeenCalledWith(0); }); it('if a tx errors with assertion failure, public processor returns failed tx with its assertion message', async function () { @@ -173,7 +174,7 @@ describe('public_processor', () => { expect(failed[0].error.message).toMatch(/Forced assertion failure/); expect(merkleTree.commitCheckpoint).toHaveBeenCalledTimes(0); - expect(merkleTree.revertCheckpoint).toHaveBeenCalledTimes(1); + expect(merkleTree.revertAllCheckpointsTo).toHaveBeenCalledWith(0); }); it('does not attempt to overfill a block', async function () { @@ -314,11 +315,45 @@ describe('public_processor', () => { expect(failed[0].error.message).toMatch(/Not enough balance/i); expect(merkleTree.commitCheckpoint).toHaveBeenCalledTimes(0); - expect(merkleTree.revertCheckpoint).toHaveBeenCalledTimes(1); + expect(merkleTree.revertAllCheckpointsTo).toHaveBeenCalledWith(0); expect(merkleTree.sequentialInsert).toHaveBeenCalledTimes(0); }); }); + describe('checkpoint depth', () => { + it('calls revertAllCheckpointsTo with depth on tx failure', async function () { + merkleTree.createCheckpoint.mockResolvedValue(2); + publicTxSimulator.simulate.mockRejectedValue(new Error('Boom')); + + const tx = await mockTxWithPublicCalls(); + const [processed, failed] = await processor.process([tx]); + + expect(processed).toEqual([]); + expect(failed).toHaveLength(1); + expect(merkleTree.revertAllCheckpointsTo).toHaveBeenCalledWith(1); + expect(merkleTree.commitCheckpoint).not.toHaveBeenCalled(); + }); + + it('createCheckpoint is called for each tx', async function () { + const txs = await timesParallel(3, () => mockPrivateOnlyTx()); + + await processor.process(txs); + + expect(merkleTree.createCheckpoint).toHaveBeenCalledTimes(3); + }); + + it('commits checkpoint on successful tx', async function () { + const tx = await mockTxWithPublicCalls(); + + const [processed, failed] = await processor.process([tx]); + + expect(processed).toHaveLength(1); + expect(failed).toEqual([]); + expect(merkleTree.commitCheckpoint).toHaveBeenCalledTimes(1); + expect(merkleTree.revertAllCheckpointsTo).not.toHaveBeenCalled(); + }); + }); + // on uncaught error, public processor clears the tx-level cache entirely it('clears the tx-level cache entirely on uncaught error (like SETUP failure)', async function () { const tx = await mockTxWithPublicCalls(); diff --git a/yarn-project/simulator/src/public/public_processor/public_processor.ts b/yarn-project/simulator/src/public/public_processor/public_processor.ts index 45a3d9e6906e..20ce6fbaa3e4 100644 --- a/yarn-project/simulator/src/public/public_processor/public_processor.ts +++ b/yarn-project/simulator/src/public/public_processor/public_processor.ts @@ -325,14 +325,10 @@ export class PublicProcessor implements Traceable { // 1. At least one outstanding checkpoint that has not been committed (the one created before we processed the tx). // 2. Possible state updates on that checkpoint or any others created during execution. - // First we revert a checkpoint as managed by the ForkCheckpoint. This will revert whatever is the current checkpoint - // which may not be the one originally created by this object. But that is ok, we do this to fulfil the ForkCheckpoint - // lifecycle expectations and ensure it doesn't attempt to commit later on. - await checkpoint.revert(); - - // Now we want to revert any/all remaining checkpoints, destroying any outstanding state updates. - // This needs to be done directly on the underlying fork as the guarded fork has been stopped. - await this.guardedMerkleTree.getUnderlyingFork().revertAllCheckpoints(); + // Revert all checkpoints at or above this checkpoint's depth (inclusive), destroying any outstanding state + // updates from this tx and any nested checkpoints created during execution. This preserves any checkpoints + // created by callers below our depth. + await checkpoint.revertToCheckpoint(); // Revert any contracts added to the DB for the tx. this.contractsDB.revertCheckpoint(); @@ -344,9 +340,9 @@ export class PublicProcessor implements Traceable { break; } - // Roll back state to start of TX before proceeding to next TX - await checkpoint.revert(); - await this.guardedMerkleTree.getUnderlyingFork().revertAllCheckpoints(); + // Roll back state to start of TX before proceeding to next TX. + // Reverts all checkpoints at or above this checkpoint's depth, preserving any caller checkpoints below. + await checkpoint.revertToCheckpoint(); this.contractsDB.revertCheckpoint(); const errorMessage = err instanceof Error || err instanceof AssertionError ? err.message : 'Unknown error'; this.log.warn(`Failed to process tx ${txHash.toString()}: ${errorMessage} ${err?.stack}`); diff --git a/yarn-project/stdlib/src/interfaces/merkle_tree_operations.ts b/yarn-project/stdlib/src/interfaces/merkle_tree_operations.ts index 63ee8e82f9b1..29625e9d4c43 100644 --- a/yarn-project/stdlib/src/interfaces/merkle_tree_operations.ts +++ b/yarn-project/stdlib/src/interfaces/merkle_tree_operations.ts @@ -225,30 +225,20 @@ export interface MerkleTreeReadOperations { } export interface MerkleTreeCheckpointOperations { - /** - * Checkpoints the current fork state - */ - createCheckpoint(): Promise; + /** Checkpoints the current fork state. Returns the depth of the new checkpoint. */ + createCheckpoint(): Promise; - /** - * Commits the current checkpoint - */ + /** Commits the current checkpoint. */ commitCheckpoint(): Promise; - /** - * Reverts the current checkpoint - */ + /** Reverts the current checkpoint. */ revertCheckpoint(): Promise; - /** - * Commits all checkpoints - */ - commitAllCheckpoints(): Promise; + /** Commits all checkpoints above the given depth, leaving checkpoint depth at the given value. */ + commitAllCheckpointsTo(depth: number): Promise; - /** - * Reverts all checkpoints - */ - revertAllCheckpoints(): Promise; + /** Reverts all checkpoints above the given depth, leaving checkpoint depth at the given value. */ + revertAllCheckpointsTo(depth: number): Promise; } export interface MerkleTreeWriteOperations diff --git a/yarn-project/world-state/src/native/fork_checkpoint.test.ts b/yarn-project/world-state/src/native/fork_checkpoint.test.ts new file mode 100644 index 000000000000..787ccfab1221 --- /dev/null +++ b/yarn-project/world-state/src/native/fork_checkpoint.test.ts @@ -0,0 +1,71 @@ +import type { MerkleTreeCheckpointOperations } from '@aztec/stdlib/interfaces/server'; + +import { type MockProxy, mock } from 'jest-mock-extended'; + +import { ForkCheckpoint } from './fork_checkpoint.js'; + +describe('ForkCheckpoint', () => { + let fork: MockProxy; + + beforeEach(() => { + fork = mock(); + fork.createCheckpoint.mockResolvedValue(5); + fork.commitCheckpoint.mockResolvedValue(); + fork.revertCheckpoint.mockResolvedValue(); + }); + + it('stores depth from createCheckpoint', async () => { + const checkpoint = await ForkCheckpoint.new(fork); + expect(checkpoint.depth).toBe(5); + expect(fork.createCheckpoint).toHaveBeenCalledTimes(1); + }); + + it('commit calls commitCheckpoint on fork', async () => { + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.commit(); + expect(fork.commitCheckpoint).toHaveBeenCalledTimes(1); + }); + + it('revert calls revertCheckpoint on fork', async () => { + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.revert(); + expect(fork.revertCheckpoint).toHaveBeenCalledTimes(1); + }); + + it('revertToCheckpoint calls revertAllCheckpointsTo with depth', async () => { + fork.revertAllCheckpointsTo.mockResolvedValue(); + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.revertToCheckpoint(); + expect(fork.revertAllCheckpointsTo).toHaveBeenCalledWith(4); + }); + + it('revertToCheckpoint prevents subsequent commit', async () => { + fork.revertAllCheckpointsTo.mockResolvedValue(); + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.revertToCheckpoint(); + await checkpoint.commit(); + expect(fork.commitCheckpoint).not.toHaveBeenCalled(); + }); + + it('revertToCheckpoint is idempotent', async () => { + fork.revertAllCheckpointsTo.mockResolvedValue(); + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.revertToCheckpoint(); + await checkpoint.revertToCheckpoint(); + expect(fork.revertAllCheckpointsTo).toHaveBeenCalledTimes(1); + }); + + it('commit is idempotent', async () => { + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.commit(); + await checkpoint.commit(); + expect(fork.commitCheckpoint).toHaveBeenCalledTimes(1); + }); + + it('revert is idempotent', async () => { + const checkpoint = await ForkCheckpoint.new(fork); + await checkpoint.revert(); + await checkpoint.revert(); + expect(fork.revertCheckpoint).toHaveBeenCalledTimes(1); + }); +}); diff --git a/yarn-project/world-state/src/native/fork_checkpoint.ts b/yarn-project/world-state/src/native/fork_checkpoint.ts index c4172d689fc3..1672092ff0fe 100644 --- a/yarn-project/world-state/src/native/fork_checkpoint.ts +++ b/yarn-project/world-state/src/native/fork_checkpoint.ts @@ -3,11 +3,14 @@ import type { MerkleTreeCheckpointOperations } from '@aztec/stdlib/interfaces/se export class ForkCheckpoint { private completed = false; - private constructor(private readonly fork: MerkleTreeCheckpointOperations) {} + private constructor( + private readonly fork: MerkleTreeCheckpointOperations, + public readonly depth: number, + ) {} static async new(fork: MerkleTreeCheckpointOperations): Promise { - await fork.createCheckpoint(); - return new ForkCheckpoint(fork); + const depth = await fork.createCheckpoint(); + return new ForkCheckpoint(fork, depth); } async commit(): Promise { @@ -27,4 +30,17 @@ export class ForkCheckpoint { await this.fork.revertCheckpoint(); this.completed = true; } + + /** + * Reverts this checkpoint and any nested checkpoints created on top of it, + * leaving the checkpoint depth at the level it was before this checkpoint was created. + */ + async revertToCheckpoint(): Promise { + if (this.completed) { + return; + } + + await this.fork.revertAllCheckpointsTo(this.depth - 1); + this.completed = true; + } } diff --git a/yarn-project/world-state/src/native/merkle_trees_facade.ts b/yarn-project/world-state/src/native/merkle_trees_facade.ts index b7a107a8eb80..b8d4ca92b3e0 100644 --- a/yarn-project/world-state/src/native/merkle_trees_facade.ts +++ b/yarn-project/world-state/src/native/merkle_trees_facade.ts @@ -319,9 +319,10 @@ export class MerkleTreesForkFacade extends MerkleTreesFacade implements MerkleTr } } - public async createCheckpoint(): Promise { + public async createCheckpoint(): Promise { assert.notEqual(this.revision.forkId, 0, 'Fork ID must be set'); - await this.instance.call(WorldStateMessageType.CREATE_CHECKPOINT, { forkId: this.revision.forkId }); + const resp = await this.instance.call(WorldStateMessageType.CREATE_CHECKPOINT, { forkId: this.revision.forkId }); + return resp.depth; } public async commitCheckpoint(): Promise { @@ -334,14 +335,20 @@ export class MerkleTreesForkFacade extends MerkleTreesFacade implements MerkleTr await this.instance.call(WorldStateMessageType.REVERT_CHECKPOINT, { forkId: this.revision.forkId }); } - public async commitAllCheckpoints(): Promise { + public async commitAllCheckpointsTo(depth: number): Promise { assert.notEqual(this.revision.forkId, 0, 'Fork ID must be set'); - await this.instance.call(WorldStateMessageType.COMMIT_ALL_CHECKPOINTS, { forkId: this.revision.forkId }); + await this.instance.call(WorldStateMessageType.COMMIT_ALL_CHECKPOINTS, { + forkId: this.revision.forkId, + depth, + }); } - public async revertAllCheckpoints(): Promise { + public async revertAllCheckpointsTo(depth: number): Promise { assert.notEqual(this.revision.forkId, 0, 'Fork ID must be set'); - await this.instance.call(WorldStateMessageType.REVERT_ALL_CHECKPOINTS, { forkId: this.revision.forkId }); + await this.instance.call(WorldStateMessageType.REVERT_ALL_CHECKPOINTS, { + forkId: this.revision.forkId, + depth, + }); } } diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index 64f195918c32..edceed40e4b3 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -284,6 +284,16 @@ interface WithForkId { forkId: number; } +interface CreateCheckpointResponse { + depth: number; +} + +/** Request to commit/revert all checkpoints down to a target depth. The resulting depth after the operation equals the given depth. */ +interface CheckpointDepthRequest extends WithForkId { + /** The target depth after the operation. All checkpoints above this depth are committed/reverted. */ + depth: number; +} + interface WithWorldStateRevision { revision: WorldStateRevision; } @@ -487,8 +497,8 @@ export type WorldStateRequest = { [WorldStateMessageType.CREATE_CHECKPOINT]: WithForkId; [WorldStateMessageType.COMMIT_CHECKPOINT]: WithForkId; [WorldStateMessageType.REVERT_CHECKPOINT]: WithForkId; - [WorldStateMessageType.COMMIT_ALL_CHECKPOINTS]: WithForkId; - [WorldStateMessageType.REVERT_ALL_CHECKPOINTS]: WithForkId; + [WorldStateMessageType.COMMIT_ALL_CHECKPOINTS]: CheckpointDepthRequest; + [WorldStateMessageType.REVERT_ALL_CHECKPOINTS]: CheckpointDepthRequest; [WorldStateMessageType.COPY_STORES]: CopyStoresRequest; @@ -529,7 +539,7 @@ export type WorldStateResponse = { [WorldStateMessageType.GET_STATUS]: WorldStateStatusSummary; - [WorldStateMessageType.CREATE_CHECKPOINT]: void; + [WorldStateMessageType.CREATE_CHECKPOINT]: CreateCheckpointResponse; [WorldStateMessageType.COMMIT_CHECKPOINT]: void; [WorldStateMessageType.REVERT_CHECKPOINT]: void; [WorldStateMessageType.COMMIT_ALL_CHECKPOINTS]: void; diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 9677c8698098..ea52aa4a20b3 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -1578,7 +1578,8 @@ describe('NativeWorldState', () => { const fork = await ws.fork(); await advanceState(fork); const siblingPathsBefore = await getSiblingPaths(fork); - await fork.createCheckpoint(); + const checkpointDepth = await fork.createCheckpoint(); + expect(checkpointDepth).toEqual(1); await compareState(fork, siblingPathsBefore, true); @@ -1593,7 +1594,7 @@ describe('NativeWorldState', () => { await compareState(fork, siblingPathsAfter, true); await compareState(fork, siblingPathsBefore, false); - await fork.commitAllCheckpoints(); + await fork.commitAllCheckpointsTo(checkpointDepth - 1); await compareState(fork, siblingPathsAfter, true); await compareState(fork, siblingPathsBefore, false); @@ -1604,7 +1605,8 @@ describe('NativeWorldState', () => { const fork = await ws.fork(); await advanceState(fork); const siblingPathsBefore = await getSiblingPaths(fork); - await fork.createCheckpoint(); + const checkpointDepth = await fork.createCheckpoint(); + expect(checkpointDepth).toEqual(1); await compareState(fork, siblingPathsBefore, true); @@ -1612,14 +1614,15 @@ describe('NativeWorldState', () => { let siblingPathsAfter: SiblingPath[] = []; for (let i = 0; i < numCommits; i++) { - await fork.createCheckpoint(); + const newCheckpointDepth = await fork.createCheckpoint(); + expect(newCheckpointDepth).toEqual(checkpointDepth + i + 1); siblingPathsAfter = await advanceState(fork); } await compareState(fork, siblingPathsAfter, true); await compareState(fork, siblingPathsBefore, false); - await fork.revertAllCheckpoints(); + await fork.revertAllCheckpointsTo(checkpointDepth - 1); await compareState(fork, siblingPathsAfter, false); await compareState(fork, siblingPathsBefore, true); @@ -1835,5 +1838,161 @@ describe('NativeWorldState', () => { await fork.close(); }); + + it('createCheckpoint returns depth', async () => { + const fork = await ws.fork(); + expect(await fork.createCheckpoint()).toBe(1); + expect(await fork.createCheckpoint()).toBe(2); + expect(await fork.createCheckpoint()).toBe(3); + await fork.close(); + }); + + it('can commit all to depth', async () => { + const fork = await ws.fork(); + + // Create 3 checkpoints with state changes between each + const initialPaths = await getSiblingPaths(fork); + + await fork.createCheckpoint(); // depth 1 + await advanceState(fork); + + await fork.createCheckpoint(); // depth 2 + await advanceState(fork); + + await fork.createCheckpoint(); // depth 3 + const afterDepth3Paths = await advanceState(fork); + + // Commit depths 3 and 2 into depth 1, leaving depth at 1 + await fork.commitAllCheckpointsTo(1); + + // State should reflect all changes + await compareState(fork, afterDepth3Paths, true); + + // Revert depth 1 — should go back to initial state + await fork.revertCheckpoint(); + await compareState(fork, initialPaths, true); + + await fork.close(); + }); + + it('can revert all to depth', async () => { + const fork = await ws.fork(); + + await fork.createCheckpoint(); // depth 1 + const afterDepth1Paths = await advanceState(fork); + + await fork.createCheckpoint(); // depth 2 + await advanceState(fork); + + await fork.createCheckpoint(); // depth 3 + await advanceState(fork); + + // Revert depths 3 and 2, leaving depth at 1 + await fork.revertAllCheckpointsTo(1); + + // Should be back to after depth 1 state + await compareState(fork, afterDepth1Paths, true); + + // Depth 1 still active — commit it + await fork.commitCheckpoint(); + await compareState(fork, afterDepth1Paths, true); + + await fork.close(); + }); + + it('revert to depth preserves lower checkpoints', async () => { + const fork = await ws.fork(); + + await fork.createCheckpoint(); // depth 1 + await advanceState(fork); + + await fork.createCheckpoint(); // depth 2 + await advanceState(fork); + + // Revert depth 2 only, leaving depth at 1 + await fork.revertAllCheckpointsTo(1); + + // Create new checkpoint at depth 2 with different changes + await fork.createCheckpoint(); // depth 2 again + const newDepth2Paths = await advanceState(fork); + + // Commit depth 2 + await fork.commitCheckpoint(); + + // Commit depth 1 + await fork.commitCheckpoint(); + + // Final state should include the new depth 2 changes + await compareState(fork, newDepth2Paths, true); + + await fork.close(); + }); + + it('commit all with depth 0 commits everything', async () => { + const fork = await ws.fork(); + + await fork.createCheckpoint(); // depth 1 + await advanceState(fork); + + await fork.createCheckpoint(); // depth 2 + const finalPaths = await advanceState(fork); + + // depth 0 commits all checkpoints + await fork.commitAllCheckpointsTo(0); + + // State should reflect all changes + await compareState(fork, finalPaths, true); + + await fork.close(); + }); + + it('revert all with depth 0 reverts everything', async () => { + const fork = await ws.fork(); + const initialPaths = await getSiblingPaths(fork); + + await fork.createCheckpoint(); // depth 1 + await advanceState(fork); + + await fork.createCheckpoint(); // depth 2 + await advanceState(fork); + + // depth 0 reverts all checkpoints + await fork.revertAllCheckpointsTo(0); + + // Should be back to initial state + await compareState(fork, initialPaths, true); + + await fork.close(); + }); + + it('depth is consistent across multiple checkpoint cycles', async () => { + const fork = await ws.fork(); + + // Create checkpoint depth 1 + expect(await fork.createCheckpoint()).toBe(1); + const afterDepth1Paths = await advanceState(fork); + + // Create checkpoint depth 2 + expect(await fork.createCheckpoint()).toBe(2); + await advanceState(fork); + + // Revert depth 2, leaving depth at 1 + await fork.revertAllCheckpointsTo(1); + await compareState(fork, afterDepth1Paths, true); + + // Create new depth 2 + expect(await fork.createCheckpoint()).toBe(2); + const newDepth2Paths = await advanceState(fork); + + // Commit depth 2 + await fork.commitCheckpoint(); + await compareState(fork, newDepth2Paths, true); + + // Commit depth 1 + await fork.commitCheckpoint(); + await compareState(fork, newDepth2Paths, true); + + await fork.close(); + }); }); }); From b5aa4f79fe3a94a8cc8224192d7fa7ac7d5e7c05 Mon Sep 17 00:00:00 2001 From: PhilWindle <60546371+PhilWindle@users.noreply.github.com> Date: Fri, 13 Mar 2026 17:03:10 +0000 Subject: [PATCH 06/17] cherry-pick: fix: dependabot alerts (#21531) Cherry-pick of d11638dfc1 with conflicts (backport to v4). --- barretenberg/acir_tests/yarn.lock | 10 + .../src/barretenberg/nodejs_module/yarn.lock | 566 ++++++++++++++++++ barretenberg/docs/yarn.lock | 7 + barretenberg/ts/package-lock.json | 7 +- barretenberg/ts/yarn.lock | 18 +- boxes/yarn.lock | 10 + docs/yarn.lock | 10 + playground/yarn.lock | 10 + yarn-project/yarn.lock | 13 + 9 files changed, 647 insertions(+), 4 deletions(-) diff --git a/barretenberg/acir_tests/yarn.lock b/barretenberg/acir_tests/yarn.lock index 3e4b08f0ac21..1dfec3a44e28 100644 --- a/barretenberg/acir_tests/yarn.lock +++ b/barretenberg/acir_tests/yarn.lock @@ -5017,9 +5017,15 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD "tar@npm:^7.4.3": version: 7.4.3 resolution: "tar@npm:7.4.3" +======= +"tar@npm:^7.5.4": + version: 7.5.11 + resolution: "tar@npm:7.5.11" +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" @@ -5027,7 +5033,11 @@ __metadata: minizlib: "npm:^3.0.1" mkdirp: "npm:^3.0.1" yallist: "npm:^5.0.0" +<<<<<<< HEAD checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d +======= + checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock b/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock index 6a671ec7eece..029aef226204 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock @@ -7,7 +7,573 @@ node-addon-api@^8.0.0: resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-8.0.0.tgz#5453b7ad59dd040d12e0f1a97a6fa1c765c5c9d2" integrity sha512-ipO7rsHEBqa9STO5C5T10fj732ml+5kLN1cAG8/jdHd56ldQeGj3Q7+scUS+VHK/qy1zLEwC4wMK5+yM0btPvw== +<<<<<<< HEAD node-api-headers@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/node-api-headers/-/node-api-headers-1.1.0.tgz#3f9dd7bb10b29e1c3e3db675979605a308b2373c" integrity sha512-ucQW+SbYCUPfprvmzBsnjT034IGRB2XK8rRc78BgjNKhTdFKgAwAmgW704bKIBmcYW48it0Gkjpkd39Azrwquw== +======= +"@npmcli/agent@npm:^4.0.0": + version: 4.0.0 + resolution: "@npmcli/agent@npm:4.0.0" + dependencies: + agent-base: "npm:^7.1.0" + http-proxy-agent: "npm:^7.0.0" + https-proxy-agent: "npm:^7.0.1" + lru-cache: "npm:^11.2.1" + socks-proxy-agent: "npm:^8.0.3" + checksum: 10c0/f7b5ce0f3dd42c3f8c6546e8433573d8049f67ef11ec22aa4704bc41483122f68bf97752e06302c455ead667af5cb753e6a09bff06632bc465c1cfd4c4b75a53 + languageName: node + linkType: hard + +"@npmcli/fs@npm:^5.0.0": + version: 5.0.0 + resolution: "@npmcli/fs@npm:5.0.0" + dependencies: + semver: "npm:^7.3.5" + checksum: 10c0/26e376d780f60ff16e874a0ac9bc3399186846baae0b6e1352286385ac134d900cc5dafaded77f38d77f86898fc923ae1cee9d7399f0275b1aa24878915d722b + languageName: node + linkType: hard + +"abbrev@npm:^4.0.0": + version: 4.0.0 + resolution: "abbrev@npm:4.0.0" + checksum: 10c0/b4cc16935235e80702fc90192e349e32f8ef0ed151ef506aa78c81a7c455ec18375c4125414b99f84b2e055199d66383e787675f0bcd87da7a4dbd59f9eac1d5 + languageName: node + linkType: hard + +"agent-base@npm:^7.1.0, agent-base@npm:^7.1.2": + version: 7.1.4 + resolution: "agent-base@npm:7.1.4" + checksum: 10c0/c2c9ab7599692d594b6a161559ada307b7a624fa4c7b03e3afdb5a5e31cd0e53269115b620fcab024c5ac6a6f37fa5eb2e004f076ad30f5f7e6b8b671f7b35fe + languageName: node + linkType: hard + +"balanced-match@npm:^4.0.2": + version: 4.0.4 + resolution: "balanced-match@npm:4.0.4" + checksum: 10c0/07e86102a3eb2ee2a6a1a89164f29d0dbaebd28f2ca3f5ca786f36b8b23d9e417eb3be45a4acf754f837be5ac0a2317de90d3fcb7f4f4dc95720a1f36b26a17b + languageName: node + linkType: hard + +"brace-expansion@npm:^5.0.2": + version: 5.0.4 + resolution: "brace-expansion@npm:5.0.4" + dependencies: + balanced-match: "npm:^4.0.2" + checksum: 10c0/359cbcfa80b2eb914ca1f3440e92313fbfe7919ee6b274c35db55bec555aded69dac5ee78f102cec90c35f98c20fa43d10936d0cd9978158823c249257e1643a + languageName: node + linkType: hard + +"cacache@npm:^20.0.1": + version: 20.0.3 + resolution: "cacache@npm:20.0.3" + dependencies: + "@npmcli/fs": "npm:^5.0.0" + fs-minipass: "npm:^3.0.0" + glob: "npm:^13.0.0" + lru-cache: "npm:^11.1.0" + minipass: "npm:^7.0.3" + minipass-collect: "npm:^2.0.1" + minipass-flush: "npm:^1.0.5" + minipass-pipeline: "npm:^1.2.4" + p-map: "npm:^7.0.2" + ssri: "npm:^13.0.0" + unique-filename: "npm:^5.0.0" + checksum: 10c0/c7da1ca694d20e8f8aedabd21dc11518f809a7d2b59aa76a1fc655db5a9e62379e465c157ddd2afe34b19230808882288effa6911b2de26a088a6d5645123462 + languageName: node + linkType: hard + +"chownr@npm:^3.0.0": + version: 3.0.0 + resolution: "chownr@npm:3.0.0" + checksum: 10c0/43925b87700f7e3893296c8e9c56cc58f926411cce3a6e5898136daaf08f08b9a8eb76d37d3267e707d0dcc17aed2e2ebdf5848c0c3ce95cf910a919935c1b10 + languageName: node + linkType: hard + +"debug@npm:4, debug@npm:^4.3.4": + version: 4.4.3 + resolution: "debug@npm:4.4.3" + dependencies: + ms: "npm:^2.1.3" + peerDependenciesMeta: + supports-color: + optional: true + checksum: 10c0/d79136ec6c83ecbefd0f6a5593da6a9c91ec4d7ddc4b54c883d6e71ec9accb5f67a1a5e96d00a328196b5b5c86d365e98d8a3a70856aaf16b4e7b1985e67f5a6 + languageName: node + linkType: hard + +"encoding@npm:^0.1.13": + version: 0.1.13 + resolution: "encoding@npm:0.1.13" + dependencies: + iconv-lite: "npm:^0.6.2" + checksum: 10c0/36d938712ff00fe1f4bac88b43bcffb5930c1efa57bbcdca9d67e1d9d6c57cfb1200fb01efe0f3109b2ce99b231f90779532814a81370a1bd3274a0f58585039 + languageName: node + linkType: hard + +"env-paths@npm:^2.2.0": + version: 2.2.1 + resolution: "env-paths@npm:2.2.1" + checksum: 10c0/285325677bf00e30845e330eec32894f5105529db97496ee3f598478e50f008c5352a41a30e5e72ec9de8a542b5a570b85699cd63bd2bc646dbcb9f311d83bc4 + languageName: node + linkType: hard + +"err-code@npm:^2.0.2": + version: 2.0.3 + resolution: "err-code@npm:2.0.3" + checksum: 10c0/b642f7b4dd4a376e954947550a3065a9ece6733ab8e51ad80db727aaae0817c2e99b02a97a3d6cecc648a97848305e728289cf312d09af395403a90c9d4d8a66 + languageName: node + linkType: hard + +"exponential-backoff@npm:^3.1.1": + version: 3.1.3 + resolution: "exponential-backoff@npm:3.1.3" + checksum: 10c0/77e3ae682b7b1f4972f563c6dbcd2b0d54ac679e62d5d32f3e5085feba20483cf28bd505543f520e287a56d4d55a28d7874299941faf637e779a1aa5994d1267 + languageName: node + linkType: hard + +"fdir@npm:^6.5.0": + version: 6.5.0 + resolution: "fdir@npm:6.5.0" + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + checksum: 10c0/e345083c4306b3aed6cb8ec551e26c36bab5c511e99ea4576a16750ddc8d3240e63826cc624f5ae17ad4dc82e68a253213b60d556c11bfad064b7607847ed07f + languageName: node + linkType: hard + +"fs-minipass@npm:^3.0.0": + version: 3.0.3 + resolution: "fs-minipass@npm:3.0.3" + dependencies: + minipass: "npm:^7.0.3" + checksum: 10c0/63e80da2ff9b621e2cb1596abcb9207f1cf82b968b116ccd7b959e3323144cce7fb141462200971c38bbf2ecca51695069db45265705bed09a7cd93ae5b89f94 + languageName: node + linkType: hard + +"glob@npm:^13.0.0": + version: 13.0.2 + resolution: "glob@npm:13.0.2" + dependencies: + minimatch: "npm:^10.1.2" + minipass: "npm:^7.1.2" + path-scurry: "npm:^2.0.0" + checksum: 10c0/3d4b09efa922c4cba9be6d5b9efae14384e7422aeb886eb35fba8a94820b8281474b8d3f16927127fb1a0c8580e18fc00e3fda03c8dc31fa0af3ba918edeeb04 + languageName: node + linkType: hard + +"graceful-fs@npm:^4.2.6": + version: 4.2.11 + resolution: "graceful-fs@npm:4.2.11" + checksum: 10c0/386d011a553e02bc594ac2ca0bd6d9e4c22d7fa8cfbfc448a6d148c59ea881b092db9dbe3547ae4b88e55f1b01f7c4a2ecc53b310c042793e63aa44cf6c257f2 + languageName: node + linkType: hard + +"http-cache-semantics@npm:^4.1.1": + version: 4.2.0 + resolution: "http-cache-semantics@npm:4.2.0" + checksum: 10c0/45b66a945cf13ec2d1f29432277201313babf4a01d9e52f44b31ca923434083afeca03f18417f599c9ab3d0e7b618ceb21257542338b57c54b710463b4a53e37 + languageName: node + linkType: hard + +"http-proxy-agent@npm:^7.0.0": + version: 7.0.2 + resolution: "http-proxy-agent@npm:7.0.2" + dependencies: + agent-base: "npm:^7.1.0" + debug: "npm:^4.3.4" + checksum: 10c0/4207b06a4580fb85dd6dff521f0abf6db517489e70863dca1a0291daa7f2d3d2d6015a57bd702af068ea5cf9f1f6ff72314f5f5b4228d299c0904135d2aef921 + languageName: node + linkType: hard + +"https-proxy-agent@npm:^7.0.1": + version: 7.0.6 + resolution: "https-proxy-agent@npm:7.0.6" + dependencies: + agent-base: "npm:^7.1.2" + debug: "npm:4" + checksum: 10c0/f729219bc735edb621fa30e6e84e60ee5d00802b8247aac0d7b79b0bd6d4b3294737a337b93b86a0bd9e68099d031858a39260c976dc14cdbba238ba1f8779ac + languageName: node + linkType: hard + +"iconv-lite@npm:^0.6.2": + version: 0.6.3 + resolution: "iconv-lite@npm:0.6.3" + dependencies: + safer-buffer: "npm:>= 2.1.2 < 3.0.0" + checksum: 10c0/98102bc66b33fcf5ac044099d1257ba0b7ad5e3ccd3221f34dd508ab4070edff183276221684e1e0555b145fce0850c9f7d2b60a9fcac50fbb4ea0d6e845a3b1 + languageName: node + linkType: hard + +"imurmurhash@npm:^0.1.4": + version: 0.1.4 + resolution: "imurmurhash@npm:0.1.4" + checksum: 10c0/8b51313850dd33605c6c9d3fd9638b714f4c4c40250cff658209f30d40da60f78992fb2df5dabee4acf589a6a82bbc79ad5486550754bd9ec4e3fc0d4a57d6a6 + languageName: node + linkType: hard + +"ip-address@npm:^10.0.1": + version: 10.1.0 + resolution: "ip-address@npm:10.1.0" + checksum: 10c0/0103516cfa93f6433b3bd7333fa876eb21263912329bfa47010af5e16934eeeff86f3d2ae700a3744a137839ddfad62b900c7a445607884a49b5d1e32a3d7566 + languageName: node + linkType: hard + +"isexe@npm:^4.0.0": + version: 4.0.0 + resolution: "isexe@npm:4.0.0" + checksum: 10c0/5884815115bceac452877659a9c7726382531592f43dc29e5d48b7c4100661aed54018cb90bd36cb2eaeba521092570769167acbb95c18d39afdccbcca06c5ce + languageName: node + linkType: hard + +"lru-cache@npm:^11.0.0, lru-cache@npm:^11.1.0, lru-cache@npm:^11.2.1": + version: 11.2.6 + resolution: "lru-cache@npm:11.2.6" + checksum: 10c0/73bbffb298760e71b2bfe8ebc16a311c6a60ceddbba919cfedfd8635c2d125fbfb5a39b71818200e67973b11f8d59c5a9e31d6f90722e340e90393663a66e5cd + languageName: node + linkType: hard + +"make-fetch-happen@npm:^15.0.0": + version: 15.0.3 + resolution: "make-fetch-happen@npm:15.0.3" + dependencies: + "@npmcli/agent": "npm:^4.0.0" + cacache: "npm:^20.0.1" + http-cache-semantics: "npm:^4.1.1" + minipass: "npm:^7.0.2" + minipass-fetch: "npm:^5.0.0" + minipass-flush: "npm:^1.0.5" + minipass-pipeline: "npm:^1.2.4" + negotiator: "npm:^1.0.0" + proc-log: "npm:^6.0.0" + promise-retry: "npm:^2.0.1" + ssri: "npm:^13.0.0" + checksum: 10c0/525f74915660be60b616bcbd267c4a5b59481b073ba125e45c9c3a041bb1a47a2bd0ae79d028eb6f5f95bf9851a4158423f5068539c3093621abb64027e8e461 + languageName: node + linkType: hard + +"minimatch@npm:^10.1.2": + version: 10.2.4 + resolution: "minimatch@npm:10.2.4" + dependencies: + brace-expansion: "npm:^5.0.2" + checksum: 10c0/35f3dfb7b99b51efd46afd378486889f590e7efb10e0f6a10ba6800428cf65c9a8dedb74427d0570b318d749b543dc4e85f06d46d2858bc8cac7e1eb49a95945 + languageName: node + linkType: hard + +"minipass-collect@npm:^2.0.1": + version: 2.0.1 + resolution: "minipass-collect@npm:2.0.1" + dependencies: + minipass: "npm:^7.0.3" + checksum: 10c0/5167e73f62bb74cc5019594709c77e6a742051a647fe9499abf03c71dca75515b7959d67a764bdc4f8b361cf897fbf25e2d9869ee039203ed45240f48b9aa06e + languageName: node + linkType: hard + +"minipass-fetch@npm:^5.0.0": + version: 5.0.1 + resolution: "minipass-fetch@npm:5.0.1" + dependencies: + encoding: "npm:^0.1.13" + minipass: "npm:^7.0.3" + minipass-sized: "npm:^2.0.0" + minizlib: "npm:^3.0.1" + dependenciesMeta: + encoding: + optional: true + checksum: 10c0/50bcf48c9841ebb25e29a2817468595219c72cfffc7c175a1d7327843c8bef9b72cb01778f46df7eca695dfe47ab98e6167af4cb026ddd80f660842919a5193c + languageName: node + linkType: hard + +"minipass-flush@npm:^1.0.5": + version: 1.0.5 + resolution: "minipass-flush@npm:1.0.5" + dependencies: + minipass: "npm:^3.0.0" + checksum: 10c0/2a51b63feb799d2bb34669205eee7c0eaf9dce01883261a5b77410c9408aa447e478efd191b4de6fc1101e796ff5892f8443ef20d9544385819093dbb32d36bd + languageName: node + linkType: hard + +"minipass-pipeline@npm:^1.2.4": + version: 1.2.4 + resolution: "minipass-pipeline@npm:1.2.4" + dependencies: + minipass: "npm:^3.0.0" + checksum: 10c0/cbda57cea20b140b797505dc2cac71581a70b3247b84480c1fed5ca5ba46c25ecc25f68bfc9e6dcb1a6e9017dab5c7ada5eab73ad4f0a49d84e35093e0c643f2 + languageName: node + linkType: hard + +"minipass-sized@npm:^2.0.0": + version: 2.0.0 + resolution: "minipass-sized@npm:2.0.0" + dependencies: + minipass: "npm:^7.1.2" + checksum: 10c0/f9201696a6f6d68610d04c9c83e3d2e5cb9c026aae1c8cbf7e17f386105cb79c1bb088dbc21bf0b1eb4f3fb5df384fd1e7aa3bf1f33868c416ae8c8a92679db8 + languageName: node + linkType: hard + +"minipass@npm:^3.0.0": + version: 3.3.6 + resolution: "minipass@npm:3.3.6" + dependencies: + yallist: "npm:^4.0.0" + checksum: 10c0/a114746943afa1dbbca8249e706d1d38b85ed1298b530f5808ce51f8e9e941962e2a5ad2e00eae7dd21d8a4aae6586a66d4216d1a259385e9d0358f0c1eba16c + languageName: node + linkType: hard + +"minipass@npm:^7.0.2, minipass@npm:^7.0.3, minipass@npm:^7.0.4, minipass@npm:^7.1.2": + version: 7.1.2 + resolution: "minipass@npm:7.1.2" + checksum: 10c0/b0fd20bb9fb56e5fa9a8bfac539e8915ae07430a619e4b86ff71f5fc757ef3924b23b2c4230393af1eda647ed3d75739e4e0acb250a6b1eb277cf7f8fe449557 + languageName: node + linkType: hard + +"minizlib@npm:^3.0.1, minizlib@npm:^3.1.0": + version: 3.1.0 + resolution: "minizlib@npm:3.1.0" + dependencies: + minipass: "npm:^7.1.2" + checksum: 10c0/5aad75ab0090b8266069c9aabe582c021ae53eb33c6c691054a13a45db3b4f91a7fb1bd79151e6b4e9e9a86727b522527c0a06ec7d45206b745d54cd3097bcec + languageName: node + linkType: hard + +"ms@npm:^2.1.3": + version: 2.1.3 + resolution: "ms@npm:2.1.3" + checksum: 10c0/d924b57e7312b3b63ad21fc5b3dc0af5e78d61a1fc7cfb5457edaf26326bf62be5307cc87ffb6862ef1c2b33b0233cdb5d4f01c4c958cc0d660948b65a287a48 + languageName: node + linkType: hard + +"negotiator@npm:^1.0.0": + version: 1.0.0 + resolution: "negotiator@npm:1.0.0" + checksum: 10c0/4c559dd52669ea48e1914f9d634227c561221dd54734070791f999c52ed0ff36e437b2e07d5c1f6e32909fc625fe46491c16e4a8f0572567d4dd15c3a4fda04b + languageName: node + linkType: hard + +"node-addon-api@npm:^8.0.0": + version: 8.0.0 + resolution: "node-addon-api@npm:8.0.0" + dependencies: + node-gyp: "npm:latest" + checksum: 10c0/20eb231362cc07c62d9839164473744d985be5d82685214f3750d990d9f61ef366e0ba112a766c925d640ed29b2a500b83568e895dc2444dcd5db01e615aac2b + languageName: node + linkType: hard + +"node-api-headers@npm:^1.1.0": + version: 1.1.0 + resolution: "node-api-headers@npm:1.1.0" + checksum: 10c0/7806d71077348ea199034e8c90a9147038d37fcccc1b85717e48c095fe31783a4f909f5daced4506e6cbce93fba91220bb3fc8626ee0640d26de9860f6500174 + languageName: node + linkType: hard + +"node-gyp@npm:latest": + version: 12.2.0 + resolution: "node-gyp@npm:12.2.0" + dependencies: + env-paths: "npm:^2.2.0" + exponential-backoff: "npm:^3.1.1" + graceful-fs: "npm:^4.2.6" + make-fetch-happen: "npm:^15.0.0" + nopt: "npm:^9.0.0" + proc-log: "npm:^6.0.0" + semver: "npm:^7.3.5" + tar: "npm:^7.5.4" + tinyglobby: "npm:^0.2.12" + which: "npm:^6.0.0" + bin: + node-gyp: bin/node-gyp.js + checksum: 10c0/3ed046746a5a7d90950cd8b0547332b06598443f31fe213ef4332a7174c7b7d259e1704835feda79b87d3f02e59d7791842aac60642ede4396ab25fdf0f8f759 + languageName: node + linkType: hard + +"nodejs_module@workspace:.": + version: 0.0.0-use.local + resolution: "nodejs_module@workspace:." + dependencies: + node-addon-api: "npm:^8.0.0" + node-api-headers: "npm:^1.1.0" + languageName: unknown + linkType: soft + +"nopt@npm:^9.0.0": + version: 9.0.0 + resolution: "nopt@npm:9.0.0" + dependencies: + abbrev: "npm:^4.0.0" + bin: + nopt: bin/nopt.js + checksum: 10c0/1822eb6f9b020ef6f7a7516d7b64a8036e09666ea55ac40416c36e4b2b343122c3cff0e2f085675f53de1d2db99a2a89a60ccea1d120bcd6a5347bf6ceb4a7fd + languageName: node + linkType: hard + +"p-map@npm:^7.0.2": + version: 7.0.4 + resolution: "p-map@npm:7.0.4" + checksum: 10c0/a5030935d3cb2919d7e89454d1ce82141e6f9955413658b8c9403cfe379283770ed3048146b44cde168aa9e8c716505f196d5689db0ae3ce9a71521a2fef3abd + languageName: node + linkType: hard + +"path-scurry@npm:^2.0.0": + version: 2.0.1 + resolution: "path-scurry@npm:2.0.1" + dependencies: + lru-cache: "npm:^11.0.0" + minipass: "npm:^7.1.2" + checksum: 10c0/2a16ed0e81fbc43513e245aa5763354e25e787dab0d539581a6c3f0f967461a159ed6236b2559de23aa5b88e7dc32b469b6c47568833dd142a4b24b4f5cd2620 + languageName: node + linkType: hard + +"picomatch@npm:^4.0.3": + version: 4.0.3 + resolution: "picomatch@npm:4.0.3" + checksum: 10c0/9582c951e95eebee5434f59e426cddd228a7b97a0161a375aed4be244bd3fe8e3a31b846808ea14ef2c8a2527a6eeab7b3946a67d5979e81694654f939473ae2 + languageName: node + linkType: hard + +"proc-log@npm:^6.0.0": + version: 6.1.0 + resolution: "proc-log@npm:6.1.0" + checksum: 10c0/4f178d4062733ead9d71a9b1ab24ebcecdfe2250916a5b1555f04fe2eda972a0ec76fbaa8df1ad9c02707add6749219d118a4fc46dc56bdfe4dde4b47d80bb82 + languageName: node + linkType: hard + +"promise-retry@npm:^2.0.1": + version: 2.0.1 + resolution: "promise-retry@npm:2.0.1" + dependencies: + err-code: "npm:^2.0.2" + retry: "npm:^0.12.0" + checksum: 10c0/9c7045a1a2928094b5b9b15336dcd2a7b1c052f674550df63cc3f36cd44028e5080448175b6f6ca32b642de81150f5e7b1a98b728f15cb069f2dd60ac2616b96 + languageName: node + linkType: hard + +"retry@npm:^0.12.0": + version: 0.12.0 + resolution: "retry@npm:0.12.0" + checksum: 10c0/59933e8501727ba13ad73ef4a04d5280b3717fd650408460c987392efe9d7be2040778ed8ebe933c5cbd63da3dcc37919c141ef8af0a54a6e4fca5a2af177bfe + languageName: node + linkType: hard + +"safer-buffer@npm:>= 2.1.2 < 3.0.0": + version: 2.1.2 + resolution: "safer-buffer@npm:2.1.2" + checksum: 10c0/7e3c8b2e88a1841c9671094bbaeebd94448111dd90a81a1f606f3f67708a6ec57763b3b47f06da09fc6054193e0e6709e77325415dc8422b04497a8070fa02d4 + languageName: node + linkType: hard + +"semver@npm:^7.3.5": + version: 7.7.4 + resolution: "semver@npm:7.7.4" + bin: + semver: bin/semver.js + checksum: 10c0/5215ad0234e2845d4ea5bb9d836d42b03499546ddafb12075566899fc617f68794bb6f146076b6881d755de17d6c6cc73372555879ec7dce2c2feee947866ad2 + languageName: node + linkType: hard + +"smart-buffer@npm:^4.2.0": + version: 4.2.0 + resolution: "smart-buffer@npm:4.2.0" + checksum: 10c0/a16775323e1404dd43fabafe7460be13a471e021637bc7889468eb45ce6a6b207261f454e4e530a19500cc962c4cc5348583520843b363f4193cee5c00e1e539 + languageName: node + linkType: hard + +"socks-proxy-agent@npm:^8.0.3": + version: 8.0.5 + resolution: "socks-proxy-agent@npm:8.0.5" + dependencies: + agent-base: "npm:^7.1.2" + debug: "npm:^4.3.4" + socks: "npm:^2.8.3" + checksum: 10c0/5d2c6cecba6821389aabf18728325730504bf9bb1d9e342e7987a5d13badd7a98838cc9a55b8ed3cb866ad37cc23e1086f09c4d72d93105ce9dfe76330e9d2a6 + languageName: node + linkType: hard + +"socks@npm:^2.8.3": + version: 2.8.7 + resolution: "socks@npm:2.8.7" + dependencies: + ip-address: "npm:^10.0.1" + smart-buffer: "npm:^4.2.0" + checksum: 10c0/2805a43a1c4bcf9ebf6e018268d87b32b32b06fbbc1f9282573583acc155860dc361500f89c73bfbb157caa1b4ac78059eac0ef15d1811eb0ca75e0bdadbc9d2 + languageName: node + linkType: hard + +"ssri@npm:^13.0.0": + version: 13.0.1 + resolution: "ssri@npm:13.0.1" + dependencies: + minipass: "npm:^7.0.3" + checksum: 10c0/cf6408a18676c57ff2ed06b8a20dc64bb3e748e5c7e095332e6aecaa2b8422b1e94a739a8453bf65156a8a47afe23757ba4ab52d3ea3b62322dc40875763e17a + languageName: node + linkType: hard + +"tar@npm:7.5.11": + version: 7.5.11 + resolution: "tar@npm:7.5.11" + dependencies: + "@isaacs/fs-minipass": "npm:^4.0.0" + chownr: "npm:^3.0.0" + minipass: "npm:^7.1.2" + minizlib: "npm:^3.1.0" + yallist: "npm:^5.0.0" + checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 + languageName: node + linkType: hard + +"tinyglobby@npm:^0.2.12": + version: 0.2.15 + resolution: "tinyglobby@npm:0.2.15" + dependencies: + fdir: "npm:^6.5.0" + picomatch: "npm:^4.0.3" + checksum: 10c0/869c31490d0d88eedb8305d178d4c75e7463e820df5a9b9d388291daf93e8b1eb5de1dad1c1e139767e4269fe75f3b10d5009b2cc14db96ff98986920a186844 + languageName: node + linkType: hard + +"unique-filename@npm:^5.0.0": + version: 5.0.0 + resolution: "unique-filename@npm:5.0.0" + dependencies: + unique-slug: "npm:^6.0.0" + checksum: 10c0/afb897e9cf4c2fb622ea716f7c2bb462001928fc5f437972213afdf1cc32101a230c0f1e9d96fc91ee5185eca0f2feb34127145874975f347be52eb91d6ccc2c + languageName: node + linkType: hard + +"unique-slug@npm:^6.0.0": + version: 6.0.0 + resolution: "unique-slug@npm:6.0.0" + dependencies: + imurmurhash: "npm:^0.1.4" + checksum: 10c0/da7ade4cb04eb33ad0499861f82fe95ce9c7c878b7139dc54d140ecfb6a6541c18a5c8dac16188b8b379fe62c0c1f1b710814baac910cde5f4fec06212126c6a + languageName: node + linkType: hard + +"which@npm:^6.0.0": + version: 6.0.1 + resolution: "which@npm:6.0.1" + dependencies: + isexe: "npm:^4.0.0" + bin: + node-which: bin/which.js + checksum: 10c0/7e710e54ea36d2d6183bee2f9caa27a3b47b9baf8dee55a199b736fcf85eab3b9df7556fca3d02b50af7f3dfba5ea3a45644189836df06267df457e354da66d5 + languageName: node + linkType: hard + +"yallist@npm:^4.0.0": + version: 4.0.0 + resolution: "yallist@npm:4.0.0" + checksum: 10c0/2286b5e8dbfe22204ab66e2ef5cc9bbb1e55dfc873bbe0d568aa943eb255d131890dfd5bf243637273d31119b870f49c18fcde2c6ffbb7a7a092b870dc90625a + languageName: node + linkType: hard + +"yallist@npm:^5.0.0": + version: 5.0.0 + resolution: "yallist@npm:5.0.0" + checksum: 10c0/a499c81ce6d4a1d260d4ea0f6d49ab4da09681e32c3f0472dee16667ed69d01dae63a3b81745a24bd78476ec4fcf856114cb4896ace738e01da34b2c42235416 + languageName: node + linkType: hard +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) diff --git a/barretenberg/docs/yarn.lock b/barretenberg/docs/yarn.lock index d4306cb115eb..d16b732eb12d 100644 --- a/barretenberg/docs/yarn.lock +++ b/barretenberg/docs/yarn.lock @@ -17810,6 +17810,7 @@ tar-stream@^3.0.0, tar-stream@^3.1.4, tar-stream@^3.1.5: fast-fifo "^1.2.0" streamx "^2.15.0" +<<<<<<< HEAD tar@^6.1.11: version "6.2.1" resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a" @@ -17826,6 +17827,12 @@ tar@^7.4.0: version "7.4.3" resolved "https://registry.yarnpkg.com/tar/-/tar-7.4.3.tgz#88bbe9286a3fcd900e94592cda7a22b192e80571" integrity sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw== +======= +tar@^7.4.0, tar@^7.5.3: + version "7.5.11" + resolved "https://registry.yarnpkg.com/tar/-/tar-7.5.11.tgz#1250fae45d98806b36d703b30973fa8e0a6d8868" + integrity sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ== +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass" "^4.0.0" chownr "^3.0.0" diff --git a/barretenberg/ts/package-lock.json b/barretenberg/ts/package-lock.json index 42926d9439df..63d04aba70fd 100644 --- a/barretenberg/ts/package-lock.json +++ b/barretenberg/ts/package-lock.json @@ -4055,9 +4055,10 @@ } }, "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", "dev": true, "license": "ISC", "dependencies": { diff --git a/barretenberg/ts/yarn.lock b/barretenberg/ts/yarn.lock index 711f495aceb0..c4ec5d0f23eb 100644 --- a/barretenberg/ts/yarn.lock +++ b/barretenberg/ts/yarn.lock @@ -2818,9 +2818,15 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD "glob@npm:^10.2.2, glob@npm:^10.3.10": version: 10.4.5 resolution: "glob@npm:10.4.5" +======= +"glob@npm:^10.3.10": + version: 10.5.0 + resolution: "glob@npm:10.5.0" +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: foreground-child: "npm:^3.1.0" jackspeak: "npm:^3.1.2" @@ -2830,7 +2836,7 @@ __metadata: path-scurry: "npm:^1.11.1" bin: glob: dist/esm/bin.mjs - checksum: 10/698dfe11828b7efd0514cd11e573eaed26b2dff611f0400907281ce3eab0c1e56143ef9b35adc7c77ecc71fba74717b510c7c223d34ca8a98ec81777b293d4ac + checksum: 10/ab3bccfefcc0afaedbd1f480cd0c4a2c0e322eb3f0aa7ceaa31b3f00b825069f17cf0f1fc8b6f256795074b903f37c0ade37ddda6a176aa57f1c2bbfe7240653 languageName: node linkType: hard @@ -3747,6 +3753,16 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD +======= +"lru-cache@npm:^11.0.0, lru-cache@npm:^11.1.0, lru-cache@npm:^11.2.1": + version: 11.2.7 + resolution: "lru-cache@npm:11.2.7" + checksum: 10/fbff4b8dee8189dde9b52cdfb3ea89b4c9cec094c1538cd30d1f47299477ff312efdb35f7994477ec72328f8e754e232b26a143feda1bd1f79ff22da6664d2c5 + languageName: node + linkType: hard + +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) "lru-cache@npm:^5.1.1": version: 5.1.1 resolution: "lru-cache@npm:5.1.1" diff --git a/boxes/yarn.lock b/boxes/yarn.lock index 828a41beba7b..d6dd11886054 100644 --- a/boxes/yarn.lock +++ b/boxes/yarn.lock @@ -11322,9 +11322,15 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD "tar@npm:^7.4.3": version: 7.4.3 resolution: "tar@npm:7.4.3" +======= +"tar@npm:^7.5.4": + version: 7.5.11 + resolution: "tar@npm:7.5.11" +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" @@ -11332,7 +11338,11 @@ __metadata: minizlib: "npm:^3.0.1" mkdirp: "npm:^3.0.1" yallist: "npm:^5.0.0" +<<<<<<< HEAD checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d +======= + checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/docs/yarn.lock b/docs/yarn.lock index ed4abf35cf76..8edaed8394e2 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -23248,16 +23248,26 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD "tar@npm:^7.4.0, tar@npm:^7.4.3": version: 7.5.1 resolution: "tar@npm:7.5.1" +======= +"tar@npm:^7.4.0, tar@npm:^7.5.3, tar@npm:^7.5.4": + version: 7.5.11 + resolution: "tar@npm:7.5.11" +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" minipass: "npm:^7.1.2" minizlib: "npm:^3.1.0" yallist: "npm:^5.0.0" +<<<<<<< HEAD checksum: 10c0/0dad0596a61586180981133b20c32cfd93c5863c5b7140d646714e6ea8ec84583b879e5dc3928a4d683be6e6109ad7ea3de1cf71986d5194f81b3a016c8858c9 +======= + checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/playground/yarn.lock b/playground/yarn.lock index 49f5556b80b9..823b11d8e88d 100644 --- a/playground/yarn.lock +++ b/playground/yarn.lock @@ -5832,9 +5832,15 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD "tar@npm:^7.4.3": version: 7.4.3 resolution: "tar@npm:7.4.3" +======= +"tar@npm:^7.5.4": + version: 7.5.11 + resolution: "tar@npm:7.5.11" +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" @@ -5842,7 +5848,11 @@ __metadata: minizlib: "npm:^3.0.1" mkdirp: "npm:^3.0.1" yallist: "npm:^5.0.0" +<<<<<<< HEAD checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d +======= + checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index cb1b1d09ce4d..46b6ca634c56 100644 --- a/yarn-project/yarn.lock +++ b/yarn-project/yarn.lock @@ -20857,6 +20857,7 @@ __metadata: languageName: node linkType: hard +<<<<<<< HEAD "tar@npm:^6.1.11, tar@npm:^6.1.2": version: 6.2.1 resolution: "tar@npm:6.2.1" @@ -20868,6 +20869,18 @@ __metadata: mkdirp: "npm:^1.0.3" yallist: "npm:^4.0.0" checksum: 10/bfbfbb2861888077fc1130b84029cdc2721efb93d1d1fb80f22a7ac3a98ec6f8972f29e564103bbebf5e97be67ebc356d37fa48dbc4960600a1eb7230fbd1ea0 +======= +"tar@npm:^7.5.4": + version: 7.5.11 + resolution: "tar@npm:7.5.11" + dependencies: + "@isaacs/fs-minipass": "npm:^4.0.0" + chownr: "npm:^3.0.0" + minipass: "npm:^7.1.2" + minizlib: "npm:^3.1.0" + yallist: "npm:^5.0.0" + checksum: 10/fb2e77ee858a73936c68e066f4a602d428d6f812e6da0cc1e14a41f99498e4f7fd3535e355fa15157240a5538aa416026cfa6306bb0d1d1c1abf314b1f878e9a +>>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard From d3ebad0d7183d72baff1dd15906b17ebdd4f3f02 Mon Sep 17 00:00:00 2001 From: AztecBot Date: Mon, 16 Mar 2026 12:28:15 +0000 Subject: [PATCH 07/17] fix: resolve cherry-pick conflicts Resolved lock file conflicts for backport to v4: - Kept v4 specifiers while updating to new versions where applicable - barretenberg/acir_tests, boxes, docs, playground: updated tar 7.4.3/7.5.1 -> 7.5.11 - barretenberg/ts: updated glob 10.4.5 -> 10.5.0 - barretenberg/docs: kept tar@6, updated tar@7 -> 7.5.11 - yarn-project: kept tar@6.2.1 (v4 uses tar@6, not tar@7) - nodejs_module: kept v4 version (different lock format) --- barretenberg/acir_tests/yarn.lock | 10 - .../src/barretenberg/nodejs_module/yarn.lock | 566 ------------------ barretenberg/docs/yarn.lock | 7 - barretenberg/ts/yarn.lock | 16 - boxes/yarn.lock | 10 - docs/yarn.lock | 10 - playground/yarn.lock | 10 - yarn-project/yarn.lock | 13 - 8 files changed, 642 deletions(-) diff --git a/barretenberg/acir_tests/yarn.lock b/barretenberg/acir_tests/yarn.lock index 1dfec3a44e28..e558fa921b9e 100644 --- a/barretenberg/acir_tests/yarn.lock +++ b/barretenberg/acir_tests/yarn.lock @@ -5017,15 +5017,9 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD "tar@npm:^7.4.3": - version: 7.4.3 - resolution: "tar@npm:7.4.3" -======= -"tar@npm:^7.5.4": version: 7.5.11 resolution: "tar@npm:7.5.11" ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" @@ -5033,11 +5027,7 @@ __metadata: minizlib: "npm:^3.0.1" mkdirp: "npm:^3.0.1" yallist: "npm:^5.0.0" -<<<<<<< HEAD - checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d -======= checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock b/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock index 029aef226204..6a671ec7eece 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock @@ -7,573 +7,7 @@ node-addon-api@^8.0.0: resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-8.0.0.tgz#5453b7ad59dd040d12e0f1a97a6fa1c765c5c9d2" integrity sha512-ipO7rsHEBqa9STO5C5T10fj732ml+5kLN1cAG8/jdHd56ldQeGj3Q7+scUS+VHK/qy1zLEwC4wMK5+yM0btPvw== -<<<<<<< HEAD node-api-headers@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/node-api-headers/-/node-api-headers-1.1.0.tgz#3f9dd7bb10b29e1c3e3db675979605a308b2373c" integrity sha512-ucQW+SbYCUPfprvmzBsnjT034IGRB2XK8rRc78BgjNKhTdFKgAwAmgW704bKIBmcYW48it0Gkjpkd39Azrwquw== -======= -"@npmcli/agent@npm:^4.0.0": - version: 4.0.0 - resolution: "@npmcli/agent@npm:4.0.0" - dependencies: - agent-base: "npm:^7.1.0" - http-proxy-agent: "npm:^7.0.0" - https-proxy-agent: "npm:^7.0.1" - lru-cache: "npm:^11.2.1" - socks-proxy-agent: "npm:^8.0.3" - checksum: 10c0/f7b5ce0f3dd42c3f8c6546e8433573d8049f67ef11ec22aa4704bc41483122f68bf97752e06302c455ead667af5cb753e6a09bff06632bc465c1cfd4c4b75a53 - languageName: node - linkType: hard - -"@npmcli/fs@npm:^5.0.0": - version: 5.0.0 - resolution: "@npmcli/fs@npm:5.0.0" - dependencies: - semver: "npm:^7.3.5" - checksum: 10c0/26e376d780f60ff16e874a0ac9bc3399186846baae0b6e1352286385ac134d900cc5dafaded77f38d77f86898fc923ae1cee9d7399f0275b1aa24878915d722b - languageName: node - linkType: hard - -"abbrev@npm:^4.0.0": - version: 4.0.0 - resolution: "abbrev@npm:4.0.0" - checksum: 10c0/b4cc16935235e80702fc90192e349e32f8ef0ed151ef506aa78c81a7c455ec18375c4125414b99f84b2e055199d66383e787675f0bcd87da7a4dbd59f9eac1d5 - languageName: node - linkType: hard - -"agent-base@npm:^7.1.0, agent-base@npm:^7.1.2": - version: 7.1.4 - resolution: "agent-base@npm:7.1.4" - checksum: 10c0/c2c9ab7599692d594b6a161559ada307b7a624fa4c7b03e3afdb5a5e31cd0e53269115b620fcab024c5ac6a6f37fa5eb2e004f076ad30f5f7e6b8b671f7b35fe - languageName: node - linkType: hard - -"balanced-match@npm:^4.0.2": - version: 4.0.4 - resolution: "balanced-match@npm:4.0.4" - checksum: 10c0/07e86102a3eb2ee2a6a1a89164f29d0dbaebd28f2ca3f5ca786f36b8b23d9e417eb3be45a4acf754f837be5ac0a2317de90d3fcb7f4f4dc95720a1f36b26a17b - languageName: node - linkType: hard - -"brace-expansion@npm:^5.0.2": - version: 5.0.4 - resolution: "brace-expansion@npm:5.0.4" - dependencies: - balanced-match: "npm:^4.0.2" - checksum: 10c0/359cbcfa80b2eb914ca1f3440e92313fbfe7919ee6b274c35db55bec555aded69dac5ee78f102cec90c35f98c20fa43d10936d0cd9978158823c249257e1643a - languageName: node - linkType: hard - -"cacache@npm:^20.0.1": - version: 20.0.3 - resolution: "cacache@npm:20.0.3" - dependencies: - "@npmcli/fs": "npm:^5.0.0" - fs-minipass: "npm:^3.0.0" - glob: "npm:^13.0.0" - lru-cache: "npm:^11.1.0" - minipass: "npm:^7.0.3" - minipass-collect: "npm:^2.0.1" - minipass-flush: "npm:^1.0.5" - minipass-pipeline: "npm:^1.2.4" - p-map: "npm:^7.0.2" - ssri: "npm:^13.0.0" - unique-filename: "npm:^5.0.0" - checksum: 10c0/c7da1ca694d20e8f8aedabd21dc11518f809a7d2b59aa76a1fc655db5a9e62379e465c157ddd2afe34b19230808882288effa6911b2de26a088a6d5645123462 - languageName: node - linkType: hard - -"chownr@npm:^3.0.0": - version: 3.0.0 - resolution: "chownr@npm:3.0.0" - checksum: 10c0/43925b87700f7e3893296c8e9c56cc58f926411cce3a6e5898136daaf08f08b9a8eb76d37d3267e707d0dcc17aed2e2ebdf5848c0c3ce95cf910a919935c1b10 - languageName: node - linkType: hard - -"debug@npm:4, debug@npm:^4.3.4": - version: 4.4.3 - resolution: "debug@npm:4.4.3" - dependencies: - ms: "npm:^2.1.3" - peerDependenciesMeta: - supports-color: - optional: true - checksum: 10c0/d79136ec6c83ecbefd0f6a5593da6a9c91ec4d7ddc4b54c883d6e71ec9accb5f67a1a5e96d00a328196b5b5c86d365e98d8a3a70856aaf16b4e7b1985e67f5a6 - languageName: node - linkType: hard - -"encoding@npm:^0.1.13": - version: 0.1.13 - resolution: "encoding@npm:0.1.13" - dependencies: - iconv-lite: "npm:^0.6.2" - checksum: 10c0/36d938712ff00fe1f4bac88b43bcffb5930c1efa57bbcdca9d67e1d9d6c57cfb1200fb01efe0f3109b2ce99b231f90779532814a81370a1bd3274a0f58585039 - languageName: node - linkType: hard - -"env-paths@npm:^2.2.0": - version: 2.2.1 - resolution: "env-paths@npm:2.2.1" - checksum: 10c0/285325677bf00e30845e330eec32894f5105529db97496ee3f598478e50f008c5352a41a30e5e72ec9de8a542b5a570b85699cd63bd2bc646dbcb9f311d83bc4 - languageName: node - linkType: hard - -"err-code@npm:^2.0.2": - version: 2.0.3 - resolution: "err-code@npm:2.0.3" - checksum: 10c0/b642f7b4dd4a376e954947550a3065a9ece6733ab8e51ad80db727aaae0817c2e99b02a97a3d6cecc648a97848305e728289cf312d09af395403a90c9d4d8a66 - languageName: node - linkType: hard - -"exponential-backoff@npm:^3.1.1": - version: 3.1.3 - resolution: "exponential-backoff@npm:3.1.3" - checksum: 10c0/77e3ae682b7b1f4972f563c6dbcd2b0d54ac679e62d5d32f3e5085feba20483cf28bd505543f520e287a56d4d55a28d7874299941faf637e779a1aa5994d1267 - languageName: node - linkType: hard - -"fdir@npm:^6.5.0": - version: 6.5.0 - resolution: "fdir@npm:6.5.0" - peerDependencies: - picomatch: ^3 || ^4 - peerDependenciesMeta: - picomatch: - optional: true - checksum: 10c0/e345083c4306b3aed6cb8ec551e26c36bab5c511e99ea4576a16750ddc8d3240e63826cc624f5ae17ad4dc82e68a253213b60d556c11bfad064b7607847ed07f - languageName: node - linkType: hard - -"fs-minipass@npm:^3.0.0": - version: 3.0.3 - resolution: "fs-minipass@npm:3.0.3" - dependencies: - minipass: "npm:^7.0.3" - checksum: 10c0/63e80da2ff9b621e2cb1596abcb9207f1cf82b968b116ccd7b959e3323144cce7fb141462200971c38bbf2ecca51695069db45265705bed09a7cd93ae5b89f94 - languageName: node - linkType: hard - -"glob@npm:^13.0.0": - version: 13.0.2 - resolution: "glob@npm:13.0.2" - dependencies: - minimatch: "npm:^10.1.2" - minipass: "npm:^7.1.2" - path-scurry: "npm:^2.0.0" - checksum: 10c0/3d4b09efa922c4cba9be6d5b9efae14384e7422aeb886eb35fba8a94820b8281474b8d3f16927127fb1a0c8580e18fc00e3fda03c8dc31fa0af3ba918edeeb04 - languageName: node - linkType: hard - -"graceful-fs@npm:^4.2.6": - version: 4.2.11 - resolution: "graceful-fs@npm:4.2.11" - checksum: 10c0/386d011a553e02bc594ac2ca0bd6d9e4c22d7fa8cfbfc448a6d148c59ea881b092db9dbe3547ae4b88e55f1b01f7c4a2ecc53b310c042793e63aa44cf6c257f2 - languageName: node - linkType: hard - -"http-cache-semantics@npm:^4.1.1": - version: 4.2.0 - resolution: "http-cache-semantics@npm:4.2.0" - checksum: 10c0/45b66a945cf13ec2d1f29432277201313babf4a01d9e52f44b31ca923434083afeca03f18417f599c9ab3d0e7b618ceb21257542338b57c54b710463b4a53e37 - languageName: node - linkType: hard - -"http-proxy-agent@npm:^7.0.0": - version: 7.0.2 - resolution: "http-proxy-agent@npm:7.0.2" - dependencies: - agent-base: "npm:^7.1.0" - debug: "npm:^4.3.4" - checksum: 10c0/4207b06a4580fb85dd6dff521f0abf6db517489e70863dca1a0291daa7f2d3d2d6015a57bd702af068ea5cf9f1f6ff72314f5f5b4228d299c0904135d2aef921 - languageName: node - linkType: hard - -"https-proxy-agent@npm:^7.0.1": - version: 7.0.6 - resolution: "https-proxy-agent@npm:7.0.6" - dependencies: - agent-base: "npm:^7.1.2" - debug: "npm:4" - checksum: 10c0/f729219bc735edb621fa30e6e84e60ee5d00802b8247aac0d7b79b0bd6d4b3294737a337b93b86a0bd9e68099d031858a39260c976dc14cdbba238ba1f8779ac - languageName: node - linkType: hard - -"iconv-lite@npm:^0.6.2": - version: 0.6.3 - resolution: "iconv-lite@npm:0.6.3" - dependencies: - safer-buffer: "npm:>= 2.1.2 < 3.0.0" - checksum: 10c0/98102bc66b33fcf5ac044099d1257ba0b7ad5e3ccd3221f34dd508ab4070edff183276221684e1e0555b145fce0850c9f7d2b60a9fcac50fbb4ea0d6e845a3b1 - languageName: node - linkType: hard - -"imurmurhash@npm:^0.1.4": - version: 0.1.4 - resolution: "imurmurhash@npm:0.1.4" - checksum: 10c0/8b51313850dd33605c6c9d3fd9638b714f4c4c40250cff658209f30d40da60f78992fb2df5dabee4acf589a6a82bbc79ad5486550754bd9ec4e3fc0d4a57d6a6 - languageName: node - linkType: hard - -"ip-address@npm:^10.0.1": - version: 10.1.0 - resolution: "ip-address@npm:10.1.0" - checksum: 10c0/0103516cfa93f6433b3bd7333fa876eb21263912329bfa47010af5e16934eeeff86f3d2ae700a3744a137839ddfad62b900c7a445607884a49b5d1e32a3d7566 - languageName: node - linkType: hard - -"isexe@npm:^4.0.0": - version: 4.0.0 - resolution: "isexe@npm:4.0.0" - checksum: 10c0/5884815115bceac452877659a9c7726382531592f43dc29e5d48b7c4100661aed54018cb90bd36cb2eaeba521092570769167acbb95c18d39afdccbcca06c5ce - languageName: node - linkType: hard - -"lru-cache@npm:^11.0.0, lru-cache@npm:^11.1.0, lru-cache@npm:^11.2.1": - version: 11.2.6 - resolution: "lru-cache@npm:11.2.6" - checksum: 10c0/73bbffb298760e71b2bfe8ebc16a311c6a60ceddbba919cfedfd8635c2d125fbfb5a39b71818200e67973b11f8d59c5a9e31d6f90722e340e90393663a66e5cd - languageName: node - linkType: hard - -"make-fetch-happen@npm:^15.0.0": - version: 15.0.3 - resolution: "make-fetch-happen@npm:15.0.3" - dependencies: - "@npmcli/agent": "npm:^4.0.0" - cacache: "npm:^20.0.1" - http-cache-semantics: "npm:^4.1.1" - minipass: "npm:^7.0.2" - minipass-fetch: "npm:^5.0.0" - minipass-flush: "npm:^1.0.5" - minipass-pipeline: "npm:^1.2.4" - negotiator: "npm:^1.0.0" - proc-log: "npm:^6.0.0" - promise-retry: "npm:^2.0.1" - ssri: "npm:^13.0.0" - checksum: 10c0/525f74915660be60b616bcbd267c4a5b59481b073ba125e45c9c3a041bb1a47a2bd0ae79d028eb6f5f95bf9851a4158423f5068539c3093621abb64027e8e461 - languageName: node - linkType: hard - -"minimatch@npm:^10.1.2": - version: 10.2.4 - resolution: "minimatch@npm:10.2.4" - dependencies: - brace-expansion: "npm:^5.0.2" - checksum: 10c0/35f3dfb7b99b51efd46afd378486889f590e7efb10e0f6a10ba6800428cf65c9a8dedb74427d0570b318d749b543dc4e85f06d46d2858bc8cac7e1eb49a95945 - languageName: node - linkType: hard - -"minipass-collect@npm:^2.0.1": - version: 2.0.1 - resolution: "minipass-collect@npm:2.0.1" - dependencies: - minipass: "npm:^7.0.3" - checksum: 10c0/5167e73f62bb74cc5019594709c77e6a742051a647fe9499abf03c71dca75515b7959d67a764bdc4f8b361cf897fbf25e2d9869ee039203ed45240f48b9aa06e - languageName: node - linkType: hard - -"minipass-fetch@npm:^5.0.0": - version: 5.0.1 - resolution: "minipass-fetch@npm:5.0.1" - dependencies: - encoding: "npm:^0.1.13" - minipass: "npm:^7.0.3" - minipass-sized: "npm:^2.0.0" - minizlib: "npm:^3.0.1" - dependenciesMeta: - encoding: - optional: true - checksum: 10c0/50bcf48c9841ebb25e29a2817468595219c72cfffc7c175a1d7327843c8bef9b72cb01778f46df7eca695dfe47ab98e6167af4cb026ddd80f660842919a5193c - languageName: node - linkType: hard - -"minipass-flush@npm:^1.0.5": - version: 1.0.5 - resolution: "minipass-flush@npm:1.0.5" - dependencies: - minipass: "npm:^3.0.0" - checksum: 10c0/2a51b63feb799d2bb34669205eee7c0eaf9dce01883261a5b77410c9408aa447e478efd191b4de6fc1101e796ff5892f8443ef20d9544385819093dbb32d36bd - languageName: node - linkType: hard - -"minipass-pipeline@npm:^1.2.4": - version: 1.2.4 - resolution: "minipass-pipeline@npm:1.2.4" - dependencies: - minipass: "npm:^3.0.0" - checksum: 10c0/cbda57cea20b140b797505dc2cac71581a70b3247b84480c1fed5ca5ba46c25ecc25f68bfc9e6dcb1a6e9017dab5c7ada5eab73ad4f0a49d84e35093e0c643f2 - languageName: node - linkType: hard - -"minipass-sized@npm:^2.0.0": - version: 2.0.0 - resolution: "minipass-sized@npm:2.0.0" - dependencies: - minipass: "npm:^7.1.2" - checksum: 10c0/f9201696a6f6d68610d04c9c83e3d2e5cb9c026aae1c8cbf7e17f386105cb79c1bb088dbc21bf0b1eb4f3fb5df384fd1e7aa3bf1f33868c416ae8c8a92679db8 - languageName: node - linkType: hard - -"minipass@npm:^3.0.0": - version: 3.3.6 - resolution: "minipass@npm:3.3.6" - dependencies: - yallist: "npm:^4.0.0" - checksum: 10c0/a114746943afa1dbbca8249e706d1d38b85ed1298b530f5808ce51f8e9e941962e2a5ad2e00eae7dd21d8a4aae6586a66d4216d1a259385e9d0358f0c1eba16c - languageName: node - linkType: hard - -"minipass@npm:^7.0.2, minipass@npm:^7.0.3, minipass@npm:^7.0.4, minipass@npm:^7.1.2": - version: 7.1.2 - resolution: "minipass@npm:7.1.2" - checksum: 10c0/b0fd20bb9fb56e5fa9a8bfac539e8915ae07430a619e4b86ff71f5fc757ef3924b23b2c4230393af1eda647ed3d75739e4e0acb250a6b1eb277cf7f8fe449557 - languageName: node - linkType: hard - -"minizlib@npm:^3.0.1, minizlib@npm:^3.1.0": - version: 3.1.0 - resolution: "minizlib@npm:3.1.0" - dependencies: - minipass: "npm:^7.1.2" - checksum: 10c0/5aad75ab0090b8266069c9aabe582c021ae53eb33c6c691054a13a45db3b4f91a7fb1bd79151e6b4e9e9a86727b522527c0a06ec7d45206b745d54cd3097bcec - languageName: node - linkType: hard - -"ms@npm:^2.1.3": - version: 2.1.3 - resolution: "ms@npm:2.1.3" - checksum: 10c0/d924b57e7312b3b63ad21fc5b3dc0af5e78d61a1fc7cfb5457edaf26326bf62be5307cc87ffb6862ef1c2b33b0233cdb5d4f01c4c958cc0d660948b65a287a48 - languageName: node - linkType: hard - -"negotiator@npm:^1.0.0": - version: 1.0.0 - resolution: "negotiator@npm:1.0.0" - checksum: 10c0/4c559dd52669ea48e1914f9d634227c561221dd54734070791f999c52ed0ff36e437b2e07d5c1f6e32909fc625fe46491c16e4a8f0572567d4dd15c3a4fda04b - languageName: node - linkType: hard - -"node-addon-api@npm:^8.0.0": - version: 8.0.0 - resolution: "node-addon-api@npm:8.0.0" - dependencies: - node-gyp: "npm:latest" - checksum: 10c0/20eb231362cc07c62d9839164473744d985be5d82685214f3750d990d9f61ef366e0ba112a766c925d640ed29b2a500b83568e895dc2444dcd5db01e615aac2b - languageName: node - linkType: hard - -"node-api-headers@npm:^1.1.0": - version: 1.1.0 - resolution: "node-api-headers@npm:1.1.0" - checksum: 10c0/7806d71077348ea199034e8c90a9147038d37fcccc1b85717e48c095fe31783a4f909f5daced4506e6cbce93fba91220bb3fc8626ee0640d26de9860f6500174 - languageName: node - linkType: hard - -"node-gyp@npm:latest": - version: 12.2.0 - resolution: "node-gyp@npm:12.2.0" - dependencies: - env-paths: "npm:^2.2.0" - exponential-backoff: "npm:^3.1.1" - graceful-fs: "npm:^4.2.6" - make-fetch-happen: "npm:^15.0.0" - nopt: "npm:^9.0.0" - proc-log: "npm:^6.0.0" - semver: "npm:^7.3.5" - tar: "npm:^7.5.4" - tinyglobby: "npm:^0.2.12" - which: "npm:^6.0.0" - bin: - node-gyp: bin/node-gyp.js - checksum: 10c0/3ed046746a5a7d90950cd8b0547332b06598443f31fe213ef4332a7174c7b7d259e1704835feda79b87d3f02e59d7791842aac60642ede4396ab25fdf0f8f759 - languageName: node - linkType: hard - -"nodejs_module@workspace:.": - version: 0.0.0-use.local - resolution: "nodejs_module@workspace:." - dependencies: - node-addon-api: "npm:^8.0.0" - node-api-headers: "npm:^1.1.0" - languageName: unknown - linkType: soft - -"nopt@npm:^9.0.0": - version: 9.0.0 - resolution: "nopt@npm:9.0.0" - dependencies: - abbrev: "npm:^4.0.0" - bin: - nopt: bin/nopt.js - checksum: 10c0/1822eb6f9b020ef6f7a7516d7b64a8036e09666ea55ac40416c36e4b2b343122c3cff0e2f085675f53de1d2db99a2a89a60ccea1d120bcd6a5347bf6ceb4a7fd - languageName: node - linkType: hard - -"p-map@npm:^7.0.2": - version: 7.0.4 - resolution: "p-map@npm:7.0.4" - checksum: 10c0/a5030935d3cb2919d7e89454d1ce82141e6f9955413658b8c9403cfe379283770ed3048146b44cde168aa9e8c716505f196d5689db0ae3ce9a71521a2fef3abd - languageName: node - linkType: hard - -"path-scurry@npm:^2.0.0": - version: 2.0.1 - resolution: "path-scurry@npm:2.0.1" - dependencies: - lru-cache: "npm:^11.0.0" - minipass: "npm:^7.1.2" - checksum: 10c0/2a16ed0e81fbc43513e245aa5763354e25e787dab0d539581a6c3f0f967461a159ed6236b2559de23aa5b88e7dc32b469b6c47568833dd142a4b24b4f5cd2620 - languageName: node - linkType: hard - -"picomatch@npm:^4.0.3": - version: 4.0.3 - resolution: "picomatch@npm:4.0.3" - checksum: 10c0/9582c951e95eebee5434f59e426cddd228a7b97a0161a375aed4be244bd3fe8e3a31b846808ea14ef2c8a2527a6eeab7b3946a67d5979e81694654f939473ae2 - languageName: node - linkType: hard - -"proc-log@npm:^6.0.0": - version: 6.1.0 - resolution: "proc-log@npm:6.1.0" - checksum: 10c0/4f178d4062733ead9d71a9b1ab24ebcecdfe2250916a5b1555f04fe2eda972a0ec76fbaa8df1ad9c02707add6749219d118a4fc46dc56bdfe4dde4b47d80bb82 - languageName: node - linkType: hard - -"promise-retry@npm:^2.0.1": - version: 2.0.1 - resolution: "promise-retry@npm:2.0.1" - dependencies: - err-code: "npm:^2.0.2" - retry: "npm:^0.12.0" - checksum: 10c0/9c7045a1a2928094b5b9b15336dcd2a7b1c052f674550df63cc3f36cd44028e5080448175b6f6ca32b642de81150f5e7b1a98b728f15cb069f2dd60ac2616b96 - languageName: node - linkType: hard - -"retry@npm:^0.12.0": - version: 0.12.0 - resolution: "retry@npm:0.12.0" - checksum: 10c0/59933e8501727ba13ad73ef4a04d5280b3717fd650408460c987392efe9d7be2040778ed8ebe933c5cbd63da3dcc37919c141ef8af0a54a6e4fca5a2af177bfe - languageName: node - linkType: hard - -"safer-buffer@npm:>= 2.1.2 < 3.0.0": - version: 2.1.2 - resolution: "safer-buffer@npm:2.1.2" - checksum: 10c0/7e3c8b2e88a1841c9671094bbaeebd94448111dd90a81a1f606f3f67708a6ec57763b3b47f06da09fc6054193e0e6709e77325415dc8422b04497a8070fa02d4 - languageName: node - linkType: hard - -"semver@npm:^7.3.5": - version: 7.7.4 - resolution: "semver@npm:7.7.4" - bin: - semver: bin/semver.js - checksum: 10c0/5215ad0234e2845d4ea5bb9d836d42b03499546ddafb12075566899fc617f68794bb6f146076b6881d755de17d6c6cc73372555879ec7dce2c2feee947866ad2 - languageName: node - linkType: hard - -"smart-buffer@npm:^4.2.0": - version: 4.2.0 - resolution: "smart-buffer@npm:4.2.0" - checksum: 10c0/a16775323e1404dd43fabafe7460be13a471e021637bc7889468eb45ce6a6b207261f454e4e530a19500cc962c4cc5348583520843b363f4193cee5c00e1e539 - languageName: node - linkType: hard - -"socks-proxy-agent@npm:^8.0.3": - version: 8.0.5 - resolution: "socks-proxy-agent@npm:8.0.5" - dependencies: - agent-base: "npm:^7.1.2" - debug: "npm:^4.3.4" - socks: "npm:^2.8.3" - checksum: 10c0/5d2c6cecba6821389aabf18728325730504bf9bb1d9e342e7987a5d13badd7a98838cc9a55b8ed3cb866ad37cc23e1086f09c4d72d93105ce9dfe76330e9d2a6 - languageName: node - linkType: hard - -"socks@npm:^2.8.3": - version: 2.8.7 - resolution: "socks@npm:2.8.7" - dependencies: - ip-address: "npm:^10.0.1" - smart-buffer: "npm:^4.2.0" - checksum: 10c0/2805a43a1c4bcf9ebf6e018268d87b32b32b06fbbc1f9282573583acc155860dc361500f89c73bfbb157caa1b4ac78059eac0ef15d1811eb0ca75e0bdadbc9d2 - languageName: node - linkType: hard - -"ssri@npm:^13.0.0": - version: 13.0.1 - resolution: "ssri@npm:13.0.1" - dependencies: - minipass: "npm:^7.0.3" - checksum: 10c0/cf6408a18676c57ff2ed06b8a20dc64bb3e748e5c7e095332e6aecaa2b8422b1e94a739a8453bf65156a8a47afe23757ba4ab52d3ea3b62322dc40875763e17a - languageName: node - linkType: hard - -"tar@npm:7.5.11": - version: 7.5.11 - resolution: "tar@npm:7.5.11" - dependencies: - "@isaacs/fs-minipass": "npm:^4.0.0" - chownr: "npm:^3.0.0" - minipass: "npm:^7.1.2" - minizlib: "npm:^3.1.0" - yallist: "npm:^5.0.0" - checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 - languageName: node - linkType: hard - -"tinyglobby@npm:^0.2.12": - version: 0.2.15 - resolution: "tinyglobby@npm:0.2.15" - dependencies: - fdir: "npm:^6.5.0" - picomatch: "npm:^4.0.3" - checksum: 10c0/869c31490d0d88eedb8305d178d4c75e7463e820df5a9b9d388291daf93e8b1eb5de1dad1c1e139767e4269fe75f3b10d5009b2cc14db96ff98986920a186844 - languageName: node - linkType: hard - -"unique-filename@npm:^5.0.0": - version: 5.0.0 - resolution: "unique-filename@npm:5.0.0" - dependencies: - unique-slug: "npm:^6.0.0" - checksum: 10c0/afb897e9cf4c2fb622ea716f7c2bb462001928fc5f437972213afdf1cc32101a230c0f1e9d96fc91ee5185eca0f2feb34127145874975f347be52eb91d6ccc2c - languageName: node - linkType: hard - -"unique-slug@npm:^6.0.0": - version: 6.0.0 - resolution: "unique-slug@npm:6.0.0" - dependencies: - imurmurhash: "npm:^0.1.4" - checksum: 10c0/da7ade4cb04eb33ad0499861f82fe95ce9c7c878b7139dc54d140ecfb6a6541c18a5c8dac16188b8b379fe62c0c1f1b710814baac910cde5f4fec06212126c6a - languageName: node - linkType: hard - -"which@npm:^6.0.0": - version: 6.0.1 - resolution: "which@npm:6.0.1" - dependencies: - isexe: "npm:^4.0.0" - bin: - node-which: bin/which.js - checksum: 10c0/7e710e54ea36d2d6183bee2f9caa27a3b47b9baf8dee55a199b736fcf85eab3b9df7556fca3d02b50af7f3dfba5ea3a45644189836df06267df457e354da66d5 - languageName: node - linkType: hard - -"yallist@npm:^4.0.0": - version: 4.0.0 - resolution: "yallist@npm:4.0.0" - checksum: 10c0/2286b5e8dbfe22204ab66e2ef5cc9bbb1e55dfc873bbe0d568aa943eb255d131890dfd5bf243637273d31119b870f49c18fcde2c6ffbb7a7a092b870dc90625a - languageName: node - linkType: hard - -"yallist@npm:^5.0.0": - version: 5.0.0 - resolution: "yallist@npm:5.0.0" - checksum: 10c0/a499c81ce6d4a1d260d4ea0f6d49ab4da09681e32c3f0472dee16667ed69d01dae63a3b81745a24bd78476ec4fcf856114cb4896ace738e01da34b2c42235416 - languageName: node - linkType: hard ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) diff --git a/barretenberg/docs/yarn.lock b/barretenberg/docs/yarn.lock index d16b732eb12d..2c12d13a5be6 100644 --- a/barretenberg/docs/yarn.lock +++ b/barretenberg/docs/yarn.lock @@ -17810,7 +17810,6 @@ tar-stream@^3.0.0, tar-stream@^3.1.4, tar-stream@^3.1.5: fast-fifo "^1.2.0" streamx "^2.15.0" -<<<<<<< HEAD tar@^6.1.11: version "6.2.1" resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a" @@ -17824,15 +17823,9 @@ tar@^6.1.11: yallist "^4.0.0" tar@^7.4.0: - version "7.4.3" - resolved "https://registry.yarnpkg.com/tar/-/tar-7.4.3.tgz#88bbe9286a3fcd900e94592cda7a22b192e80571" - integrity sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw== -======= -tar@^7.4.0, tar@^7.5.3: version "7.5.11" resolved "https://registry.yarnpkg.com/tar/-/tar-7.5.11.tgz#1250fae45d98806b36d703b30973fa8e0a6d8868" integrity sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ== ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass" "^4.0.0" chownr "^3.0.0" diff --git a/barretenberg/ts/yarn.lock b/barretenberg/ts/yarn.lock index c4ec5d0f23eb..8a088c70ba35 100644 --- a/barretenberg/ts/yarn.lock +++ b/barretenberg/ts/yarn.lock @@ -2818,15 +2818,9 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD "glob@npm:^10.2.2, glob@npm:^10.3.10": - version: 10.4.5 - resolution: "glob@npm:10.4.5" -======= -"glob@npm:^10.3.10": version: 10.5.0 resolution: "glob@npm:10.5.0" ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: foreground-child: "npm:^3.1.0" jackspeak: "npm:^3.1.2" @@ -3753,16 +3747,6 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD -======= -"lru-cache@npm:^11.0.0, lru-cache@npm:^11.1.0, lru-cache@npm:^11.2.1": - version: 11.2.7 - resolution: "lru-cache@npm:11.2.7" - checksum: 10/fbff4b8dee8189dde9b52cdfb3ea89b4c9cec094c1538cd30d1f47299477ff312efdb35f7994477ec72328f8e754e232b26a143feda1bd1f79ff22da6664d2c5 - languageName: node - linkType: hard - ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) "lru-cache@npm:^5.1.1": version: 5.1.1 resolution: "lru-cache@npm:5.1.1" diff --git a/boxes/yarn.lock b/boxes/yarn.lock index d6dd11886054..2fc51c0e777f 100644 --- a/boxes/yarn.lock +++ b/boxes/yarn.lock @@ -11322,15 +11322,9 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD "tar@npm:^7.4.3": - version: 7.4.3 - resolution: "tar@npm:7.4.3" -======= -"tar@npm:^7.5.4": version: 7.5.11 resolution: "tar@npm:7.5.11" ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" @@ -11338,11 +11332,7 @@ __metadata: minizlib: "npm:^3.0.1" mkdirp: "npm:^3.0.1" yallist: "npm:^5.0.0" -<<<<<<< HEAD - checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d -======= checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/docs/yarn.lock b/docs/yarn.lock index 8edaed8394e2..2178ced0807d 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -23248,26 +23248,16 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD "tar@npm:^7.4.0, tar@npm:^7.4.3": - version: 7.5.1 - resolution: "tar@npm:7.5.1" -======= -"tar@npm:^7.4.0, tar@npm:^7.5.3, tar@npm:^7.5.4": version: 7.5.11 resolution: "tar@npm:7.5.11" ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" minipass: "npm:^7.1.2" minizlib: "npm:^3.1.0" yallist: "npm:^5.0.0" -<<<<<<< HEAD - checksum: 10c0/0dad0596a61586180981133b20c32cfd93c5863c5b7140d646714e6ea8ec84583b879e5dc3928a4d683be6e6109ad7ea3de1cf71986d5194f81b3a016c8858c9 -======= checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/playground/yarn.lock b/playground/yarn.lock index 823b11d8e88d..9e7764b9a21f 100644 --- a/playground/yarn.lock +++ b/playground/yarn.lock @@ -5832,15 +5832,9 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD "tar@npm:^7.4.3": - version: 7.4.3 - resolution: "tar@npm:7.4.3" -======= -"tar@npm:^7.5.4": version: 7.5.11 resolution: "tar@npm:7.5.11" ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) dependencies: "@isaacs/fs-minipass": "npm:^4.0.0" chownr: "npm:^3.0.0" @@ -5848,11 +5842,7 @@ __metadata: minizlib: "npm:^3.0.1" mkdirp: "npm:^3.0.1" yallist: "npm:^5.0.0" -<<<<<<< HEAD - checksum: 10c0/d4679609bb2a9b48eeaf84632b6d844128d2412b95b6de07d53d8ee8baf4ca0857c9331dfa510390a0727b550fd543d4d1a10995ad86cdf078423fbb8d99831d -======= checksum: 10c0/b6bb420550ef50ef23356018155e956cd83282c97b6128d8d5cfe5740c57582d806a244b2ef0bf686a74ce526babe8b8b9061527623e935e850008d86d838929 ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index 46b6ca634c56..cb1b1d09ce4d 100644 --- a/yarn-project/yarn.lock +++ b/yarn-project/yarn.lock @@ -20857,7 +20857,6 @@ __metadata: languageName: node linkType: hard -<<<<<<< HEAD "tar@npm:^6.1.11, tar@npm:^6.1.2": version: 6.2.1 resolution: "tar@npm:6.2.1" @@ -20869,18 +20868,6 @@ __metadata: mkdirp: "npm:^1.0.3" yallist: "npm:^4.0.0" checksum: 10/bfbfbb2861888077fc1130b84029cdc2721efb93d1d1fb80f22a7ac3a98ec6f8972f29e564103bbebf5e97be67ebc356d37fa48dbc4960600a1eb7230fbd1ea0 -======= -"tar@npm:^7.5.4": - version: 7.5.11 - resolution: "tar@npm:7.5.11" - dependencies: - "@isaacs/fs-minipass": "npm:^4.0.0" - chownr: "npm:^3.0.0" - minipass: "npm:^7.1.2" - minizlib: "npm:^3.1.0" - yallist: "npm:^5.0.0" - checksum: 10/fb2e77ee858a73936c68e066f4a602d428d6f812e6da0cc1e14a41f99498e4f7fd3535e355fa15157240a5538aa416026cfa6306bb0d1d1c1abf314b1f878e9a ->>>>>>> d11638dfc1 (fix: dependabot alerts (#21531)) languageName: node linkType: hard From e80a0bc66e0938dfa96b320ec86f9cece90321e4 Mon Sep 17 00:00:00 2001 From: Phil Windle Date: Thu, 12 Mar 2026 18:02:48 +0000 Subject: [PATCH 08/17] Don't update state if we failed to execute sufficient transactions --- .../sequencer/checkpoint_proposal_job.test.ts | 7 ++- .../src/sequencer/checkpoint_proposal_job.ts | 39 +++++++++----- .../src/test/mock_checkpoint_builder.ts | 4 +- .../stdlib/src/interfaces/block-builder.ts | 17 ++++--- .../src/checkpoint_builder.test.ts | 51 +++++++++++++++---- .../src/checkpoint_builder.ts | 12 ++--- 6 files changed, 89 insertions(+), 41 deletions(-) diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts index f1a702e992f2..e619838167a5 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts @@ -22,9 +22,8 @@ import { Checkpoint, type CheckpointData, L1PublishedData } from '@aztec/stdlib/ import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import { GasFees } from '@aztec/stdlib/gas'; import { - type BuildBlockInCheckpointResult, + InsufficientValidTxsError, type MerkleTreeWriteOperations, - NoValidTxsError, type ResolvedSequencerConfig, type WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; @@ -774,7 +773,7 @@ describe('CheckpointProposalJob', () => { const checkpointBuilder = mock(); const failedTxs: FailedTx[] = txs.slice(1).map(tx => ({ tx, error: new Error('Invalid tx') })); - checkpointBuilder.buildBlock.mockResolvedValue({ failedTxs, numTxs: 1 } as BuildBlockInCheckpointResult); + checkpointBuilder.buildBlock.mockRejectedValue(new InsufficientValidTxsError(1, 2, failedTxs)); const checkpoint = await job.buildSingleBlock(checkpointBuilder, { blockNumber: newBlockNumber, @@ -795,7 +794,7 @@ describe('CheckpointProposalJob', () => { const checkpointBuilder = mock(); const failedTxs: FailedTx[] = txs.slice(1).map(tx => ({ tx, error: new Error('Invalid tx') })); - checkpointBuilder.buildBlock.mockRejectedValue(new NoValidTxsError(failedTxs)); + checkpointBuilder.buildBlock.mockRejectedValue(new InsufficientValidTxsError(0, 3, failedTxs)); const checkpoint = await job.buildSingleBlock(checkpointBuilder, { blockNumber: newBlockNumber, diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index 3e9cd16150c8..a5dbe27008be 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -34,7 +34,7 @@ import { type Checkpoint, validateCheckpoint } from '@aztec/stdlib/checkpoint'; import { getSlotStartBuildTimestamp } from '@aztec/stdlib/epoch-helpers'; import { Gas } from '@aztec/stdlib/gas'; import { - NoValidTxsError, + InsufficientValidTxsError, type PublicProcessorLimits, type ResolvedSequencerConfig, type WorldStateSynchronizer, @@ -569,7 +569,9 @@ export class CheckpointProposalJob implements Traceable { // Per-block limits derived at startup by computeBlockLimits(), further capped // by remaining checkpoint-level budgets inside CheckpointBuilder before each block is built. - const blockBuilderOptions: PublicProcessorLimits = { + // minValidTxs is passed into the builder so it can reject the block *before* updating state. + const minValidTxs = forceCreate ? 0 : (this.config.minValidTxsPerBlock ?? minTxs); + const blockBuilderOptions: PublicProcessorLimits & { minValidTxs?: number } = { maxTransactions: this.config.maxTxsPerBlock, maxBlockGas: this.config.maxL2BlockGas !== undefined || this.config.maxDABlockGas !== undefined @@ -577,9 +579,12 @@ export class CheckpointProposalJob implements Traceable { : undefined, deadline: buildDeadline, isBuildingProposal: true, + minValidTxs, }; - // Actually build the block by executing txs + // Actually build the block by executing txs. The builder throws InsufficientValidTxsError + // if the number of successfully processed txs is below minValidTxs, ensuring state is not + // updated for blocks that will be discarded. const buildResult = await this.buildSingleBlockWithCheckpointBuilder( checkpointBuilder, pendingTxs, @@ -591,14 +596,16 @@ export class CheckpointProposalJob implements Traceable { // If any txs failed during execution, drop them from the mempool so we don't pick them up again await this.dropFailedTxsFromP2P(buildResult.failedTxs); - // Check if we have created a block with enough txs. If there were invalid txs in the pool, or if execution took - // too long, then we may not get to minTxsPerBlock after executing public functions. - const minValidTxs = this.config.minValidTxsPerBlock ?? minTxs; - const numTxs = buildResult.status === 'no-valid-txs' ? 0 : buildResult.numTxs; - if (buildResult.status === 'no-valid-txs' || (!forceCreate && numTxs < minValidTxs)) { + if (buildResult.status === 'insufficient-valid-txs') { this.log.warn( `Block ${blockNumber} at index ${indexWithinCheckpoint} on slot ${this.slot} has too few valid txs to be proposed`, - { slot: this.slot, blockNumber, numTxs, indexWithinCheckpoint, minValidTxs, buildResult: buildResult.status }, + { + slot: this.slot, + blockNumber, + numTxs: buildResult.processedCount, + indexWithinCheckpoint, + minValidTxs, + }, ); this.eventEmitter.emit('block-build-failed', { reason: `Insufficient valid txs`, slot: this.slot }); this.metrics.recordBlockProposalFailed('insufficient_valid_txs'); @@ -606,7 +613,7 @@ export class CheckpointProposalJob implements Traceable { } // Block creation succeeded, emit stats and metrics - const { block, publicProcessorDuration, usedTxs, blockBuildDuration } = buildResult; + const { block, publicProcessorDuration, usedTxs, blockBuildDuration, numTxs } = buildResult; const blockStats = { eventName: 'l2-block-built', @@ -637,13 +644,13 @@ export class CheckpointProposalJob implements Traceable { } } - /** Uses the checkpoint builder to build a block, catching specific txs */ + /** Uses the checkpoint builder to build a block, catching InsufficientValidTxsError. */ private async buildSingleBlockWithCheckpointBuilder( checkpointBuilder: CheckpointBuilder, pendingTxs: AsyncIterable, blockNumber: BlockNumber, blockTimestamp: bigint, - blockBuilderOptions: PublicProcessorLimits, + blockBuilderOptions: PublicProcessorLimits & { minValidTxs?: number }, ) { try { const workTimer = new Timer(); @@ -651,8 +658,12 @@ export class CheckpointProposalJob implements Traceable { const blockBuildDuration = workTimer.ms(); return { ...result, blockBuildDuration, status: 'success' as const }; } catch (err: unknown) { - if (isErrorClass(err, NoValidTxsError)) { - return { failedTxs: err.failedTxs, status: 'no-valid-txs' as const }; + if (isErrorClass(err, InsufficientValidTxsError)) { + return { + failedTxs: err.failedTxs, + processedCount: err.processedCount, + status: 'insufficient-valid-txs' as const, + }; } throw err; } diff --git a/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts b/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts index 42d691191ef8..3c737f0968ef 100644 --- a/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts +++ b/yarn-project/sequencer-client/src/test/mock_checkpoint_builder.ts @@ -32,7 +32,7 @@ export class MockCheckpointBuilder implements ICheckpointBlockBuilder { public buildBlockCalls: Array<{ blockNumber: BlockNumber; timestamp: bigint; - opts: PublicProcessorLimits; + opts: PublicProcessorLimits & { minValidTxs?: number }; }> = []; /** Track all consumed transaction hashes across buildBlock calls */ public consumedTxHashes: Set = new Set(); @@ -74,7 +74,7 @@ export class MockCheckpointBuilder implements ICheckpointBlockBuilder { pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits, + opts: PublicProcessorLimits & { minValidTxs?: number }, ): Promise { this.buildBlockCalls.push({ blockNumber, timestamp, opts }); diff --git a/yarn-project/stdlib/src/interfaces/block-builder.ts b/yarn-project/stdlib/src/interfaces/block-builder.ts index 87ed1444fff5..5a91cb2d5ce3 100644 --- a/yarn-project/stdlib/src/interfaces/block-builder.ts +++ b/yarn-project/stdlib/src/interfaces/block-builder.ts @@ -84,11 +84,15 @@ export const FullNodeBlockBuilderConfigKeys: (keyof FullNodeBlockBuilderConfig)[ 'rollupManaLimit', ] as const; -/** Thrown when no valid transactions are available to include in a block after processing, and this is not the first block in a checkpoint. */ -export class NoValidTxsError extends Error { - constructor(public readonly failedTxs: FailedTx[]) { - super('No valid transactions to include in block'); - this.name = 'NoValidTxsError'; +/** Thrown when the number of successfully processed transactions is below the required minimum. */ +export class InsufficientValidTxsError extends Error { + constructor( + public readonly processedCount: number, + public readonly minRequired: number, + public readonly failedTxs: FailedTx[], + ) { + super(`Insufficient valid txs: got ${processedCount} but need ${minRequired}`); + this.name = 'InsufficientValidTxsError'; } } @@ -103,11 +107,12 @@ export type BuildBlockInCheckpointResult = { /** Interface for building blocks within a checkpoint context. */ export interface ICheckpointBlockBuilder { + /** Builds a single block within this checkpoint. Throws InsufficientValidTxsError if fewer than minValidTxs succeed. */ buildBlock( pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits, + opts: PublicProcessorLimits & { minValidTxs?: number }, ): Promise; } diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 0d9cf8ae6959..7fe58c3f315d 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -17,11 +17,12 @@ import type { ContractDataSource } from '@aztec/stdlib/contract'; import { Gas, GasFees } from '@aztec/stdlib/gas'; import { type FullNodeBlockBuilderConfig, + InsufficientValidTxsError, type MerkleTreeWriteOperations, - NoValidTxsError, type PublicProcessorLimits, type PublicProcessorValidator, } from '@aztec/stdlib/interfaces/server'; +import { TxHash } from '@aztec/stdlib/tx'; import type { CheckpointGlobalVariables, GlobalVariables, ProcessedTx, Tx } from '@aztec/stdlib/tx'; import type { TelemetryClient } from '@aztec/telemetry-client'; @@ -138,9 +139,7 @@ describe('CheckpointBuilder', () => { expect(lightweightCheckpointBuilder.addBlock).toHaveBeenCalled(); }); - it('allows building an empty first block in a checkpoint', async () => { - lightweightCheckpointBuilder.getBlockCount.mockReturnValue(0); - + it('allows building an empty block when minValidTxs is 0', async () => { const expectedBlock = await L2Block.random(blockNumber, { txsPerBlock: 0 }); lightweightCheckpointBuilder.addBlock.mockResolvedValue(expectedBlock); @@ -153,16 +152,14 @@ describe('CheckpointBuilder', () => { [], // debugLogs ]); - const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n); + const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n, { minValidTxs: 0 }); expect(result.block).toBe(expectedBlock); expect(result.numTxs).toBe(0); expect(lightweightCheckpointBuilder.addBlock).toHaveBeenCalled(); }); - it('throws NoValidTxsError when no valid transactions and not first block in checkpoint', async () => { - lightweightCheckpointBuilder.getBlockCount.mockReturnValue(1); - + it('throws InsufficientValidTxsError when fewer txs than minValidTxs', async () => { const failedTx = { tx: { txHash: Fr.random() } as unknown as Tx, error: new Error('tx failed') }; processor.process.mockResolvedValue([ [], // processedTxs - empty @@ -172,10 +169,46 @@ describe('CheckpointBuilder', () => { [], // debugLogs ]); - await expect(checkpointBuilder.buildBlock([], blockNumber, 1000n)).rejects.toThrow(NoValidTxsError); + await expect(checkpointBuilder.buildBlock([], blockNumber, 1000n, { minValidTxs: 1 })).rejects.toThrow( + InsufficientValidTxsError, + ); + + expect(lightweightCheckpointBuilder.addBlock).not.toHaveBeenCalled(); + }); + + it('does not update state when some txs succeed but below minValidTxs', async () => { + const processedTx = mock(); + processedTx.hash = TxHash.random(); + const failedTx = { tx: { txHash: Fr.random() } as unknown as Tx, error: new Error('tx failed') }; + processor.process.mockResolvedValue([ + [processedTx], // processedTxs - 1 succeeded + [failedTx], // failedTxs - 1 failed + [], // usedTxs + [], // returnValues + [], // debugLogs + ]); + + const err = await checkpointBuilder + .buildBlock([], blockNumber, 1000n, { minValidTxs: 2 }) + .catch((e: unknown) => e); + expect(err).toBeInstanceOf(InsufficientValidTxsError); + expect((err as InsufficientValidTxsError).processedCount).toBe(1); + expect((err as InsufficientValidTxsError).minRequired).toBe(2); expect(lightweightCheckpointBuilder.addBlock).not.toHaveBeenCalled(); }); + + it('defaults to minValidTxs=0 when not specified, allowing empty blocks', async () => { + const expectedBlock = await L2Block.random(blockNumber, { txsPerBlock: 0 }); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); + + processor.process.mockResolvedValue([[], [], [], [], []]); + + const result = await checkpointBuilder.buildBlock([], blockNumber, 1000n); + + expect(result.numTxs).toBe(0); + expect(lightweightCheckpointBuilder.addBlock).toHaveBeenCalled(); + }); }); describe('capLimitsByCheckpointBudgets', () => { diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index a80b3d2697b1..fc45ce1d76b1 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -25,8 +25,8 @@ import { FullNodeBlockBuilderConfigKeys, type ICheckpointBlockBuilder, type ICheckpointsBuilder, + InsufficientValidTxsError, type MerkleTreeWriteOperations, - NoValidTxsError, type PublicProcessorLimits, type WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; @@ -73,7 +73,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { pendingTxs: Iterable | AsyncIterable, blockNumber: BlockNumber, timestamp: bigint, - opts: PublicProcessorLimits & { expectedEndState?: StateReference } = {}, + opts: PublicProcessorLimits & { expectedEndState?: StateReference; minValidTxs?: number } = {}, ): Promise { const slot = this.checkpointBuilder.constants.slotNumber; @@ -107,10 +107,10 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { processor.process(pendingTxs, cappedOpts, validator), ); - // Throw if we didn't collect a single valid tx and we're not allowed to build empty blocks - // (only the first block in a checkpoint can be empty) - if (processedTxs.length === 0 && this.checkpointBuilder.getBlockCount() > 0) { - throw new NoValidTxsError(failedTxs); + // Throw before updating state if we don't have enough valid txs + const minValidTxs = opts.minValidTxs ?? 0; + if (processedTxs.length < minValidTxs) { + throw new InsufficientValidTxsError(processedTxs.length, minValidTxs, failedTxs); } // Add block to checkpoint From 2df54958990ed94a07b047a5ee2e8f31507f6eae Mon Sep 17 00:00:00 2001 From: Phil Windle Date: Thu, 12 Mar 2026 19:39:52 +0000 Subject: [PATCH 09/17] cherry-pick: Use an additional world state fork checkpoint when building blocks (with conflicts) --- .../src/checkpoint_builder.ts | 51 ++++++++++++++++--- 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index fc45ce1d76b1..c2ecdaf0f2be 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -34,6 +34,7 @@ import { type DebugLogStore, NullDebugLogStore } from '@aztec/stdlib/logs'; import { MerkleTreeId } from '@aztec/stdlib/trees'; import { type CheckpointGlobalVariables, GlobalVariables, StateReference, Tx } from '@aztec/stdlib/tx'; import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client'; +import { ForkCheckpoint } from '@aztec/world-state'; // Re-export for backward compatibility export type { BuildBlockInCheckpointResult } from '@aztec/stdlib/interfaces/server'; @@ -103,15 +104,49 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { ...this.capLimitsByCheckpointBudgets(opts), }; - const [publicProcessorDuration, [processedTxs, failedTxs, usedTxs]] = await elapsed(() => - processor.process(pendingTxs, cappedOpts, validator), - ); + // We execute all merkle tree operations on a world state fork checkpoint + // This enables us to discard all modifications in the event that we fail to successfully process sufficient transactions + const forkCheckpoint = await ForkCheckpoint.new(this.fork); - // Throw before updating state if we don't have enough valid txs - const minValidTxs = opts.minValidTxs ?? 0; - if (processedTxs.length < minValidTxs) { - throw new InsufficientValidTxsError(processedTxs.length, minValidTxs, failedTxs); + try { + const [publicProcessorDuration, [processedTxs, failedTxs, usedTxs]] = await elapsed(() => + processor.process(pendingTxs, cappedOpts, validator), + ); + // Throw before updating state if we don't have enough valid txs + const minValidTxs = opts.minValidTxs ?? 0; + if (processedTxs.length < minValidTxs) { + throw new InsufficientValidTxsError(processedTxs.length, minValidTxs, failedTxs); + } + + // Commit any changes made to the fork for this block + // Done here so the call to CheckpointBuilder.addBlock has up to date state + await forkCheckpoint.commit(); + + // Add block to checkpoint + const { block } = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { + expectedEndState: opts.expectedEndState, + }); + + this.log.debug('Built block within checkpoint', { + header: block.header.toInspect(), + processedTxs: processedTxs.map(tx => tx.hash.toString()), + failedTxs: failedTxs.map(tx => tx.tx.txHash.toString()), + }); + + return { + block, + publicProcessorDuration, + numTxs: processedTxs.length, + failedTxs, + usedTxs, + }; + } catch (err) { + // If we reached the point of committing the checkpoint, this does nothing + // Otherwise it reverts any changes made to the fork for this failed block + await forkCheckpoint.revert(); + throw err; } +<<<<<<< HEAD // Add block to checkpoint const block = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { @@ -131,6 +166,8 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { failedTxs, usedTxs, }; +======= +>>>>>>> 3ccb6868a8 (Use an additional world state fork checkpoint when building blocks) } /** Completes the checkpoint and returns it. */ From fb8646a05a09cf51143253326cd49b2dfab8089b Mon Sep 17 00:00:00 2001 From: AztecBot Date: Mon, 16 Mar 2026 15:42:53 +0000 Subject: [PATCH 10/17] fix: resolve cherry-pick conflicts --- .../src/checkpoint_builder.ts | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index c2ecdaf0f2be..d5fc850b316f 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -146,28 +146,6 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { await forkCheckpoint.revert(); throw err; } -<<<<<<< HEAD - - // Add block to checkpoint - const block = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { - expectedEndState: opts.expectedEndState, - }); - - this.log.debug('Built block within checkpoint', { - header: block.header.toInspect(), - processedTxs: processedTxs.map(tx => tx.hash.toString()), - failedTxs: failedTxs.map(tx => tx.tx.txHash.toString()), - }); - - return { - block, - publicProcessorDuration, - numTxs: processedTxs.length, - failedTxs, - usedTxs, - }; -======= ->>>>>>> 3ccb6868a8 (Use an additional world state fork checkpoint when building blocks) } /** Completes the checkpoint and returns it. */ From fab10d0ca6014ffb00b070b44aa3670187c611ea Mon Sep 17 00:00:00 2001 From: Phil Windle Date: Thu, 12 Mar 2026 20:22:41 +0000 Subject: [PATCH 11/17] Comment --- yarn-project/validator-client/src/checkpoint_builder.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index d5fc850b316f..a2a94340ed99 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -118,8 +118,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { throw new InsufficientValidTxsError(processedTxs.length, minValidTxs, failedTxs); } - // Commit any changes made to the fork for this block - // Done here so the call to CheckpointBuilder.addBlock has up to date state + // Commit the fork checkpoint await forkCheckpoint.commit(); // Add block to checkpoint From 0d75a4cff3d5533b13da9453e2998316ff35cdbb Mon Sep 17 00:00:00 2001 From: AztecBot Date: Mon, 16 Mar 2026 15:52:49 +0000 Subject: [PATCH 12/17] fix: adapt backported code to v4 API (addBlock returns L2Block directly) --- yarn-project/validator-client/src/checkpoint_builder.test.ts | 2 +- yarn-project/validator-client/src/checkpoint_builder.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 7fe58c3f315d..cbc6fe3b43f5 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -200,7 +200,7 @@ describe('CheckpointBuilder', () => { it('defaults to minValidTxs=0 when not specified, allowing empty blocks', async () => { const expectedBlock = await L2Block.random(blockNumber, { txsPerBlock: 0 }); - lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); + lightweightCheckpointBuilder.addBlock.mockResolvedValue(expectedBlock); processor.process.mockResolvedValue([[], [], [], [], []]); diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index a2a94340ed99..b65a9127955f 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -122,7 +122,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { await forkCheckpoint.commit(); // Add block to checkpoint - const { block } = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { + const block = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { expectedEndState: opts.expectedEndState, }); From 735bc240ad6b9cf1a7d3f39b0aa1e183643949e1 Mon Sep 17 00:00:00 2001 From: Phil Windle Date: Mon, 16 Mar 2026 16:32:14 +0000 Subject: [PATCH 13/17] Fix msgpack serialisation --- .../nodejs_module/world_state/world_state_message.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp index 7c19e6010233..47b06c15f2bc 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp @@ -91,12 +91,12 @@ struct ForkIdOnlyRequest { struct ForkIdWithDepthRequest { uint64_t forkId; uint32_t depth; - SERIALIZATION_FIELDS(forkId, depth); + MSGPACK_FIELDS(forkId, depth); }; struct CheckpointDepthResponse { uint32_t depth; - SERIALIZATION_FIELDS(depth); + MSGPACK_FIELDS(depth); }; struct TreeIdAndRevisionRequest { From ae3143e7898397e520ca407bbb514d9c5e85c914 Mon Sep 17 00:00:00 2001 From: Santiago Palladino Date: Mon, 16 Mar 2026 15:14:18 -0300 Subject: [PATCH 14/17] fix(p2p): fall back to maxTxsPerCheckpoint for per-block tx validation (#21605) ## Motivation When `VALIDATOR_MAX_TX_PER_BLOCK` is not set but `VALIDATOR_MAX_TX_PER_CHECKPOINT` is, the gossip-level proposal validator enforces no per-block transaction limit at all. A single block can't have more transactions than the entire checkpoint allows, so the checkpoint limit is a valid upper bound for per-block validation. ## Approach Use `validateMaxTxsPerCheckpoint` as a fallback when `validateMaxTxsPerBlock` is not set in the proposal validator construction. This applies at both construction sites: the P2P libp2p service (gossip validation) and the validator-client factory (block proposal handler). ## Changes - **p2p**: Added `validateMaxTxsPerCheckpoint` to `P2PConfig` interface and config mappings (reads from `VALIDATOR_MAX_TX_PER_CHECKPOINT` env var) - **p2p (libp2p_service)**: Use `validateMaxTxsPerBlock ?? validateMaxTxsPerCheckpoint` when constructing proposal validators - **validator-client (factory)**: Same fallback when constructing the `BlockProposalValidator` Co-authored-by: Claude Opus 4.6 (1M context) --- yarn-project/p2p/src/config.ts | 9 +++++++++ yarn-project/p2p/src/services/libp2p/libp2p_service.ts | 2 +- yarn-project/validator-client/src/factory.ts | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index a6bf73c00280..c8d238e6d2a4 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -43,6 +43,9 @@ export interface P2PConfig /** Maximum transactions per block for validation. Overrides maxTxsPerBlock for gossip validation when set. */ validateMaxTxsPerBlock?: number; + /** Maximum transactions per checkpoint for validation. Used as fallback for maxTxsPerBlock when that is not set. */ + validateMaxTxsPerCheckpoint?: number; + /** Maximum L2 gas per block for validation. When set, txs exceeding this limit are rejected. */ validateMaxL2BlockGas?: number; @@ -217,6 +220,12 @@ export const p2pConfigMappings: ConfigMappingsType = { 'Maximum transactions per block for validation. Overrides maxTxsPerBlock for gossip validation when set.', parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), }, + validateMaxTxsPerCheckpoint: { + env: 'VALIDATOR_MAX_TX_PER_CHECKPOINT', + description: + 'Maximum transactions per checkpoint for validation. Used as fallback for maxTxsPerBlock when that is not set.', + parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), + }, validateMaxL2BlockGas: { env: 'VALIDATOR_MAX_L2_BLOCK_GAS', description: 'Maximum L2 gas per block for validation. When set, txs exceeding this limit are rejected.', diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 061263fb20c7..4f8745d5cb86 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -226,7 +226,7 @@ export class LibP2PService extends WithTracer implements P2PService { const proposalValidatorOpts = { txsPermitted: !config.disableTransactions, - maxTxsPerBlock: config.validateMaxTxsPerBlock, + maxTxsPerBlock: config.validateMaxTxsPerBlock ?? config.validateMaxTxsPerCheckpoint, }; this.blockProposalValidator = new BlockProposalValidator(epochCache, proposalValidatorOpts); this.checkpointProposalValidator = new CheckpointProposalValidator(epochCache, proposalValidatorOpts); diff --git a/yarn-project/validator-client/src/factory.ts b/yarn-project/validator-client/src/factory.ts index b7645d48c485..6c706c5dc855 100644 --- a/yarn-project/validator-client/src/factory.ts +++ b/yarn-project/validator-client/src/factory.ts @@ -29,7 +29,7 @@ export function createBlockProposalHandler( const metrics = new ValidatorMetrics(deps.telemetry); const blockProposalValidator = new BlockProposalValidator(deps.epochCache, { txsPermitted: !config.disableTransactions, - maxTxsPerBlock: config.validateMaxTxsPerBlock, + maxTxsPerBlock: config.validateMaxTxsPerBlock ?? config.validateMaxTxsPerCheckpoint, }); return new BlockProposalHandler( deps.checkpointsBuilder, From ecc6cdcd58527e0240e85f1b5e652274a9004caf Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Mon, 16 Mar 2026 14:18:42 -0400 Subject: [PATCH 15/17] chore: merge v4 into backport-to-v4-staging (#21618) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Fast-forward merge of 51 commits from `v4` into `backport-to-v4-staging` - No conflicts — clean fast-forward merge - 214 files changed across archiver, p2p, sequencer, prover, stdlib, spartan, and more ClaudeBox log: https://claudebox.work/s/4542813f964f2419?run=1 Co-authored-by: Santiago Palladino Co-authored-by: Claude Opus 4.6 (1M context) --- yarn-project/archiver/src/store/block_store.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn-project/archiver/src/store/block_store.ts b/yarn-project/archiver/src/store/block_store.ts index 9b2ee842f095..7da20b7cedeb 100644 --- a/yarn-project/archiver/src/store/block_store.ts +++ b/yarn-project/archiver/src/store/block_store.ts @@ -130,14 +130,14 @@ export class BlockStore { /** * Computes the finalized block number based on the proven block number. - * A block is considered finalized when it's 2 epochs behind the proven block. + * We approximate finalization as 2 epochs worth of checkpoints behind the proven block. + * Each checkpoint is assumed to contain 4 blocks, so the lookback is epochDuration * 2 * 4 blocks. * TODO(#13569): Compute proper finalized block number based on L1 finalized block. - * TODO(palla/mbps): Even the provisional computation is wrong, since it should subtract checkpoints, not blocks * @returns The finalized block number. */ async getFinalizedL2BlockNumber(): Promise { const provenBlockNumber = await this.getProvenBlockNumber(); - return BlockNumber(Math.max(provenBlockNumber - this.l1Constants.epochDuration * 2, 0)); + return BlockNumber(Math.max(provenBlockNumber - this.l1Constants.epochDuration * 2 * 4, 0)); } /** From 619b541c69baa38b0b578849a220c9006959b6e1 Mon Sep 17 00:00:00 2001 From: ludamad Date: Mon, 16 Mar 2026 17:20:50 -0400 Subject: [PATCH 16/17] fix(revert): avm sim uses event loop again (#21138) (#21630) Reverts #21138 on v4. ThreadedAsyncOperation has a use-after-free that causes SIGBUS on macOS and silent memory corruption on Linux. Restoring AsyncOperation (libuv pool) with the original deadlock-prevention semaphore (UV_THREADPOOL_SIZE / 2) until a proper fix lands on next (#21625). [Post mortem](https://gist.github.com/ludamad/443afe321853389a08693c4ff73676f7) --- .../avm_simulate/avm_simulate_napi.cpp | 11 +-- .../nodejs_module/util/async_op.hpp | 75 ------------------- yarn-project/native/src/native_module.ts | 61 +++++++-------- 3 files changed, 34 insertions(+), 113 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/avm_simulate/avm_simulate_napi.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/avm_simulate/avm_simulate_napi.cpp index 01c19f45e6b5..283f9488556e 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/avm_simulate/avm_simulate_napi.cpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/avm_simulate/avm_simulate_napi.cpp @@ -281,9 +281,8 @@ Napi::Value AvmSimulateNapi::simulate(const Napi::CallbackInfo& cb_info) **********************************************************/ auto deferred = std::make_shared(env); - // Create threaded operation that runs on a dedicated std::thread (not libuv pool). - // This prevents libuv thread pool exhaustion when callbacks need libuv threads for I/O. - auto* op = new ThreadedAsyncOperation( + // Create async operation that will run on a worker thread + auto* op = new AsyncOperation( env, deferred, [data, tsfns, logger_tsfn, ws_ptr, cancellation_token](msgpack::sbuffer& result_buffer) { // Collect all thread-safe functions including logger for cleanup auto all_tsfns = tsfns.to_vector(); @@ -327,6 +326,7 @@ Napi::Value AvmSimulateNapi::simulate(const Napi::CallbackInfo& cb_info) } }); + // Napi is now responsible for destroying this object op->Queue(); return deferred->Promise(); @@ -368,8 +368,8 @@ Napi::Value AvmSimulateNapi::simulateWithHintedDbs(const Napi::CallbackInfo& cb_ // Create a deferred promise auto deferred = std::make_shared(env); - // Create threaded operation that runs on a dedicated std::thread (not libuv pool) - auto* op = new ThreadedAsyncOperation(env, deferred, [data](msgpack::sbuffer& result_buffer) { + // Create async operation that will run on a worker thread + auto* op = new AsyncOperation(env, deferred, [data](msgpack::sbuffer& result_buffer) { try { // Deserialize inputs from msgpack avm2::AvmProvingInputs inputs; @@ -393,6 +393,7 @@ Napi::Value AvmSimulateNapi::simulateWithHintedDbs(const Napi::CallbackInfo& cb_ } }); + // Napi is now responsible for destroying this object op->Queue(); return deferred->Promise(); diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp index 13a933cd5a81..3e29d08b5f6b 100644 --- a/barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp @@ -3,7 +3,6 @@ #include "barretenberg/serialize/msgpack_impl.hpp" #include #include -#include #include namespace bb::nodejs { @@ -66,78 +65,4 @@ class AsyncOperation : public Napi::AsyncWorker { msgpack::sbuffer _result; }; -/** - * @brief Runs work on a dedicated std::thread instead of the libuv thread pool. - * - * Unlike AsyncOperation (which uses Napi::AsyncWorker and occupies a libuv thread), - * this class spawns a new OS thread for each operation. This prevents AVM simulations - * from exhausting the libuv thread pool, which would deadlock when C++ callbacks need - * to invoke JS functions that themselves require libuv threads (e.g., LMDB reads). - * - * The completion callback (resolve/reject) is posted back to the JS main thread via - * a Napi::ThreadSafeFunction, so the event loop returns immediately after launch - * and is woken up only when the work is done. - * - * Usage: `auto* op = new ThreadedAsyncOperation(env, deferred, fn); op->Queue();` - * The object self-destructs after resolving/rejecting the promise. - */ -class ThreadedAsyncOperation { - public: - ThreadedAsyncOperation(Napi::Env env, std::shared_ptr deferred, async_fn fn) - : _fn(std::move(fn)) - , _deferred(std::move(deferred)) - { - // Create a no-op JS function as the TSFN target — we use the native callback form of BlockingCall - // to resolve/reject the promise, so the JS function is never actually called directly. - auto dummy = Napi::Function::New(env, [](const Napi::CallbackInfo&) {}); - _completion_tsfn = Napi::ThreadSafeFunction::New(env, dummy, "ThreadedAsyncOpComplete", 0, 1); - } - - ThreadedAsyncOperation(const ThreadedAsyncOperation&) = delete; - ThreadedAsyncOperation& operator=(const ThreadedAsyncOperation&) = delete; - ThreadedAsyncOperation(ThreadedAsyncOperation&&) = delete; - ThreadedAsyncOperation& operator=(ThreadedAsyncOperation&&) = delete; - - ~ThreadedAsyncOperation() = default; - - void Queue() - { - std::thread([this]() { - try { - _fn(_result); - _success = true; - } catch (const std::exception& e) { - _error = e.what(); - _success = false; - } catch (...) { - _error = "Unknown exception occurred during threaded async operation"; - _success = false; - } - - // Post completion back to the JS main thread - _completion_tsfn.BlockingCall( - this, [](Napi::Env env, Napi::Function /*js_callback*/, ThreadedAsyncOperation* op) { - if (op->_success) { - auto buf = Napi::Buffer::Copy(env, op->_result.data(), op->_result.size()); - op->_deferred->Resolve(buf); - } else { - auto error = Napi::Error::New(env, op->_error); - op->_deferred->Reject(error.Value()); - } - // Release the TSFN and self-destruct - op->_completion_tsfn.Release(); - delete op; - }); - }).detach(); - } - - private: - async_fn _fn; - std::shared_ptr _deferred; - Napi::ThreadSafeFunction _completion_tsfn; - msgpack::sbuffer _result; - bool _success = false; - std::string _error; -}; - } // namespace bb::nodejs diff --git a/yarn-project/native/src/native_module.ts b/yarn-project/native/src/native_module.ts index 6966e33193d7..0319bf8d894b 100644 --- a/yarn-project/native/src/native_module.ts +++ b/yarn-project/native/src/native_module.ts @@ -128,33 +128,23 @@ export function cancelSimulation(token: CancellationToken): void { } /** - * Maximum number of concurrent AVM simulations. Each simulation spawns a dedicated OS thread, - * so this controls resource usage. Defaults to 4. Set to 0 for unlimited. + * Concurrency limiting for C++ AVM simulation to prevent libuv thread pool exhaustion. + * + * The C++ simulator uses NAPI BlockingCall to callback to TypeScript for contract data. + * This blocks the libuv thread while waiting for the callback to complete. If all libuv + * threads are blocked waiting for callbacks, no threads remain to service those callbacks, + * causing deadlock. + * + * We limit concurrent simulations to UV_THREADPOOL_SIZE / 2 to ensure threads remain + * available for callback processing. */ -export const AVM_MAX_CONCURRENT_SIMULATIONS = parseInt(process.env.AVM_MAX_CONCURRENT_SIMULATIONS ?? '4', 10); -const avmSimulationSemaphore = - AVM_MAX_CONCURRENT_SIMULATIONS > 0 ? new Semaphore(AVM_MAX_CONCURRENT_SIMULATIONS) : null; - -async function withAvmConcurrencyLimit(fn: () => Promise): Promise { - if (!avmSimulationSemaphore) { - return fn(); - } - await avmSimulationSemaphore.acquire(); - try { - return await fn(); - } finally { - avmSimulationSemaphore.release(); - } -} +const UV_THREADPOOL_SIZE = parseInt(process.env.UV_THREADPOOL_SIZE ?? '4', 10); +export const AVM_MAX_CONCURRENT_SIMULATIONS = Math.max(1, Math.floor(UV_THREADPOOL_SIZE / 2)); +const avmSimulationSemaphore = new Semaphore(AVM_MAX_CONCURRENT_SIMULATIONS); /** * AVM simulation function that takes serialized inputs and a contract provider. * The contract provider enables C++ to callback to TypeScript for contract data during simulation. - * - * Simulations run on dedicated std::threads (not the libuv thread pool), so there is no risk - * of libuv thread pool exhaustion or deadlock from C++ BlockingCall callbacks. - * Concurrency is limited by AVM_MAX_CONCURRENT_SIMULATIONS (default 4, 0 = unlimited). - * * @param inputs - Msgpack-serialized AvmFastSimulationInputs buffer * @param contractProvider - Object with callbacks for fetching contract instances and classes * @param worldStateHandle - Native handle to WorldState instance @@ -163,7 +153,7 @@ async function withAvmConcurrencyLimit(fn: () => Promise): Promise { * @param cancellationToken - Optional token to enable cancellation support * @returns Promise resolving to msgpack-serialized AvmCircuitPublicInputs buffer */ -export function avmSimulate( +export async function avmSimulate( inputs: Buffer, contractProvider: ContractProvider, worldStateHandle: any, @@ -171,30 +161,35 @@ export function avmSimulate( logger?: Logger, cancellationToken?: CancellationToken, ): Promise { - return withAvmConcurrencyLimit(() => - nativeAvmSimulate( + await avmSimulationSemaphore.acquire(); + + try { + return await nativeAvmSimulate( inputs, contractProvider, worldStateHandle, LogLevels.indexOf(logLevel), logger ? (level: LogLevel, msg: string) => logger[level](msg) : null, cancellationToken, - ), - ); + ); + } finally { + avmSimulationSemaphore.release(); + } } /** * AVM simulation function that uses pre-collected hints from TypeScript simulation. * All contract data and merkle tree hints are included in the AvmCircuitInputs, so no runtime * callbacks to TS or WS pointer are needed. - * - * Simulations run on dedicated std::threads (not the libuv thread pool). - * Concurrency is limited by AVM_MAX_CONCURRENT_SIMULATIONS (default 4, 0 = unlimited). - * * @param inputs - Msgpack-serialized AvmCircuitInputs (AvmProvingInputs in C++) buffer * @param logLevel - Log level to control C++ verbosity * @returns Promise resolving to msgpack-serialized simulation results buffer */ -export function avmSimulateWithHintedDbs(inputs: Buffer, logLevel: LogLevel = 'info'): Promise { - return withAvmConcurrencyLimit(() => nativeAvmSimulateWithHintedDbs(inputs, LogLevels.indexOf(logLevel))); +export async function avmSimulateWithHintedDbs(inputs: Buffer, logLevel: LogLevel = 'info'): Promise { + await avmSimulationSemaphore.acquire(); + try { + return await nativeAvmSimulateWithHintedDbs(inputs, LogLevels.indexOf(logLevel)); + } finally { + avmSimulationSemaphore.release(); + } } From bcf725dd72a22c062f7a3c76eedd4656cc062985 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Mon, 16 Mar 2026 22:04:02 -0400 Subject: [PATCH 17/17] fix(e2e): remove historic/finalized block checks from epochs_multiple test (#21642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - Removes the historic/finalized block verification checks from `epochs_multiple.test.ts` - The finalization logic on v4 is incorrect: it subtracts a fixed number of blocks (`epochDuration * 2`) instead of accounting for variable blocks per slot (up to 4 per slot), causing test timeouts - The correct finalization implementation exists on `next` in #21156 but is non-trivial to backport to v4 - Keeps the proven sync check intact — only historic/finalized assertions are removed ## Context See discussion in Slack: the current `getFinalizedL2BlockNumber` uses `provenBlockNumber - epochDuration * 2` which doesn't account for variable blocks per slot. This causes the tx mempool to evict transactions too aggressively and the test to time out waiting for finalization. ## Test plan - CI should pass — the test still verifies epoch proving and proven block sync, just without the finalized block assertions ClaudeBox log: https://claudebox.work/s/a5e9cea005ce4a5a?run=1 --- .../src/e2e_epochs/epochs_multiple.test.ts | 20 ++----------------- 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_multiple.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_multiple.test.ts index 6703786b0f88..ac53842785d2 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_multiple.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_multiple.test.ts @@ -4,14 +4,12 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { jest } from '@jest/globals'; -import type { EndToEndContext } from '../fixtures/utils.js'; -import { EpochsTestContext, WORLD_STATE_CHECKPOINT_HISTORY } from './epochs_test.js'; +import { EpochsTestContext } from './epochs_test.js'; jest.setTimeout(1000 * 60 * 15); // Assumes one block per checkpoint describe('e2e_epochs/epochs_multiple', () => { - let context: EndToEndContext; let rollup: RollupContract; let logger: Logger; @@ -19,7 +17,7 @@ describe('e2e_epochs/epochs_multiple', () => { beforeEach(async () => { test = await EpochsTestContext.setup(); - ({ context, rollup, logger } = test); + ({ rollup, logger } = test); }); afterEach(async () => { @@ -46,20 +44,6 @@ describe('e2e_epochs/epochs_multiple', () => { // Verify the state syncs. Assumes one block per checkpoint. const epochEndBlockNumber = BlockNumber.fromCheckpointNumber(epochEndCheckpointNumber); await test.waitForNodeToSync(epochEndBlockNumber, 'proven'); - await test.verifyHistoricBlock(epochEndBlockNumber, true); - - // Check that finalized blocks are purged from world state - // Right now finalization means a checkpoint is two L2 epochs deep. If this rule changes then this test needs to be updated. - // This test is setup as 1 block per checkpoint - const provenBlockNumber = epochEndBlockNumber; - const finalizedBlockNumber = Math.max(provenBlockNumber - context.config.aztecEpochDuration * 2, 0); - const expectedOldestHistoricBlock = Math.max(finalizedBlockNumber - WORLD_STATE_CHECKPOINT_HISTORY + 1, 1); - const expectedBlockRemoved = expectedOldestHistoricBlock - 1; - await test.waitForNodeToSync(BlockNumber(expectedOldestHistoricBlock), 'historic'); - await test.verifyHistoricBlock(BlockNumber(expectedOldestHistoricBlock), true); - if (expectedBlockRemoved > 0) { - await test.verifyHistoricBlock(BlockNumber(expectedBlockRemoved), false); - } } logger.info('Test Succeeded'); });