diff --git a/bootstrap.sh b/bootstrap.sh index 86c64db5f0ac..db6d2383f7a8 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -307,11 +307,9 @@ function build_and_test { if [ "$finished" == "$make_pid" ]; then make_pid= - if [ -z "${1:-}" ]; then - # TODO: Handle this better to they can be run as part of the Makefile dependency tree. - start_txes - make noir-projects-txe-tests - fi + # TODO: Handle this better so they can be run as part of the Makefile dependency tree. + start_txes + make noir-projects-txe-tests # Signal tests complete, handled by parallel -E STOP. echo STOP >> $test_cmds_file diff --git a/docs/docs-operate/operators/reference/changelog/v4.md b/docs/docs-operate/operators/reference/changelog/v4.md index de6d28118215..42e5dbd12538 100644 --- a/docs/docs-operate/operators/reference/changelog/v4.md +++ b/docs/docs-operate/operators/reference/changelog/v4.md @@ -137,6 +137,18 @@ Transaction submission via RPC now returns structured rejection codes when a tra **Impact**: Improved developer experience — callers can now programmatically handle specific rejection reasons. +### RPC transaction replacement price bump + +Transactions submitted via RPC that clash on nullifiers with existing pool transactions must now pay at least the configured percentage (default 10%) more in priority fee to replace them. The same bump applies when the pool is full and the incoming tx needs to evict the lowest-priority tx. P2P gossip behavior is unchanged. +**Configuration:** + +```bash +P2P_RPC_PRICE_BUMP_PERCENTAGE=10 # default: 10 (percent) +``` + +Set to `0` to disable the percentage-based bump (still requires strictly higher fee). 
+ ## Changed defaults ## Troubleshooting diff --git a/l1-contracts/src/core/slashing/TallySlashingProposer.sol b/l1-contracts/src/core/slashing/TallySlashingProposer.sol index 2b944d0ed78c..7bd31a611f5a 100644 --- a/l1-contracts/src/core/slashing/TallySlashingProposer.sol +++ b/l1-contracts/src/core/slashing/TallySlashingProposer.sol @@ -903,14 +903,6 @@ contract TallySlashingProposer is EIP712 { if (escapeHatchEpochs[epochIndex]) { continue; } - - // Skip validators for epochs without a valid committee (e.g. early epochs - // before the validator set was sampled). Without this check, indexing into - // an empty committee array would revert and block execution of the round. - if (_committees[epochIndex].length != COMMITTEE_SIZE) { - continue; - } - uint256 packedVotes = tallyMatrix[i]; // Skip if no votes for this validator diff --git a/l1-contracts/test/slashing/TallySlashingProposer.t.sol b/l1-contracts/test/slashing/TallySlashingProposer.t.sol index 38330fab1245..f7571ee957d6 100644 --- a/l1-contracts/test/slashing/TallySlashingProposer.t.sol +++ b/l1-contracts/test/slashing/TallySlashingProposer.t.sol @@ -991,9 +991,9 @@ contract TallySlashingProposerTest is TestBase { // Round FIRST_SLASH_ROUND targets epochs 0 and 1, which have no committees // because they precede the validator set sampling lag. // - // Before the fix, casting votes that reach quorum for validator slots in these - // committee-less epochs would cause executeRound (and getTally) to revert with - // an array out-of-bounds access when indexing _committees[epochIndex][validatorIndex]. + // Casting votes that reach quorum for validator slots in these committee-less epochs causes + // executeRound (and getTally) to revert with an array out-of-bounds access when + // indexing _committees[epochIndex][validatorIndex]. 
_jumpToSlashRound(FIRST_SLASH_ROUND); SlashRound targetRound = slashingProposer.getCurrentRound(); @@ -1020,16 +1020,18 @@ contract TallySlashingProposerTest is TestBase { assertEq(committees[0].length, 0, "Epoch 0 should have empty committee"); assertEq(committees[1].length, 0, "Epoch 1 should have empty committee"); - // getTally should not revert and should return 0 actions + // getTally should revert because of out of bounds + vm.expectRevert(); TallySlashingProposer.SlashAction[] memory actions = slashingProposer.getTally(targetRound, committees); - assertEq(actions.length, 0, "Should have no slash actions for empty committees"); + assertEq(actions.length, 0, "Should have no slash actions since reverted"); - // executeRound should also succeed + // Reverts because of out of bounds + vm.expectRevert(); slashingProposer.executeRound(targetRound, committees); - // Verify round is marked as executed + // Verify round is not marked as executed (bool isExecuted,) = slashingProposer.getRound(targetRound); - assertTrue(isExecuted, "Round should be marked as executed"); + assertFalse(isExecuted, "Round should not be marked as executed"); } function test_revertWhenSlashAmountIsZero() public { diff --git a/noir-projects/aztec-nr/aztec/src/keys/ephemeral.nr b/noir-projects/aztec-nr/aztec/src/keys/ephemeral.nr index e8d59d42be8c..0e24bb42a526 100644 --- a/noir-projects/aztec-nr/aztec/src/keys/ephemeral.nr +++ b/noir-projects/aztec-nr/aztec/src/keys/ephemeral.nr @@ -2,8 +2,9 @@ use std::embedded_curve_ops::{EmbeddedCurveScalar, fixed_base_scalar_mul}; use crate::protocol::{point::Point, scalar::Scalar}; -use crate::oracle::random::random; +use crate::{oracle::random::random, utils::point::get_sign_of_point}; +/// Generates a random ephemeral key pair. 
pub fn generate_ephemeral_key_pair() -> (Scalar, Point) { // @todo Need to draw randomness from the full domain of Fq not only Fr @@ -20,3 +21,56 @@ pub fn generate_ephemeral_key_pair() -> (Scalar, Point) { (eph_sk, eph_pk) } + +/// Generates a random ephemeral key pair with a positive y-coordinate. +/// +/// Unlike [`generate_ephemeral_key_pair`], the y-coordinate of the public key is guaranteed to be a positive value +/// (i.e. [`crate::utils::point::get_sign_of_point`] will return `true`). +/// +/// This is useful as it means it is possible to just broadcast the x-coordinate as a single `Field` and then +/// reconstruct the original public key using [`crate::utils::point::point_from_x_coord_and_sign`] with `sign: true`. +pub fn generate_positive_ephemeral_key_pair() -> (Scalar, Point) { + // Safety: we use the randomness to preserve the privacy of both the sender and recipient via encryption, so a + // malicious sender could use non-random values to reveal the plaintext. But they already know it themselves + // anyway, and so the recipient already trusts them to not disclose this information. We can therefore assume that + // the sender will cooperate in the random value generation. + let eph_sk = unsafe { generate_secret_key_for_positive_public_key() }; + let eph_pk = fixed_base_scalar_mul(eph_sk); + + assert(get_sign_of_point(eph_pk), "Got an ephemeral public key with a negative y coordinate"); + + (eph_sk, eph_pk) +} + +unconstrained fn generate_secret_key_for_positive_public_key() -> EmbeddedCurveScalar { + let mut sk = std::mem::zeroed(); + + loop { + // We simply produce random secret keys until we find one that results in a positive public key. About half + // of all public keys fulfill this condition, so this should only take a few iterations at most. 
+ + // @todo Need to draw randomness from the full domain of Fq not only Fr + sk = EmbeddedCurveScalar::from_field(random()); + let pk = fixed_base_scalar_mul(sk); + if get_sign_of_point(pk) { + break; + } + } + + sk +} + +mod test { + use crate::utils::point::get_sign_of_point; + use super::generate_positive_ephemeral_key_pair; + + #[test] + fn generate_positive_ephemeral_key_pair_produces_positive_keys() { + // About half of random points are negative, so testing just a couple gives us high confidence that + // `generate_positive_ephemeral_key_pair` is indeed producing positive ones. + for _ in 0..10 { + let (_, pk) = generate_positive_ephemeral_key_pair(); + assert(get_sign_of_point(pk)); + } + } +} diff --git a/noir-projects/aztec-nr/aztec/src/messages/encoding.nr b/noir-projects/aztec-nr/aztec/src/messages/encoding.nr index d810aac2abf2..dc484086cf8a 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/encoding.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/encoding.nr @@ -9,20 +9,22 @@ use crate::utils::array; // fields, so MESSAGE_CIPHERTEXT_LEN is the size of the message in fields. pub global MESSAGE_CIPHERTEXT_LEN: u32 = PRIVATE_LOG_CIPHERTEXT_LEN; -// TODO(#12750): The global variables below should not be here as they are AES128 specific. ciphertext_length (2) + 14 -// bytes pkcs#7 AES padding. +// TODO(#12750): The global variables below should not be here as they are AES128 specific. +// The header plaintext is 2 bytes (ciphertext length), padded to the 16-byte AES block size by PKCS#7. pub(crate) global HEADER_CIPHERTEXT_SIZE_IN_BYTES: u32 = 16; +// AES PKCS#7 always adds at least one byte of padding. Since each plaintext field is 32 bytes (a multiple of the +// 16-byte AES block size), a full 16-byte padding block is always appended. 
+pub(crate) global AES128_PKCS7_EXPANSION_IN_BYTES: u32 = 16; pub global EPH_PK_X_SIZE_IN_FIELDS: u32 = 1; -pub global EPH_PK_SIGN_BYTE_SIZE_IN_BYTES: u32 = 1; -// (17 - 1) * 31 - 16 - 1 = 479 Note: We multiply by 31 because ciphertext bytes are stored in fields using +// (15 - 1) * 31 - 16 - 16 = 402. Note: We multiply by 31 because ciphertext bytes are stored in fields using // bytes_to_fields, which packs 31 bytes per field (since a Field is ~254 bits and can safely store 31 whole bytes). -global MESSAGE_PLAINTEXT_SIZE_IN_BYTES: u32 = (MESSAGE_CIPHERTEXT_LEN - EPH_PK_X_SIZE_IN_FIELDS) * 31 +pub(crate) global MESSAGE_PLAINTEXT_SIZE_IN_BYTES: u32 = (MESSAGE_CIPHERTEXT_LEN - EPH_PK_X_SIZE_IN_FIELDS) * 31 - HEADER_CIPHERTEXT_SIZE_IN_BYTES - - EPH_PK_SIGN_BYTE_SIZE_IN_BYTES; + - AES128_PKCS7_EXPANSION_IN_BYTES; // The plaintext bytes represent Field values that were originally serialized using fields_to_bytes, which converts -// each Field to 32 bytes. To convert the plaintext bytes back to fields, we divide by 32. 479 / 32 = 14 +// each Field to 32 bytes. To convert the plaintext bytes back to fields, we divide by 32. 
402 / 32 = 12 pub global MESSAGE_PLAINTEXT_LEN: u32 = MESSAGE_PLAINTEXT_SIZE_IN_BYTES / 32; pub global MESSAGE_EXPANDED_METADATA_LEN: u32 = 1; @@ -244,4 +246,27 @@ mod tests { assert_eq(original_msg_type, unpacked_msg_type); assert_eq(original_msg_metadata, unpacked_msg_metadata); } + + #[test] + unconstrained fn encode_decode_max_size_message() { + let msg_type_id: u64 = 42; + let msg_metadata: u64 = 99; + let mut msg_content = [0; MAX_MESSAGE_CONTENT_LEN]; + for i in 0..MAX_MESSAGE_CONTENT_LEN { + msg_content[i] = i as Field; + } + + let encoded = encode_message(msg_type_id, msg_metadata, msg_content); + let (decoded_type_id, decoded_metadata, decoded_content) = decode_message(BoundedVec::from_array(encoded)); + + assert_eq(decoded_type_id, msg_type_id); + assert_eq(decoded_metadata, msg_metadata); + assert_eq(decoded_content, BoundedVec::from_array(msg_content)); + } + + #[test(should_fail_with = "Invalid message content: it must have a length of at most MAX_MESSAGE_CONTENT_LEN")] + fn encode_oversized_message_fails() { + let msg_content = [0; MAX_MESSAGE_CONTENT_LEN + 1]; + let _ = encode_message(0, 0, msg_content); + } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/encryption/aes128.nr b/noir-projects/aztec-nr/aztec/src/messages/encryption/aes128.nr index 97f32377be8f..e4b6a07b1a84 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/encryption/aes128.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/encryption/aes128.nr @@ -7,11 +7,11 @@ use crate::protocol::{ }; use crate::{ - keys::{ecdh_shared_secret::derive_ecdh_shared_secret, ephemeral::generate_ephemeral_key_pair}, + keys::{ecdh_shared_secret::derive_ecdh_shared_secret, ephemeral::generate_positive_ephemeral_key_pair}, messages::{ encoding::{ - EPH_PK_SIGN_BYTE_SIZE_IN_BYTES, EPH_PK_X_SIZE_IN_FIELDS, HEADER_CIPHERTEXT_SIZE_IN_BYTES, - MESSAGE_CIPHERTEXT_LEN, MESSAGE_PLAINTEXT_LEN, + EPH_PK_X_SIZE_IN_FIELDS, HEADER_CIPHERTEXT_SIZE_IN_BYTES, MESSAGE_CIPHERTEXT_LEN, MESSAGE_PLAINTEXT_LEN, + 
MESSAGE_PLAINTEXT_SIZE_IN_BYTES, }, encryption::message_encryption::MessageEncryption, logs::arithmetic_generics_utils::{ @@ -25,7 +25,7 @@ use crate::{ bytes_to_fields::{bytes_from_fields, bytes_to_fields}, fields_to_bytes::{fields_from_bytes, fields_to_bytes}, }, - point::{get_sign_of_point, point_from_x_coord_and_sign}, + point::point_from_x_coord_and_sign, random::get_random_bytes, }, }; @@ -150,20 +150,102 @@ pub fn derive_aes_symmetric_key_and_iv_from_ecdh_shared_secret_using_poseidon2_u pub struct AES128 {} impl MessageEncryption for AES128 { + + /// AES128-CBC encryption for Aztec protocol messages. + /// + /// ## Overview + /// + /// The plaintext is an array of up to `MESSAGE_PLAINTEXT_LEN` (12) fields. The output is always exactly + /// `MESSAGE_CIPHERTEXT_LEN` (15) fields, regardless of plaintext size. Unused trailing fields are filled with + /// random data so that all encrypted messages are indistinguishable by size. + /// + /// ## PKCS#7 Padding + /// + /// AES operates on 16-byte blocks, so the plaintext must be padded to a multiple of 16. PKCS#7 padding always + /// adds at least 1 byte (so the receiver can always detect and strip it), which means: + /// - 1 B plaintext -> 15 B padding -> 16 B total + /// - 15 B plaintext -> 1 B padding -> 16 B total + /// - 16 B plaintext -> 16 B padding -> 32 B total (full extra block) + /// + /// In general: if the plaintext is already a multiple of 16, a full 16-byte padding block is appended. + /// + /// ## Encryption Steps + /// + /// **1. Body encryption.** The plaintext fields are serialized to bytes (32 bytes per field) and AES-128-CBC + /// encrypted. 
Since 32 is a multiple of 16, PKCS#7 always adds a full 16-byte padding block (see above): + /// + /// ```text + /// +---------------------------------------------+ + /// | body ct | + /// | PlaintextLen*32 + 16 B | + /// +-------------------------------+--------------+ + /// | encrypted plaintext fields | PKCS#7 (16B) | + /// | (serialized at 32 B each) | | + /// +-------------------------------+--------------+ + /// ``` + /// + /// **2. Header encryption.** The byte length of `body_ct` is stored as a 2-byte big-endian integer. This 2-byte + /// header plaintext is then AES-encrypted; PKCS#7 pads the remaining 14 bytes to fill one 16-byte AES block, + /// producing a 16-byte header ciphertext: + /// + /// ```text + /// +---------------------------+ + /// | header ct | + /// | 16 B | + /// +--------+------------------+ + /// | body ct| PKCS#7 (14B) | + /// | length | | + /// | (2 B) | | + /// +--------+------------------+ + /// ``` + /// + /// ## Wire Format + /// + /// Messages are transmitted as fields, not bytes. A field is ~254 bits and can safely store 31 whole bytes, so + /// we need to pack our byte data into 31-byte chunks. This packing drives the wire format. + /// + /// **Step 1 -- Assemble bytes.** The ciphertexts are laid out in a byte array, padded with random bytes to a + /// multiple of 31 so it divides evenly into fields: + /// + /// ```text + /// +------------+-------------------------+---------+ + /// | header ct | body ct | byte pad| + /// | 16 B | PlaintextLen*32 + 16 B | (random)| + /// +------------+-------------------------+---------+ + /// |<-------- padded to a multiple of 31 B -------->| + /// ``` + /// + /// **Step 2 -- Pack into fields.** The byte array is split into 31-byte chunks, each stored in one field. The + /// ephemeral public key x-coordinate is prepended as its own field. 
Any remaining fields (up to 15 total) are + /// filled with random data so that all messages are the same size: + /// + /// ```text + /// +----------+-------------------------+-------------------+ + /// | eph_pk.x | message-byte fields | random field pad | + /// | | (packed 31 B per field) | (fills to 15) | + /// +----------+-------------------------+-------------------+ + /// |<---------- MESSAGE_CIPHERTEXT_LEN = 15 fields ------->| + /// ``` + /// + /// ## Key Derivation + /// + /// Two (key, IV) pairs are derived from the ECDH shared secret via Poseidon2 hashing with different domain + /// separators: one pair for the body ciphertext and one for the header ciphertext. fn encrypt( plaintext: [Field; PlaintextLen], recipient: AztecAddress, ) -> [Field; MESSAGE_CIPHERTEXT_LEN] { + std::static_assert( + PlaintextLen <= MESSAGE_PLAINTEXT_LEN, + "Plaintext length exceeds MESSAGE_PLAINTEXT_LEN", + ); + // AES 128 operates on bytes, not fields, so we need to convert the fields to bytes. (This process is then // reversed when processing the message in `process_message_ciphertext`) let plaintext_bytes = fields_to_bytes(plaintext); - // ***************************************************************************** Compute the shared secret - // ***************************************************************************** - - let (eph_sk, eph_pk) = generate_ephemeral_key_pair(); - - let eph_pk_sign_byte: u8 = get_sign_of_point(eph_pk) as u8; + // Derive ECDH shared secret with recipient using a fresh ephemeral keypair. + let (eph_sk, eph_pk) = generate_positive_ephemeral_key_pair(); // (not to be confused with the tagging shared secret) TODO (#17158): Currently we unwrap the Option returned // by derive_ecdh_shared_secret. We need to handle the case where the ephemeral public key is invalid to @@ -189,15 +271,7 @@ impl MessageEncryption for AES128 { ); // TODO: also use this shared secret for deriving note randomness. 
- // ***************************************************************************** Convert the plaintext into - // whatever format the encryption function expects - // ***************************************************************************** - - // Already done for this strategy: AES expects bytes. - - // ***************************************************************************** Encrypt the plaintext - // ***************************************************************************** - + // AES128-CBC encrypt the plaintext bytes. // It is safe to call the `unsafe` function here, because we know the `shared_secret` was derived using an // AztecAddress (the recipient). See the block comment at the start of this unsafe target function for more // info. @@ -209,22 +283,15 @@ impl MessageEncryption for AES128 { let ciphertext_bytes = aes128_encrypt(plaintext_bytes, body_iv, body_sym_key); - // |full_pt| = |pt_length| + |pt| - // |pt_aes_padding| = 16 - (|full_pt| % 16) - // or... since a % b is the same as a - b * (a // b) (integer division), so: - // |pt_aes_padding| = 16 - (|full_pt| - 16 * (|full_pt| // 16)) - // |ct| = |full_pt| + |pt_aes_padding| - // = |full_pt| + 16 - (|full_pt| - 16 * (|full_pt| // 16)) = 16 + 16 * (|full_pt| // 16) = 16 * (1 + - // |full_pt| // 16) + // Each plaintext field is 32 bytes (a multiple of the 16-byte AES block + // size), so PKCS#7 always appends a full 16-byte padding block: + // |ciphertext| = PlaintextLen*32 + 16 = 16 * (1 + PlaintextLen*32 / 16) std::static_assert( ciphertext_bytes.len() == 16 * (1 + (PlaintextLen * 32) / 16), "unexpected ciphertext length", ); - // ***************************************************************************** Compute the header ciphertext - // ***************************************************************************** - - // Header contains only the length of the ciphertext stored in 2 bytes. + // Encrypt a 2-byte header containing the body ciphertext length. 
let mut header_plaintext: [u8; 2] = [0 as u8; 2]; let ciphertext_bytes_length = ciphertext_bytes.len(); header_plaintext[0] = (ciphertext_bytes_length >> 8) as u8; @@ -233,16 +300,14 @@ impl MessageEncryption for AES128 { // Note: the aes128_encrypt builtin fn automatically appends bytes to the input, according to pkcs#7; hence why // the output `header_ciphertext_bytes` is 16 bytes larger than the input in this case. let header_ciphertext_bytes = aes128_encrypt(header_plaintext, header_iv, header_sym_key); - // I recall that converting a slice to an array incurs constraints, so I'll check the length this way instead: + // Verify expected header ciphertext size at compile time. std::static_assert( header_ciphertext_bytes.len() == HEADER_CIPHERTEXT_SIZE_IN_BYTES, "unexpected ciphertext header length", ); - // ***************************************************************************** Prepend / append more bytes of - // data to the ciphertext, before converting back to fields. - // ***************************************************************************** - + // Assemble the message byte array: + // [header_ct (16B)] [body_ct] [padding to mult of 31] let mut message_bytes_padding_to_mult_31 = get_arr_of_size__message_bytes_padding__from_PT::(); // Safety: this randomness won't be constrained to be random. It's in the interest of the executor of this fn @@ -256,8 +321,7 @@ impl MessageEncryption for AES128 { "Unexpected error: message_bytes.len() should be divisible by 31, by construction.", ); - message_bytes[0] = eph_pk_sign_byte; - let mut offset = 1; + let mut offset = 0; for i in 0..header_ciphertext_bytes.len() { message_bytes[offset + i] = header_ciphertext_bytes[i]; } @@ -279,23 +343,18 @@ impl MessageEncryption for AES128 { // computation used to obtain the offset computes the expected value (which we _can_ do in a static check), and // then add a cheap runtime check to also validate that the offset matches this. 
std::static_assert( - 1 + header_ciphertext_bytes.len() + ciphertext_bytes.len() + message_bytes_padding_to_mult_31.len() + header_ciphertext_bytes.len() + ciphertext_bytes.len() + message_bytes_padding_to_mult_31.len() == message_bytes.len(), "unexpected message length", ); assert(offset == message_bytes.len(), "unexpected encrypted message length"); - // ***************************************************************************** Convert bytes back to fields - // ***************************************************************************** - + // Pack message bytes into fields (31 bytes per field) and prepend eph_pk.x. // TODO(#12749): As Mike pointed out, we need to make messages produced by different encryption schemes // indistinguishable from each other and for this reason the output here and in the last for-loop of this // function should cover a full field. let message_bytes_as_fields = bytes_to_fields(message_bytes); - // ***************************************************************************** Prepend / append fields, to - // create the final message ***************************************************************************** - let mut ciphertext: [Field; MESSAGE_CIPHERTEXT_LEN] = [0; MESSAGE_CIPHERTEXT_LEN]; ciphertext[0] = eph_pk.x; @@ -334,12 +393,10 @@ impl MessageEncryption for AES128 { // Convert the ciphertext represented as fields to a byte representation (its original format) let ciphertext_without_eph_pk_x = bytes_from_fields(ciphertext_without_eph_pk_x_fields); - // First byte of the ciphertext represents the ephemeral public key sign - let eph_pk_sign_bool = ciphertext_without_eph_pk_x.get(0) != 0; - - // With the sign and the x-coordinate of the ephemeral public key, we can reconstruct the point. This may fail - // however, as not all x-coordinates are on the curve. In that case, we simply return `Option::none`. 
- point_from_x_coord_and_sign(eph_pk_x, eph_pk_sign_bool).map(|eph_pk| { + // With the x-coordinate of the ephemeral public key we can reconstruct the point as we know that the + // y-coordinate must be positive. This may fail however, as not all x-coordinates are on the curve. In that + // case, we simply return `Option::none`. + point_from_x_coord_and_sign(eph_pk_x, true).map(|eph_pk| { // Derive shared secret let ciphertext_shared_secret = get_shared_secret(recipient, eph_pk); @@ -351,7 +408,7 @@ impl MessageEncryption for AES128 { let (header_sym_key, header_iv) = pairs[1]; // Extract the header ciphertext - let header_start = EPH_PK_SIGN_BYTE_SIZE_IN_BYTES; // Skip eph_pk_sign byte + let header_start = 0; let header_ciphertext: [u8; HEADER_CIPHERTEXT_SIZE_IN_BYTES] = array::subarray(ciphertext_without_eph_pk_x.storage(), header_start); // We need to convert the array to a BoundedVec because the oracle expects a BoundedVec as it's designed to @@ -368,16 +425,16 @@ impl MessageEncryption for AES128 { // Extract and decrypt main ciphertext let ciphertext_start = header_start + HEADER_CIPHERTEXT_SIZE_IN_BYTES; - let ciphertext_with_padding: [u8; (MESSAGE_CIPHERTEXT_LEN - EPH_PK_X_SIZE_IN_FIELDS) * 31 - HEADER_CIPHERTEXT_SIZE_IN_BYTES - EPH_PK_SIGN_BYTE_SIZE_IN_BYTES] = + let ciphertext_with_padding: [u8; MESSAGE_PLAINTEXT_SIZE_IN_BYTES] = array::subarray(ciphertext_without_eph_pk_x.storage(), ciphertext_start); - let ciphertext: BoundedVec = + let ciphertext: BoundedVec = BoundedVec::from_parts(ciphertext_with_padding, ciphertext_length); // Decrypt main ciphertext and return it let plaintext_bytes = aes128_decrypt_oracle(ciphertext, body_iv, body_sym_key); - // Each field of the original note message was serialized to 32 bytes so we convert the bytes back to - // fields. + // Each field of the original message was serialized to 32 bytes so we convert + // the bytes back to fields. 
fields_from_bytes(plaintext_bytes) }) } @@ -489,6 +546,48 @@ mod test { let _ = AES128::encrypt([1, 2, 3, 4], invalid_address); } + // Documents the PKCS#7 padding behavior that `encrypt` relies on (see its static_assert). + #[test] + fn pkcs7_padding_always_adds_at_least_one_byte() { + let key = [0 as u8; 16]; + let iv = [0 as u8; 16]; + + // 1 byte input + 15 bytes padding = 16 bytes + assert_eq(std::aes128::aes128_encrypt([0; 1], iv, key).len(), 16); + + // 15 bytes input + 1 byte padding = 16 bytes + assert_eq(std::aes128::aes128_encrypt([0; 15], iv, key).len(), 16); + + // 16 bytes input (block-aligned) + full 16-byte padding block = 32 bytes + assert_eq(std::aes128::aes128_encrypt([0; 16], iv, key).len(), 32); + } + + #[test] + unconstrained fn encrypt_decrypt_max_size_plaintext() { + let mut env = TestEnvironment::new(); + let recipient = env.create_light_account(); + + env.private_context(|_| { + let mut plaintext = [0; MESSAGE_PLAINTEXT_LEN]; + for i in 0..MESSAGE_PLAINTEXT_LEN { + plaintext[i] = i as Field; + } + let ciphertext = AES128::encrypt(plaintext, recipient); + + assert_eq( + AES128::decrypt(BoundedVec::from_array(ciphertext), recipient).unwrap(), + BoundedVec::from_array(plaintext), + ); + }); + } + + #[test(should_fail_with = "Plaintext length exceeds MESSAGE_PLAINTEXT_LEN")] + unconstrained fn encrypt_oversized_plaintext() { + let address = AztecAddress { inner: 3 }; + let plaintext: [Field; MESSAGE_PLAINTEXT_LEN + 1] = [0; MESSAGE_PLAINTEXT_LEN + 1]; + let _ = AES128::encrypt(plaintext, address); + } + #[test] unconstrained fn random_address_point_produces_valid_points() { // About half of random addresses are invalid, so testing just a couple gives us high confidence that diff --git a/noir-projects/aztec-nr/aztec/src/messages/logs/arithmetic_generics_utils.nr b/noir-projects/aztec-nr/aztec/src/messages/logs/arithmetic_generics_utils.nr index f9b304e80cd4..6bd7e79a12e4 100644 --- 
a/noir-projects/aztec-nr/aztec/src/messages/logs/arithmetic_generics_utils.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/logs/arithmetic_generics_utils.nr @@ -31,16 +31,16 @@ fn get_arr_of_size__ciphertext( [0; FullPt + PtAesPadding] } -// Ok, so we have the following bytes: eph_pk_sign, header_ciphertext, ciphertext: Let mbwop = 1 + +// Ok, so we have the following bytes: header_ciphertext, ciphertext: Let mbwop = // HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| // aka message bytes without padding fn get_arr_of_size__message_bytes_without_padding( _ct: [u8; Ct], -) -> [u8; 1 + HEADER_CIPHERTEXT_SIZE_IN_BYTES + Ct] { - [0; 1 + HEADER_CIPHERTEXT_SIZE_IN_BYTES + Ct] +) -> [u8; HEADER_CIPHERTEXT_SIZE_IN_BYTES + Ct] { + [0; HEADER_CIPHERTEXT_SIZE_IN_BYTES + Ct] } // Recall: -// mbwop := 1 + HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| // aka message bytes without padding +// mbwop := HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| // aka message bytes without padding // We now want to pad b to the next multiple of 31, so as to "fill" fields. Let p be that padding. p = 31 * ceil(mbwop // / 31) - mbwop // = 31 * ((mbwop + 30) // 31) - mbwop @@ -51,16 +51,16 @@ fn get_arr_of_size__message_bytes_padding( [0; (31 * ((Mbwop + 30) / 31)) - Mbwop] } -// |message_bytes| = 1 + HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| + p // aka message bytes (with +// |message_bytes| = HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| + p // aka message bytes (with // padding) Recall: -// mbwop := 1 + HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| p is the padding +// mbwop := HEADER_CIPHERTEXT_SIZE_IN_BYTES + |ct| p is the padding fn get_arr_of_size__message_bytes(_mbwop: [u8; MBWOP], _p: [u8; P]) -> [u8; MBWOP + P] { [0; MBWOP + P] } // The return type is pasted from the LSP's expectation, because it was too difficult to match its weird way of doing // algebra. It doesn't know all rules of arithmetic. Pt is the plaintext length. 
-pub(crate) fn get_arr_of_size__message_bytes_padding__from_PT() -> [u8; ((((((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES + 1) + 30) / 31) * 31) - ((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES + 1))] { +pub(crate) fn get_arr_of_size__message_bytes_padding__from_PT() -> [u8; ((((((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES) + 30) / 31) * 31) - ((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES))] { let full_pt = get_arr_of_size__full_plaintext::(); let pt_aes_padding = get_arr_of_size__plaintext_aes_padding(full_pt); let ct = get_arr_of_size__ciphertext(full_pt, pt_aes_padding); @@ -71,7 +71,7 @@ pub(crate) fn get_arr_of_size__message_bytes_padding__from_PT() -> // The return type is pasted from the LSP's expectation, because it was too difficult to match its weird way of doing // algebra. It doesn't know all rules of arithmetic. -pub(crate) fn get_arr_of_size__message_bytes__from_PT() -> [u8; (((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES + 1) + ((((((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES + 1) + 30) / 31) * 31) - ((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES + 1)))] { +pub(crate) fn get_arr_of_size__message_bytes__from_PT() -> [u8; (((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES) + ((((((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES) + 30) / 31) * 31) - ((Pt + (16 - (Pt % 16))) + HEADER_CIPHERTEXT_SIZE_IN_BYTES)))] { let full_pt = get_arr_of_size__full_plaintext::(); let pt_aes_padding = get_arr_of_size__plaintext_aes_padding(full_pt); let ct = get_arr_of_size__ciphertext(full_pt, pt_aes_padding); diff --git a/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr b/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr index 6b7380b2a197..84a72a48e534 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/logs/note.nr @@ -89,7 +89,7 @@ mod test { use crate::{ messages::{ 
encoding::decode_message, - logs::note::{decode_private_note_message, encode_private_note_message}, + logs::note::{decode_private_note_message, encode_private_note_message, MAX_NOTE_PACKED_LEN}, msg_type::PRIVATE_NOTE_MSG_TYPE_ID, }, note::note_interface::NoteType, @@ -121,4 +121,55 @@ mod test { assert_eq(randomness, RANDOMNESS); assert_eq(packed_note, BoundedVec::from_array(note.pack())); } + + #[derive(Packable)] + struct MaxSizeNote { + data: [Field; MAX_NOTE_PACKED_LEN], + } + + impl NoteType for MaxSizeNote { + fn get_id() -> Field { + 0 + } + } + + #[test] + unconstrained fn encode_decode_max_size_note() { + let mut data = [0; MAX_NOTE_PACKED_LEN]; + for i in 0..MAX_NOTE_PACKED_LEN { + data[i] = i as Field; + } + let note = MaxSizeNote { data }; + + let encoded = encode_private_note_message(note, OWNER, STORAGE_SLOT, RANDOMNESS); + let (msg_type_id, msg_metadata, msg_content) = decode_message(BoundedVec::from_array(encoded)); + + assert_eq(msg_type_id, PRIVATE_NOTE_MSG_TYPE_ID); + + let (note_type_id, owner, storage_slot, randomness, packed_note) = + decode_private_note_message(msg_metadata, msg_content); + + assert_eq(note_type_id, MaxSizeNote::get_id()); + assert_eq(owner, OWNER); + assert_eq(storage_slot, STORAGE_SLOT); + assert_eq(randomness, RANDOMNESS); + assert_eq(packed_note, BoundedVec::from_array(data)); + } + + #[derive(Packable)] + struct OversizedNote { + data: [Field; MAX_NOTE_PACKED_LEN + 1], + } + + impl NoteType for OversizedNote { + fn get_id() -> Field { + 0 + } + } + + #[test(should_fail_with = "Invalid message content: it must have a length of at most MAX_MESSAGE_CONTENT_LEN")] + fn encode_oversized_note_fails() { + let note = OversizedNote { data: [0; MAX_NOTE_PACKED_LEN + 1] }; + let _ = encode_private_note_message(note, OWNER, STORAGE_SLOT, RANDOMNESS); + } } diff --git a/noir-projects/aztec-nr/aztec/src/messages/processing/event_validation_request.nr b/noir-projects/aztec-nr/aztec/src/messages/processing/event_validation_request.nr 
index f7ed86b32672..8e757e1fbf1c 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/processing/event_validation_request.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/processing/event_validation_request.nr @@ -42,7 +42,7 @@ mod test { 3, // randomness 4, // serialized_event[0] 5, // serialized_event[1] - 0, 0, 0, 0, 0, 0, 0, 0, 0, // serialized_event padding + 0, 0, 0, 0, 0, 0, 0, 0, // serialized_event padding 2, // bounded_vec_len 6, // event_commitment 7, // tx_hash diff --git a/noir-projects/aztec-nr/aztec/src/messages/processing/note_validation_request.nr b/noir-projects/aztec-nr/aztec/src/messages/processing/note_validation_request.nr index 060fb61eb3a6..00d0e1ef4738 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/processing/note_validation_request.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/processing/note_validation_request.nr @@ -55,7 +55,6 @@ mod test { 0x0000000000000000000000000000000000000000000000000000000000000000, 0x0000000000000000000000000000000000000000000000000000000000000000, 0x0000000000000000000000000000000000000000000000000000000000000000, - 0x0000000000000000000000000000000000000000000000000000000000000000, 0x0000000000000000000000000000000000000000000000000000000000000002, 0x0000000000000000000000000000000000000000000000000000000000000006, 0x0000000000000000000000000000000000000000000000000000000000000007, diff --git a/noir-projects/aztec-nr/aztec/src/test/helpers/txe_oracles.nr b/noir-projects/aztec-nr/aztec/src/test/helpers/txe_oracles.nr index f9fe915c9c46..f1d61d82e8cc 100644 --- a/noir-projects/aztec-nr/aztec/src/test/helpers/txe_oracles.nr +++ b/noir-projects/aztec-nr/aztec/src/test/helpers/txe_oracles.nr @@ -11,7 +11,7 @@ use crate::protocol::{ }; global MAX_PRIVATE_EVENTS_PER_TXE_QUERY: u32 = 5; -global MAX_EVENT_SERIALIZATION_LENGTH: u32 = 12; +global MAX_EVENT_SERIALIZATION_LENGTH: u32 = 10; pub unconstrained fn deploy( path: str, diff --git 
a/noir-projects/noir-contracts-comp-failures/contracts/invalid_note/expected_error b/noir-projects/noir-contracts-comp-failures/contracts/invalid_note/expected_error index 94e94af204cf..ee90fdf8adb9 100644 --- a/noir-projects/noir-contracts-comp-failures/contracts/invalid_note/expected_error +++ b/noir-projects/noir-contracts-comp-failures/contracts/invalid_note/expected_error @@ -1 +1 @@ -InvalidNote has a packed length of 10 fields, which exceeds the maximum allowed length of 9 fields +InvalidNote has a packed length of 9 fields, which exceeds the maximum allowed length of 8 fields diff --git a/noir-projects/noir-contracts/bootstrap.sh b/noir-projects/noir-contracts/bootstrap.sh index e38c87d5ac44..4795fe645f4d 100755 --- a/noir-projects/noir-contracts/bootstrap.sh +++ b/noir-projects/noir-contracts/bootstrap.sh @@ -213,6 +213,13 @@ function build { rm -rf target mkdir -p $tmp_dir local contracts=$(grep -oP "(?<=$folder_name/)[^\"]+" Nargo.toml) + + # If pinned contracts exist, extract them and skip their compilation. + if [ -f pinned-protocol-contracts.tar.gz ]; then + echo_stderr "Using pinned-protocol-contracts.tar.gz for pinned contracts." + tar xzf pinned-protocol-contracts.tar.gz -C target + contracts=$(echo "$contracts" | grep -vE "^protocol/|^fees/sponsored_fpc_contract$") + fi else local contracts="$@" fi @@ -276,6 +283,19 @@ function format { $NARGO fmt } +function pin-build { + # Force a real build by removing any existing pinned archive. + rm -f pinned-protocol-contracts.tar.gz + local protocol_contracts=$(grep -oP '(?<=contracts/)[^"]+' Nargo.toml | grep "^protocol/") + local fees_contracts=$(grep -oP '(?<=contracts/)[^"]+' Nargo.toml | grep "^fees/") + build $protocol_contracts $fees_contracts + # Bundle protocol contracts plus SponsoredFPC (FPC is excluded — only SponsoredFPC is pinned). + local protocol_artifacts=$(jq -r '.[]' protocol_contracts.json | sed 's/$/.json/') + echo_stderr "Creating pinned-protocol-contracts.tar.gz..." 
+ (cd target && tar czf ../pinned-protocol-contracts.tar.gz $protocol_artifacts sponsored_fpc_contract-SponsoredFPC.json) + echo_stderr "Done. pinned-protocol-contracts.tar.gz created. Commit it to pin these artifacts." +} + case "$cmd" in "clean-keys") for artifact in target/*.json; do @@ -290,6 +310,9 @@ case "$cmd" in "compile") VERBOSE=${VERBOSE:-1} build "$@" ;; + "pin-build") + pin-build + ;; *) default_cmd_handler "$@" ;; diff --git a/noir-projects/noir-contracts/pinned-protocol-contracts.tar.gz b/noir-projects/noir-contracts/pinned-protocol-contracts.tar.gz new file mode 100644 index 000000000000..ecd57382f5c2 Binary files /dev/null and b/noir-projects/noir-contracts/pinned-protocol-contracts.tar.gz differ diff --git a/spartan/environments/network-defaults.yml b/spartan/environments/network-defaults.yml index 6e8f28bd01bb..daec5d93e53e 100644 --- a/spartan/environments/network-defaults.yml +++ b/spartan/environments/network-defaults.yml @@ -232,9 +232,6 @@ networks: # P2P P2P_MAX_PENDING_TX_COUNT: 1000 P2P_TX_POOL_DELETE_TXS_AFTER_REORG: false - # Auto-update - AUTO_UPDATE: none - AUTO_UPDATE_URL: "" # Telemetry PUBLIC_OTEL_OPT_OUT: true PUBLIC_OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "" @@ -252,6 +249,7 @@ networks: SLASH_UNKNOWN_PENALTY: 10e18 SLASH_INVALID_BLOCK_PENALTY: 10e18 SLASH_GRACE_PERIOD_L2_SLOTS: 0 + ENABLE_VERSION_CHECK: true testnet: <<: *prodlike @@ -296,6 +294,7 @@ networks: SLASH_UNKNOWN_PENALTY: 10e18 SLASH_INVALID_BLOCK_PENALTY: 10e18 SLASH_GRACE_PERIOD_L2_SLOTS: 64 + ENABLE_VERSION_CHECK: true mainnet: <<: *prodlike @@ -303,7 +302,7 @@ networks: AZTEC_SLOT_DURATION: 72 AZTEC_ACTIVATION_THRESHOLD: 200000e18 AZTEC_EJECTION_THRESHOLD: 100000e18 - AZTEC_LOCAL_EJECTION_THRESHOLD: 162000e18 + AZTEC_LOCAL_EJECTION_THRESHOLD: 190000e18 AZTEC_SLASH_AMOUNT_SMALL: 2000e18 AZTEC_SLASH_AMOUNT_MEDIUM: 2000e18 AZTEC_SLASH_AMOUNT_LARGE: 2000e18 @@ -338,12 +337,10 @@ networks: # P2P P2P_MAX_PENDING_TX_COUNT: 0 P2P_TX_POOL_DELETE_TXS_AFTER_REORG: true - # 
Auto-update - AUTO_UPDATE: notify - AUTO_UPDATE_URL: "https://storage.googleapis.com/aztec-mainnet/auto-update/mainnet.json" # Telemetry - PUBLIC_OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "https://telemetry.alpha-testnet.aztec-labs.com/v1/metrics" - PUBLIC_OTEL_COLLECT_FROM: sequencer + PUBLIC_OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "" + PUBLIC_OTEL_COLLECT_FROM: "" + ENABLE_VERSION_CHECK: false # Slasher penalties - more lenient initially SLASH_PRUNE_PENALTY: 0 SLASH_DATA_WITHHOLDING_PENALTY: 0 diff --git a/spartan/environments/prove-n-tps-real.env b/spartan/environments/prove-n-tps-real.env index 129abf2e7750..63af6079257c 100644 --- a/spartan/environments/prove-n-tps-real.env +++ b/spartan/environments/prove-n-tps-real.env @@ -37,8 +37,11 @@ PROVER_PUBLISHER_MNEMONIC_START_INDEX=8000 PROVER_AGENT_POLL_INTERVAL_MS=10000 PUBLISHERS_PER_PROVER=1 -SEQ_MAX_TX_PER_BLOCK=80 +SEQ_MAX_TX_PER_BLOCK=18 SEQ_MIN_TX_PER_BLOCK=0 +SEQ_BLOCK_DURATION_MS=6000 +SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT=36 +SEQ_BUILD_CHECKPOINT_IF_EMPTY=true P2P_MAX_TX_POOL_SIZE=1000000000 DEBUG_P2P_INSTRUMENT_MESSAGES=true diff --git a/spartan/metrics/grafana/dashboards/aztec_validators.json b/spartan/metrics/grafana/dashboards/aztec_validators.json index 190cdb8261fb..27d6d8677037 100644 --- a/spartan/metrics/grafana/dashboards/aztec_validators.json +++ b/spartan/metrics/grafana/dashboards/aztec_validators.json @@ -2602,6 +2602,268 @@ ], "title": "Archiver Database Item Count", "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 88 + }, + "id": 700, + "panels": [], + "title": "Attester Epoch Participation", + "type": "row" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "The current epoch number, which represents the total number of epochs elapsed since genesis.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 89 + }, + "id": 701, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "expr": "max(aztec_validator_current_epoch{k8s_namespace_name=\"$namespace\", service_instance_id=~\"$service_instance\"})", + "instant": true, + "legendFormat": "Current Epoch", + "range": false, + "refId": "A" + } + ], + "title": "Current Epoch", + "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "Cumulative number of epochs in which each attester successfully submitted at least one attestation.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Epochs attested", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 20, + "x": 4, + "y": 89 + }, + "id": 702, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "expr": "aztec_validator_attested_epoch_count{k8s_namespace_name=\"$namespace\", service_instance_id=~\"$service_instance\"}", + "legendFormat": "{{k8s_pod_name}} / {{aztec_attester_address}}", + "range": true, + "refId": "A" + } + ], + "title": "Attested Epochs per Attester", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "${data_source}" + }, + "description": "Fraction of total epochs in which each attester successfully participated (attested epochs ÷ current epoch × 100%).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 1, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "orange", + "value": 50 + }, + { + "color": "yellow", + "value": 75 + }, + { + "color": "green", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 97 + }, + "id": 703, + "options": { + "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "valueMode": "color" + }, + "pluginVersion": "11.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "expr": 
"aztec_validator_attested_epoch_count{k8s_namespace_name=\"$namespace\", service_instance_id=~\"$service_instance\"} / on() group_left() max by() (aztec_validator_current_epoch{k8s_namespace_name=\"$namespace\", service_instance_id=~\"$service_instance\"}) * 100", + "instant": true, + "legendFormat": "{{k8s_pod_name}} / {{aztec_attester_address}}", + "range": false, + "refId": "A" + } + ], + "title": "Attester Participation Rate", + "type": "bargauge" } ], "refresh": "30s", @@ -2706,6 +2968,6 @@ "timezone": "", "title": "Validator node overview", "uid": "aztec-validators", - "version": 8, + "version": 9, "weekStart": "" } diff --git a/spartan/scripts/deploy_network.sh b/spartan/scripts/deploy_network.sh index f94f71d7dfed..93e776ef615a 100755 --- a/spartan/scripts/deploy_network.sh +++ b/spartan/scripts/deploy_network.sh @@ -108,6 +108,7 @@ PROVER_FAILED_PROOF_STORE=${PROVER_FAILED_PROOF_STORE:-} SEQ_MIN_TX_PER_BLOCK=${SEQ_MIN_TX_PER_BLOCK:-0} SEQ_MAX_TX_PER_BLOCK=${SEQ_MAX_TX_PER_BLOCK:-8} SEQ_BLOCK_DURATION_MS=${SEQ_BLOCK_DURATION_MS:-} +SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT=${SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT:-} SEQ_BUILD_CHECKPOINT_IF_EMPTY=${SEQ_BUILD_CHECKPOINT_IF_EMPTY:-} SEQ_ENFORCE_TIME_TABLE=${SEQ_ENFORCE_TIME_TABLE:-} SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT=${SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT:-0} @@ -513,6 +514,7 @@ VALIDATOR_HA_REPLICAS = ${VALIDATOR_HA_REPLICAS} SEQ_MIN_TX_PER_BLOCK = ${SEQ_MIN_TX_PER_BLOCK} SEQ_MAX_TX_PER_BLOCK = ${SEQ_MAX_TX_PER_BLOCK} SEQ_BLOCK_DURATION_MS = ${SEQ_BLOCK_DURATION_MS:-null} +SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT = ${SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT:-null} SEQ_BUILD_CHECKPOINT_IF_EMPTY = ${SEQ_BUILD_CHECKPOINT_IF_EMPTY:-null} SEQ_ENFORCE_TIME_TABLE = ${SEQ_ENFORCE_TIME_TABLE:-null} SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT = ${SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT} diff --git a/spartan/scripts/extract_proving_metrics.ts b/spartan/scripts/extract_proving_metrics.ts new file mode 100755 index 
000000000000..46d1d01ae966 --- /dev/null +++ b/spartan/scripts/extract_proving_metrics.ts @@ -0,0 +1,852 @@ +#!/usr/bin/env -S node --experimental-strip-types --no-warnings +/** + * Extract proving metrics from GCP Cloud Logging for a prover node. + * + * Usage: + * ./extract_proving_metrics.ts --start [--end ] [--epoch ] [--project ] [--pod ] + * + * Examples: + * # Auto-detect first epoch with >=1 tx after the given start time: + * ./extract_proving_metrics.ts prove-n-tps-real --start 2026-03-01T19:00:00Z + * + * # Specify epoch number: + * ./extract_proving_metrics.ts prove-n-tps-real --start 2026-03-01T19:00:00Z --epoch 3 + * + * # Explicit time range (no auto-detection): + * ./extract_proving_metrics.ts prove-n-tps-real --start 2026-03-01T19:58:00Z --end 2026-03-01T20:25:00Z + */ + +import { exec } from "node:child_process"; +import { promisify } from "node:util"; + +const execAsync = promisify(exec); + +// ── CLI arg parsing ────────────────────────────────────────────────────────── + +function parseArgs(argv: string[]): { + namespace: string; + start: string; + end: string; + epoch: number | undefined; + project: string; + pod: string; +} { + const args = argv.slice(2); + const positional: string[] = []; + const flags: Record = {}; + + for (let i = 0; i < args.length; i++) { + if (args[i].startsWith("--")) { + const key = args[i].slice(2); + if (i + 1 < args.length && !args[i + 1].startsWith("--")) { + flags[key] = args[++i]; + } else { + flags[key] = "true"; + } + } else { + positional.push(args[i]); + } + } + + const namespace = positional[0]; + if (!namespace) { + console.error( + "Usage: extract_proving_metrics.ts --start [--end ] [--epoch ] [--project ] [--pod ]", + ); + process.exit(1); + } + if (!flags.start) { + console.error("Error: --start is required (ISO 8601 timestamp)"); + process.exit(1); + } + + // Default end: now + const defaultEnd = new Date().toISOString(); + + return { + namespace, + start: flags.start, + end: flags.end || defaultEnd, + 
epoch: flags.epoch !== undefined ? parseInt(flags.epoch) : undefined, + project: flags.project || "testnet-440309", + pod: flags.pod || `${namespace}-prover-node-0`, + }; +} + +const config = parseArgs(process.argv); + +// ── GCP log query helpers ──────────────────────────────────────────────────── + +interface LogEntry { + timestamp: string; + jsonPayload?: { + message?: string; + [key: string]: any; + }; + [key: string]: any; +} + +function buildFilter( + textFilter: string, + opts?: { module?: string; pod?: string }, +): string { + const pod = opts?.pod ?? config.pod; + let filter = + `resource.type="k8s_container"` + + ` AND resource.labels.namespace_name="${config.namespace}"` + + ` AND resource.labels.pod_name="${pod}"` + + ` AND timestamp>="${config.start}"` + + ` AND timestamp<="${config.end}"` + + ` AND jsonPayload.message=~"${textFilter}"`; + if (opts?.module) { + filter += ` AND jsonPayload.module="${opts.module}"`; + } + return filter; +} + +async function queryLogs( + name: string, + textFilter: string, + opts?: { module?: string; pod?: string }, +): Promise { + const filter = buildFilter(textFilter, opts); + const cmd = [ + "gcloud", + "logging", + "read", + JSON.stringify(filter), + `--project=${config.project}`, + "--format=json", + `--freshness=7d`, + ].join(" "); + + process.stderr.write(` Querying: ${name}...\n`); + try { + const { stdout } = await execAsync(cmd, { maxBuffer: 50 * 1024 * 1024 }); + const entries: LogEntry[] = JSON.parse(stdout || "[]"); + process.stderr.write(` ${name}: ${entries.length} entries\n`); + return entries; + } catch (err: any) { + process.stderr.write(` ${name}: ERROR - ${err.message?.split("\n")[0]}\n`); + return []; + } +} + +// ── Epoch auto-detection ───────────────────────────────────────────────────── + +async function scanForEpoch(): Promise<{ start: string; end: string }> { + process.stderr.write( + `Scanning for epoch in ${config.start} to ${config.end}...\n\n`, + ); + + const [epochStarts, epochFinalized] = 
await Promise.all([ + queryLogs("scan-epoch-starts", "Starting epoch.*proving job"), + queryLogs("scan-epoch-finalized", "Finalized proof for epoch"), + ]); + + process.stderr.write("\n"); + + // Parse all epoch start entries + const starts: { + epoch: number; + txCount: number; + timestamp: string; + }[] = []; + for (const entry of epochStarts) { + const m = msg(entry); + const p = entry.jsonPayload || {}; + const epochMatch = m.match( + /Starting epoch (\d+).*checkpoints (\d+) to (\d+)/, + ); + if (epochMatch) { + starts.push({ + epoch: parseInt(epochMatch[1]), + txCount: p.epochSizeTxs ?? 0, + timestamp: entry.timestamp, + }); + } + } + + // Sort by timestamp ascending + starts.sort((a, b) => a.timestamp.localeCompare(b.timestamp)); + + // Pick target epoch + let target: (typeof starts)[0] | undefined; + if (config.epoch !== undefined) { + target = starts.find((s) => s.epoch === config.epoch); + if (!target) { + process.stderr.write( + `Warning: epoch ${config.epoch} not found in scan window. Using full window.\n`, + ); + return { start: config.start, end: config.end }; + } + } else { + target = starts.find((s) => s.txCount >= 1); + if (!target) { + process.stderr.write( + `Warning: no epoch with >=1 tx found in scan window. 
Using full window.\n`, + ); + return { start: config.start, end: config.end }; + } + } + + process.stderr.write( + `Found epoch ${target.epoch} (${target.txCount} txs) at ${target.timestamp}\n`, + ); + + // Find matching finalized entry + const finalized = epochFinalized.find((entry) => { + const m = msg(entry); + const match = m.match(/Finalized proof for epoch (\d+)/); + return match && parseInt(match[1]) === target.epoch; + }); + + // Epoch start timestamp minus a few seconds to capture all leading logs + const narrowedStart = new Date( + new Date(target.timestamp).getTime() - 5000, + ).toISOString(); + + let narrowedEnd: string; + if (finalized) { + // Pad 60s after finalized to capture trailing logs + narrowedEnd = new Date( + new Date(finalized.timestamp).getTime() + 60000, + ).toISOString(); + process.stderr.write( + `Epoch ${target.epoch} finalized at ${finalized.timestamp}\n`, + ); + } else { + narrowedEnd = config.end; + process.stderr.write( + `Epoch ${target.epoch} finalized entry not found, using scan window end.\n`, + ); + } + + process.stderr.write( + `Narrowed window: ${narrowedStart} to ${narrowedEnd}\n\n`, + ); + + return { start: narrowedStart, end: narrowedEnd }; +} + +// ── Pipeline order for proving job types ───────────────────────────────────── + +const PIPELINE_ORDER = [ + "PARITY_BASE", + "PARITY_ROOT", + "PUBLIC_CHONK_VERIFIER", + "PUBLIC_VM", + "PUBLIC_TX_BASE_ROLLUP", + "TX_MERGE_ROLLUP", + "BLOCK_ROOT_ROLLUP", + "BLOCK_ROOT_FIRST_ROLLUP", + "BLOCK_ROOT_SINGLE_TX_ROLLUP", + "BLOCK_MERGE_ROLLUP", + "CHECKPOINT_ROOT_ROLLUP", + "CHECKPOINT_MERGE_ROLLUP", + "ROOT_ROLLUP", +]; + +// ── Query definitions ──────────────────────────────────────────────────────── + +async function fetchAllData() { + process.stderr.write( + `Fetching logs for ${config.pod} in ${config.namespace}\n`, + ); + process.stderr.write(`Time range: ${config.start} to ${config.end}\n\n`); + + const brokerPod = `${config.namespace}-prover-broker-0`; + + const [ + 
epochStart, + blobFields, + blobBatching, + startingBlock, + processedTxs, + addingTxs, + epochFinalized, + brokerNewJobs, + brokerCompleteJobs, + ] = await Promise.all([ + queryLogs("epoch-start", "Starting epoch.*proving job"), + queryLogs("blob-fields", "Blob fields per checkpoint"), + queryLogs("blob-batching", "Final blob batching"), + queryLogs("starting-block", "Starting block", { + module: "prover-client:orchestrator", + }), + queryLogs("processed-txs", "Processed.*successful txs"), + queryLogs("adding-txs", "Adding.*transactions to block"), + queryLogs("epoch-finalized", "Finalized proof for epoch"), + queryLogs("broker-new-jobs", "New proving job", { pod: brokerPod }), + queryLogs("broker-complete-jobs", "Proving job complete", { + pod: brokerPod, + }), + ]); + + process.stderr.write("\n"); + return { + epochStart, + blobFields, + blobBatching, + startingBlock, + processedTxs, + addingTxs, + epochFinalized, + brokerNewJobs, + brokerCompleteJobs, + }; +} + +// ── Time helpers ───────────────────────────────────────────────────────────── + +function formatDelta(ms: number): string { + const totalSeconds = Math.round(ms / 1000); + if (totalSeconds < 60) return `${totalSeconds}s`; + const minutes = Math.floor(totalSeconds / 60); + const seconds = totalSeconds % 60; + if (minutes < 60) return `${minutes}m ${seconds}s`; + const hours = Math.floor(minutes / 60); + const mins = minutes % 60; + return `${hours}h ${mins}m ${seconds}s`; +} + +function minTimestamp(entries: LogEntry[]): string | null { + if (entries.length === 0) return null; + return entries.reduce( + (min, e) => (e.timestamp < min ? e.timestamp : min), + entries[0].timestamp, + ); +} + +function maxTimestamp(entries: LogEntry[]): string | null { + if (entries.length === 0) return null; + return entries.reduce( + (max, e) => (e.timestamp > max ? 
e.timestamp : max), + entries[0].timestamp, + ); +} + +// ── Parsing helpers ────────────────────────────────────────────────────────── + +function msg(entry: LogEntry): string { + return entry.jsonPayload?.message || ""; +} + +function parseEpochStart(entries: LogEntry[]): { + epoch: number; + fromCheckpoint: number; + toCheckpoint: number; + fromBlock: number; + toBlock: number; + txCount: number; +} | null { + if (entries.length === 0) return null; + const entry = entries[0]; + const m = msg(entry); + const p = entry.jsonPayload || {}; + // Message: "Starting epoch 3 proving job with checkpoints 33 to 64" + // Structured fields: epochNumber, fromBlock, toBlock, epochSizeTxs + const epochMatch = m.match( + /Starting epoch (\d+).*checkpoints (\d+) to (\d+)/, + ); + if (!epochMatch) return null; + return { + epoch: parseInt(epochMatch[1]), + fromCheckpoint: parseInt(epochMatch[2]), + toCheckpoint: parseInt(epochMatch[3]), + fromBlock: p.fromBlock ?? 0, + toBlock: p.toBlock ?? 0, + txCount: p.epochSizeTxs ?? 0, + }; +} + +function parseBlobFields(entries: LogEntry[]): string | null { + if (entries.length === 0) return null; + const m = msg(entries[0]); + // "Blob fields per checkpoint: 211.92427600175142ms" + const match = m.match(/Blob fields per checkpoint:\s*([\d.]+)ms/); + return match ? `${parseFloat(match[1]).toFixed(2)}ms` : null; +} + +function parseBlobBatching(entries: LogEntry[]): string | null { + if (entries.length === 0) return null; + const m = msg(entries[0]); + // "Final blob batching challeneger: 3408.9118730016053ms" (note typo in source) + const match = m.match(/Final blob batching.*?:\s*([\d.]+)ms/); + return match ? `${parseFloat(match[1]).toFixed(2)}ms` : null; +} + +interface BlockInfo { + blockNumber: number; + slot: number; + txCount: number; + processingTime: number; // seconds +} + +function parseStartingBlocks( + entries: LogEntry[], +): Map { + // "Starting block 175 for slot 112." 
+ const result = new Map(); + for (const entry of entries) { + const m2 = msg(entry); + const match = m2.match(/Starting block (\d+) for slot (\d+)/); + if (match) { + const blockNumber = parseInt(match[1]); + const slot = parseInt(match[2]); + result.set(blockNumber, { blockNumber, slot }); + } + } + return result; +} + +function parseProcessedTxs( + entries: LogEntry[], +): { timestamp: string; txCount: number; duration: number }[] { + // "Processed 18 successful txs and 0 failed txs in 29.2s" + const results: { timestamp: string; txCount: number; duration: number }[] = + []; + for (const entry of entries) { + const m2 = msg(entry); + const match = m2.match(/Processed (\d+) successful txs.*?in ([\d.]+)s/); + if (match) { + results.push({ + timestamp: entry.timestamp, + txCount: parseInt(match[1]), + duration: parseFloat(match[2]), + }); + } + } + return results; +} + +function parseAddingTxs( + entries: LogEntry[], +): { timestamp: string; txCount: number; blockNumber: number }[] { + // "Adding 6 transactions to block 175" + const results: { timestamp: string; txCount: number; blockNumber: number }[] = + []; + for (const entry of entries) { + const m2 = msg(entry); + const match = m2.match(/Adding (\d+) transactions to block (\d+)/); + if (match) { + results.push({ + timestamp: entry.timestamp, + txCount: parseInt(match[1]), + blockNumber: parseInt(match[2]), + }); + } + } + return results; +} + +function parseEpochFinalized(entries: LogEntry[]): { duration: string } | null { + if (entries.length === 0) return null; + const p = entries[0].jsonPayload || {}; + // Duration is in jsonPayload.duration (milliseconds) + const durationMs = p.duration; + if (durationMs == null) return null; + const totalSeconds = durationMs / 1000; + const minutes = Math.floor(totalSeconds / 60); + const seconds = Math.round(totalSeconds % 60); + return { duration: `${minutes}m ${seconds}s` }; +} + +// ── Broker job parsing ─────────────────────────────────────────────────────── + 
+interface BrokerJobStage { + enqueuedFirst: number; + enqueuedLast: number; + completedFirst: number; + completedLast: number; + count: number; + completedCount: number; +} + +interface BrokerJobDuration { + type: string; + durationMs: number; +} + +function parseBrokerJobs( + newEntries: LogEntry[], + completeEntries: LogEntry[], +): { stages: Map; durations: BrokerJobDuration[] } { + // Index "new" entries by provingJobId for duration matching + const newById = new Map(); + + // Aggregate per-type timestamps + const stages = new Map(); + + for (const entry of newEntries) { + const m2 = msg(entry); + const typeMatch = m2.match(/id=\d+:(\w+):/); + if (!typeMatch) continue; + const type = typeMatch[1]; + const ts = new Date(entry.timestamp).getTime(); + const jobId = entry.jsonPayload?.provingJobId; + if (jobId) { + newById.set(jobId, { type, timestamp: ts }); + } + + const existing = stages.get(type); + if (existing) { + existing.enqueuedFirst = Math.min(existing.enqueuedFirst, ts); + existing.enqueuedLast = Math.max(existing.enqueuedLast, ts); + existing.count++; + } else { + stages.set(type, { + enqueuedFirst: ts, + enqueuedLast: ts, + completedFirst: Infinity, + completedLast: -Infinity, + count: 1, + completedCount: 0, + }); + } + } + + const durations: BrokerJobDuration[] = []; + + for (const entry of completeEntries) { + const m2 = msg(entry); + const typeMatch = m2.match(/type=(\w+)/); + if (!typeMatch) continue; + const type = typeMatch[1]; + const ts = new Date(entry.timestamp).getTime(); + + const existing = stages.get(type); + if (existing) { + existing.completedFirst = Math.min(existing.completedFirst, ts); + existing.completedLast = Math.max(existing.completedLast, ts); + existing.completedCount++; + } else { + stages.set(type, { + enqueuedFirst: Infinity, + enqueuedLast: -Infinity, + completedFirst: ts, + completedLast: ts, + count: 0, + completedCount: 1, + }); + } + + // Match with new entry for per-job duration + const jobId = 
entry.jsonPayload?.provingJobId; + if (jobId) { + const newEntry = newById.get(jobId); + if (newEntry) { + durations.push({ type, durationMs: ts - newEntry.timestamp }); + } + } + } + + return { stages, durations }; +} + +function computeDurationStats( + durations: BrokerJobDuration[], +): Map< + string, + { count: number; median: number; mean: number; p90: number; max: number } +> { + // Group by type + const byType = new Map(); + for (const d of durations) { + const arr = byType.get(d.type); + if (arr) { + arr.push(d.durationMs); + } else { + byType.set(d.type, [d.durationMs]); + } + } + + const stats = new Map< + string, + { count: number; median: number; mean: number; p90: number; max: number } + >(); + for (const [type, values] of byType) { + values.sort((a, b) => a - b); + const count = values.length; + const mean = values.reduce((a, b) => a + b, 0) / count; + const median = + count % 2 === 0 + ? (values[count / 2 - 1] + values[count / 2]) / 2 + : values[Math.floor(count / 2)]; + const p90Index = Math.min(Math.ceil(count * 0.9) - 1, count - 1); + const p90 = values[p90Index]; + const max = values[count - 1]; + stats.set(type, { count, median, mean, p90, max }); + } + + return stats; +} + +function sortedJobTypes(stages: Map): string[] { + const ordered: string[] = []; + const seen = new Set(); + + // First add types in pipeline order + for (const type of PIPELINE_ORDER) { + if (stages.has(type)) { + ordered.push(type); + seen.add(type); + } + } + + // Then append any remaining types sorted by first enqueue time + const remaining = [...stages.entries()] + .filter(([type]) => !seen.has(type)) + .sort((a, b) => a[1].enqueuedFirst - b[1].enqueuedFirst) + .map(([type]) => type); + + return [...ordered, ...remaining]; +} + +// ── Correlate per-block data ───────────────────────────────────────────────── + +function correlateBlocks( + processedTxs: { timestamp: string; txCount: number; duration: number }[], + addingTxs: { timestamp: string; txCount: number; 
blockNumber: number }[], + startingBlocks: Map, +): BlockInfo[] { + // "Processed" and "Adding" entries share identical timestamps. + // Sort both by timestamp and zip 1:1. Slot comes from "Starting block" entries. + const sorted_processed = [...processedTxs].sort((a, b) => + a.timestamp.localeCompare(b.timestamp), + ); + const sorted_adding = [...addingTxs].sort((a, b) => + a.timestamp.localeCompare(b.timestamp), + ); + + const blocks: BlockInfo[] = []; + + if (sorted_processed.length !== sorted_adding.length) { + process.stderr.write( + `Warning: processed (${sorted_processed.length}) and adding (${sorted_adding.length}) entry counts differ. ` + + `Correlating by position.\n`, + ); + } + + const count = Math.min(sorted_processed.length, sorted_adding.length); + for (let i = 0; i < count; i++) { + const blockNumber = sorted_adding[i].blockNumber; + const slotInfo = startingBlocks.get(blockNumber); + blocks.push({ + blockNumber, + slot: slotInfo?.slot ?? 0, + txCount: sorted_adding[i].txCount, + processingTime: sorted_processed[i].duration, + }); + } + + // Sort by block number for output + blocks.sort((a, b) => a.blockNumber - b.blockNumber); + return blocks; +} + +// ── Format output ──────────────────────────────────────────────────────────── + +function formatOutput(data: Awaited>): string { + const lines: string[] = []; + + const epochInfo = parseEpochStart(data.epochStart); + if (epochInfo) { + const checkpointCount = + epochInfo.toCheckpoint - epochInfo.fromCheckpoint + 1; + const blockCount = epochInfo.toBlock - epochInfo.fromBlock + 1; + lines.push(`Epoch ${epochInfo.epoch} stats:`); + lines.push( + ` Checkpoints: ${checkpointCount} (${epochInfo.fromCheckpoint} to ${epochInfo.toCheckpoint}), ` + + `Blocks: ${blockCount} (${epochInfo.fromBlock} to ${epochInfo.toBlock}), ` + + `Txs: ${epochInfo.txCount}`, + ); + } else { + lines.push("Epoch stats: not found"); + } + + const blobFieldsTime = parseBlobFields(data.blobFields); + if (blobFieldsTime) { + 
lines.push(` Blob fields per checkpoint: ${blobFieldsTime}`); + } + + const blobBatchingTime = parseBlobBatching(data.blobBatching); + if (blobBatchingTime) { + lines.push(` Blob batching: ${blobBatchingTime}`); + } + + // Timeline + const epochStartTs = data.epochStart[0]?.timestamp; + const blocksStartTs = minTimestamp(data.startingBlock); + const blocksEndTs = maxTimestamp(data.processedTxs); + const epochEndTs = data.epochFinalized[0]?.timestamp; + + const { stages, durations } = parseBrokerJobs( + data.brokerNewJobs, + data.brokerCompleteJobs, + ); + + if (epochStartTs) { + const t0 = new Date(epochStartTs).getTime(); + + // Build all timeline events: [timestamp_ms, label] + const events: [number, string][] = []; + if (epochStartTs) + events.push([new Date(epochStartTs).getTime(), "Epoch started proving"]); + if (blocksStartTs) + events.push([ + new Date(blocksStartTs).getTime(), + "Blocks started processing", + ]); + if (blocksEndTs) + events.push([ + new Date(blocksEndTs).getTime(), + "Blocks finished processing", + ]); + if (epochEndTs) + events.push([new Date(epochEndTs).getTime(), "Epoch finished proving"]); + + // Add per-stage proving events + for (const [type, s] of stages) { + if (s.count > 0) { + events.push([ + s.enqueuedFirst, + `${type} first enqueued (${s.count} jobs)`, + ]); + if (s.count > 1) { + events.push([s.enqueuedLast, `${type} last enqueued`]); + } + } + if (s.completedCount > 0) { + events.push([s.completedLast, `${type} last proof completed`]); + } + } + + // Sort chronologically (stable) + events.sort((a, b) => a[0] - b[0]); + + const labelWidth = Math.max(...events.map(([, label]) => label.length)); + lines.push(""); + lines.push("Timeline:"); + for (const [ts, label] of events) { + const delta = ts - t0; + lines.push(` ${label.padEnd(labelWidth)} T+${formatDelta(delta)}`); + } + } + if (stages.size > 0 && epochStartTs) { + const t0 = new Date(epochStartTs).getTime(); + const types = sortedJobTypes(stages); + const typeWidth = 
Math.max(...types.map((t) => t.length)); + const countWidth = Math.max( + ...types.map((t) => String(stages.get(t)!.count).length), + ); + + lines.push(""); + lines.push("Proving jobs by stage:"); + for (const type of types) { + const s = stages.get(type)!; + const countStr = String(s.count).padStart(countWidth); + const typeStr = type.padEnd(typeWidth); + + // Enqueue range + let enqueueStr: string; + if (s.count === 0) { + enqueueStr = "n/a"; + } else if (s.count === 1) { + enqueueStr = `enqueued T+${formatDelta(s.enqueuedFirst - t0)}`; + } else { + enqueueStr = `enqueued T+${formatDelta(s.enqueuedFirst - t0)}..T+${formatDelta(s.enqueuedLast - t0)}`; + } + + // Complete range + let completeStr: string; + if (s.completedCount === 0) { + completeStr = "not completed"; + } else if (s.completedCount === 1) { + completeStr = `completed T+${formatDelta(s.completedFirst - t0)}`; + } else { + completeStr = `completed T+${formatDelta(s.completedFirst - t0)}..T+${formatDelta(s.completedLast - t0)}`; + } + + // Wall-clock duration from first enqueue to last complete + let wallStr = ""; + if (s.count > 0 && s.completedCount > 0) { + wallStr = ` (${formatDelta(s.completedLast - s.enqueuedFirst)})`; + } + + lines.push( + ` ${typeStr} ${countStr} jobs ${enqueueStr.padEnd(35)}${completeStr}${wallStr}`, + ); + } + } + + // Per-job duration stats + if (durations.length > 0) { + const durationStats = computeDurationStats(durations); + const types = sortedJobTypes(stages).filter((t) => durationStats.has(t)); + // Add any types not in stages (shouldn't happen but be safe) + for (const t of durationStats.keys()) { + if (!types.includes(t)) types.push(t); + } + + if (types.length > 0) { + const typeWidth = Math.max(...types.map((t) => t.length), 4); + const formatS = (ms: number) => `${(ms / 1000).toFixed(1)}s`; + + lines.push(""); + lines.push("Per-job duration stats:"); + lines.push( + ` ${"Type".padEnd(typeWidth)} ${"Count".padStart(6)} ${"Median".padStart(8)} ${"Mean".padStart(8)} 
${"p90".padStart(8)} ${"Max".padStart(8)}`, + ); + for (const type of types) { + const s = durationStats.get(type)!; + lines.push( + ` ${type.padEnd(typeWidth)} ${String(s.count).padStart(6)} ${formatS(s.median).padStart(8)} ${formatS(s.mean).padStart(8)} ${formatS(s.p90).padStart(8)} ${formatS(s.max).padStart(8)}`, + ); + } + } + } + + // Per-block data + const processedTxs = parseProcessedTxs(data.processedTxs); + const addingTxs = parseAddingTxs(data.addingTxs); + const startingBlocks = parseStartingBlocks(data.startingBlock); + const blocks = correlateBlocks(processedTxs, addingTxs, startingBlocks); + + if (blocks.length > 0) { + lines.push(""); + lines.push(`Per block (sorted by block number):`); + for (const block of blocks) { + const time = block.processingTime.toFixed(1); + lines.push( + ` Block ${block.blockNumber} (slot ${block.slot}): ${block.txCount} txs, processing ${time}s`, + ); + } + } + + // Epoch proof duration + const finalized = parseEpochFinalized(data.epochFinalized); + if (finalized) { + lines.push(""); + lines.push(`Epoch proof duration: ${finalized.duration}`); + } + + return lines.join("\n"); +} + +// ── Main ───────────────────────────────────────────────────────────────────── + +async function main() { + const scanResult = await scanForEpoch(); + config.start = scanResult.start; + config.end = scanResult.end; + + const data = await fetchAllData(); + const output = formatOutput(data); + console.log(output); +} + +main().catch((err) => { + console.error(`Fatal: ${err.message}`); + process.exit(1); +}); diff --git a/spartan/scripts/network_pause.sh b/spartan/scripts/network_pause.sh index 3d28a5389b44..706b9553b911 100755 --- a/spartan/scripts/network_pause.sh +++ b/spartan/scripts/network_pause.sh @@ -31,16 +31,55 @@ if [[ -z "$NAMESPACE" ]]; then usage fi -log "Snapshotting $NAMESPACE" -$scripts_dir/manual_snapshot.sh $NAMESPACE +CONFIGMAP_NAME="network-pause-state" -log "Waiting for snapshot upload" -sleep 60 # staging-ignition takes 28s 
+# Guard against double-pause (would overwrite saved state with zeros) +if kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" &>/dev/null; then + die "Namespace $NAMESPACE is already paused (ConfigMap $CONFIGMAP_NAME exists). Run network_resume.sh first." +fi + +# Snapshot if the cronjob exists (not all networks have snapshots enabled) +SNAPSHOT_CRONJOB="$NAMESPACE-snapshot-aztec-snapshots" +if kubectl get cronjob "$SNAPSHOT_CRONJOB" -n "$NAMESPACE" &>/dev/null; then + log "Snapshotting $NAMESPACE" + $scripts_dir/manual_snapshot.sh $NAMESPACE + log "Waiting for snapshot upload" + sleep 60 # staging-ignition takes 28s +else + log "Snapshot cronjob not found ($SNAPSHOT_CRONJOB), skipping snapshot" +fi + +# Collect current replica counts before scaling down +log "Collecting current replica counts" + +SS_JSON=$(kubectl get statefulset -n "$NAMESPACE" -o json | \ + jq '[.items[] | {key: .metadata.name, value: .spec.replicas}] | from_entries') +DEPLOY_JSON=$(kubectl get deployment -n "$NAMESPACE" -o json | \ + jq '[.items[] | {key: .metadata.name, value: .spec.replicas}] | from_entries') + +CRONJOB_JSON=$(kubectl get cronjob -n "$NAMESPACE" -o json | \ + jq '[.items[] | select(.spec.suspend != true) | .metadata.name]') + +STATE_JSON=$(jq -n \ + --arg paused_at "$(date -Is)" \ + --argjson statefulsets "$SS_JSON" \ + --argjson deployments "$DEPLOY_JSON" \ + --argjson cronjobs "$CRONJOB_JSON" \ + '{paused_at: $paused_at, statefulsets: $statefulsets, deployments: $deployments, cronjobs: $cronjobs}') + +log "Saving pause state to ConfigMap $CONFIGMAP_NAME" +kubectl create configmap "$CONFIGMAP_NAME" \ + -n "$NAMESPACE" \ + --from-literal=state="$STATE_JSON" + +# Scale everything down except eth-devnet (L1 beacon chain cannot recover from long pauses) log "Pausing namespace $NAMESPACE" for item_type in statefulset deployment; do - for item in $(kubectl get $item_type -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}'); do - kubectl scale -n $NAMESPACE $item_type/$item 
--replicas 0 + for item in $(kubectl get "$item_type" -n "$NAMESPACE" -o json | \ + jq -r '.items[] | select(.metadata.labels["app.kubernetes.io/name"] != "eth-devnet") | .metadata.name'); do + log " Scaling $item_type/$item to 0" + kubectl scale -n "$NAMESPACE" "$item_type/$item" --replicas 0 done done @@ -48,3 +87,5 @@ log "Suspending cronjobs" for item in $(kubectl get cronjob -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}'); do kubectl -n $NAMESPACE patch cronjobs $item -p '{"spec" : {"suspend" : true }}' done + +log "Namespace $NAMESPACE paused successfully. State saved to ConfigMap $CONFIGMAP_NAME." diff --git a/spartan/scripts/network_resume.sh b/spartan/scripts/network_resume.sh new file mode 100755 index 000000000000..6058a17bf250 --- /dev/null +++ b/spartan/scripts/network_resume.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +set -euo pipefail + +spartan=$(git rev-parse --show-toplevel)/spartan +scripts_dir=$spartan/scripts + +log() { echo "[INFO] $(date -Is) - $*"; } +err() { echo "[ERROR] $(date -Is) - $*" >&2; } +die() { err "$*"; exit 1; } + +usage() { + echo "Usage: $0 [namespace]" + echo "" + echo "Arguments:" + echo " namespace - Kubernetes namespace (default: from NAMESPACE env var)" + echo "" + echo "Environment variables:" + echo " NAMESPACE - K8s namespace (required if not passed as argument)" + echo "" + exit 1 +} + +NAMESPACE="${1:-${NAMESPACE:-}}" + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage +fi + +if [[ -z "$NAMESPACE" ]]; then + usage +fi + +CONFIGMAP_NAME="network-pause-state" + +# Read saved state +log "Reading pause state from ConfigMap $CONFIGMAP_NAME" +STATE_JSON=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.state}') || \ + die "ConfigMap $CONFIGMAP_NAME not found in namespace $NAMESPACE. Is the network paused?" + +echo "$STATE_JSON" | jq . 
>/dev/null 2>&1 || die "Invalid JSON in ConfigMap $CONFIGMAP_NAME" +paused_at=$(echo "$STATE_JSON" | jq -r '.paused_at') +log "Network was paused at $paused_at" + +# Restore statefulset replicas +log "Restoring statefulsets" +for name in $(echo "$STATE_JSON" | jq -r '.statefulsets | keys[]'); do + replicas=$(echo "$STATE_JSON" | jq -r --arg name "$name" '.statefulsets[$name]') + if [[ "$replicas" -gt 0 ]]; then + log " Scaling statefulset/$name to $replicas replicas" + kubectl scale -n "$NAMESPACE" statefulset/"$name" --replicas "$replicas" + fi +done + +# Restore deployment replicas +log "Restoring deployments" +for name in $(echo "$STATE_JSON" | jq -r '.deployments | keys[]'); do + replicas=$(echo "$STATE_JSON" | jq -r --arg name "$name" '.deployments[$name]') + if [[ "$replicas" -gt 0 ]]; then + log " Scaling deployment/$name to $replicas replicas" + kubectl scale -n "$NAMESPACE" deployment/"$name" --replicas "$replicas" + fi +done + +# Unsuspend only cronjobs that were active before pause +log "Unsuspending cronjobs" +for name in $(echo "$STATE_JSON" | jq -r '.cronjobs[]'); do + log " Unsuspending cronjob/$name" + kubectl -n "$NAMESPACE" patch cronjobs "$name" -p '{"spec" : {"suspend" : false }}' +done + +# Clean up +log "Cleaning up ConfigMap $CONFIGMAP_NAME" +kubectl delete configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" + +log "Namespace $NAMESPACE resumed successfully." 
diff --git a/spartan/terraform/deploy-aztec-infra/main.tf b/spartan/terraform/deploy-aztec-infra/main.tf index 488f7e3200a8..5f5370cb2995 100644 --- a/spartan/terraform/deploy-aztec-infra/main.tf +++ b/spartan/terraform/deploy-aztec-infra/main.tf @@ -213,6 +213,7 @@ locals { "validator.node.env.SEQ_MIN_TX_PER_BLOCK" = var.SEQ_MIN_TX_PER_BLOCK "validator.node.env.SEQ_MAX_TX_PER_BLOCK" = var.SEQ_MAX_TX_PER_BLOCK "validator.node.env.SEQ_BLOCK_DURATION_MS" = var.SEQ_BLOCK_DURATION_MS + "validator.node.env.SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT" = var.SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT "validator.node.env.SEQ_BUILD_CHECKPOINT_IF_EMPTY" = var.SEQ_BUILD_CHECKPOINT_IF_EMPTY "validator.node.env.SEQ_ENFORCE_TIME_TABLE" = var.SEQ_ENFORCE_TIME_TABLE "validator.node.env.P2P_TX_POOL_DELETE_TXS_AFTER_REORG" = var.P2P_TX_POOL_DELETE_TXS_AFTER_REORG diff --git a/spartan/terraform/deploy-aztec-infra/variables.tf b/spartan/terraform/deploy-aztec-infra/variables.tf index a03fa5550cc6..a37f80a93790 100644 --- a/spartan/terraform/deploy-aztec-infra/variables.tf +++ b/spartan/terraform/deploy-aztec-infra/variables.tf @@ -363,6 +363,13 @@ variable "SEQ_BLOCK_DURATION_MS" { default = null } +variable "SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT" { + description = "Time allocated for publishing to L1, in seconds" + type = string + nullable = true + default = null +} + variable "SEQ_BUILD_CHECKPOINT_IF_EMPTY" { description = "Have sequencer build and publish an empty checkpoint if there are no txs" type = string diff --git a/yarn-project/archiver/src/modules/validation.test.ts b/yarn-project/archiver/src/modules/validation.test.ts index aa11589bb5d5..0bfeb50f1566 100644 --- a/yarn-project/archiver/src/modules/validation.test.ts +++ b/yarn-project/archiver/src/modules/validation.test.ts @@ -2,7 +2,7 @@ import type { EpochCache } from '@aztec/epoch-cache'; import { CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { Buffer32 } from 
'@aztec/foundation/buffer'; import { times } from '@aztec/foundation/collection'; -import { Secp256k1Signer } from '@aztec/foundation/crypto/secp256k1-signer'; +import { Secp256k1Signer, flipSignature } from '@aztec/foundation/crypto/secp256k1-signer'; import { Signature } from '@aztec/foundation/eth-signature'; import { type Logger, createLogger } from '@aztec/foundation/log'; import { CommitteeAttestation, EthAddress } from '@aztec/stdlib/block'; @@ -153,6 +153,27 @@ describe('validateCheckpointAttestations', () => { expect(result.invalidIndex).toBe(0); }); + it('fails if an attestation signature has a high-s value (malleable signature)', async () => { + const checkpoint = await makeCheckpoint(signers.slice(0, 4), committee); + + // Flip the signature at index 2 to give it a high-s value + const original = checkpoint.attestations[2]; + const flipped = flipSignature(original.signature); + checkpoint.attestations[2] = new CommitteeAttestation(original.address, flipped); + + // Verify the flipped signature is detected as invalid + const attestations = getAttestationInfoFromPublishedCheckpoint(checkpoint); + expect(attestations[2].status).toBe('invalid-signature'); + + const result = await validateCheckpointAttestations(checkpoint, epochCache, constants, logger); + assert(!result.valid); + assert(result.reason === 'invalid-attestation'); + expect(result.checkpoint.checkpointNumber).toEqual(checkpoint.checkpoint.number); + expect(result.checkpoint.archive.toString()).toEqual(checkpoint.checkpoint.archive.root.toString()); + expect(result.committee).toEqual(committee); + expect(result.invalidIndex).toBe(2); + }); + it('reports correct index when invalid attestation follows provided address', async () => { const checkpoint = await makeCheckpoint(signers.slice(0, 3), committee); diff --git a/yarn-project/aztec/src/bin/index.ts b/yarn-project/aztec/src/bin/index.ts index c1565d92576f..55d55831457d 100644 --- a/yarn-project/aztec/src/bin/index.ts +++ 
b/yarn-project/aztec/src/bin/index.ts @@ -11,6 +11,7 @@ import { injectCommands as injectMiscCommands } from '@aztec/cli/misc'; import { injectCommands as injectValidatorKeysCommands } from '@aztec/cli/validator_keys'; import { getActiveNetworkName } from '@aztec/foundation/config'; import { createConsoleLogger, createLogger } from '@aztec/foundation/log'; +import { getPackageVersion } from '@aztec/stdlib/update-checker'; import { Command } from 'commander'; @@ -18,7 +19,6 @@ import { injectCompileCommand } from '../cli/cmds/compile.js'; import { injectMigrateCommand } from '../cli/cmds/migrate_ha_db.js'; import { injectProfileCommand } from '../cli/cmds/profile.js'; import { injectAztecCommands } from '../cli/index.js'; -import { getCliVersion } from '../cli/release_version.js'; const NETWORK_FLAG = 'network'; @@ -47,7 +47,7 @@ async function main() { await enrichEnvironmentWithNetworkConfig(networkName); enrichEnvironmentWithChainName(networkName); - const cliVersion = getCliVersion(); + const cliVersion = getPackageVersion() ?? 
'unknown'; let program = new Command('aztec'); program.description('Aztec command line interface').version(cliVersion).enablePositionalOptions(); program = injectAztecCommands(program, userLog, debugLogger); diff --git a/yarn-project/aztec/src/cli/aztec_start_action.ts b/yarn-project/aztec/src/cli/aztec_start_action.ts index 3b966865084e..4304d7160755 100644 --- a/yarn-project/aztec/src/cli/aztec_start_action.ts +++ b/yarn-project/aztec/src/cli/aztec_start_action.ts @@ -1,3 +1,4 @@ +import { getActiveNetworkName } from '@aztec/foundation/config'; import { type NamespacedApiHandlers, createNamespacedSafeJsonRpcServer, @@ -7,13 +8,13 @@ import { import type { LogFn, Logger } from '@aztec/foundation/log'; import type { ChainConfig } from '@aztec/stdlib/config'; import { AztecNodeApiSchema } from '@aztec/stdlib/interfaces/client'; +import { getPackageVersion } from '@aztec/stdlib/update-checker'; import { getVersioningMiddleware } from '@aztec/stdlib/versioning'; import { getOtelJsonRpcPropagationMiddleware } from '@aztec/telemetry-client'; import { createLocalNetwork } from '../local-network/index.js'; import { github, splash } from '../splash.js'; import { resolveAdminApiKey } from './admin_api_key_store.js'; -import { getCliVersion } from './release_version.js'; import { extractNamespacedOptions, installSignalHandlers } from './util.js'; import { getVersions } from './versioning.js'; @@ -25,7 +26,7 @@ export async function aztecStart(options: any, userLog: LogFn, debugLogger: Logg let config: ChainConfig | undefined = undefined; if (options.localNetwork) { - const cliVersion = getCliVersion(); + const cliVersion = getPackageVersion() ?? 
'unknown'; const localNetwork = extractNamespacedOptions(options, 'local-network'); localNetwork.testAccounts = true; userLog(`${splash}\n${github}\n\n`); @@ -57,7 +58,8 @@ export async function aztecStart(options: any, userLog: LogFn, debugLogger: Logg if (options.node) { const { startNode } = await import('./cmds/start_node.js'); - ({ config } = await startNode(options, signalHandlers, services, adminServices, userLog)); + const networkName = getActiveNetworkName(options.network); + ({ config } = await startNode(options, signalHandlers, services, adminServices, userLog, networkName)); } else if (options.bot) { const { startBot } = await import('./cmds/start_bot.js'); await startBot(options, signalHandlers, services, userLog); diff --git a/yarn-project/aztec/src/cli/aztec_start_options.ts b/yarn-project/aztec/src/cli/aztec_start_options.ts index 616613c7fb51..863291bf19c1 100644 --- a/yarn-project/aztec/src/cli/aztec_start_options.ts +++ b/yarn-project/aztec/src/cli/aztec_start_options.ts @@ -105,8 +105,7 @@ export const aztecStartOptions: { [key: string]: AztecStartOption[] } = { env: 'NETWORK', }, - configToFlag('--auto-update', sharedNodeConfigMappings.autoUpdate), - configToFlag('--auto-update-url', sharedNodeConfigMappings.autoUpdateUrl), + configToFlag('--enable-version-check', sharedNodeConfigMappings.enableVersionCheck), configToFlag('--sync-mode', sharedNodeConfigMappings.syncMode), configToFlag('--snapshots-urls', sharedNodeConfigMappings.snapshotsUrls), diff --git a/yarn-project/aztec/src/cli/cmds/start_node.ts b/yarn-project/aztec/src/cli/cmds/start_node.ts index a034cf3a6f5a..21839223820f 100644 --- a/yarn-project/aztec/src/cli/cmds/start_node.ts +++ b/yarn-project/aztec/src/cli/cmds/start_node.ts @@ -5,8 +5,8 @@ import { getSponsoredFPCAddress } from '@aztec/cli/cli-utils'; import { getL1Config } from '@aztec/cli/config'; import { getPublicClient } from '@aztec/ethereum/client'; import { RegistryContract, RollupContract } from 
'@aztec/ethereum/contracts'; -import { SecretValue } from '@aztec/foundation/config'; -import { EthAddress } from '@aztec/foundation/eth-address'; +import { type NetworkNames, SecretValue } from '@aztec/foundation/config'; +import type { EthAddress } from '@aztec/foundation/eth-address'; import type { NamespacedApiHandlers } from '@aztec/foundation/json-rpc/server'; import { startHttpRpcServer } from '@aztec/foundation/json-rpc/server'; import { Agent, makeUndiciFetch } from '@aztec/foundation/json-rpc/undici'; @@ -14,6 +14,7 @@ import type { LogFn } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; import { ProvingJobConsumerSchema, createProvingJobBrokerClient } from '@aztec/prover-client/broker'; import { type CliPXEOptions, type PXEConfig, allPxeConfigMappings } from '@aztec/pxe/config'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { AztecNodeAdminApiSchema, AztecNodeApiSchema } from '@aztec/stdlib/interfaces/client'; import { P2PApiSchema, ProverNodeApiSchema, type ProvingJobBroker } from '@aztec/stdlib/interfaces/server'; import { @@ -32,7 +33,7 @@ import { extractNamespacedOptions, extractRelevantOptions, preloadCrsDataForVerifying, - setupUpdateMonitor, + setupVersionChecker, } from '../util.js'; import { getVersions } from '../versioning.js'; import { startProverBroker } from './start_prover_broker.js'; @@ -109,6 +110,7 @@ export async function startNode( services: NamespacedApiHandlers, adminServices: NamespacedApiHandlers, userLog: LogFn, + networkName: NetworkNames, ): Promise<{ config: AztecNodeConfig }> { // All options set from environment variables const configFromEnvVars = getConfigEnvVars(); @@ -154,7 +156,8 @@ export async function startNode( const testAccounts = nodeConfig.testAccounts ? (await getInitialTestAccountsData()).map(a => a.address) : []; const sponsoredFPCAccounts = nodeConfig.sponsoredFPC ? 
[await getSponsoredFPCAddress()] : []; - const initialFundedAccounts = testAccounts.concat(sponsoredFPCAccounts); + const prefundAddresses = (nodeConfig.prefundAddresses ?? []).map(a => AztecAddress.fromString(a)); + const initialFundedAccounts = testAccounts.concat(sponsoredFPCAccounts).concat(prefundAddresses); userLog(`Initial funded accounts: ${initialFundedAccounts.map(a => a.toString()).join(', ')}`); @@ -268,16 +271,19 @@ export async function startNode( await addBot(options, signalHandlers, services, wallet, node, telemetry, undefined); } - if (nodeConfig.autoUpdate !== 'disabled' && nodeConfig.autoUpdateUrl) { - await setupUpdateMonitor( - nodeConfig.autoUpdate, - new URL(nodeConfig.autoUpdateUrl), - followsCanonicalRollup, - getPublicClient(nodeConfig!), - nodeConfig.l1Contracts.registryAddress, - signalHandlers, - async config => node.setConfig((await AztecNodeAdminApiSchema.setConfig.parameters().parseAsync([config]))[0]), - ); + if (nodeConfig.enableVersionCheck && networkName !== 'local') { + const cacheDir = process.env.DATA_DIRECTORY ? 
`${process.env.DATA_DIRECTORY}/cache` : undefined; + try { + await setupVersionChecker( + networkName, + followsCanonicalRollup, + getPublicClient(nodeConfig!), + signalHandlers, + cacheDir, + ); + } catch { + /* no-op */ + } } return { config: nodeConfig }; diff --git a/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts b/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts index ae3d087b02dd..75c320265f5b 100644 --- a/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts +++ b/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts @@ -1,5 +1,4 @@ import { getL1Config } from '@aztec/cli/config'; -import { getPublicClient } from '@aztec/ethereum/client'; import type { NamespacedApiHandlers } from '@aztec/foundation/json-rpc/server'; import type { LogFn } from '@aztec/foundation/log'; import { @@ -13,7 +12,7 @@ import { getProverNodeBrokerConfigFromEnv } from '@aztec/prover-node'; import type { ProvingJobBroker } from '@aztec/stdlib/interfaces/server'; import { getConfigEnvVars as getTelemetryClientConfig, initTelemetryClient } from '@aztec/telemetry-client'; -import { extractRelevantOptions, setupUpdateMonitor } from '../util.js'; +import { extractRelevantOptions } from '../util.js'; export async function startProverBroker( options: any, @@ -35,7 +34,6 @@ export async function startProverBroker( throw new Error('L1 registry address is required to start Aztec Node without --deploy-aztec-contracts option'); } - const followsCanonicalRollup = typeof config.rollupVersion !== 'number'; const { addresses, config: rollupConfig } = await getL1Config( config.l1Contracts.registryAddress, config.l1RpcUrls, @@ -49,17 +47,6 @@ export async function startProverBroker( const client = await initTelemetryClient(getTelemetryClientConfig()); const broker = await createAndStartProvingBroker(config, client); - if (options.autoUpdate !== 'disabled' && options.autoUpdateUrl) { - await setupUpdateMonitor( - options.autoUpdate, - new URL(options.autoUpdateUrl), - 
followsCanonicalRollup, - getPublicClient(config), - config.l1Contracts.registryAddress, - signalHandlers, - ); - } - services.proverBroker = [ broker, config.proverBrokerDebugReplayEnabled ? ProvingJobBrokerSchemaWithDebug : ProvingJobBrokerSchema, diff --git a/yarn-project/aztec/src/cli/release_version.ts b/yarn-project/aztec/src/cli/release_version.ts deleted file mode 100644 index fa00edbe31d9..000000000000 --- a/yarn-project/aztec/src/cli/release_version.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { fileURLToPath } from '@aztec/foundation/url'; - -import { readFileSync } from 'fs'; -import { dirname, resolve } from 'path'; - -export const getCliVersion = () => { - const packageJsonPath = resolve(dirname(fileURLToPath(import.meta.url)), '../../package.json'); - const cliVersion: string = JSON.parse(readFileSync(packageJsonPath).toString()).version; - - // If the version is 0.1.0, this is a placeholder version and we are in a docker container; query release please for the latest version - if (cliVersion === '0.1.0') { - const releasePleasePath = resolve( - dirname(fileURLToPath(import.meta.url)), - '../../../../.release-please-manifest.json', - ); - const releaseVersion = JSON.parse(readFileSync(releasePleasePath).toString())['.']; - return releaseVersion; - } - - return cliVersion; -}; diff --git a/yarn-project/aztec/src/cli/util.ts b/yarn-project/aztec/src/cli/util.ts index f8df0184ca63..0b650d7d0fdc 100644 --- a/yarn-project/aztec/src/cli/util.ts +++ b/yarn-project/aztec/src/cli/util.ts @@ -1,17 +1,18 @@ import type { AztecNodeConfig } from '@aztec/aztec-node'; import type { AccountManager } from '@aztec/aztec.js/wallet'; +import { getNetworkConfig } from '@aztec/cli/config'; +import { RegistryContract } from '@aztec/ethereum/contracts'; import type { ViemClient } from '@aztec/ethereum/types'; -import type { ConfigMappingsType } from '@aztec/foundation/config'; -import { EthAddress } from '@aztec/foundation/eth-address'; +import type { ConfigMappingsType, 
NetworkNames } from '@aztec/foundation/config'; import { jsonStringify } from '@aztec/foundation/json-rpc'; import { type LogFn, createLogger } from '@aztec/foundation/log'; -import type { SharedNodeConfig } from '@aztec/node-lib/config'; import type { ProverConfig } from '@aztec/stdlib/interfaces/server'; -import { getTelemetryClient } from '@aztec/telemetry-client/start'; +import { type VersionCheck, getPackageVersion } from '@aztec/stdlib/update-checker'; import type { EmbeddedWallet } from '@aztec/wallets/embedded'; import chalk from 'chalk'; import type { Command } from 'commander'; +import type { Hex } from 'viem'; import { type AztecStartOption, aztecStartOptions } from './aztec_start_options.js'; @@ -290,92 +291,58 @@ export async function preloadCrsDataForServerSideProving( } } -export async function setupUpdateMonitor( - autoUpdateMode: SharedNodeConfig['autoUpdate'], - updatesLocation: URL, +export async function setupVersionChecker( + network: NetworkNames, followsCanonicalRollup: boolean, publicClient: ViemClient, - registryContractAddress: EthAddress, signalHandlers: Array<() => Promise>, - updateNodeConfig?: (config: object) => Promise, -) { - const logger = createLogger('update-check'); - const { UpdateChecker } = await import('@aztec/stdlib/update-checker'); - const checker = await UpdateChecker.new({ - baseURL: updatesLocation, - publicClient, - registryContractAddress, - }); + cacheDir?: string, +): Promise { + const networkConfig = await getNetworkConfig(network, cacheDir); + if (!networkConfig) { + return; + } - // eslint-disable-next-line @typescript-eslint/no-misused-promises - checker.on('newRollupVersion', async ({ latestVersion, currentVersion }) => { - if (isShuttingDown()) { - return; - } + const { VersionChecker } = await import('@aztec/stdlib/update-checker'); - // if node follows canonical rollup then this is equivalent to a config update - if (!followsCanonicalRollup) { - return; - } + const logger = createLogger('version_check'); + 
const registry = new RegistryContract(publicClient, networkConfig.registryAddress as Hex); - if (autoUpdateMode === 'config' || autoUpdateMode === 'config-and-version') { - logger.info(`New rollup version detected. Please restart the node`, { latestVersion, currentVersion }); - await shutdown(logger.info, ExitCode.ROLLUP_UPGRADE, signalHandlers); - } else if (autoUpdateMode === 'notify') { - logger.warn(`New rollup detected. Please restart the node`, { latestVersion, currentVersion }); - } + const checks: Array = []; + checks.push({ + name: 'node', + currentVersion: getPackageVersion() ?? 'unknown', + getLatestVersion: async () => { + const cfg = await getNetworkConfig(network, cacheDir); + return cfg?.nodeVersion; + }, }); - // eslint-disable-next-line @typescript-eslint/no-misused-promises - checker.on('newNodeVersion', async ({ latestVersion, currentVersion }) => { - if (isShuttingDown()) { - return; - } - if (autoUpdateMode === 'config-and-version') { - logger.info(`New node version detected. Please update and restart the node`, { latestVersion, currentVersion }); - await shutdown(logger.info, ExitCode.VERSION_UPGRADE, signalHandlers); - } else if (autoUpdateMode === 'notify') { - logger.info(`New node version detected. Please update and restart the node`, { latestVersion, currentVersion }); + if (followsCanonicalRollup) { + const getLatestVersion = async () => { + const version = (await registry.getRollupVersions()).at(-1); + return version !== undefined ? 
String(version) : undefined; + }; + const currentVersion = await getLatestVersion(); + if (currentVersion !== undefined) { + checks.push({ + name: 'rollup', + currentVersion, + getLatestVersion, + }); } - }); + } - // eslint-disable-next-line @typescript-eslint/no-misused-promises - checker.on('updateNodeConfig', async config => { + const checker = new VersionChecker(checks, 600_000, logger); + checker.on('newVersion', ({ name, latestVersion, currentVersion }) => { if (isShuttingDown()) { return; } - if ((autoUpdateMode === 'config' || autoUpdateMode === 'config-and-version') && updateNodeConfig) { - logger.warn(`Config change detected. Updating node`, config); - try { - await updateNodeConfig(config); - } catch (err) { - logger.warn('Failed to update config', { err }); - } - } - // don't notify on these config changes - }); - - checker.on('updatePublicTelemetryConfig', config => { - if (autoUpdateMode === 'config' || autoUpdateMode === 'config-and-version') { - logger.warn(`Public telemetry config change detected. 
Updating telemetry client`, config); - try { - const publicIncludeMetrics: unknown = (config as any).publicIncludeMetrics; - if (Array.isArray(publicIncludeMetrics) && publicIncludeMetrics.every(m => typeof m === 'string')) { - getTelemetryClient().setExportedPublicTelemetry(publicIncludeMetrics); - } - const publicMetricsCollectFrom: unknown = (config as any).publicMetricsCollectFrom; - if (Array.isArray(publicMetricsCollectFrom) && publicMetricsCollectFrom.every(m => typeof m === 'string')) { - getTelemetryClient().setPublicTelemetryCollectFrom(publicMetricsCollectFrom); - } - } catch (err) { - logger.warn('Failed to update config', { err }); - } - } - // don't notify on these config changes + logger.warn(`New ${name} version available`, { latestVersion, currentVersion }); }); - checker.start(); + signalHandlers.push(() => checker.stop()); } export function stringifyConfig(config: object): string { diff --git a/yarn-project/aztec/src/local-network/local-network.ts b/yarn-project/aztec/src/local-network/local-network.ts index 1b3882881359..a2d04e8b22ab 100644 --- a/yarn-project/aztec/src/local-network/local-network.ts +++ b/yarn-project/aztec/src/local-network/local-network.ts @@ -19,6 +19,7 @@ import { DateProvider, TestDateProvider } from '@aztec/foundation/timer'; import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree'; import { protocolContractsHash } from '@aztec/protocol-contracts'; import { SequencerState } from '@aztec/sequencer-client'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; import type { ProvingJobBroker } from '@aztec/stdlib/interfaces/server'; import type { PublicDataTreeLeaf } from '@aztec/stdlib/trees'; import { @@ -138,9 +139,12 @@ export async function createLocalNetwork(config: Partial = { const bananaFPC = await getBananaFPCAddress(initialAccounts); const sponsoredFPC = await getSponsoredFPCAddress(); - const fundedAddresses = initialAccounts.length - ? 
[...initialAccounts.map(a => a.address), bananaFPC, sponsoredFPC] - : []; + const prefundAddresses = (aztecNodeConfig.prefundAddresses ?? []).map(a => AztecAddress.fromString(a)); + const fundedAddresses = [ + ...initialAccounts.map(a => a.address), + ...(initialAccounts.length ? [bananaFPC, sponsoredFPC] : []), + ...prefundAddresses, + ]; const { genesisArchiveRoot, prefilledPublicData, fundingNeeded } = await getGenesisValues(fundedAddresses); const dateProvider = new TestDateProvider(); diff --git a/yarn-project/cli/src/config/cached_fetch.test.ts b/yarn-project/cli/src/config/cached_fetch.test.ts new file mode 100644 index 000000000000..186bdb4ae2bf --- /dev/null +++ b/yarn-project/cli/src/config/cached_fetch.test.ts @@ -0,0 +1,243 @@ +import { jest } from '@jest/globals'; +import { mkdir, readFile, rm, writeFile } from 'fs/promises'; +import { tmpdir } from 'os'; +import { join } from 'path'; + +import { cachedFetch, parseMaxAge } from './cached_fetch.js'; + +describe('cachedFetch', () => { + let tempDir: string; + let cacheFile: string; + let metaFile: string; + let mockFetch: jest.Mock; + const noopLog: any = { trace: () => {}, warn: () => {}, info: () => {} }; + + beforeEach(async () => { + tempDir = join(tmpdir(), `cached-fetch-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); + await mkdir(tempDir, { recursive: true }); + cacheFile = join(tempDir, 'cache.json'); + metaFile = cacheFile + '.meta'; + mockFetch = jest.fn(); + }); + + afterEach(async () => { + await rm(tempDir, { recursive: true, force: true }); + }); + + function mockResponse(body: any, init?: { status?: number; headers?: Record }): Response { + const status = init?.status ?? 200; + const headers = new Headers(init?.headers ?? {}); + return { + ok: status >= 200 && status < 300, + status, + statusText: status === 304 ? 
'Not Modified' : 'OK', + headers, + json: () => Promise.resolve(body), + } as Response; + } + + async function writeCacheFiles(data: any, opts?: { etag?: string; expiresAt?: number }) { + await writeFile(cacheFile, JSON.stringify(data), 'utf-8'); + await writeFile( + metaFile, + JSON.stringify({ etag: opts?.etag, expiresAt: opts?.expiresAt ?? Date.now() + 60_000 }), + 'utf-8', + ); + } + + it('returns cached data without fetching when cache is fresh', async () => { + const data = { key: 'cached-value' }; + await writeCacheFiles(data, { expiresAt: Date.now() + 60_000 }); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(data); + expect(mockFetch).not.toHaveBeenCalled(); + }); + + it('sends conditional request with If-None-Match when cache is stale and has ETag', async () => { + const data = { key: 'stale-value' }; + await writeCacheFiles(data, { etag: '"abc123"', expiresAt: Date.now() - 1000 }); + + mockFetch.mockResolvedValue( + mockResponse(null, { + status: 304, + headers: { 'cache-control': 'max-age=300' }, + }), + ); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(data); + expect(mockFetch).toHaveBeenCalledWith('https://example.com/data.json', { + headers: { 'If-None-Match': '"abc123"' }, + }); + + // Data file should be unchanged + expect(JSON.parse(await readFile(cacheFile, 'utf-8'))).toEqual(data); + // Meta file should have updated expiry + const meta = JSON.parse(await readFile(metaFile, 'utf-8')); + expect(meta.expiresAt).toBeGreaterThan(Date.now()); + }); + + it('returns new data and stores ETag on 200 response', async () => { + const staleData = { key: 'old' }; + const freshData = { key: 'new' }; + await writeCacheFiles(staleData, { etag: '"old-etag"', expiresAt: Date.now() - 1000 }); + + mockFetch.mockResolvedValue( + mockResponse(freshData, { + status: 200, + headers: { etag: 
'"new-etag"', 'cache-control': 'max-age=600' }, + }), + ); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(freshData); + + // Data file should have new data (raw JSON) + expect(JSON.parse(await readFile(cacheFile, 'utf-8'))).toEqual(freshData); + // Meta file should have new ETag and expiry + const meta = JSON.parse(await readFile(metaFile, 'utf-8')); + expect(meta.etag).toBe('"new-etag"'); + expect(meta.expiresAt).toBeGreaterThan(Date.now()); + }); + + it('fetches normally without caching when no cacheFile is provided', async () => { + const data = { key: 'no-cache' }; + mockFetch.mockResolvedValue(mockResponse(data)); + + const result = await cachedFetch('https://example.com/data.json', {}, mockFetch, noopLog); + + expect(result).toEqual(data); + expect(mockFetch).toHaveBeenCalledWith('https://example.com/data.json'); + }); + + it('falls back to normal fetch when metadata file is missing', async () => { + // Write only data file, no meta file (simulates upgrade from old code) + await writeFile(cacheFile, JSON.stringify({ key: 'old-format' }), 'utf-8'); + + const freshData = { key: 'fresh' }; + mockFetch.mockResolvedValue( + mockResponse(freshData, { + status: 200, + headers: { 'cache-control': 'max-age=300' }, + }), + ); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(freshData); + // Should have fetched without If-None-Match since no meta + expect(mockFetch).toHaveBeenCalledWith('https://example.com/data.json', { headers: {} }); + }); + + it('falls back to normal fetch when metadata file is corrupt', async () => { + await writeFile(cacheFile, JSON.stringify({ key: 'data' }), 'utf-8'); + await writeFile(metaFile, 'not-json!!!', 'utf-8'); + + const freshData = { key: 'fresh' }; + mockFetch.mockResolvedValue( + mockResponse(freshData, { + status: 200, + headers: { 'cache-control': 'max-age=300' }, 
+ }), + ); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(freshData); + expect(mockFetch).toHaveBeenCalledWith('https://example.com/data.json', { headers: {} }); + }); + + it('falls back to normal fetch when data file is missing but metadata exists', async () => { + await writeFile(metaFile, JSON.stringify({ etag: '"abc"', expiresAt: Date.now() + 60_000 }), 'utf-8'); + + const freshData = { key: 'fresh' }; + mockFetch.mockResolvedValue( + mockResponse(freshData, { + status: 200, + headers: { 'cache-control': 'max-age=300' }, + }), + ); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(freshData); + // Should not send If-None-Match since data is missing + expect(mockFetch).toHaveBeenCalledWith('https://example.com/data.json', { headers: {} }); + }); + + it('uses defaultMaxAgeMs when server sends no Cache-Control header', async () => { + const data = { key: 'value' }; + mockFetch.mockResolvedValue( + mockResponse(data, { + status: 200, + headers: { etag: '"some-etag"' }, + }), + ); + + const defaultMaxAgeMs = 120_000; // 2 minutes + const before = Date.now(); + await cachedFetch('https://example.com/data.json', { cacheFile, defaultMaxAgeMs }, mockFetch, noopLog); + + const meta = JSON.parse(await readFile(metaFile, 'utf-8')); + expect(meta.expiresAt).toBeGreaterThanOrEqual(before + defaultMaxAgeMs); + expect(meta.expiresAt).toBeLessThanOrEqual(Date.now() + defaultMaxAgeMs); + }); + + it('returns stale cache data when fetch fails', async () => { + const data = { key: 'stale-fallback' }; + await writeCacheFiles(data, { expiresAt: Date.now() - 1000 }); + + mockFetch.mockRejectedValue(new Error('Network error')); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(data); + }); + + it('returns stale cache data when server 
returns non-ok status', async () => { + const data = { key: 'stale-server-error' }; + await writeCacheFiles(data, { expiresAt: Date.now() - 1000 }); + + mockFetch.mockResolvedValue(mockResponse(null, { status: 500 })); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toEqual(data); + }); + + it('returns undefined when fetch fails and no cache exists', async () => { + mockFetch.mockRejectedValue(new Error('Network error')); + + const result = await cachedFetch('https://example.com/data.json', { cacheFile }, mockFetch, noopLog); + + expect(result).toBeUndefined(); + }); +}); + +describe('parseMaxAge', () => { + it('extracts max-age from Cache-Control header', () => { + const response = { headers: { get: (name: string) => (name === 'cache-control' ? 'max-age=300' : null) } }; + expect(parseMaxAge(response)).toBe(300_000); + }); + + it('handles max-age with other directives', () => { + const response = { + headers: { get: (name: string) => (name === 'cache-control' ? 'public, max-age=600, must-revalidate' : null) }, + }; + expect(parseMaxAge(response)).toBe(600_000); + }); + + it('returns undefined when no Cache-Control header', () => { + const response = { headers: { get: () => null } }; + expect(parseMaxAge(response)).toBeUndefined(); + }); + + it('returns undefined when no max-age in Cache-Control', () => { + const response = { headers: { get: (name: string) => (name === 'cache-control' ? 
'no-cache' : null) } }; + expect(parseMaxAge(response)).toBeUndefined(); + }); +}); diff --git a/yarn-project/cli/src/config/cached_fetch.ts b/yarn-project/cli/src/config/cached_fetch.ts index 74518805c11f..37c745f94a50 100644 --- a/yarn-project/cli/src/config/cached_fetch.ts +++ b/yarn-project/cli/src/config/cached_fetch.ts @@ -1,24 +1,48 @@ import { createLogger } from '@aztec/aztec.js/log'; -import { mkdir, readFile, stat, writeFile } from 'fs/promises'; +import { mkdir, readFile, writeFile } from 'fs/promises'; import { dirname } from 'path'; export interface CachedFetchOptions { - /** Cache duration in milliseconds */ - cacheDurationMs: number; - /** The cache file */ + /** The cache file path for storing data. If not provided, no caching is performed. */ cacheFile?: string; + /** Fallback max-age in milliseconds when server sends no Cache-Control header. Defaults to 5 minutes. */ + defaultMaxAgeMs?: number; +} + +/** Cache metadata stored in a sidecar .meta file alongside the data file. */ +interface CacheMeta { + etag?: string; + expiresAt: number; +} + +const DEFAULT_MAX_AGE_MS = 5 * 60 * 1000; // 5 minutes + +/** Extracts max-age value in milliseconds from a Response's Cache-Control header. Returns undefined if not present. */ +export function parseMaxAge(response: { headers: { get(name: string): string | null } }): number | undefined { + const cacheControl = response.headers.get('cache-control'); + if (!cacheControl) { + return undefined; + } + const match = cacheControl.match(/max-age=(\d+)/); + if (!match) { + return undefined; + } + return parseInt(match[1], 10) * 1000; } /** - * Fetches data from a URL with file-based caching support. - * This utility can be used by both remote config and bootnodes fetching. + * Fetches data from a URL with file-based HTTP conditional caching. + * + * Data is stored as raw JSON in the cache file (same format as the server returns). + * Caching metadata (ETag, expiry) is stored in a separate sidecar `.meta` file. 
+ * This keeps the data file human-readable and backward-compatible with older code. * * @param url - The URL to fetch from - * @param networkName - Network name for cache directory structure - * @param options - Caching and error handling options - * @param cacheDir - Optional cache directory (defaults to no caching) - * @returns The fetched and parsed JSON data, or undefined if fetch fails and throwOnError is false + * @param options - Caching options + * @param fetch - Fetch implementation (defaults to globalThis.fetch) + * @param log - Logger instance + * @returns The fetched and parsed JSON data, or undefined if fetch fails */ export async function cachedFetch( url: string, @@ -26,42 +50,106 @@ export async function cachedFetch( fetch = globalThis.fetch, log = createLogger('cached_fetch'), ): Promise { - const { cacheDurationMs, cacheFile } = options; + const { cacheFile, defaultMaxAgeMs = DEFAULT_MAX_AGE_MS } = options; + + // If no cacheFile, just fetch normally without caching + if (!cacheFile) { + return fetchAndParse(url, fetch, log); + } + + const metaFile = cacheFile + '.meta'; - // Try to read from cache first + // Try to read metadata + let meta: CacheMeta | undefined; try { - if (cacheFile) { - const info = await stat(cacheFile); - if (info.mtimeMs + cacheDurationMs > Date.now()) { - const cachedData = JSON.parse(await readFile(cacheFile, 'utf-8')); - return cachedData; - } - } + meta = JSON.parse(await readFile(metaFile, 'utf-8')); } catch { - log.trace('Failed to read data from cache'); + log.trace('No usable cache metadata found'); } + // Try to read cached data + let cachedData: T | undefined; try { - const response = await fetch(url); + cachedData = JSON.parse(await readFile(cacheFile, 'utf-8')); + } catch { + log.trace('No usable cached data found'); + } + + // If metadata and data exist and cache is fresh, return directly + if (meta && cachedData !== undefined && meta.expiresAt > Date.now()) { + return cachedData; + } + + // Cache is stale or 
missing — make a (possibly conditional) request + try { + const headers: Record = {}; + if (meta?.etag && cachedData !== undefined) { + headers['If-None-Match'] = meta.etag; + } + + const response = await fetch(url, { headers }); + + if (response.status === 304 && cachedData !== undefined) { + // Not modified — recompute expiry from new response headers and return cached data + const maxAgeMs = parseMaxAge(response) ?? defaultMaxAgeMs; + await writeMetaFile(metaFile, { etag: meta?.etag, expiresAt: Date.now() + maxAgeMs }, log); + return cachedData; + } + if (!response.ok) { log.warn(`Failed to fetch from ${url}: ${response.status} ${response.statusText}`); - return undefined; + return cachedData; } - const data = await response.json(); + // 200 — parse new data and cache it + const data = (await response.json()) as T; + const maxAgeMs = parseMaxAge(response) ?? defaultMaxAgeMs; + const etag = response.headers.get('etag') ?? undefined; - try { - if (cacheFile) { - await mkdir(dirname(cacheFile), { recursive: true }); - await writeFile(cacheFile, JSON.stringify(data), 'utf-8'); - } - } catch (err) { - log.warn('Failed to cache data on disk: ' + cacheFile, { cacheFile, err }); - } + await ensureDir(cacheFile, log); + await Promise.all([ + writeFile(cacheFile, JSON.stringify(data), 'utf-8'), + writeFile(metaFile, JSON.stringify({ etag, expiresAt: Date.now() + maxAgeMs }), 'utf-8'), + ]); return data; + } catch (err) { + log.warn(`Failed to fetch from ${url}`, { err }); + return cachedData; + } +} + +async function fetchAndParse( + url: string, + fetch: typeof globalThis.fetch, + log: ReturnType, +): Promise { + try { + const response = await fetch(url); + if (!response.ok) { + log.warn(`Failed to fetch from ${url}: ${response.status} ${response.statusText}`); + return undefined; + } + return (await response.json()) as T; } catch (err) { log.warn(`Failed to fetch from ${url}`, { err }); return undefined; } } + +async function ensureDir(filePath: string, log: ReturnType) 
{ + try { + await mkdir(dirname(filePath), { recursive: true }); + } catch (err) { + log.warn('Failed to create cache directory for: ' + filePath, { err }); + } +} + +async function writeMetaFile(metaFile: string, meta: CacheMeta, log: ReturnType) { + try { + await mkdir(dirname(metaFile), { recursive: true }); + await writeFile(metaFile, JSON.stringify(meta), 'utf-8'); + } catch (err) { + log.warn('Failed to write cache metadata: ' + metaFile, { err }); + } +} diff --git a/yarn-project/cli/src/config/network_config.ts b/yarn-project/cli/src/config/network_config.ts index 820f5f1b5da5..998acadae315 100644 --- a/yarn-project/cli/src/config/network_config.ts +++ b/yarn-project/cli/src/config/network_config.ts @@ -9,7 +9,6 @@ import { enrichEthAddressVar, enrichVar } from './enrich_env.js'; const DEFAULT_CONFIG_URL = 'https://raw.githubusercontent.com/AztecProtocol/networks/refs/heads/main/network_config.json'; const FALLBACK_CONFIG_URL = 'https://metadata.aztec.network/network_config.json'; -const NETWORK_CONFIG_CACHE_DURATION_MS = 60 * 60 * 1000; // 1 hour /** * Fetches remote network configuration from GitHub with caching support. @@ -87,7 +86,6 @@ async function fetchNetworkConfigFromUrl( if (url.protocol === 'http:' || url.protocol === 'https:') { rawConfig = await cachedFetch(url.href, { - cacheDurationMs: NETWORK_CONFIG_CACHE_DURATION_MS, cacheFile: cacheDir ? 
join(cacheDir, networkName, 'network_config.json') : undefined, }); } else if (url.protocol === 'file:') { diff --git a/yarn-project/end-to-end/src/e2e_bot.test.ts b/yarn-project/end-to-end/src/e2e_bot.test.ts index 9ed66d5b27f6..00c5b06e16c3 100644 --- a/yarn-project/end-to-end/src/e2e_bot.test.ts +++ b/yarn-project/end-to-end/src/e2e_bot.test.ts @@ -134,6 +134,8 @@ describe('e2e_bot', () => { // TODO: this should be taken from the `setup` call above l1Mnemonic: new SecretValue('test test test test test test test test test test test junk'), flushSetupTransactions: true, + // Increase fee headroom to handle fee volatility from rapid block building in tests + minFeePadding: 9, }; { @@ -172,6 +174,8 @@ describe('e2e_bot', () => { // TODO: this should be taken from the `setup` call above l1Mnemonic: new SecretValue('test test test test test test test test test test test junk'), flushSetupTransactions: true, + // Increase fee headroom to handle fee volatility from rapid block building in tests + minFeePadding: 9, }; { diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts index 80c2fec76ff0..e4740dde721d 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts @@ -25,7 +25,6 @@ import { privateKeyToAccount } from 'viem/accounts'; import { getAnvilPort } from '../fixtures/fixtures.js'; import { type EndToEndContext, getPrivateKeyFromIndex } from '../fixtures/utils.js'; -import { proveInteraction } from '../test-wallet/utils.js'; import { EpochsTestContext } from './epochs_test.js'; jest.setTimeout(1000 * 60 * 10); @@ -109,6 +108,120 @@ describe('e2e_epochs/epochs_invalidate_block', () => { await test.teardown(); }); + /** + * Configures all sequencers with an attack config, enables the attack for a single checkpoint, + * disables it 
after the first checkpoint is mined (also stopping block production), and waits + * for the checkpoint to be invalidated. Verifies the chain rolled back to the initial state. + */ + async function runInvalidationTest(opts: { + attackConfig: Record; + disableConfig: Record; + }) { + const sequencers = nodes.map(node => node.getSequencer()!); + const initialCheckpointNumber = (await nodes[0].getL2Tips()).checkpointed.checkpoint.number; + + sequencers.forEach(sequencer => { + sequencer.updateConfig({ ...opts.attackConfig, minTxsPerBlock: 0 }); + }); + + // Disable the attack after the first checkpoint is mined and prevent further block production + test.monitor.once('checkpoint', ({ checkpointNumber }) => { + logger.warn(`Disabling attack after checkpoint ${checkpointNumber} has been mined`); + sequencers.forEach(sequencer => { + sequencer.updateConfig({ ...opts.disableConfig, minTxsPerBlock: 100 }); + }); + }); + + await Promise.all(sequencers.map(s => s.start())); + + // Wait for the CheckpointInvalidated event + const checkpointInvalidatedFilter = await l1Client.createContractEventFilter({ + address: rollupContract.address, + abi: RollupAbi, + eventName: 'CheckpointInvalidated', + fromBlock: 1n, + toBlock: 'latest', + }); + + const checkpointInvalidatedEvents = await retryUntil( + async () => { + const events = await l1Client.getFilterLogs({ filter: checkpointInvalidatedFilter }); + return events.length > 0 ? 
events : undefined; + }, + 'CheckpointInvalidated event', + test.L2_SLOT_DURATION_IN_S * 5, + 0.1, + ); + + // Verify the checkpoint was invalidated and the chain rolled back + const [event] = checkpointInvalidatedEvents; + logger.warn(`CheckpointInvalidated event emitted`, { event }); + expect(event.args.checkpointNumber).toBeGreaterThan(initialCheckpointNumber); + expect(await test.rollup.getCheckpointNumber()).toEqual(initialCheckpointNumber); + + logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); + } + + /** + * Configures all sequencers with an attack config, starts them, waits for two consecutive + * invalidations of the same checkpoint (confirming the invalid-then-re-invalidated pattern), + * disables the attack, and verifies the chain progresses and all nodes sync. + */ + async function runDoubleInvalidationTest(opts: { + attackConfig: Record; + disableConfig: Record; + }) { + const sequencers = nodes.map(node => node.getSequencer()!); + sequencers.forEach(sequencer => { + sequencer.updateConfig({ ...opts.attackConfig, minTxsPerBlock: 0 }); + }); + + await Promise.all(sequencers.map(s => s.start())); + + // Wait until we see two invalidations, both should be for the same checkpoint + let lastInvalidatedCheckpointNumber: CheckpointNumber | undefined; + const invalidatePromise = promiseWithResolvers(); + const unsubscribe = rollupContract.listenToCheckpointInvalidated(data => { + logger.warn(`Checkpoint ${data.checkpointNumber} has been invalidated`, data); + if (lastInvalidatedCheckpointNumber === undefined) { + lastInvalidatedCheckpointNumber = data.checkpointNumber; + } else { + expect(data.checkpointNumber).toEqual(lastInvalidatedCheckpointNumber); + invalidatePromise.resolve(); + unsubscribe(); + } + }); + await Promise.race([ + timeoutPromise(1000 * test.L2_SLOT_DURATION_IN_S * 8, 'Waiting for two checkpoint invalidations'), + invalidatePromise.promise, + ]); + + sequencers.forEach(sequencer => { + 
sequencer.updateConfig(opts.disableConfig); + }); + + // Ensure chain progresses + const targetCheckpointNumber = CheckpointNumber(lastInvalidatedCheckpointNumber! + 2); + logger.warn(`Waiting until checkpoint ${targetCheckpointNumber} has been mined`); + await test.monitor.waitUntilCheckpoint(targetCheckpointNumber); + + // Wait for all nodes to sync + const targetBlock = targetCheckpointNumber; + logger.warn(`Waiting for all nodes to sync to block ${targetBlock}`); + await retryUntil( + async () => { + const blockNumbers = await Promise.all(nodes.map(node => node.getBlockNumber())); + logger.info(`Node synced block numbers: ${blockNumbers.join(', ')}`); + return blockNumbers.every(bn => bn > targetBlock); + }, + 'Node sync check', + test.L2_SLOT_DURATION_IN_S * 5, + 0.5, + ); + + logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); + } + it('proposer invalidates previous checkpoint with multiple blocks while posting its own', async () => { const sequencers = nodes.map(node => node.getSequencer()!); const [initialCheckpointNumber, initialBlockNumber] = await nodes[0] @@ -213,123 +326,46 @@ describe('e2e_epochs/epochs_invalidate_block', () => { // Slot S+1: Checkpoint N is invalidated, and checkpoint N' (same number) is proposed instead, but also has invalid attestations // Slot S+2: Proposer tries to invalidate checkpoint N, when they should invalidate checkpoint N' instead, and fails it('chain progresses if a checkpoint with insufficient attestations is invalidated with an invalid one', async () => { - // Configure all sequencers to skip collecting attestations before starting and always build blocks - logger.warn('Configuring all sequencers to skip attestation collection'); - const sequencers = nodes.map(node => node.getSequencer()!); - sequencers.forEach(sequencer => { - sequencer.updateConfig({ skipCollectingAttestations: true, minTxsPerBlock: 0 }); + await runDoubleInvalidationTest({ + attackConfig: { skipCollectingAttestations: true }, + 
disableConfig: { skipCollectingAttestations: false }, }); - - // Start all sequencers - await Promise.all(sequencers.map(s => s.start())); - logger.warn(`Started all sequencers with skipCollectingAttestations=true`); - - // Wait until we see two invalidations, both should be for the same checkpoint - let lastInvalidatedCheckpointNumber: CheckpointNumber | undefined; - const invalidatePromise = promiseWithResolvers(); - const unsubscribe = rollupContract.listenToCheckpointInvalidated(data => { - logger.warn(`Checkpoint ${data.checkpointNumber} has been invalidated`, data); - if (lastInvalidatedCheckpointNumber === undefined) { - lastInvalidatedCheckpointNumber = data.checkpointNumber; - } else { - expect(data.checkpointNumber).toEqual(lastInvalidatedCheckpointNumber); - invalidatePromise.resolve(); - unsubscribe(); - } - }); - await Promise.race([timeoutPromise(1000 * test.L2_SLOT_DURATION_IN_S * 8), invalidatePromise.promise]); - - // Disable skipCollectingAttestations and send txs so MBPS can produce multi-block checkpoints - sequencers.forEach(sequencer => { - sequencer.updateConfig({ skipCollectingAttestations: false }); - }); - logger.warn('Sending transactions to enable multi-block checkpoints'); - const from = context.accounts[0]; - for (let i = 0; i < 4; i++) { - const tx = await proveInteraction(context.wallet, testContract.methods.emit_nullifier(new Fr(100 + i)), { from }); - await tx.send({ wait: NO_WAIT }); - } - - // Ensure chain progresses - const targetCheckpointNumber = CheckpointNumber(lastInvalidatedCheckpointNumber! 
+ 2); - logger.warn(`Waiting until checkpoint ${targetCheckpointNumber} has been mined`); - await test.monitor.waitUntilCheckpoint(targetCheckpointNumber); - - // Wait for all nodes to sync the new block - const targetBlock = targetCheckpointNumber; - logger.warn(`Waiting for all nodes to sync to block ${targetBlock}`); - await retryUntil( - async () => { - const blockNumbers = await Promise.all(nodes.map(node => node.getBlockNumber())); - logger.info(`Node synced block numbers: ${blockNumbers.join(', ')}`); - return blockNumbers.every(bn => bn > targetBlock); - }, - 'Node sync check', - test.L2_SLOT_DURATION_IN_S * 5, - 0.5, - ); - - await test.assertMultipleBlocksPerSlot(2); - - logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); }); // Regression for Joe's Q42025 London attack. Same as above but with an invalid signature instead of insufficient ones. it('chain progresses if a checkpoint with an invalid attestation is invalidated with an invalid one', async () => { - // Configure all sequencers to skip collecting attestations before starting and always build blocks - logger.warn('Configuring all sequencers to inject one invalid attestation'); - const sequencers = nodes.map(node => node.getSequencer()!); - sequencers.forEach(sequencer => { - sequencer.updateConfig({ injectFakeAttestation: true, minTxsPerBlock: 0 }); + await runDoubleInvalidationTest({ + attackConfig: { injectFakeAttestation: true }, + disableConfig: { injectFakeAttestation: false }, }); + }); - // Start all sequencers - await Promise.all(sequencers.map(s => s.start())); - logger.warn(`Started all sequencers with injectFakeAttestation=true`); - - // Wait until we see two invalidations, both should be for the same checkpoint - let lastInvalidatedCheckpointNumber: CheckpointNumber | undefined; - const invalidatePromise = promiseWithResolvers(); - const unsubscribe = rollupContract.listenToCheckpointInvalidated(data => { - logger.warn(`Checkpoint ${data.checkpointNumber} has been 
invalidated`, data); - if (lastInvalidatedCheckpointNumber === undefined) { - lastInvalidatedCheckpointNumber = data.checkpointNumber; - } else { - expect(data.checkpointNumber).toEqual(lastInvalidatedCheckpointNumber); - invalidatePromise.resolve(); - unsubscribe(); - } + // Regression for A-71: Ensure the node correctly invalidates checkpoints where an attestation has a malleable + // signature (high-s value). The Rollup contract uses OpenZeppelin's ECDSA recover which rejects high-s values + // per EIP-2, so these signatures recover to address(0) on L1 but may succeed offchain. + it('proposer invalidates checkpoint with high-s value attestation', async () => { + await runInvalidationTest({ + attackConfig: { injectHighSValueAttestation: true }, + disableConfig: { injectHighSValueAttestation: false }, }); - await Promise.race([ - timeoutPromise(1000 * test.L2_SLOT_DURATION_IN_S * 8, 'Invalidating checkpoints'), - invalidatePromise.promise, - ]); + }); - // Disable injectFakeAttestations - sequencers.forEach(sequencer => { - sequencer.updateConfig({ injectFakeAttestation: false }); + // Regression for A-71: Ensure the node correctly invalidates checkpoints where an attestation's signature + // cannot be recovered (e.g. r=0). On L1, ecrecover returns address(0) for such signatures. + it('proposer invalidates checkpoint with unrecoverable signature attestation', async () => { + await runInvalidationTest({ + attackConfig: { injectUnrecoverableSignatureAttestation: true }, + disableConfig: { injectUnrecoverableSignatureAttestation: false }, }); + }); - // Ensure chain progresses - const targetCheckpointNumber = CheckpointNumber(lastInvalidatedCheckpointNumber! 
+ 2); - logger.warn(`Waiting until checkpoint ${targetCheckpointNumber} has been mined`); - await test.monitor.waitUntilCheckpoint(targetCheckpointNumber); - - // Wait for all nodes to sync the new block - const targetBlock = targetCheckpointNumber; - logger.warn(`Waiting for all nodes to sync to block ${targetBlock}`); - await retryUntil( - async () => { - const blockNumbers = await Promise.all(nodes.map(node => node.getBlockNumber())); - logger.info(`Node synced block numbers: ${blockNumbers.join(', ')}`); - return blockNumbers.every(bn => bn > targetBlock); - }, - 'Node sync check', - test.L2_SLOT_DURATION_IN_S * 5, - 0.5, - ); - - logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); + // Regression for the node accepting attestations that did not conform to the committee order, + // but L1 requires the same ordering. See #18219. + it('proposer invalidates previous block with shuffled attestations', async () => { + await runInvalidationTest({ + attackConfig: { shuffleAttestationOrdering: true }, + disableConfig: { shuffleAttestationOrdering: false }, + }); }); // Here we disable invalidation checks from two of the proposers. 
Our goal is to get two invalid checkpoints @@ -441,116 +477,6 @@ describe('e2e_epochs/epochs_invalidate_block', () => { logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); }); - it('proposer invalidates previous checkpoint without publishing its own', async () => { - const sequencers = nodes.map(node => node.getSequencer()!); - const initialCheckpointNumber = (await nodes[0].getL2Tips()).checkpointed.checkpoint.number; - - // Configure all sequencers to skip collecting attestations before starting - logger.warn('Configuring all sequencers to skip attestation collection and always publish blocks'); - sequencers.forEach(sequencer => { - sequencer.updateConfig({ skipCollectingAttestations: true, minTxsPerBlock: 0 }); - }); - - // Disable skipCollectingAttestations after the first block is mined and prevent sequencers from publishing any more blocks - test.monitor.once('checkpoint', ({ checkpointNumber }) => { - logger.warn(`Disabling skipCollectingAttestations after L2 block ${checkpointNumber} has been mined`); - sequencers.forEach(sequencer => { - sequencer.updateConfig({ skipCollectingAttestations: false, minTxsPerBlock: 100 }); - }); - }); - - // Start all sequencers - await Promise.all(sequencers.map(s => s.start())); - logger.warn(`Started all sequencers with skipCollectingAttestations=true`); - - // Create a filter for CheckpointInvalidated events - const checkpointInvalidatedFilter = await l1Client.createContractEventFilter({ - address: rollupContract.address, - abi: RollupAbi, - eventName: 'CheckpointInvalidated', - fromBlock: 1n, - toBlock: 'latest', - }); - - // The next proposer should invalidate the previous checkpoint - logger.warn('Waiting for next proposer to invalidate the previous checkpoint'); - - // Wait for the CheckpointInvalidated event - const checkpointInvalidatedEvents = await retryUntil( - async () => { - const events = await l1Client.getFilterLogs({ filter: checkpointInvalidatedFilter }); - return events.length > 0 ? 
events : undefined; - }, - 'CheckpointInvalidated event', - test.L2_SLOT_DURATION_IN_S * 5, - 0.1, - ); - - // Verify the CheckpointInvalidated event was emitted and that the block was removed - const [event] = checkpointInvalidatedEvents; - logger.warn(`CheckpointInvalidated event emitted`, { event }); - expect(event.args.checkpointNumber).toBeGreaterThan(initialCheckpointNumber); - expect(await test.rollup.getCheckpointNumber()).toEqual(initialCheckpointNumber); - - logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); - }); - - // Same as test above but with shuffled attestations instead of missing attestations - // REFACTOR: Remove code duplication with above test (and others?) - it('proposer invalidates previous block with shuffled attestations', async () => { - const sequencers = nodes.map(node => node.getSequencer()!); - const initialCheckpointNumber = (await nodes[0].getL2Tips()).checkpointed.checkpoint.number; - - // Configure all sequencers to shuffle attestations before starting - logger.warn('Configuring all sequencers to shuffle attestations and always publish blocks'); - sequencers.forEach(sequencer => { - sequencer.updateConfig({ shuffleAttestationOrdering: true, minTxsPerBlock: 0 }); - }); - - // Disable shuffleAttestationOrdering after the first block is mined and prevent sequencers from publishing any more blocks - test.monitor.once('checkpoint', ({ checkpointNumber }) => { - logger.warn(`Disabling shuffleAttestationOrdering after L2 block ${checkpointNumber} has been mined`); - sequencers.forEach(sequencer => { - sequencer.updateConfig({ shuffleAttestationOrdering: false, minTxsPerBlock: 100 }); - }); - }); - - // Start all sequencers - await Promise.all(sequencers.map(s => s.start())); - logger.warn(`Started all sequencers with shuffleAttestationOrdering=true`); - - // Create a filter for CheckpointInvalidated events - const checkpointInvalidatedFilter = await l1Client.createContractEventFilter({ - address: 
rollupContract.address, - abi: RollupAbi, - eventName: 'CheckpointInvalidated', - fromBlock: 1n, - toBlock: 'latest', - }); - - // The next proposer should invalidate the previous checkpoint - logger.warn('Waiting for next proposer to invalidate the previous checkpoint'); - - // Wait for the CheckpointInvalidated event - const checkpointInvalidatedEvents = await retryUntil( - async () => { - const events = await l1Client.getFilterLogs({ filter: checkpointInvalidatedFilter }); - return events.length > 0 ? events : undefined; - }, - 'CheckpointInvalidated event', - test.L2_SLOT_DURATION_IN_S * 5, - 0.1, - ); - - // Verify the CheckpointInvalidated event was emitted and that the block was removed - const [event] = checkpointInvalidatedEvents; - logger.warn(`CheckpointInvalidated event emitted`, { event }); - expect(event.args.checkpointNumber).toBeGreaterThan(initialCheckpointNumber); - expect(await test.rollup.getCheckpointNumber()).toEqual(initialCheckpointNumber); - - logger.warn(`Test succeeded '${expect.getState().currentTestName}'`); - }); - it('committee member invalidates a block if proposer does not come through', async () => { const sequencers = nodes.map(node => node.getSequencer()!); const initialCheckpointNumber = await nodes[0].getL2Tips().then(t => t.checkpointed.checkpoint.number); diff --git a/yarn-project/end-to-end/src/spartan/n_tps_prove.test.ts b/yarn-project/end-to-end/src/spartan/n_tps_prove.test.ts index 5c1750c41b24..0e20ccefc471 100644 --- a/yarn-project/end-to-end/src/spartan/n_tps_prove.test.ts +++ b/yarn-project/end-to-end/src/spartan/n_tps_prove.test.ts @@ -8,13 +8,13 @@ import { RollupCheatCodes } from '@aztec/aztec/testing'; import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; import { EthCheatCodesWithState } from '@aztec/ethereum/test'; import { SlotNumber } from '@aztec/foundation/branded-types'; -import { timesAsync } from '@aztec/foundation/collection'; +import { timesParallel } from '@aztec/foundation/collection'; import { Fr 
} from '@aztec/foundation/curves/bn254'; import { type Logger, createLogger } from '@aztec/foundation/log'; import { retryUntil } from '@aztec/foundation/retry'; import { sleep } from '@aztec/foundation/sleep'; -import { DateProvider } from '@aztec/foundation/timer'; -import { BenchmarkingContract } from '@aztec/noir-test-contracts.js/Benchmarking'; +import { DateProvider, Timer } from '@aztec/foundation/timer'; +import { AvmGadgetsTestContract } from '@aztec/noir-test-contracts.js/AvmGadgetsTest'; import { GasFees } from '@aztec/stdlib/gas'; import { deriveSigningKey } from '@aztec/stdlib/keys'; import { Tx, TxHash } from '@aztec/stdlib/tx'; @@ -122,7 +122,7 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { let producerPromises: Promise[]; let aztecNode: AztecNode; - let benchmarkContract: BenchmarkingContract; + let benchmarkContract: AvmGadgetsTestContract; let metrics: ProvingMetrics; let childProcesses: ChildProcess[]; @@ -269,7 +269,7 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { ); logger.info(`Creating ${NUM_WALLETS} wallet(s)...`); - testWallets = await timesAsync(NUM_WALLETS, i => { + testWallets = await timesParallel(NUM_WALLETS, i => { logger.info(`Creating wallet ${i + 1}/${NUM_WALLETS}`); return createWorkerWalletClient(rpcUrl, config.REAL_VERIFIER, logger); }); @@ -278,33 +278,31 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { // Register FPC and create/deploy accounts const fpcAddress = await getSponsoredFPCAddress(); const sponsor = new SponsoredFeePaymentMethod(fpcAddress); - accountAddresses = []; - for (const wallet of wallets) { - const secret = Fr.random(); - const salt = Fr.random(); - // Register account inside worker (populates TestWallet.accounts map) - const address = await wallet.registerAccount(secret, salt); - // Register FPC in worker's PXE - await registerSponsoredFPC(wallet); - // Deploy via standard AccountManager flow (from: ZERO -> SignerlessAccount, no account lookup) - const manager = await AccountManager.create( - 
wallet, - secret, - new SchnorrAccountContract(deriveSigningKey(secret)), - salt, - ); - const deployMethod = await manager.getDeployMethod(); - await deployMethod.send({ - from: AztecAddress.ZERO, - fee: { paymentMethod: sponsor }, - wait: { timeout: 2400 }, - }); - logger.info(`Account deployed at ${address}`); - accountAddresses.push(address); - } + accountAddresses = await Promise.all( + wallets.map(async wallet => { + const secret = Fr.random(); + const salt = Fr.random(); + const address = await wallet.registerAccount(secret, salt); + await registerSponsoredFPC(wallet); + const manager = await AccountManager.create( + wallet, + secret, + new SchnorrAccountContract(deriveSigningKey(secret)), + salt, + ); + const deployMethod = await manager.getDeployMethod(); + await deployMethod.send({ + from: AztecAddress.ZERO, + fee: { paymentMethod: sponsor }, + wait: { timeout: 2400 }, + }); + logger.info(`Account deployed at ${address}`); + return address; + }), + ); logger.info('Deploying benchmark contract...'); - benchmarkContract = await BenchmarkingContract.deploy(wallets[0]).send({ + benchmarkContract = await AvmGadgetsTestContract.deploy(wallets[0]).send({ from: accountAddresses[0], fee: { paymentMethod: sponsor }, }); @@ -365,6 +363,14 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { await scaleProverAgents(config.NAMESPACE, 10, logger); }); + afterAll(async () => { + try { + await scaleProverAgents(config.NAMESPACE, 2, logger); + } catch (err) { + logger.error(`Failed to scale prover agents: ${err}`); + } + }); + it(`sends ${TARGET_TPS} TPS for a full epoch and waits for proof`, async () => { const [testEpoch, startSlot, { proven: startProvenBlockNumber, pending: startBlockNumber }] = await Promise.all([ rollupCheatCodes.getEpoch(), @@ -432,6 +438,8 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { let failureCount = 0; const batchSize = 10; + const TX_MINING_TIMEOUT_S = epochDurationSeconds; + const miningTimer = new Timer(); while (pendingTxs.size > 0) { 
const entries = [...pendingTxs.entries()]; const start = Math.floor(Math.random() * Math.max(1, entries.length - batchSize + 1)); @@ -464,6 +472,22 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { ); } + if (miningTimer.s() > TX_MINING_TIMEOUT_S) { + const remainingHashes = [...pendingTxs.values()].map(h => h.toString()); + logger.warn( + `Timed out waiting for ${pendingTxs.size}/${totalSent} transactions after ${TX_MINING_TIMEOUT_S}s. ` + + `These transactions likely were not included in this epoch's blocks. ` + + `Remaining tx hashes: ${remainingHashes.join(', ')}`, + ); + break; + } + + if (processedCount === 0) { + logger.info( + `Still waiting for ${pendingTxs.size}/${totalSent} transactions (${Math.floor(miningTimer.s())}s elapsed)`, + ); + } + await sleep(500); } @@ -483,6 +507,8 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { // Poll for proof completion while detecting reorgs let lastBlockNumber = endBlockNumber; + const PROOF_TIMEOUT_S = epochDurationSeconds; + const proofTimer = new Timer(); while (true) { const [provenBlock, currentBlockNumber] = await Promise.all([ @@ -507,7 +533,13 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { break; } - logger.debug(`Proven: ${provenBlock}, Pending: ${currentBlockNumber}, Target: ${targetProvenBlock}`); + if (proofTimer.s() > PROOF_TIMEOUT_S) { + throw new Error( + `Timed out waiting for proof after ${PROOF_TIMEOUT_S}s. 
Proven: ${provenBlock}, Target: ${targetProvenBlock}`, + ); + } + + logger.info(`Proven: ${provenBlock}, Pending: ${currentBlockNumber}, Target: ${targetProvenBlock}`); lastBlockNumber = currentBlockNumber; await sleep(10 * 1000); // Poll every 10 seconds @@ -530,7 +562,7 @@ describe(`prove ${TARGET_TPS}TPS test`, () => { async function createTx( wallet: WorkerWallet, accountAddress: AztecAddress, - benchmarkContract: BenchmarkingContract, + benchmarkContract: AvmGadgetsTestContract, logger: Logger, ): Promise { logger.info('Creating prototype transaction...'); @@ -539,7 +571,7 @@ async function createTx( from: accountAddress, fee: { paymentMethod: sponsor, gasSettings: { maxPriorityFeesPerGas: GasFees.empty() } }, }; - const interaction = benchmarkContract.methods.sha256_hash_1024(Array(1024).fill(42)); + const interaction = benchmarkContract.methods.keccak_hash_1400(Array(1400).fill(42)); const execPayload = await interaction.request(options); const tx = await wallet.proveTx(execPayload, toSendOptions(options)); logger.info('Prototype transaction created'); @@ -579,7 +611,7 @@ async function cloneTx(tx: Tx, aztecNode: AztecNode): Promise { async function startProducing( producer: WalletTxProducer, - benchmarkContract: BenchmarkingContract, + benchmarkContract: AvmGadgetsTestContract, aztecNode: AztecNode, signal: AbortSignal, logger: Logger, diff --git a/yarn-project/ethereum/src/config.ts b/yarn-project/ethereum/src/config.ts index 5c271cd915cb..6ccee3ce7c2a 100644 --- a/yarn-project/ethereum/src/config.ts +++ b/yarn-project/ethereum/src/config.ts @@ -19,6 +19,8 @@ export type GenesisStateConfig = { testAccounts: boolean; /** Whether to populate the genesis state with initial fee juice for the sponsored FPC */ sponsoredFPC: boolean; + /** Additional addresses to prefund with fee juice at genesis */ + prefundAddresses: string[]; }; export type L1ContractsConfig = { @@ -259,6 +261,16 @@ export const genesisStateConfigMappings: ConfigMappingsType description: 
'Whether to populate the genesis state with initial fee juice for the sponsored FPC.', ...booleanConfigHelper(false), }, + prefundAddresses: { + env: 'PREFUND_ADDRESSES', + description: 'Comma-separated list of Aztec addresses to prefund with fee juice at genesis.', + parseEnv: (val: string) => + val + .split(',') + .map(a => a.trim()) + .filter(a => a.length > 0), + defaultValue: [], + }, }; export function getL1ContractsConfigEnvVars(): L1ContractsConfig { diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 9eb0ef616f8c..6cbd2d15ecae 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -152,6 +152,7 @@ export type EnvVar = | 'P2P_DROP_TX_CHANCE' | 'P2P_TX_POOL_DELETE_TXS_AFTER_REORG' | 'P2P_MIN_TX_POOL_AGE_MS' + | 'P2P_RPC_PRICE_BUMP_PERCENTAGE' | 'DEBUG_P2P_INSTRUMENT_MESSAGES' | 'PEER_ID_PRIVATE_KEY' | 'PEER_ID_PRIVATE_KEY_PATH' @@ -247,6 +248,7 @@ export type EnvVar = | 'TELEMETRY' | 'TEST_ACCOUNTS' | 'SPONSORED_FPC' + | 'PREFUND_ADDRESSES' | 'TX_COLLECTION_FAST_NODES_TIMEOUT_BEFORE_REQ_RESP_MS' | 'TX_COLLECTION_SLOW_NODES_INTERVAL_MS' | 'TX_COLLECTION_SLOW_REQ_RESP_INTERVAL_MS' @@ -340,9 +342,8 @@ export type EnvVar = | 'K8S_POD_NAME' | 'K8S_POD_UID' | 'K8S_NAMESPACE_NAME' + | 'ENABLE_VERSION_CHECK' | 'VALIDATOR_REEXECUTE_DEADLINE_MS' - | 'AUTO_UPDATE' - | 'AUTO_UPDATE_URL' | 'WEB3_SIGNER_URL' | 'SKIP_ARCHIVER_INITIAL_SYNC' | 'BLOB_ALLOW_EMPTY_SOURCES' diff --git a/yarn-project/foundation/src/config/network_config.ts b/yarn-project/foundation/src/config/network_config.ts index 5604eca90ff5..b4cdc8549533 100644 --- a/yarn-project/foundation/src/config/network_config.ts +++ b/yarn-project/foundation/src/config/network_config.ts @@ -9,6 +9,7 @@ export const NetworkConfigSchema = z feeAssetHandlerAddress: z.string().optional(), l1ChainId: z.number(), blockDurationMs: z.number().positive().optional(), + nodeVersion: z.string().optional(), }) 
.passthrough(); // Allow additional unknown fields to pass through diff --git a/yarn-project/foundation/src/crypto/secp256k1-signer/malleability.test.ts b/yarn-project/foundation/src/crypto/secp256k1-signer/malleability.test.ts index 4874b7ceabfe..d787f6908145 100644 --- a/yarn-project/foundation/src/crypto/secp256k1-signer/malleability.test.ts +++ b/yarn-project/foundation/src/crypto/secp256k1-signer/malleability.test.ts @@ -8,6 +8,8 @@ import { Secp256k1Signer } from './secp256k1_signer.js'; import { Secp256k1Error, flipSignature, + generateRecoverableSignature, + generateUnrecoverableSignature, makeEthSignDigest, normalizeSignature, recoverAddress, @@ -139,3 +141,21 @@ describe('ecdsa malleability', () => { expect(recoveredAddress.toString()).toEqual(expectedAddress.toString()); }); }); + +describe('generateRecoverableSignature', () => { + it('produces a signature from which an address can be recovered', () => { + const sig = generateRecoverableSignature(); + const hash = Buffer32.random(); + const recovered = tryRecoverAddress(hash, sig); + expect(recovered).toBeDefined(); + }); +}); + +describe('generateUnrecoverableSignature', () => { + it('produces a signature from which no address can be recovered', () => { + const sig = generateUnrecoverableSignature(); + const hash = Buffer32.random(); + const recovered = tryRecoverAddress(hash, sig); + expect(recovered).toBeUndefined(); + }); +}); diff --git a/yarn-project/foundation/src/crypto/secp256k1-signer/utils.ts b/yarn-project/foundation/src/crypto/secp256k1-signer/utils.ts index a8a459d01b4e..2226ae635550 100644 --- a/yarn-project/foundation/src/crypto/secp256k1-signer/utils.ts +++ b/yarn-project/foundation/src/crypto/secp256k1-signer/utils.ts @@ -210,3 +210,35 @@ export function recoverPublicKey(hash: Buffer32, signature: Signature, opts: Rec const publicKey = sig.recoverPublicKey(hash.buffer).toHex(false); return Buffer.from(publicKey, 'hex'); } + +/** Arbitrary hash used for testing signature recoverability. 
*/ +const PROBE_HASH = Buffer32.fromBuffer(keccak256(Buffer.from('signature-recoverability-probe'))); + +/** + * Generates a random valid ECDSA signature that is recoverable to some address. + * Since Signature.random() produces real signatures via secp256k1 signing, the result is always + * recoverable, but we verify defensively by checking tryRecoverAddress. + */ +export function generateRecoverableSignature(): Signature { + for (let i = 0; i < 100; i++) { + const sig = Signature.random(); + if (tryRecoverAddress(PROBE_HASH, sig) !== undefined) { + return sig; + } + } + throw new Secp256k1Error('Failed to generate a recoverable signature after 100 attempts'); +} + +/** + * Generates a random signature where ECDSA address recovery fails. + * Uses random r/s values (not from real signing) so that r is unlikely to be a valid secp256k1 x-coordinate. + */ +export function generateUnrecoverableSignature(): Signature { + for (let i = 0; i < 100; i++) { + const sig = new Signature(Buffer32.random(), Buffer32.random(), 27); + if (tryRecoverAddress(PROBE_HASH, sig) === undefined) { + return sig; + } + } + throw new Secp256k1Error('Failed to generate an unrecoverable signature after 100 attempts'); +} diff --git a/yarn-project/foundation/src/eth-signature/eth_signature.test.ts b/yarn-project/foundation/src/eth-signature/eth_signature.test.ts index 2c642c2c32a8..dd19ae14520b 100644 --- a/yarn-project/foundation/src/eth-signature/eth_signature.test.ts +++ b/yarn-project/foundation/src/eth-signature/eth_signature.test.ts @@ -1,7 +1,9 @@ import { Buffer32 } from '@aztec/foundation/buffer'; -import { Secp256k1Signer, recoverAddress } from '@aztec/foundation/crypto/secp256k1-signer'; +import { Secp256k1Signer, recoverAddress, tryRecoverAddress } from '@aztec/foundation/crypto/secp256k1-signer'; import { Fr } from '@aztec/foundation/curves/bn254'; +import { secp256k1 } from '@noble/curves/secp256k1'; + import { Signature } from './eth_signature.js'; const randomSigner = () => { @@ 
-62,4 +64,24 @@ describe('eth signature', () => { const deserialized = Signature.fromString(serialized); checkEquivalence(signature, deserialized); }); + + it('random() produces a valid recoverable signature with low s-value', () => { + const sig = Signature.random(); + + // v should be 27 or 28 + expect([27, 28]).toContain(sig.v); + + // Signature should not be empty + expect(sig.isEmpty()).toBe(false); + + // s should be in the low half of the curve (low s-value) + const sBigInt = sig.s.toBigInt(); + const halfN = secp256k1.CURVE.n / 2n; + expect(sBigInt).toBeLessThanOrEqual(halfN); + + // Signature should be recoverable (tryRecoverAddress should return an address for any hash) + const hash = Buffer32.random(); + const recovered = tryRecoverAddress(hash, sig); + expect(recovered).toBeDefined(); + }); }); diff --git a/yarn-project/foundation/src/eth-signature/eth_signature.ts b/yarn-project/foundation/src/eth-signature/eth_signature.ts index c76343f37540..62545d3cc4e9 100644 --- a/yarn-project/foundation/src/eth-signature/eth_signature.ts +++ b/yarn-project/foundation/src/eth-signature/eth_signature.ts @@ -1,8 +1,10 @@ import { Buffer32 } from '@aztec/foundation/buffer'; import { BufferReader, serializeToBuffer } from '@aztec/foundation/serialize'; +import { secp256k1 } from '@noble/curves/secp256k1'; import { z } from 'zod'; +import { randomBytes } from '../crypto/random/index.js'; import { hasHexPrefix, hexToBuffer } from '../string/index.js'; /** @@ -77,8 +79,12 @@ export class Signature { return new Signature(Buffer32.fromBuffer(hexToBuffer(sig.r)), Buffer32.fromBuffer(hexToBuffer(sig.s)), sig.yParity); } + /** Generates a random valid ECDSA signature with a low s-value by signing a random message with a random key. 
*/ static random(): Signature { - return new Signature(Buffer32.random(), Buffer32.random(), 1); + const privateKey = randomBytes(32); + const message = randomBytes(32); + const { r, s, recovery } = secp256k1.sign(message, privateKey); + return new Signature(Buffer32.fromBigInt(r), Buffer32.fromBigInt(s), recovery ? 28 : 27); } static empty(): Signature { diff --git a/yarn-project/node-lib/src/config/index.ts b/yarn-project/node-lib/src/config/index.ts index c8403d2b09bc..280204ca4f4e 100644 --- a/yarn-project/node-lib/src/config/index.ts +++ b/yarn-project/node-lib/src/config/index.ts @@ -5,16 +5,12 @@ export type SharedNodeConfig = { testAccounts: boolean; /** Whether to populate the genesis state with initial fee juice for the sponsored FPC */ sponsoredFPC: boolean; + /** Additional addresses to prefund with fee juice at genesis */ + prefundAddresses: string[]; /** Sync mode: full to always sync via L1, snapshot to download a snapshot if there is no local data, force-snapshot to download even if there is local data. */ syncMode: 'full' | 'snapshot' | 'force-snapshot'; /** Base URLs for snapshots index. Index file will be searched at `SNAPSHOTS_BASE_URL/aztec-L1_CHAIN_ID-VERSION-ROLLUP_ADDRESS/index.json` */ snapshotsUrls?: string[]; - - /** Auto update mode: disabled - to completely ignore remote signals to update the node. enabled - to respect the signals (potentially shutting this node down). log - check for updates but log a warning instead of applying them*/ - autoUpdate?: 'disabled' | 'notify' | 'config' | 'config-and-version'; - /** The base URL against which to check for updates */ - autoUpdateUrl?: string; - /** URL of the Web3Signer instance */ web3SignerUrl?: string; /** Whether to run in fisherman mode */ @@ -22,6 +18,9 @@ export type SharedNodeConfig = { /** Force verification of tx Chonk proofs. 
Only used for testnet */ debugForceTxProofVerification: boolean; + + /** Check if the node version matches the latest version for the network */ + enableVersionCheck: boolean; }; export const sharedNodeConfigMappings: ConfigMappingsType = { @@ -35,6 +34,16 @@ export const sharedNodeConfigMappings: ConfigMappingsType = { description: 'Whether to populate the genesis state with initial fee juice for the sponsored FPC.', ...booleanConfigHelper(false), }, + prefundAddresses: { + env: 'PREFUND_ADDRESSES', + description: 'Comma-separated list of Aztec addresses to prefund with fee juice at genesis.', + parseEnv: (val: string) => + val + .split(',') + .map(a => a.trim()) + .filter(a => a.length > 0), + defaultValue: [], + }, syncMode: { env: 'SYNC_MODE', description: @@ -52,15 +61,6 @@ export const sharedNodeConfigMappings: ConfigMappingsType = { fallback: ['SYNC_SNAPSHOTS_URL'], defaultValue: [], }, - autoUpdate: { - env: 'AUTO_UPDATE', - description: 'The auto update mode for this node', - defaultValue: 'disabled', - }, - autoUpdateUrl: { - env: 'AUTO_UPDATE_URL', - description: 'Base URL to check for updates', - }, web3SignerUrl: { env: 'WEB3_SIGNER_URL', description: 'URL of the Web3Signer instance', @@ -76,4 +76,10 @@ export const sharedNodeConfigMappings: ConfigMappingsType = { description: 'Whether to force tx proof verification. 
Only has an effect if real proving is turned off', ...booleanConfigHelper(false), }, + + enableVersionCheck: { + env: 'ENABLE_VERSION_CHECK', + description: 'Check if the node is running the latest version and is following the latest rollup', + ...booleanConfigHelper(true), + }, }; diff --git a/yarn-project/p2p/src/client/factory.ts b/yarn-project/p2p/src/client/factory.ts index c6b431ca5a12..eb277343c5a6 100644 --- a/yarn-project/p2p/src/client/factory.ts +++ b/yarn-project/p2p/src/client/factory.ts @@ -99,6 +99,7 @@ export async function createP2PClient( maxPendingTxCount: config.maxPendingTxCount, archivedTxLimit: config.archivedTxLimit, minTxPoolAgeMs: config.minTxPoolAgeMs, + priceBumpPercentage: config.priceBumpPercentage, }, dateProvider, ); diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index 039e433aa163..72d10b3c9417 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -1,6 +1,7 @@ import { type ConfigMappingsType, SecretValue, + bigintConfigHelper, booleanConfigHelper, getConfigFromMappings, getDefaultConfig, @@ -193,6 +194,9 @@ export interface P2PConfig /** Minimum age (ms) a transaction must have been in the pool before it's eligible for block building. */ minTxPoolAgeMs: number; + + /** Minimum percentage fee increase required to replace an existing tx via RPC (0 = no bump). */ + priceBumpPercentage: bigint; } export const DEFAULT_P2P_PORT = 40400; @@ -472,6 +476,12 @@ export const p2pConfigMappings: ConfigMappingsType = { description: 'Minimum age (ms) a transaction must have been in the pool before it is eligible for block building.', ...numberConfigHelper(2_000), }, + priceBumpPercentage: { + env: 'P2P_RPC_PRICE_BUMP_PERCENTAGE', + description: + 'Minimum percentage fee increase required to replace an existing tx via RPC. 
Even at 0%, replacement still requires paying at least 1 unit more.', + ...bigintConfigHelper(10n), + }, ...sharedSequencerConfigMappings, ...p2pReqRespConfigMappings, ...batchTxRequesterConfigMappings, diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/README.md b/yarn-project/p2p/src/mem_pools/tx_pool_v2/README.md index 25dfc1d12435..64cf64650ba3 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/README.md +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/README.md @@ -158,7 +158,7 @@ Checked before adding a transaction to the pending pool: | Rule | Purpose | |------|---------| -| `NullifierConflictRule` | Handles transactions with conflicting nullifiers. Higher priority tx wins. | +| `NullifierConflictRule` | Handles transactions with conflicting nullifiers. Higher priority tx wins. For RPC submissions, a configurable price bump percentage is required. | | `FeePayerBalancePreAddRule` | Ensures fee payer has sufficient balance for all their pending txs. | | `LowPriorityPreAddRule` | Rejects txs when pool is full and new tx has lowest priority. | @@ -233,6 +233,14 @@ await pool.updateConfig({ }); ``` +### Price Bump (RPC Transaction Replacement) + +When a transaction is submitted via RPC and clashes on nullifiers with an existing pool transaction, the incoming tx must pay at least `priceBumpPercentage`% more in priority fee (i.e. `>= existingFee + existingFee * bump / 100`) to replace it. This prevents spam via small fee increments. The same bump applies when the pool is full and the incoming tx needs to evict the lowest-priority tx. + +- **Env var**: `P2P_RPC_PRICE_BUMP_PERCENTAGE` (default: 10) +- **Scope**: RPC submissions only. P2P gossip uses `comparePriority` (fee + hash tiebreaker) with no bump. +- Even with a 0% bump, a replacement tx must pay at least 1 unit more than the existing fee. 
+ ## Return Values ### AddTxsResult diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/interfaces.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/interfaces.ts index 32135758973d..dd488bb1597e 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/interfaces.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/interfaces.ts @@ -100,7 +100,15 @@ export type TxPoolRejectionError = availableBalance: bigint; feeLimit: bigint; } - | { code: typeof TxPoolRejectionCode.NULLIFIER_CONFLICT; message: string; conflictingTxHash: string } + | { + code: typeof TxPoolRejectionCode.NULLIFIER_CONFLICT; + message: string; + conflictingTxHash: string; + /** Minimum fee needed to replace the conflicting tx (only set when price bump applies). */ + minimumPriceBumpFee?: bigint; + /** Incoming tx's priority fee. */ + txPriorityFee?: bigint; + } | { code: typeof TxPoolRejectionCode.INTERNAL_ERROR; message: string }; /** @@ -121,6 +129,8 @@ export interface PreAddResult { export interface PreAddContext { /** If true, compare priority fee only (no tx hash tiebreaker). Used for RPC submissions. */ feeComparisonOnly?: boolean; + /** Percentage-based price bump required for tx replacement. Only set for RPC submissions. 
*/ + priceBumpPercentage?: bigint; } /** diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.test.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.test.ts index fd66a0df4aee..57df8e341e49 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.test.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.test.ts @@ -209,5 +209,71 @@ describe('LowPriorityPreAddRule', () => { expect(result2.shouldIgnore).toBe(true); }); }); + + describe('with priceBumpPercentage', () => { + it('evicts when incoming fee exceeds the bump threshold', async () => { + const lowestPriorityMeta = createMeta('0x2222', 100n); + const poolAccess = createPoolAccess(100, lowestPriorityMeta); + const incomingMeta = createMeta('0x1111', 111n); // Above 10% bump + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain(lowestPriorityMeta.txHash); + }); + + it('evicts when incoming fee is exactly at the bump threshold', async () => { + const lowestPriorityMeta = createMeta('0x2222', 100n); + const poolAccess = createPoolAccess(100, lowestPriorityMeta); + const incomingMeta = createMeta('0x1111', 110n); // Exactly 10% bump — accepted + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain(lowestPriorityMeta.txHash); + }); + + it('ignores when incoming fee is below the bump threshold', async () => { + const lowestPriorityMeta = createMeta('0x2222', 100n); + const poolAccess = createPoolAccess(100, lowestPriorityMeta); + const incomingMeta = createMeta('0x1111', 109n); // Below 10% bump + + const 
context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(true); + expect(result.reason?.code).toBe(TxPoolRejectionCode.LOW_PRIORITY_FEE); + if (result.reason?.code === TxPoolRejectionCode.LOW_PRIORITY_FEE) { + expect(result.reason.minimumPriorityFee).toBe(110n); + expect(result.reason.txPriorityFee).toBe(109n); + } + }); + + it('without price bump (P2P path), behavior unchanged', async () => { + const lowestPriorityMeta = createMeta('0x2222', 100n); + const poolAccess = createPoolAccess(100, lowestPriorityMeta); + const incomingMeta = createMeta('0x1111', 101n); + + // No context — uses comparePriority, 101 > 100 so incoming wins + const result = await rule.check(incomingMeta, poolAccess); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain(lowestPriorityMeta.txHash); + }); + + it('with 0% bump, rejects equal fee (minimum bump of 1)', async () => { + const lowestPriorityMeta = createMeta('0x2222', 100n); + const poolAccess = createPoolAccess(100, lowestPriorityMeta); + const incomingMeta = createMeta('0x1111', 100n); + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 0n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(true); + expect(result.txHashesToEvict).toHaveLength(0); + }); + }); }); }); diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.ts index b4d5ef8382db..013ffe6f8c6e 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.ts @@ -1,6 +1,6 @@ import { createLogger } from '@aztec/foundation/log'; -import { type TxMetaData, comparePriority } from '../tx_metadata.js'; 
+import { type TxMetaData, comparePriority, getMinimumPriceBumpFee } from '../tx_metadata.js'; import { type EvictionConfig, type PreAddContext, @@ -48,10 +48,14 @@ export class LowPriorityPreAddRule implements PreAddRule { } // Compare incoming tx against lowest priority tx. - // feeOnly mode (RPC): use strict fee comparison only — avoids churn from hash ordering - // Default (gossip): use full comparePriority (fee + tx hash tiebreaker) for determinism + // feeOnly mode (RPC): use strict fee comparison only — avoids churn from hash ordering. + // When price bump is also set, require the bumped fee threshold. + // Default (gossip): use full comparePriority (fee + tx hash tiebreaker) for determinism. const isHigherPriority = context?.feeComparisonOnly - ? incomingMeta.priorityFee > lowestPriorityMeta.priorityFee + ? context.priceBumpPercentage !== undefined + ? incomingMeta.priorityFee >= + getMinimumPriceBumpFee(lowestPriorityMeta.priorityFee, context.priceBumpPercentage) + : incomingMeta.priorityFee > lowestPriorityMeta.priorityFee : comparePriority(incomingMeta, lowestPriorityMeta) > 0; if (isHigherPriority) { @@ -66,6 +70,11 @@ export class LowPriorityPreAddRule implements PreAddRule { } // Incoming tx has equal or lower priority - ignore it (it would be evicted anyway) + const minimumFee = + context?.feeComparisonOnly && context.priceBumpPercentage !== undefined + ? getMinimumPriceBumpFee(lowestPriorityMeta.priorityFee, context.priceBumpPercentage) + : lowestPriorityMeta.priorityFee + 1n; + this.log.debug( `Pool at capacity (${currentCount}/${this.maxPoolSize}), ignoring ${incomingMeta.txHash} ` + `(priority ${incomingMeta.priorityFee}) - lower than existing minimum (priority ${lowestPriorityMeta.priorityFee})`, @@ -75,8 +84,8 @@ export class LowPriorityPreAddRule implements PreAddRule { txHashesToEvict: [], reason: { code: TxPoolRejectionCode.LOW_PRIORITY_FEE, - message: `Tx does not meet minimum priority fee. 
Required: ${lowestPriorityMeta.priorityFee + 1n}, got: ${incomingMeta.priorityFee}`, - minimumPriorityFee: lowestPriorityMeta.priorityFee + 1n, + message: `Tx does not meet minimum priority fee. Required: ${minimumFee}, got: ${incomingMeta.priorityFee}`, + minimumPriorityFee: minimumFee, txPriorityFee: incomingMeta.priorityFee, }, }); diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.test.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.test.ts index 5108966f9047..f30ba1387587 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.test.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.test.ts @@ -1,5 +1,5 @@ import { type TxMetaData, stubTxMetaData } from '../tx_metadata.js'; -import type { PreAddPoolAccess } from './interfaces.js'; +import { type PreAddContext, type PreAddPoolAccess, TxPoolRejectionCode } from './interfaces.js'; import { NullifierConflictRule } from './nullifier_conflict_rule.js'; describe('NullifierConflictRule', () => { @@ -255,6 +255,108 @@ describe('NullifierConflictRule', () => { }); }); + describe('with priceBumpPercentage context', () => { + it('accepts tx when fee exceeds 10% bump threshold', async () => { + const sharedNullifier = '0xshared_null'; + const existingMeta = createMeta('0x2222', 100n, [sharedNullifier]); + const incomingMeta = createMeta('0x1111', 111n, [sharedNullifier]); // Above 10% + + const metadataMap = new Map([['0x2222', existingMeta]]); + const nullifierMap = new Map([[sharedNullifier, '0x2222']]); + poolAccess = createPoolAccess(nullifierMap, metadataMap); + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain('0x2222'); + }); + + it('accepts tx when fee is exactly at 10% bump threshold', async 
() => { + const sharedNullifier = '0xshared_null'; + const existingMeta = createMeta('0x2222', 100n, [sharedNullifier]); + const incomingMeta = createMeta('0x1111', 110n, [sharedNullifier]); // Exactly 10% — accepted + + const metadataMap = new Map([['0x2222', existingMeta]]); + const nullifierMap = new Map([[sharedNullifier, '0x2222']]); + poolAccess = createPoolAccess(nullifierMap, metadataMap); + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain('0x2222'); + }); + + it('rejects tx when fee is below 10% bump threshold', async () => { + const sharedNullifier = '0xshared_null'; + const existingMeta = createMeta('0x2222', 100n, [sharedNullifier]); + const incomingMeta = createMeta('0x1111', 109n, [sharedNullifier]); // Below 10% + + const metadataMap = new Map([['0x2222', existingMeta]]); + const nullifierMap = new Map([[sharedNullifier, '0x2222']]); + poolAccess = createPoolAccess(nullifierMap, metadataMap); + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(true); + expect(result.reason?.code).toBe(TxPoolRejectionCode.NULLIFIER_CONFLICT); + if (result.reason?.code === TxPoolRejectionCode.NULLIFIER_CONFLICT) { + expect(result.reason.minimumPriceBumpFee).toBe(110n); + expect(result.reason.txPriorityFee).toBe(109n); + } + }); + + it('accepts tx well above bump threshold', async () => { + const sharedNullifier = '0xshared_null'; + const existingMeta = createMeta('0x2222', 100n, [sharedNullifier]); + const incomingMeta = createMeta('0x1111', 200n, [sharedNullifier]); + + const metadataMap = new Map([['0x2222', existingMeta]]); + const nullifierMap = new Map([[sharedNullifier, '0x2222']]); + poolAccess = createPoolAccess(nullifierMap, 
metadataMap); + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 10n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain('0x2222'); + }); + + it('without price bump (P2P path), behavior is unchanged', async () => { + const sharedNullifier = '0xshared_null'; + const existingMeta = createMeta('0x2222', 100n, [sharedNullifier]); + const incomingMeta = createMeta('0x1111', 101n, [sharedNullifier]); // 1% above, not enough for 10% bump + + const metadataMap = new Map([['0x2222', existingMeta]]); + const nullifierMap = new Map([[sharedNullifier, '0x2222']]); + poolAccess = createPoolAccess(nullifierMap, metadataMap); + + // No context (P2P) — uses comparePriority, 101 > 100 means incoming wins + const result = await rule.check(incomingMeta, poolAccess); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toContain('0x2222'); + }); + + it('with 0% price bump, rejects equal fee (minimum bump of 1)', async () => { + const sharedNullifier = '0xshared_null'; + const existingMeta = createMeta('0x2222', 100n, [sharedNullifier]); + const incomingMeta = createMeta('0x1111', 100n, [sharedNullifier]); + + const metadataMap = new Map([['0x2222', existingMeta]]); + const nullifierMap = new Map([[sharedNullifier, '0x2222']]); + poolAccess = createPoolAccess(nullifierMap, metadataMap); + + const context: PreAddContext = { feeComparisonOnly: true, priceBumpPercentage: 0n }; + const result = await rule.check(incomingMeta, poolAccess, context); + + expect(result.shouldIgnore).toBe(true); + expect(result.txHashesToEvict).toHaveLength(0); + }); + }); + describe('edge cases', () => { it('skips self-reference (incoming tx hash in conflict list)', async () => { const nullifier = '0xnull1'; diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.ts 
b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.ts index 9b638e13e83d..534a6fa4526e 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.ts @@ -15,11 +15,12 @@ export class NullifierConflictRule implements PreAddRule { private log = createLogger('p2p:tx_pool_v2:nullifier_conflict_rule'); - check(incomingMeta: TxMetaData, poolAccess: PreAddPoolAccess, _context?: PreAddContext): Promise { + check(incomingMeta: TxMetaData, poolAccess: PreAddPoolAccess, context?: PreAddContext): Promise { const result = checkNullifierConflict( incomingMeta, nullifier => poolAccess.getTxHashByNullifier(nullifier), txHash => poolAccess.getMetadata(txHash), + context?.priceBumpPercentage, ); if (result.shouldIgnore) { diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts index 5bbf06a16676..a54de6d1293b 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts @@ -44,6 +44,8 @@ export type TxPoolV2Config = { minTxPoolAgeMs: number; /** Maximum number of evicted tx hashes to remember for metrics tracking */ evictedTxCacheSize: number; + /** Minimum percentage fee increase required to replace an existing tx via RPC (0 = no bump). 
*/ + priceBumpPercentage: bigint; }; /** @@ -54,6 +56,7 @@ export const DEFAULT_TX_POOL_V2_CONFIG: TxPoolV2Config = { archivedTxLimit: 0, // 0 = disabled minTxPoolAgeMs: 2_000, evictedTxCacheSize: 10_000, + priceBumpPercentage: 10n, }; /** diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts index d139138d5489..a4ba74f53105 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts @@ -1,7 +1,13 @@ import { mockTx } from '@aztec/stdlib/testing'; import { TxPoolRejectionCode } from './eviction/interfaces.js'; -import { buildTxMetaData, checkNullifierConflict, comparePriority, stubTxMetaData } from './tx_metadata.js'; +import { + buildTxMetaData, + checkNullifierConflict, + comparePriority, + getMinimumPriceBumpFee, + stubTxMetaData, +} from './tx_metadata.js'; describe('TxMetaData', () => { describe('buildTxMetaData', () => { @@ -260,5 +266,127 @@ describe('TxMetaData', () => { expect(result.shouldIgnore).toBe(false); expect(result.txHashesToEvict).toEqual([]); }); + + describe('with priceBumpPercentage', () => { + it('accepts incoming tx when fee exceeds the bump threshold', () => { + const existing = makeMeta('0x2222', 100n, ['0xnull1']); + const incoming = makeMeta('0x1111', 111n, ['0xnull1']); // Above 10% bump + + const result = checkNullifierConflict( + incoming, + () => existing.txHash, + () => existing, + 10n, // 10% bump + ); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toEqual([existing.txHash]); + }); + + it('accepts incoming tx when fee is exactly at the bump threshold', () => { + const existing = makeMeta('0x2222', 100n, ['0xnull1']); + const incoming = makeMeta('0x1111', 110n, ['0xnull1']); // Exactly 10% bump — accepted + + const result = checkNullifierConflict( + incoming, + () => existing.txHash, + () => existing, + 10n, + ); + + 
expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toEqual([existing.txHash]); + }); + + it('rejects incoming tx when fee is below the bump threshold', () => { + const existing = makeMeta('0x2222', 100n, ['0xnull1']); + const incoming = makeMeta('0x1111', 109n, ['0xnull1']); // Below 10% bump + + const result = checkNullifierConflict( + incoming, + () => existing.txHash, + () => existing, + 10n, + ); + + expect(result.shouldIgnore).toBe(true); + expect(result.txHashesToEvict).toEqual([]); + expect(result.reason?.code).toBe(TxPoolRejectionCode.NULLIFIER_CONFLICT); + if (result.reason?.code === TxPoolRejectionCode.NULLIFIER_CONFLICT) { + expect(result.reason.minimumPriceBumpFee).toBe(110n); + expect(result.reason.txPriorityFee).toBe(109n); + } + }); + + it('accepts incoming tx well above the bump threshold', () => { + const existing = makeMeta('0x2222', 100n, ['0xnull1']); + const incoming = makeMeta('0x1111', 200n, ['0xnull1']); + + const result = checkNullifierConflict( + incoming, + () => existing.txHash, + () => existing, + 10n, + ); + + expect(result.shouldIgnore).toBe(false); + expect(result.txHashesToEvict).toEqual([existing.txHash]); + }); + + it('with 0% bump, rejects equal fee (minimum bump of 1)', () => { + const existing = makeMeta('0x2222', 100n, ['0xnull1']); + const incoming = makeMeta('0x1111', 100n, ['0xnull1']); + + const result = checkNullifierConflict( + incoming, + () => existing.txHash, + () => existing, + 0n, // 0% bump + ); + + expect(result.shouldIgnore).toBe(true); + expect(result.txHashesToEvict).toEqual([]); + }); + + it('without price bump, uses comparePriority (P2P path unchanged)', () => { + const existing = makeMeta('0x2222', 100n, ['0xnull1']); + const incoming = makeMeta('0x1111', 100n, ['0xnull1']); + + // No priceBumpPercentage — uses comparePriority, which for equal fees uses hash tiebreaker + const result = checkNullifierConflict( + incoming, + () => existing.txHash, + () => existing, + ); + + // With equal 
fees, the result depends on hash tiebreaker + // 0x1111 < 0x2222 so incoming has lower priority → should be ignored + expect(result.shouldIgnore).toBe(true); + }); + }); + }); + + describe('getMinimumPriceBumpFee', () => { + it('calculates 10% bump correctly', () => { + expect(getMinimumPriceBumpFee(100n, 10n)).toBe(110n); + }); + + it('calculates 0% bump (returns fee + 1 minimum bump)', () => { + expect(getMinimumPriceBumpFee(100n, 0n)).toBe(101n); + }); + + it('handles 0 existing fee (minimum bump of 1)', () => { + expect(getMinimumPriceBumpFee(0n, 10n)).toBe(1n); + }); + + it('handles large percentages', () => { + expect(getMinimumPriceBumpFee(100n, 100n)).toBe(200n); + expect(getMinimumPriceBumpFee(100n, 200n)).toBe(300n); + }); + + it('truncates fractional result (integer division)', () => { + // 33 * 10 / 100 = 3.3 → truncated to 3, so 33 + 3 = 36 + expect(getMinimumPriceBumpFee(33n, 10n)).toBe(36n); + }); }); }); diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts index 316f551bcc6c..3874a7aab292 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts @@ -190,21 +190,38 @@ export function comparePriority(a: PriorityComparable, b: PriorityComparable): n return compareTxHash(a.txHashBigInt, b.txHashBigInt); } +/** + * Returns the minimum fee required to replace an existing tx with the given price bump percentage. + * Uses integer arithmetic: `existingFee + existingFee * priceBumpPercentage / 100`. + */ +export function getMinimumPriceBumpFee(existingFee: bigint, priceBumpPercentage: bigint): bigint { + const bump = (existingFee * priceBumpPercentage) / 100n; + // Ensure the minimum bump is at least 1, so that replacement always requires + // paying strictly more — even with 0% bump or zero existing fee. + const effectiveBump = bump > 0n ? 
bump : 1n; + return existingFee + effectiveBump; +} + /** * Checks for nullifier conflicts between an incoming transaction and existing pool state. * * When the incoming tx shares nullifiers with existing pending txs: - * - If the incoming tx has strictly higher priority, mark conflicting txs for eviction - * - If any conflicting tx has equal or higher priority, ignore the incoming tx + * - If the incoming tx meets or exceeds the required priority, mark conflicting txs for eviction + * - Otherwise, ignore the incoming tx + * + * When `priceBumpPercentage` is provided (RPC path), uses fee-only comparison with the + * percentage bump instead of `comparePriority`. * * @param incomingMeta - Metadata for the incoming transaction * @param getTxHashByNullifier - Accessor to find which tx uses a nullifier * @param getMetadata - Accessor to get metadata for a tx hash + * @param priceBumpPercentage - Optional percentage bump required for fee-based replacement */ export function checkNullifierConflict( incomingMeta: TxMetaData, getTxHashByNullifier: (nullifier: string) => string | undefined, getMetadata: (txHash: string) => TxMetaData | undefined, + priceBumpPercentage?: bigint, ): PreAddResult { const txHashesToEvict: string[] = []; @@ -225,19 +242,32 @@ export function checkNullifierConflict( continue; } - // If incoming tx has strictly higher priority, mark for eviction - // Otherwise, ignore incoming tx (ties go to existing tx) - // Use comparePriority for deterministic ordering (includes txHash as tiebreaker) - if (comparePriority(incomingMeta, conflictingMeta) > 0) { + // When price bump is set (RPC path), require the incoming fee to meet the bumped threshold. + // Otherwise (P2P path), use full comparePriority with tx hash tiebreaker. + const isHigherPriority = + priceBumpPercentage !== undefined + ? 
incomingMeta.priorityFee >= getMinimumPriceBumpFee(conflictingMeta.priorityFee, priceBumpPercentage) + : comparePriority(incomingMeta, conflictingMeta) > 0; + + if (isHigherPriority) { txHashesToEvict.push(conflictingHashStr); } else { + const minimumFee = + priceBumpPercentage !== undefined + ? getMinimumPriceBumpFee(conflictingMeta.priorityFee, priceBumpPercentage) + : undefined; return { shouldIgnore: true, txHashesToEvict: [], reason: { code: TxPoolRejectionCode.NULLIFIER_CONFLICT, - message: `Nullifier conflict with existing tx ${conflictingHashStr}`, + message: + minimumFee !== undefined + ? `Nullifier conflict with existing tx ${conflictingHashStr}. Minimum required fee: ${minimumFee}, got: ${incomingMeta.priorityFee}` + : `Nullifier conflict with existing tx ${conflictingHashStr}`, conflictingTxHash: conflictingHashStr, + minimumPriceBumpFee: minimumFee, + txPriorityFee: minimumFee !== undefined ? incomingMeta.priorityFee : undefined, }, }; } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts index a1d565ccfd3a..ca001974be1f 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts @@ -213,7 +213,9 @@ export class TxPoolV2Impl { // in-memory reads, and buffered DB writes. Nothing here can throw an unhandled exception. const poolAccess = this.#createPreAddPoolAccess(); const preAddContext: PreAddContext | undefined = - opts.feeComparisonOnly !== undefined ? { feeComparisonOnly: opts.feeComparisonOnly } : undefined; + opts.feeComparisonOnly !== undefined + ? 
{ feeComparisonOnly: opts.feeComparisonOnly, priceBumpPercentage: this.#config.priceBumpPercentage } + : undefined; await this.#store.transactionAsync(async () => { for (const tx of txs) { diff --git a/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.test.ts b/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.test.ts index 44c74418d3fb..63816730790f 100644 --- a/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.test.ts +++ b/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.test.ts @@ -20,7 +20,6 @@ describe('EventValidationRequest', () => { 0, 0, 0, - 0, 0, // serialized_event padding end 2, // bounded_vec_len 6, // event_commitment diff --git a/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.ts b/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.ts index 6749a66299f0..8a33dd551923 100644 --- a/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.ts +++ b/yarn-project/pxe/src/contract_function_simulator/noir-structs/event_validation_request.ts @@ -5,7 +5,7 @@ import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { TxHash } from '@aztec/stdlib/tx'; // TODO(#14617): should we compute this from constants? This value is aztec-nr specific. -const MAX_EVENT_SERIALIZED_LEN = 11; +const MAX_EVENT_SERIALIZED_LEN = 10; /** * Intermediate struct used to perform batch event validation by PXE. 
The `utilityValidateAndStoreEnqueuedNotesAndEvents` oracle diff --git a/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.test.ts b/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.test.ts index e798eb3c190c..4ac64de1d016 100644 --- a/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.test.ts +++ b/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.test.ts @@ -19,8 +19,7 @@ describe('NoteValidationRequest', () => { '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', - '0x0000000000000000000000000000000000000000000000000000000000000000', - '0x0000000000000000000000000000000000000000000000000000000000000000', // content end (MAX_NOTE_PACKED_LEN = 10) + '0x0000000000000000000000000000000000000000000000000000000000000000', // content end (MAX_NOTE_PACKED_LEN = 8) '0x0000000000000000000000000000000000000000000000000000000000000002', // content length '0x0000000000000000000000000000000000000000000000000000000000000006', // note hash '0x0000000000000000000000000000000000000000000000000000000000000007', // nullifier @@ -57,9 +56,8 @@ describe('NoteValidationRequest', () => { '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', - '0x0000000000000000000000000000000000000000000000000000000000000000', - '0x0000000000000000000000000000000000000000000000000000000000000000', // content end (MAX_NOTE_PACKED_LEN = 10) - '0x0000000000000000000000000000000000000000000000000000000000000000', // extra item, this is a malformed serialization + '0x0000000000000000000000000000000000000000000000000000000000000000', // content end 
(MAX_NOTE_PACKED_LEN = 8) + '0x0000000000000000000000000000000000000000000000000000000000000000', // extra field beyond MAX_NOTE_PACKED_LEN, this is a malformed serialization '0x0000000000000000000000000000000000000000000000000000000000000002', // content length '0x0000000000000000000000000000000000000000000000000000000000000006', // note hash '0x0000000000000000000000000000000000000000000000000000000000000007', // nullifier diff --git a/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.ts b/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.ts index 8b793434ac40..02ebba99e96e 100644 --- a/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.ts +++ b/yarn-project/pxe/src/contract_function_simulator/noir-structs/note_validation_request.ts @@ -4,7 +4,7 @@ import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { TxHash } from '@aztec/stdlib/tx'; // TODO(#14617): should we compute this from constants? This value is aztec-nr specific. -export const MAX_NOTE_PACKED_LEN = 9; +export const MAX_NOTE_PACKED_LEN = 8; /** * Intermediate struct used to perform batch note validation by PXE. 
The `utilityValidateAndStoreEnqueuedNotesAndEvents` oracle diff --git a/yarn-project/sequencer-client/src/config.ts b/yarn-project/sequencer-client/src/config.ts index 61dcb2344c17..d6aa5d93f9f5 100644 --- a/yarn-project/sequencer-client/src/config.ts +++ b/yarn-project/sequencer-client/src/config.ts @@ -53,6 +53,8 @@ export const DefaultSequencerConfig: ResolvedSequencerConfig = { skipInvalidateBlockAsProposer: false, broadcastInvalidBlockProposal: false, injectFakeAttestation: false, + injectHighSValueAttestation: false, + injectUnrecoverableSignatureAttestation: false, fishermanMode: false, shuffleAttestationOrdering: false, skipPushProposedBlocksToArchiver: false, @@ -182,6 +184,14 @@ export const sequencerConfigMappings: ConfigMappingsType = { description: 'Inject a fake attestation (for testing only)', ...booleanConfigHelper(DefaultSequencerConfig.injectFakeAttestation), }, + injectHighSValueAttestation: { + description: 'Inject a malleable attestation with a high-s value (for testing only)', + ...booleanConfigHelper(DefaultSequencerConfig.injectHighSValueAttestation), + }, + injectUnrecoverableSignatureAttestation: { + description: 'Inject an attestation with an unrecoverable signature (for testing only)', + ...booleanConfigHelper(DefaultSequencerConfig.injectUnrecoverableSignatureAttestation), + }, fishermanMode: { env: 'FISHERMAN_MODE', description: diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index 184e83a76506..b9e960fb7c1f 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -9,6 +9,11 @@ import { SlotNumber, } from '@aztec/foundation/branded-types'; import { randomInt } from '@aztec/foundation/crypto/random'; +import { + flipSignature, + generateRecoverableSignature, + generateUnrecoverableSignature, +} from 
'@aztec/foundation/crypto/secp256k1-signer'; import { Fr } from '@aztec/foundation/curves/bn254'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Signature } from '@aztec/foundation/eth-signature'; @@ -759,7 +764,12 @@ export class CheckpointProposalJob implements Traceable { const sorted = orderAttestations(trimmed, committee); // Manipulate the attestations if we've been configured to do so - if (this.config.injectFakeAttestation || this.config.shuffleAttestationOrdering) { + if ( + this.config.injectFakeAttestation || + this.config.injectHighSValueAttestation || + this.config.injectUnrecoverableSignatureAttestation || + this.config.shuffleAttestationOrdering + ) { return this.manipulateAttestations(proposal.slotNumber, epoch, seed, committee, sorted); } @@ -788,7 +798,11 @@ export class CheckpointProposalJob implements Traceable { this.epochCache.computeProposerIndex(slotNumber, epoch, seed, BigInt(committee.length)), ); - if (this.config.injectFakeAttestation) { + if ( + this.config.injectFakeAttestation || + this.config.injectHighSValueAttestation || + this.config.injectUnrecoverableSignatureAttestation + ) { // Find non-empty attestations that are not from the proposer const nonProposerIndices: number[] = []; for (let i = 0; i < attestations.length; i++) { @@ -798,8 +812,20 @@ export class CheckpointProposalJob implements Traceable { } if (nonProposerIndices.length > 0) { const targetIndex = nonProposerIndices[randomInt(nonProposerIndices.length)]; - this.log.warn(`Injecting fake attestation in checkpoint for slot ${slotNumber} at index ${targetIndex}`); - unfreeze(attestations[targetIndex]).signature = Signature.random(); + if (this.config.injectHighSValueAttestation) { + this.log.warn( + `Injecting high-s value attestation in checkpoint for slot ${slotNumber} at index ${targetIndex}`, + ); + unfreeze(attestations[targetIndex]).signature = flipSignature(attestations[targetIndex].signature); + } else if 
(this.config.injectUnrecoverableSignatureAttestation) { + this.log.warn( + `Injecting unrecoverable signature attestation in checkpoint for slot ${slotNumber} at index ${targetIndex}`, + ); + unfreeze(attestations[targetIndex]).signature = generateUnrecoverableSignature(); + } else { + this.log.warn(`Injecting fake attestation in checkpoint for slot ${slotNumber} at index ${targetIndex}`); + unfreeze(attestations[targetIndex]).signature = generateRecoverableSignature(); + } } return new CommitteeAttestationsAndSigners(attestations); } diff --git a/yarn-project/stdlib/src/interfaces/configs.ts b/yarn-project/stdlib/src/interfaces/configs.ts index 3cd2912c078f..88b1366a6889 100644 --- a/yarn-project/stdlib/src/interfaces/configs.ts +++ b/yarn-project/stdlib/src/interfaces/configs.ts @@ -59,6 +59,10 @@ export interface SequencerConfig { broadcastInvalidBlockProposal?: boolean; /** Inject a fake attestation (for testing only) */ injectFakeAttestation?: boolean; + /** Inject a malleable attestation with a high-s value (for testing only) */ + injectHighSValueAttestation?: boolean; + /** Inject an attestation with an unrecoverable signature (for testing only) */ + injectUnrecoverableSignatureAttestation?: boolean; /** Whether to run in fisherman mode: builds blocks on every slot for validation without publishing */ fishermanMode?: boolean; /** Shuffle attestation ordering to create invalid ordering (for testing only) */ @@ -104,6 +108,8 @@ export const SequencerConfigSchema = zodFor()( secondsBeforeInvalidatingBlockAsNonCommitteeMember: z.number(), broadcastInvalidBlockProposal: z.boolean().optional(), injectFakeAttestation: z.boolean().optional(), + injectHighSValueAttestation: z.boolean().optional(), + injectUnrecoverableSignatureAttestation: z.boolean().optional(), fishermanMode: z.boolean().optional(), shuffleAttestationOrdering: z.boolean().optional(), blockDurationMs: z.number().positive().optional(), diff --git a/yarn-project/stdlib/src/update-checker/index.ts 
b/yarn-project/stdlib/src/update-checker/index.ts index 958afdb51dd2..65d4570f3d1a 100644 --- a/yarn-project/stdlib/src/update-checker/index.ts +++ b/yarn-project/stdlib/src/update-checker/index.ts @@ -1 +1,2 @@ -export { UpdateChecker, getPackageVersion } from './update-checker.js'; +export * from './package_version.js'; +export * from './version_checker.js'; diff --git a/yarn-project/stdlib/src/update-checker/package_version.ts b/yarn-project/stdlib/src/update-checker/package_version.ts new file mode 100644 index 000000000000..c186b4de9bba --- /dev/null +++ b/yarn-project/stdlib/src/update-checker/package_version.ts @@ -0,0 +1,17 @@ +import { fileURLToPath } from '@aztec/foundation/url'; + +import { readFileSync } from 'fs'; +import { dirname, resolve } from 'path'; + +/** Returns the package version from the release-please manifest, or undefined if not found. */ +export function getPackageVersion(): string | undefined { + try { + const releasePleaseManifestPath = resolve( + dirname(fileURLToPath(import.meta.url)), + '../../../../.release-please-manifest.json', + ); + return JSON.parse(readFileSync(releasePleaseManifestPath).toString())['.']; + } catch { + return undefined; + } +} diff --git a/yarn-project/stdlib/src/update-checker/update-checker.test.ts b/yarn-project/stdlib/src/update-checker/update-checker.test.ts deleted file mode 100644 index 890a5080d0d3..000000000000 --- a/yarn-project/stdlib/src/update-checker/update-checker.test.ts +++ /dev/null @@ -1,194 +0,0 @@ -import { randomBigInt } from '@aztec/foundation/crypto/random'; - -import { jest } from '@jest/globals'; - -import { type EventMap, UpdateChecker } from './update-checker.js'; - -describe('UpdateChecker', () => { - let checker: UpdateChecker; - let fetch: jest.Mock; - let getCanonicalRollupVersion: jest.Mock<() => Promise>; - let rollupVersionAtStart: bigint; - let nodeVersionAtStart: string; - let eventHandlers: { - [K in keyof EventMap]: jest.Mock<(...args: EventMap[K]) => void>; - }; - - 
beforeEach(() => { - nodeVersionAtStart = '0.1.0'; - rollupVersionAtStart = randomBigInt(1000n); - fetch = jest.fn(() => Promise.resolve(new Response(JSON.stringify({ version: nodeVersionAtStart })))); - getCanonicalRollupVersion = jest.fn(() => Promise.resolve(rollupVersionAtStart)); - - checker = new UpdateChecker( - new URL('http://localhost'), - nodeVersionAtStart, - rollupVersionAtStart, - fetch, - getCanonicalRollupVersion, - 100, - ); - - eventHandlers = { - updateNodeConfig: jest.fn(), - newNodeVersion: jest.fn(), - newRollupVersion: jest.fn(), - updatePublicTelemetryConfig: jest.fn(), - }; - - for (const [event, fn] of Object.entries(eventHandlers)) { - checker.on(event as keyof EventMap, fn); - } - }); - - it.each([ - ['it detects no change', () => {}], - [ - 'fetching config fails', - () => { - fetch.mockRejectedValue(new Error('test error')); - }, - ], - [ - 'fetching rollup address fails', - () => { - getCanonicalRollupVersion.mockRejectedValue(new Error('test error')); - }, - ], - [ - 'the config does not match the schema', - () => { - fetch.mockResolvedValue( - new Response( - JSON.stringify({ - foo: 'bar', - }), - ), - ); - }, - ], - [ - 'the config does not match the schema', - () => { - fetch.mockResolvedValue( - new Response( - JSON.stringify({ - version: 1, - }), - ), - ); - }, - ], - ])('does not emit an event if %s', async (_, patchFn) => { - patchFn(); - for (let run = 0; run < 5; run++) { - await expect(checker.trigger()).resolves.toBeUndefined(); - for (const fn of Object.values(eventHandlers)) { - expect(fn).not.toHaveBeenCalled(); - } - } - }); - - it.each<[keyof EventMap, () => void]>([ - [ - 'newRollupVersion', - () => { - // ensure the new version is completely different to the previous one - getCanonicalRollupVersion.mockResolvedValueOnce(1000n + randomBigInt(1000n)); - }, - ], - [ - 'newNodeVersion', - () => { - fetch.mockResolvedValueOnce(new Response(JSON.stringify({ version: '0.1.0-foo' }))); - }, - ], - [ - 'updateNodeConfig', - 
() => { - fetch.mockResolvedValueOnce(new Response(JSON.stringify({ config: { maxTxsPerBlock: 16 } }))); - }, - ], - [ - 'updatePublicTelemetryConfig', - () => { - fetch.mockResolvedValueOnce( - new Response(JSON.stringify({ publicTelemetry: { publicIncludeMetrics: ['aztec'] } })), - ); - }, - ], - ])('emits event: %s', async (event, patchFn) => { - patchFn(); - await expect(checker.trigger()).resolves.toBeUndefined(); - expect(eventHandlers[event]).toHaveBeenCalled(); - }); - - it('calls updateConfig only when config changes', async () => { - fetch.mockResolvedValue( - new Response( - JSON.stringify({ - version: nodeVersionAtStart, - config: { - foo: 'bar', - }, - }), - ), - ); - - await checker.trigger(); - expect(eventHandlers.updateNodeConfig).toHaveBeenCalledTimes(1); - - await checker.trigger(); - expect(eventHandlers.updateNodeConfig).toHaveBeenCalledTimes(1); - - fetch.mockResolvedValue( - new Response( - JSON.stringify({ - version: nodeVersionAtStart, - config: { - bar: 'baz', - }, - }), - ), - ); - - await checker.trigger(); - expect(eventHandlers.updateNodeConfig).toHaveBeenCalledTimes(2); - }); - - it('calls updatePublicTelemetryConfig only when config changes', async () => { - fetch.mockResolvedValue( - new Response( - JSON.stringify({ - publicTelemetry: { - publicIncludeMetrics: ['aztec'], - }, - }), - ), - ); - - await checker.trigger(); - expect(eventHandlers.updatePublicTelemetryConfig).toHaveBeenCalledTimes(1); - - await checker.trigger(); - expect(eventHandlers.updatePublicTelemetryConfig).toHaveBeenCalledTimes(1); - - fetch.mockResolvedValue( - new Response( - JSON.stringify({ - publicTelemetry: { - publicIncludeMetrics: ['aztec.validator'], - }, - }), - ), - ); - - await checker.trigger(); - expect(eventHandlers.updatePublicTelemetryConfig).toHaveBeenCalledTimes(2); - }); - - it('reaches out to the expected config URL', async () => { - await checker.trigger(); - expect(fetch).toHaveBeenCalledWith(new URL(`http://localhost`)); - }); -}); diff 
--git a/yarn-project/stdlib/src/update-checker/update-checker.ts b/yarn-project/stdlib/src/update-checker/update-checker.ts deleted file mode 100644 index 3bf27f948599..000000000000 --- a/yarn-project/stdlib/src/update-checker/update-checker.ts +++ /dev/null @@ -1,166 +0,0 @@ -import { RegistryContract } from '@aztec/ethereum/contracts'; -import type { ViemClient } from '@aztec/ethereum/types'; -import { EthAddress } from '@aztec/foundation/eth-address'; -import { createLogger } from '@aztec/foundation/log'; -import { RunningPromise } from '@aztec/foundation/running-promise'; -import { fileURLToPath } from '@aztec/foundation/url'; - -import { EventEmitter } from 'events'; -import { readFileSync } from 'fs'; -import { dirname, resolve } from 'path'; -import { isDeepStrictEqual } from 'util'; -import { z } from 'zod'; - -const updateConfigSchema = z.object({ - version: z.string().optional(), - publicTelemetry: z.any().optional(), - config: z.any().optional(), -}); - -export type EventMap = { - newRollupVersion: [{ currentVersion: bigint; latestVersion: bigint }]; - newNodeVersion: [{ currentVersion: string; latestVersion: string }]; - updateNodeConfig: [object]; - updatePublicTelemetryConfig: [object]; -}; - -type Config = { - baseURL: URL; - nodeVersion?: string; - checkIntervalMs?: number; - registryContractAddress: EthAddress; - publicClient: ViemClient; - fetch?: typeof fetch; -}; - -export class UpdateChecker extends EventEmitter { - private runningPromise: RunningPromise; - private lastPatchedConfig: object = {}; - private lastPatchedPublicTelemetryConfig: object = {}; - - constructor( - private updatesUrl: URL, - private nodeVersion: string | undefined, - private rollupVersion: bigint, - private fetch: typeof globalThis.fetch, - private getLatestRollupVersion: () => Promise, - private checkIntervalMs = 10 * 60_000, // every 10 mins - private log = createLogger('foundation:update-check'), - ) { - super(); - this.runningPromise = new 
RunningPromise(this.runChecks, this.log, this.checkIntervalMs); - } - - public static async new(config: Config): Promise { - const registryContract = new RegistryContract(config.publicClient, config.registryContractAddress); - const getLatestRollupVersion = () => registryContract.getRollupVersions().then(versions => versions.at(-1)!); - - return new UpdateChecker( - config.baseURL, - config.nodeVersion ?? getPackageVersion(), - await getLatestRollupVersion(), - config.fetch ?? fetch, - getLatestRollupVersion, - config.checkIntervalMs, - ); - } - - public start(): void { - if (this.runningPromise.isRunning()) { - this.log.debug(`Can't start update checker again`); - return; - } - - this.log.info('Starting update checker', { - nodeVersion: this.nodeVersion, - rollupVersion: this.rollupVersion, - }); - this.runningPromise.start(); - } - - public stop(): Promise { - if (!this.runningPromise.isRunning()) { - this.log.debug(`Can't stop update checker because it is not running`); - return Promise.resolve(); - } - return this.runningPromise.stop(); - } - - public trigger(): Promise { - return this.runningPromise.trigger(); - } - - private runChecks = async (): Promise => { - await Promise.all([this.checkRollupVersion(), this.checkConfig()]); - }; - - private async checkRollupVersion(): Promise { - try { - const canonicalRollupVersion = await this.getLatestRollupVersion(); - if (canonicalRollupVersion !== this.rollupVersion) { - this.log.debug('New canonical rollup version', { - currentVersion: this.rollupVersion, - latestVersion: canonicalRollupVersion, - }); - this.emit('newRollupVersion', { currentVersion: this.rollupVersion, latestVersion: canonicalRollupVersion }); - } - } catch (err) { - this.log.warn(`Failed to check if there is a new rollup`, err); - } - } - - private async checkConfig(): Promise { - try { - const response = await this.fetch(this.updatesUrl); - const body = await response.json(); - if (!response.ok) { - this.log.warn(`Unexpected HTTP response 
checking for updates`, { - status: response.status, - body: await response.text(), - url: this.updatesUrl, - }); - } - - const { version, config, publicTelemetry } = updateConfigSchema.parse(body); - - if (this.nodeVersion && version && version !== this.nodeVersion) { - this.log.debug('New node version', { currentVersion: this.nodeVersion, latestVersion: version }); - this.emit('newNodeVersion', { currentVersion: this.nodeVersion, latestVersion: version }); - } - - if (config && Object.keys(config).length > 0 && !isDeepStrictEqual(config, this.lastPatchedConfig)) { - this.log.debug('New node config', { config }); - this.lastPatchedConfig = config; - this.emit('updateNodeConfig', config); - } - - if ( - publicTelemetry && - Object.keys(publicTelemetry).length > 0 && - !isDeepStrictEqual(publicTelemetry, this.lastPatchedPublicTelemetryConfig) - ) { - this.log.debug('New metrics config', { config }); - this.lastPatchedPublicTelemetryConfig = publicTelemetry; - this.emit('updatePublicTelemetryConfig', publicTelemetry); - } - } catch (err) { - this.log.warn(`Failed to check if there is an update`, err); - } - } -} - -/** - * Returns package version. 
- */ -export function getPackageVersion(): string | undefined { - try { - const releasePleaseManifestPath = resolve( - dirname(fileURLToPath(import.meta.url)), - '../../../../.release-please-manifest.json', - ); - const version = JSON.parse(readFileSync(releasePleaseManifestPath).toString())['.']; - return version; - } catch { - return undefined; - } -} diff --git a/yarn-project/stdlib/src/update-checker/version_checker.test.ts b/yarn-project/stdlib/src/update-checker/version_checker.test.ts new file mode 100644 index 000000000000..b5f8b8029b5e --- /dev/null +++ b/yarn-project/stdlib/src/update-checker/version_checker.test.ts @@ -0,0 +1,80 @@ +import { jest } from '@jest/globals'; + +import { type EventMap, type VersionCheck, VersionChecker } from './version_checker.js'; + +describe('VersionChecker', () => { + let checker: VersionChecker; + let getLatestNodeVersion: jest.Mock<() => Promise<string | undefined>>; + let getLatestRollupVersion: jest.Mock<() => Promise<string | undefined>>; + let eventHandler: jest.Mock<(...args: EventMap['newVersion']) => void>; + + beforeEach(() => { + getLatestNodeVersion = jest.fn(() => Promise.resolve('0.1.0')); + getLatestRollupVersion = jest.fn(() => Promise.resolve('42')); + + const checks: VersionCheck[] = [ + { name: 'node', currentVersion: '0.1.0', getLatestVersion: getLatestNodeVersion }, + { name: 'rollup', currentVersion: '42', getLatestVersion: getLatestRollupVersion }, + ]; + + checker = new VersionChecker(checks, 100); + + eventHandler = jest.fn(); + checker.on('newVersion', eventHandler); + }); + + it.each([ + ['it detects no change', () => {}], + [ + 'fetching node version fails', + () => { + getLatestNodeVersion.mockRejectedValue(new Error('test error')); + }, + ], + [ + 'fetching rollup version fails', + () => { + getLatestRollupVersion.mockRejectedValue(new Error('test error')); + }, + ], + [ + 'fetching node version returns undefined', + () => { + getLatestNodeVersion.mockResolvedValue(undefined); + }, + ], + ])('does not emit an event if %s', async (_,
patchFn) => { + patchFn(); + for (let run = 0; run < 5; run++) { + await expect(checker.trigger()).resolves.toBeUndefined(); + expect(eventHandler).not.toHaveBeenCalled(); + } + }); + + it('emits newVersion when node version changes', async () => { + getLatestNodeVersion.mockResolvedValueOnce('0.2.0'); + await checker.trigger(); + expect(eventHandler).toHaveBeenCalledWith({ + name: 'node', + currentVersion: '0.1.0', + latestVersion: '0.2.0', + }); + }); + + it('emits newVersion when rollup version changes', async () => { + getLatestRollupVersion.mockResolvedValueOnce('999'); + await checker.trigger(); + expect(eventHandler).toHaveBeenCalledWith({ + name: 'rollup', + currentVersion: '42', + latestVersion: '999', + }); + }); + + it('emits for each changed version independently', async () => { + getLatestNodeVersion.mockResolvedValueOnce('0.2.0'); + getLatestRollupVersion.mockResolvedValueOnce('999'); + await checker.trigger(); + expect(eventHandler).toHaveBeenCalledTimes(2); + }); +}); diff --git a/yarn-project/stdlib/src/update-checker/version_checker.ts b/yarn-project/stdlib/src/update-checker/version_checker.ts new file mode 100644 index 000000000000..b239ad9a2ec1 --- /dev/null +++ b/yarn-project/stdlib/src/update-checker/version_checker.ts @@ -0,0 +1,65 @@ +import { createLogger } from '@aztec/foundation/log'; +import { RunningPromise } from '@aztec/foundation/promise'; + +import { EventEmitter } from 'node:events'; + +export type EventMap = { + newVersion: [{ name: string; currentVersion: string; latestVersion: string }]; +}; + +export type VersionCheck = { + name: string; + currentVersion: string; + getLatestVersion: () => Promise<string | undefined>; +}; + +export class VersionChecker extends EventEmitter<EventMap> { + private runningPromise: RunningPromise; + constructor( + private checks: Array<VersionCheck>, + intervalCheckMs = 60_000, + private logger = createLogger('version_checker'), + ) { + super(); + this.runningPromise = new RunningPromise(this.run, logger, intervalCheckMs); + } + + public
start(): void { + if (this.runningPromise.isRunning()) { + this.logger.warn('VersionChecker is already running'); + return; + } + + this.runningPromise.start(); + this.logger.info('Version check started'); + } + + public trigger(): Promise<void> { + return this.runningPromise.trigger(); + } + + public async stop(): Promise<void> { + if (!this.runningPromise.isRunning()) { + this.logger.warn('VersionChecker is not running'); + return; + } + + await this.runningPromise.stop(); + this.logger.info('Version checker stopped'); + } + + private run = async () => { + await Promise.allSettled(this.checks.map(check => this.checkVersion(check))); + }; + + private async checkVersion({ name, currentVersion, getLatestVersion }: VersionCheck): Promise<void> { + try { + const latestVersion = await getLatestVersion(); + if (latestVersion && latestVersion !== currentVersion) { + this.emit('newVersion', { name, latestVersion, currentVersion }); + } + } catch (err) { + this.logger.warn(`Error checking for new ${name} versions: ${err}`, { err }); + } + } +} diff --git a/yarn-project/telemetry-client/src/attributes.ts b/yarn-project/telemetry-client/src/attributes.ts index 297746ae2a61..e6f02c1cad67 100644 --- a/yarn-project/telemetry-client/src/attributes.ts +++ b/yarn-project/telemetry-client/src/attributes.ts @@ -150,3 +150,6 @@ export const L1_BLOCK_PROPOSAL_TX_TARGET = 'aztec.l1.block_proposal_tx_target'; /** Whether tracing methods were used to extract block proposal data */ export const L1_BLOCK_PROPOSAL_USED_TRACE = 'aztec.l1.block_proposal_used_trace'; + +/** The address of an attester (validator) participating in consensus */ +export const ATTESTER_ADDRESS = 'aztec.attester.address'; diff --git a/yarn-project/telemetry-client/src/metrics.ts b/yarn-project/telemetry-client/src/metrics.ts index 10f98f09b528..4c1a58ba1761 100644 --- a/yarn-project/telemetry-client/src/metrics.ts +++ b/yarn-project/telemetry-client/src/metrics.ts @@ -1254,6 +1254,16 @@ export const
VALIDATOR_ATTESTATION_FAILED_NODE_ISSUE_COUNT: MetricDefinition = { description: 'The number of failed attestations due to node issues (timeout, missing data, etc.)', valueType: ValueType.INT, }; +export const VALIDATOR_CURRENT_EPOCH: MetricDefinition = { + name: 'aztec.validator.current_epoch', + description: 'The current epoch number, reflecting total epochs elapsed since genesis', + valueType: ValueType.INT, +}; +export const VALIDATOR_ATTESTED_EPOCH_COUNT: MetricDefinition = { + name: 'aztec.validator.attested_epoch_count', + description: 'The number of epochs in which this node successfully submitted at least one attestation', + valueType: ValueType.INT, +}; export const NODEJS_EVENT_LOOP_DELAY_MIN: MetricDefinition = { name: 'nodejs.eventloop.delay.min', diff --git a/yarn-project/txe/src/rpc_translator.ts b/yarn-project/txe/src/rpc_translator.ts index 95995654f675..55deeb05b91e 100644 --- a/yarn-project/txe/src/rpc_translator.ts +++ b/yarn-project/txe/src/rpc_translator.ts @@ -30,7 +30,7 @@ import { toSingle, } from './util/encoding.js'; -const MAX_EVENT_LEN = 12; // This is MAX_MESSAGE_CONTENT_LEN - PRIVATE_EVENT_RESERVED_FIELDS +const MAX_EVENT_LEN = 10; // This is MAX_MESSAGE_CONTENT_LEN - PRIVATE_EVENT_MSG_PLAINTEXT_RESERVED_FIELDS_LEN const MAX_PRIVATE_EVENTS_PER_TXE_QUERY = 5; export class UnavailableOracleError extends Error { diff --git a/yarn-project/validator-client/src/metrics.ts b/yarn-project/validator-client/src/metrics.ts index 26c35cec5948..160ac8c17280 100644 --- a/yarn-project/validator-client/src/metrics.ts +++ b/yarn-project/validator-client/src/metrics.ts @@ -1,3 +1,5 @@ +import type { EpochNumber } from '@aztec/foundation/branded-types'; +import type { EthAddress } from '@aztec/foundation/eth-address'; import type { BlockProposal } from '@aztec/stdlib/p2p'; import { Attributes, @@ -16,6 +18,8 @@ export class ValidatorMetrics { private successfulAttestationsCount: UpDownCounter; private failedAttestationsBadProposalCount: UpDownCounter; 
private failedAttestationsNodeIssueCount: UpDownCounter; + private currentEpoch: Gauge; + private attestedEpochCount: UpDownCounter; private reexMana: Histogram; private reexTx: Histogram; @@ -64,6 +68,10 @@ export class ValidatorMetrics { }, ); + this.currentEpoch = meter.createGauge(Metrics.VALIDATOR_CURRENT_EPOCH); + + this.attestedEpochCount = createUpDownCounterWithDefault(meter, Metrics.VALIDATOR_ATTESTED_EPOCH_COUNT); + this.reexMana = meter.createHistogram(Metrics.VALIDATOR_RE_EXECUTION_MANA); this.reexTx = meter.createHistogram(Metrics.VALIDATOR_RE_EXECUTION_TX_COUNT); @@ -110,4 +118,14 @@ export class ValidatorMetrics { [Attributes.IS_COMMITTEE_MEMBER]: inCommittee, }); } + + /** Update the gauge tracking the current epoch number (proxy for total epochs elapsed). */ + public setCurrentEpoch(epoch: EpochNumber) { + this.currentEpoch.record(Number(epoch)); + } + + /** Increment the count of epochs in which the given attester submitted at least one attestation. */ + public incAttestedEpochCount(attester: EthAddress) { + this.attestedEpochCount.add(1, { [Attributes.ATTESTER_ADDRESS]: attester.toString() }); + } } diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index fd0ae9852837..892c43942e6e 100644 --- a/yarn-project/validator-client/src/validator.ts +++ b/yarn-project/validator-client/src/validator.ts @@ -89,6 +89,8 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) private lastEpochForCommitteeUpdateLoop: EpochNumber | undefined; private epochCacheUpdateLoop: RunningPromise; + /** Tracks the last epoch in which each attester successfully submitted at least one attestation. 
*/ + private lastAttestedEpochByAttester: Map<string, EpochNumber> = new Map(); private proposersOfInvalidBlocks: Set<string> = new Set(); @@ -160,6 +162,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) this.log.trace(`No committee found for slot`); return; } + this.metrics.setCurrentEpoch(epoch); if (epoch !== this.lastEpochForCommitteeUpdateLoop) { const me = this.getValidatorAddresses(); const committeeSet = new Set(committee.map(v => v.toString())); @@ -556,6 +559,17 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) this.metrics.incSuccessfulAttestations(inCommittee.length); + + // Track epoch participation per attester: count each (attester, epoch) pair at most once + const proposalEpoch = getEpochAtSlot(slotNumber, this.epochCache.getL1Constants()); + for (const attester of inCommittee) { + const key = attester.toString(); + const lastEpoch = this.lastAttestedEpochByAttester.get(key); + if (lastEpoch === undefined || proposalEpoch > lastEpoch) { + this.lastAttestedEpochByAttester.set(key, proposalEpoch); + this.metrics.incAttestedEpochCount(attester); + } + } + + // Determine which validators should attest let attestors: EthAddress[]; if (partOfCommittee) {