diff --git a/noir-projects/aztec-nr/aztec/src/messages/discovery/nonce_discovery.nr b/noir-projects/aztec-nr/aztec/src/messages/discovery/nonce_discovery.nr index a9a406549990..a7d3c276aa67 100644 --- a/noir-projects/aztec-nr/aztec/src/messages/discovery/nonce_discovery.nr +++ b/noir-projects/aztec-nr/aztec/src/messages/discovery/nonce_discovery.nr @@ -43,14 +43,15 @@ pub unconstrained fn attempt_note_nonce_discovery( // We need to find nonces (typically just one) that result in a note hash that, once siloed into a unique note // hash, is one of the note hashes created by the transaction. - unique_note_hashes_in_tx.for_eachi(|i, expected_unique_note_hash| { - // Nonces are computed by hashing the first nullifier in the transaction with the index of the note in the new - // note hashes array. We therefore know for each note in every transaction what its nonce is. - let candidate_nonce = compute_note_hash_nonce(first_nullifier_in_tx, i); - - // Given note nonce, note content and metadata, we can compute the note hash and silo it to check if it matches - // the note hash at the array index we're currently processing. TODO(#11157): handle failed - // note_hash_and_nullifier computation + // The nonce is meant to be derived from the index of the note hash in the transaction effects array. However, due + // to an issue in the kernels the nonce might actually use any of the possible note hash indices - not necessarily + // the one that corresponds to the note hash. Hence, we need to try them all. + for i in 0..MAX_NOTE_HASHES_PER_TX { + let nonce_for_i = compute_note_hash_nonce(first_nullifier_in_tx, i); + + // Given note nonce, note content and metadata, we can compute the note hash and silo it to check if + // the resulting unique note matches any in the transaction. 
+ // TODO(#11157): handle failed note_hash_and_nullifier computation let hashes = compute_note_hash_and_nullifier( packed_note, owner, @@ -58,23 +59,37 @@ pub unconstrained fn attempt_note_nonce_discovery( note_type_id, contract_address, randomness, - candidate_nonce, + nonce_for_i, ) .expect(f"Failed to compute a note hash for note type {note_type_id}"); - let siloed_note_hash = compute_siloed_note_hash(contract_address, hashes.note_hash); - let unique_note_hash = compute_unique_note_hash(candidate_nonce, siloed_note_hash); + let siloed_note_hash_for_i = compute_siloed_note_hash(contract_address, hashes.note_hash); + let unique_note_hash_for_i = compute_unique_note_hash(nonce_for_i, siloed_note_hash_for_i); - if unique_note_hash == expected_unique_note_hash { - // Note that while we did check that the note hash is the preimage of the expected unique note hash, we - // perform no validations on the nullifier - we fundamentally cannot, since only the application knows how - // to compute nullifiers. We simply trust it to have provided the correct one: if it hasn't, then PXE may - // fail to realize that a given note has been nullified already, and calls to the application could result - // in invalid transactions (with duplicate nullifiers). This is not a concern because an application - // already has more direct means of making a call to it fail the transaction. + let matching_notes = bvec_filter( + unique_note_hashes_in_tx, + |unique_note_hash_in_tx| unique_note_hash_in_tx == unique_note_hash_for_i, + ); + if matching_notes.len() > 1 { + let identical_note_hashes = matching_notes.len(); + // Note that we don't actually check that the note hashes array contains unique values, only that the note + // we found is unique. We don't expect for this to ever happen (it'd indicate a malicious node or PXE, + // which + // are both assumed to be cooperative) so testing for it just in case is unnecessary, but we _do_ need to + // handle it if we find a duplicate. 
+ panic( + f"Received {identical_note_hashes} identical note hashes for a transaction - these should all be unique", + ) + } else if matching_notes.len() == 1 { + // Note that while we did check that the note hash is the preimage of a unique note hash, we perform no + // validations on the nullifier - we fundamentally cannot, since only the application knows how to compute + // nullifiers. We simply trust it to have provided the correct one: if it hasn't, then PXE may fail to + // realize that a given note has been nullified already, and calls to the application could result in + // invalid transactions (with duplicate nullifiers). This is not a concern because an application already + // has more direct means of making a call to it fail the transaction. discovered_notes.push( DiscoveredNoteInfo { - note_nonce: candidate_nonce, + note_nonce: nonce_for_i, note_hash: hashes.note_hash, // TODO: The None case will be handled in a followup PR. // https://linear.app/aztec-labs/issue/F-265/store-external-notes @@ -88,7 +103,7 @@ pub unconstrained fn attempt_note_nonce_discovery( // multiple times in the same transaction with different nonces. This typically doesn't happen due to notes // containing random values in order to hide their contents. } - }); + } debug_log_format( "Found valid nonces for a total of {0} notes", @@ -98,6 +113,22 @@ pub unconstrained fn attempt_note_nonce_discovery( *discovered_notes } +// There is no BoundedVec::filter in the stdlib, so we use this until that is implemented. 
+unconstrained fn bvec_filter( + bvec: BoundedVec, + filter: fn[Env](T) -> bool, +) -> BoundedVec { + let filtered = &mut BoundedVec::new(); + + bvec.for_each(|value| { + if filter(value) { + filtered.push(value); + } + }); + + *filtered +} + mod test { use crate::{ messages::{discovery::NoteHashAndNullifier, logs::note::MAX_NOTE_PACKED_LEN}, @@ -309,4 +340,96 @@ mod test { & (discovered_note.inner_nullifier == second_note_and_data.inner_nullifier) })); } + + #[test] + unconstrained fn single_note_misaligned_nonce() { + let note_index_in_tx = 2; + let note_and_data = construct_note(VALUE, note_index_in_tx); + + let mut unique_note_hashes_in_tx = BoundedVec::from_array([ + random(), random(), random(), random(), random(), random(), random(), + ]); + + // The note is not at the correct index + unique_note_hashes_in_tx.set(note_index_in_tx + 1, note_and_data.unique_note_hash); + + let discovered_notes = attempt_note_nonce_discovery( + unique_note_hashes_in_tx, + FIRST_NULLIFIER_IN_TX, + compute_note_hash_and_nullifier, + CONTRACT_ADDRESS, + OWNER, + STORAGE_SLOT, + RANDOMNESS, + MockNote::get_id(), + BoundedVec::from_array(note_and_data.note.pack()), + ); + + assert_eq(discovered_notes.len(), 1); + let discovered_note = discovered_notes.get(0); + + assert_eq(discovered_note.note_nonce, note_and_data.note_nonce); + assert_eq(discovered_note.note_hash, note_and_data.note_hash); + assert_eq(discovered_note.inner_nullifier, note_and_data.inner_nullifier); + } + + #[test] + unconstrained fn single_note_nonce_with_index_past_note_hashes_in_tx() { + let mut unique_note_hashes_in_tx = BoundedVec::from_array([ + random(), random(), random(), random(), random(), random(), random(), + ]); + + // The nonce is computed with an index that does not exist in the tx + let note_index_in_tx = unique_note_hashes_in_tx.len() + 5; + let note_and_data = construct_note(VALUE, note_index_in_tx); + + // The note is inserted at an arbitrary index - its true index is out of the array's bounds + 
unique_note_hashes_in_tx.set(2, note_and_data.unique_note_hash); + + let discovered_notes = attempt_note_nonce_discovery( + unique_note_hashes_in_tx, + FIRST_NULLIFIER_IN_TX, + compute_note_hash_and_nullifier, + CONTRACT_ADDRESS, + OWNER, + STORAGE_SLOT, + RANDOMNESS, + MockNote::get_id(), + BoundedVec::from_array(note_and_data.note.pack()), + ); + + assert_eq(discovered_notes.len(), 1); + let discovered_note = discovered_notes.get(0); + + assert_eq(discovered_note.note_nonce, note_and_data.note_nonce); + assert_eq(discovered_note.note_hash, note_and_data.note_hash); + assert_eq(discovered_note.inner_nullifier, note_and_data.inner_nullifier); + } + + #[test(should_fail_with = "identical note hashes for a transaction")] + unconstrained fn duplicate_unique_note_hashes() { + let note_index_in_tx = 2; + let note_and_data = construct_note(VALUE, note_index_in_tx); + + let mut unique_note_hashes_in_tx = BoundedVec::from_array([ + random(), random(), random(), random(), random(), random(), random(), + ]); + + // The same unique note hash is present in two indices in the array, which is not allowed. Note that we don't + // test all note hashes for uniqueness, only those that we actually find. 
+ unique_note_hashes_in_tx.set(note_index_in_tx, note_and_data.unique_note_hash); + unique_note_hashes_in_tx.set(note_index_in_tx + 1, note_and_data.unique_note_hash); + + let _ = attempt_note_nonce_discovery( + unique_note_hashes_in_tx, + FIRST_NULLIFIER_IN_TX, + compute_note_hash_and_nullifier, + CONTRACT_ADDRESS, + OWNER, + STORAGE_SLOT, + RANDOMNESS, + MockNote::get_id(), + BoundedVec::from_array(note_and_data.note.pack()), + ); + } } diff --git a/noir-projects/aztec-nr/aztec/src/note/note_metadata.nr b/noir-projects/aztec-nr/aztec/src/note/note_metadata.nr index e006b361f654..cee82e502885 100644 --- a/noir-projects/aztec-nr/aztec/src/note/note_metadata.nr +++ b/noir-projects/aztec-nr/aztec/src/note/note_metadata.nr @@ -82,8 +82,8 @@ impl NoteMetadata { self.stage == NoteStage.PENDING_PREVIOUS_PHASE } - /// Returns true if the note is settled, i.e. if it's been created in a prior transaction and is therefore already - /// in the note hash tree. + /// Returns `true` if the note is settled, i.e. if it's been created in a prior transaction and is therefore + /// already in the note hash tree. 
    pub fn is_settled(self) -> bool {
         self.stage == NoteStage.SETTLED
     }
diff --git a/spartan/scripts/deploy_network.sh b/spartan/scripts/deploy_network.sh
index 676ae4d6e7e4..aa5026399d0f 100755
--- a/spartan/scripts/deploy_network.sh
+++ b/spartan/scripts/deploy_network.sh
@@ -107,7 +107,7 @@ PROVER_FAILED_PROOF_STORE=${PROVER_FAILED_PROOF_STORE:-}
 SEQ_MIN_TX_PER_BLOCK=${SEQ_MIN_TX_PER_BLOCK:-1}
 SEQ_MAX_TX_PER_BLOCK=${SEQ_MAX_TX_PER_BLOCK:-null}
 SEQ_MAX_TX_PER_CHECKPOINT=${SEQ_MAX_TX_PER_CHECKPOINT:-8}
-SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER=${SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER:-}
+SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER=${SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER:-2}
 SEQ_BLOCK_DURATION_MS=${SEQ_BLOCK_DURATION_MS:-}
 SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT=${SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT:-}
 SEQ_BUILD_CHECKPOINT_IF_EMPTY=${SEQ_BUILD_CHECKPOINT_IF_EMPTY:-}
diff --git a/yarn-project/.claude/rules/bash-no-echo-exit.md b/yarn-project/.claude/rules/bash-no-echo-exit.md
new file mode 100644
index 000000000000..cbe045dd4189
--- /dev/null
+++ b/yarn-project/.claude/rules/bash-no-echo-exit.md
@@ -0,0 +1,13 @@
+# Bash Command Rules
+
+**NEVER append `; echo "EXIT: $?"` or similar exit-code suffixes to any command.** The Bash tool already reports exit codes directly. Adding these suffixes is redundant and causes unnecessary permission prompts.
+
+Bad:
+```bash
+yarn test src/file.test.ts > /tmp/out.log 2>&1; echo "EXIT: $?"
+``` + +Good: +```bash +yarn test src/file.test.ts > /tmp/out.log 2>&1 +``` diff --git a/yarn-project/archiver/src/archiver-store.test.ts b/yarn-project/archiver/src/archiver-store.test.ts index 084c4dd89573..ef0b7f5d0789 100644 --- a/yarn-project/archiver/src/archiver-store.test.ts +++ b/yarn-project/archiver/src/archiver-store.test.ts @@ -24,7 +24,7 @@ import { EventEmitter } from 'events'; import { type MockProxy, mock } from 'jest-mock-extended'; import { Archiver, type ArchiverEmitter } from './archiver.js'; -import { InitialBlockNumberNotSequentialError } from './errors.js'; +import { BlockNumberNotSequentialError } from './errors.js'; import type { ArchiverInstrumentation } from './modules/instrumentation.js'; import { ArchiverL1Synchronizer } from './modules/l1_synchronizer.js'; import { KVArchiverDataStore } from './store/kv_archiver_store.js'; @@ -265,7 +265,7 @@ describe('Archiver Store', () => { await archiver.addBlock(block1); // Block 3 should be rejected because block 2 is missing - await expect(archiver.addBlock(block3)).rejects.toThrow(InitialBlockNumberNotSequentialError); + await expect(archiver.addBlock(block3)).rejects.toThrow(BlockNumberNotSequentialError); }); it('rejects blocks with duplicate block numbers', async () => { @@ -276,7 +276,7 @@ describe('Archiver Store', () => { await archiver.addBlock(block2); // Adding block 2 again shoud be rejected - await expect(archiver.addBlock(block2)).rejects.toThrow(InitialBlockNumberNotSequentialError); + await expect(archiver.addBlock(block2)).rejects.toThrow(BlockNumberNotSequentialError); }); it('rejects first block if not starting from block 1', async () => { diff --git a/yarn-project/archiver/src/archiver-sync.test.ts b/yarn-project/archiver/src/archiver-sync.test.ts index c95735262b64..e0050f28e8a0 100644 --- a/yarn-project/archiver/src/archiver-sync.test.ts +++ b/yarn-project/archiver/src/archiver-sync.test.ts @@ -1327,7 +1327,7 @@ describe('Archiver Sync', () => { expect(await 
archiver.getSynchedCheckpointNumber()).toEqual(CheckpointNumber(1)); const blockAlreadySyncedFromCheckpoint = cp1.blocks[cp1.blocks.length - 1]; - // Now try and add one of the blocks via the addProposedBlocks method. It should throw + // Now try and add one of the blocks via the addProposedBlock method. It should throw await expect(archiver.addBlock(blockAlreadySyncedFromCheckpoint)).rejects.toThrow(); }, 10_000); @@ -1428,8 +1428,12 @@ describe('Archiver Sync', () => { const { checkpoint: cp3 } = await fake.addCheckpoint(CheckpointNumber(3), { l1BlockNumber: 5010n }); // Add blocks from BOTH checkpoints locally (matching the L1 checkpoints) - await archiverStore.addProposedBlocks(cp2.blocks, { force: true }); - await archiverStore.addProposedBlocks(cp3.blocks, { force: true }); + for (const block of cp2.blocks) { + await archiverStore.addProposedBlock(block, { force: true }); + } + for (const block of cp3.blocks) { + await archiverStore.addProposedBlock(block, { force: true }); + } // Verify all blocks are visible locally const lastBlockInCheckpoint3 = cp3.blocks[cp3.blocks.length - 1].number; diff --git a/yarn-project/archiver/src/archiver.ts b/yarn-project/archiver/src/archiver.ts index f27e8cbab3c7..3955cfc83c22 100644 --- a/yarn-project/archiver/src/archiver.ts +++ b/yarn-project/archiver/src/archiver.ts @@ -30,7 +30,7 @@ import { import { type TelemetryClient, type Traceable, type Tracer, trackSpan } from '@aztec/telemetry-client'; import { type ArchiverConfig, mapArchiverConfig } from './config.js'; -import { NoBlobBodiesFoundError } from './errors.js'; +import { BlockAlreadyCheckpointedError, NoBlobBodiesFoundError } from './errors.js'; import { validateAndLogTraceAvailability } from './l1/validate_trace.js'; import { ArchiverDataSourceBase } from './modules/data_source_base.js'; import { ArchiverDataStoreUpdater } from './modules/data_store_updater.js'; @@ -242,10 +242,15 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra 
} try { - await this.updater.addProposedBlocks([block]); + await this.updater.addProposedBlock(block); this.log.debug(`Added block ${block.number} to store`); resolve(); } catch (err: any) { + if (err instanceof BlockAlreadyCheckpointedError) { + this.log.debug(`Proposed block ${block.number} matches already checkpointed block, ignoring late proposal`); + resolve(); + continue; + } this.log.error(`Failed to add block ${block.number} to store: ${err.message}`); reject(err); } diff --git a/yarn-project/archiver/src/errors.ts b/yarn-project/archiver/src/errors.ts index 9ef345cf89e8..f47122bc4514 100644 --- a/yarn-project/archiver/src/errors.ts +++ b/yarn-project/archiver/src/errors.ts @@ -6,24 +6,9 @@ export class NoBlobBodiesFoundError extends Error { } } -export class InitialBlockNumberNotSequentialError extends Error { - constructor( - public readonly newBlockNumber: number, - public readonly previousBlockNumber: number | undefined, - ) { - super( - `Cannot insert new block ${newBlockNumber} given previous block number in store is ${ - previousBlockNumber ?? 'undefined' - }`, - ); - } -} - export class BlockNumberNotSequentialError extends Error { constructor(newBlockNumber: number, previous: number | undefined) { - super( - `Cannot insert new block ${newBlockNumber} given previous block number in batch is ${previous ?? 'undefined'}`, - ); + super(`Cannot insert new block ${newBlockNumber} given previous block number is ${previous ?? 'undefined'}`); } } @@ -48,14 +33,6 @@ export class CheckpointNumberNotSequentialError extends Error { } } -export class CheckpointNumberNotConsistentError extends Error { - constructor(newCheckpointNumber: number, previous: number | undefined) { - super( - `Cannot insert block for new checkpoint ${newCheckpointNumber} given previous block was checkpoint ${previous ?? 
'undefined'}`, - ); - } -} - export class BlockIndexNotSequentialError extends Error { constructor(newBlockIndex: number, previousBlockIndex: number | undefined) { super( @@ -89,6 +66,15 @@ export class BlockNotFoundError extends Error { } } +/** Thrown when a proposed block matches a block that was already checkpointed. This is expected for late proposals. */ +export class BlockAlreadyCheckpointedError extends Error { + constructor(public readonly blockNumber: number) { + super(`Block ${blockNumber} has already been checkpointed with the same content`); + this.name = 'BlockAlreadyCheckpointedError'; + } +} + +/** Thrown when a proposed block conflicts with an already checkpointed block (different content). */ export class CannotOverwriteCheckpointedBlockError extends Error { constructor( public readonly blockNumber: number, diff --git a/yarn-project/archiver/src/modules/data_store_updater.test.ts b/yarn-project/archiver/src/modules/data_store_updater.test.ts index 94721e4c22ea..d93e2361efa4 100644 --- a/yarn-project/archiver/src/modules/data_store_updater.test.ts +++ b/yarn-project/archiver/src/modules/data_store_updater.test.ts @@ -57,7 +57,7 @@ describe('ArchiverDataStoreUpdater', () => { }); describe('contract data', () => { - it('stores contract class and instance data when blocks are added via addProposedBlocks', async () => { + it('stores contract class and instance data when blocks are added via addProposedBlock', async () => { // Create block with contract class and instance logs const block = await L2Block.random(BlockNumber(1), { checkpointNumber: CheckpointNumber(1), @@ -66,7 +66,7 @@ describe('ArchiverDataStoreUpdater', () => { block.body.txEffects[0].contractClassLogs = [contractClassLog]; block.body.txEffects[0].privateLogs = [PrivateLog.fromBuffer(getSampleContractInstancePublishedEventPayload())]; - await updater.addProposedBlocks([block]); + await updater.addProposedBlock(block); // Verify contract class was stored const retrievedClass = await 
store.getContractClass(contractClassId); @@ -92,7 +92,7 @@ describe('ArchiverDataStoreUpdater', () => { PrivateLog.fromBuffer(getSampleContractInstancePublishedEventPayload()), ]; - await updater.addProposedBlocks([localBlock]); + await updater.addProposedBlock(localBlock); // Verify contract data was stored const timestamp = localBlock.header.globalVariables.timestamp + 1n; @@ -155,7 +155,7 @@ describe('ArchiverDataStoreUpdater', () => { slotNumber: SlotNumber(100), }); - await updater.addProposedBlocks([block]); + await updater.addProposedBlock(block); // Create checkpoint with the SAME block (same archive root) const publishedCheckpoint = makePublishedCheckpoint(makeCheckpoint([block]), 10); @@ -175,7 +175,7 @@ describe('ArchiverDataStoreUpdater', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), slotNumber: SlotNumber(100), }); - await updater.addProposedBlocks([localBlock]); + await updater.addProposedBlock(localBlock); const publicLogsBefore = await store.getPublicLogs({}); expect(publicLogsBefore.logs.map(l => l.log)).toEqual(localBlock.body.txEffects.flatMap(tx => tx.publicLogs)); @@ -203,7 +203,7 @@ describe('ArchiverDataStoreUpdater', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), slotNumber: SlotNumber(100), }); - await updater.addProposedBlocks([localBlock]); + await updater.addProposedBlock(localBlock); const publicLogsBefore = await store.getPublicLogs({}); expect(publicLogsBefore.logs.map(l => l.log)).toEqual(localBlock.body.txEffects.flatMap(tx => tx.publicLogs)); diff --git a/yarn-project/archiver/src/modules/data_store_updater.ts b/yarn-project/archiver/src/modules/data_store_updater.ts index f1a0ed35fc51..6bf2975b3c3f 100644 --- a/yarn-project/archiver/src/modules/data_store_updater.ts +++ b/yarn-project/archiver/src/modules/data_store_updater.ts @@ -52,29 +52,29 @@ export class ArchiverDataStoreUpdater { ) {} /** - * Adds proposed blocks to the store with contract class/instance extraction from logs. 
- * These are uncheckpointed blocks that have been proposed by the sequencer but not yet included in a checkpoint on L1. + * Adds a proposed block to the store with contract class/instance extraction from logs. + * This is an uncheckpointed block that has been proposed by the sequencer but not yet included in a checkpoint on L1. * Extracts ContractClassPublished, ContractInstancePublished, ContractInstanceUpdated events, * and individually broadcasted functions from the block logs. * - * @param blocks - The proposed L2 blocks to add. + * @param block - The proposed L2 block to add. * @param pendingChainValidationStatus - Optional validation status to set. * @returns True if the operation is successful. */ - public async addProposedBlocks( - blocks: L2Block[], + public async addProposedBlock( + block: L2Block, pendingChainValidationStatus?: ValidateCheckpointResult, ): Promise { const result = await this.store.transactionAsync(async () => { - await this.store.addProposedBlocks(blocks); + await this.store.addProposedBlock(block); const opResults = await Promise.all([ // Update the pending chain validation status if provided pendingChainValidationStatus && this.store.setPendingChainValidationStatus(pendingChainValidationStatus), - // Add any logs emitted during the retrieved blocks - this.store.addLogs(blocks), - // Unroll all logs emitted during the retrieved blocks and extract any contract classes and instances from them - ...blocks.map(block => this.addContractDataToDb(block)), + // Add any logs emitted during the retrieved block + this.store.addLogs([block]), + // Unroll all logs emitted during the retrieved block and extract any contract classes and instances from it + this.addContractDataToDb(block), ]); await this.l2TipsCache?.refresh(); @@ -108,7 +108,7 @@ export class ArchiverDataStoreUpdater { await this.store.addCheckpoints(checkpoints); - // Filter out blocks that were already inserted via addProposedBlocks() to avoid duplicating logs/contract data + // 
Filter out blocks that were already inserted via addProposedBlock() to avoid duplicating logs/contract data const newBlocks = checkpoints .flatMap((ch: PublishedCheckpoint) => ch.checkpoint.blocks) .filter(b => lastAlreadyInsertedBlockNumber === undefined || b.number > lastAlreadyInsertedBlockNumber); diff --git a/yarn-project/archiver/src/store/block_store.ts b/yarn-project/archiver/src/store/block_store.ts index a9ec9a501c85..d6e28a72ff01 100644 --- a/yarn-project/archiver/src/store/block_store.ts +++ b/yarn-project/archiver/src/store/block_store.ts @@ -35,15 +35,14 @@ import { } from '@aztec/stdlib/tx'; import { + BlockAlreadyCheckpointedError, BlockArchiveNotConsistentError, BlockIndexNotSequentialError, BlockNotFoundError, BlockNumberNotSequentialError, CannotOverwriteCheckpointedBlockError, CheckpointNotFoundError, - CheckpointNumberNotConsistentError, CheckpointNumberNotSequentialError, - InitialBlockNumberNotSequentialError, InitialCheckpointNumberNotSequentialError, } from '../errors.js'; @@ -141,23 +140,18 @@ export class BlockStore { } /** - * Append new proposed blocks to the store's list. All blocks must be for the 'current' checkpoint. - * These are uncheckpointed blocks that have been proposed by the sequencer but not yet included in a checkpoint on L1. + * Append a new proposed block to the store. + * This is an uncheckpointed block that has been proposed by the sequencer but not yet included in a checkpoint on L1. * For checkpointed blocks (already published to L1), use addCheckpoints() instead. - * @param blocks - The proposed L2 blocks to be added to the store. + * @param block - The proposed L2 block to be added to the store. * @returns True if the operation is successful. 
*/ - async addProposedBlocks(blocks: L2Block[], opts: { force?: boolean } = {}): Promise { - if (blocks.length === 0) { - return true; - } - + async addProposedBlock(block: L2Block, opts: { force?: boolean } = {}): Promise { return await this.db.transactionAsync(async () => { - // Check that the block immediately before the first block to be added is present in the store. - const firstBlockNumber = blocks[0].number; - const firstBlockCheckpointNumber = blocks[0].checkpointNumber; - const firstBlockIndex = blocks[0].indexWithinCheckpoint; - const firstBlockLastArchive = blocks[0].header.lastArchive.root; + const blockNumber = block.number; + const blockCheckpointNumber = block.checkpointNumber; + const blockIndex = block.indexWithinCheckpoint; + const blockLastArchive = block.header.lastArchive.root; // Extract the latest block and checkpoint numbers const previousBlockNumber = await this.getLatestBlockNumber(); @@ -165,71 +159,52 @@ export class BlockStore { // Verify we're not overwriting checkpointed blocks const lastCheckpointedBlockNumber = await this.getCheckpointedL2BlockNumber(); - if (!opts.force && firstBlockNumber <= lastCheckpointedBlockNumber) { - throw new CannotOverwriteCheckpointedBlockError(firstBlockNumber, lastCheckpointedBlockNumber); + if (!opts.force && blockNumber <= lastCheckpointedBlockNumber) { + // Check if the proposed block matches the already-checkpointed one + const existingBlock = await this.getBlock(BlockNumber(blockNumber)); + if (existingBlock && existingBlock.archive.root.equals(block.archive.root)) { + throw new BlockAlreadyCheckpointedError(blockNumber); + } + throw new CannotOverwriteCheckpointedBlockError(blockNumber, lastCheckpointedBlockNumber); } - // Check that the first block number is the expected one - if (!opts.force && previousBlockNumber !== firstBlockNumber - 1) { - throw new InitialBlockNumberNotSequentialError(firstBlockNumber, previousBlockNumber); + // Check that the block number is the expected one + if 
(!opts.force && previousBlockNumber !== blockNumber - 1) { + throw new BlockNumberNotSequentialError(blockNumber, previousBlockNumber); } // The same check as above but for checkpoints - if (!opts.force && previousCheckpointNumber !== firstBlockCheckpointNumber - 1) { - throw new InitialCheckpointNumberNotSequentialError(firstBlockCheckpointNumber, previousCheckpointNumber); + if (!opts.force && previousCheckpointNumber !== blockCheckpointNumber - 1) { + throw new CheckpointNumberNotSequentialError(blockCheckpointNumber, previousCheckpointNumber); } // Extract the previous block if there is one and see if it is for the same checkpoint or not const previousBlockResult = await this.getBlock(previousBlockNumber); - let expectedFirstblockIndex = 0; + let expectedBlockIndex = 0; let previousBlockIndex: number | undefined = undefined; if (previousBlockResult !== undefined) { - if (previousBlockResult.checkpointNumber === firstBlockCheckpointNumber) { + if (previousBlockResult.checkpointNumber === blockCheckpointNumber) { // The previous block is for the same checkpoint, therefore our index should follow it previousBlockIndex = previousBlockResult.indexWithinCheckpoint; - expectedFirstblockIndex = previousBlockIndex + 1; + expectedBlockIndex = previousBlockIndex + 1; } - if (!previousBlockResult.archive.root.equals(firstBlockLastArchive)) { + if (!previousBlockResult.archive.root.equals(blockLastArchive)) { throw new BlockArchiveNotConsistentError( - firstBlockNumber, + blockNumber, previousBlockResult.number, - firstBlockLastArchive, + blockLastArchive, previousBlockResult.archive.root, ); } } - // Now check that the first block has the expected index value - if (!opts.force && expectedFirstblockIndex !== firstBlockIndex) { - throw new BlockIndexNotSequentialError(firstBlockIndex, previousBlockIndex); + // Now check that the block has the expected index value + if (!opts.force && expectedBlockIndex !== blockIndex) { + throw new BlockIndexNotSequentialError(blockIndex, 
previousBlockIndex); } - // Iterate over blocks array and insert them, checking that the block numbers and indexes are sequential. Also check they are for the correct checkpoint. - let previousBlock: L2Block | undefined = undefined; - for (const block of blocks) { - if (!opts.force && previousBlock) { - if (previousBlock.number + 1 !== block.number) { - throw new BlockNumberNotSequentialError(block.number, previousBlock.number); - } - if (previousBlock.indexWithinCheckpoint + 1 !== block.indexWithinCheckpoint) { - throw new BlockIndexNotSequentialError(block.indexWithinCheckpoint, previousBlock.indexWithinCheckpoint); - } - if (!previousBlock.archive.root.equals(block.header.lastArchive.root)) { - throw new BlockArchiveNotConsistentError( - block.number, - previousBlock.number, - block.header.lastArchive.root, - previousBlock.archive.root, - ); - } - } - if (!opts.force && firstBlockCheckpointNumber !== block.checkpointNumber) { - throw new CheckpointNumberNotConsistentError(block.checkpointNumber, firstBlockCheckpointNumber); - } - previousBlock = block; - await this.addBlockToDatabase(block, block.checkpointNumber, block.indexWithinCheckpoint); - } + await this.addBlockToDatabase(block, block.checkpointNumber, block.indexWithinCheckpoint); return true; }); diff --git a/yarn-project/archiver/src/store/kv_archiver_store.test.ts b/yarn-project/archiver/src/store/kv_archiver_store.test.ts index d05044ded8d2..a77015844573 100644 --- a/yarn-project/archiver/src/store/kv_archiver_store.test.ts +++ b/yarn-project/archiver/src/store/kv_archiver_store.test.ts @@ -42,13 +42,12 @@ import { AppendOnlyTreeSnapshot } from '@aztec/stdlib/trees'; import { type IndexedTxEffect, TxHash } from '@aztec/stdlib/tx'; import { + BlockAlreadyCheckpointedError, BlockArchiveNotConsistentError, BlockIndexNotSequentialError, BlockNumberNotSequentialError, CannotOverwriteCheckpointedBlockError, - CheckpointNumberNotConsistentError, CheckpointNumberNotSequentialError, - 
InitialBlockNumberNotSequentialError, InitialCheckpointNumberNotSequentialError, } from '../errors.js'; import { MessageStoreError } from '../store/message_store.js'; @@ -67,6 +66,18 @@ import { } from '../test/mock_structs.js'; import { type ArchiverL1SynchPoint, KVArchiverDataStore } from './kv_archiver_store.js'; +async function addProposedBlocks( + store: KVArchiverDataStore, + blocks: L2Block[], + opts?: { force?: boolean }, +): Promise { + let result = true; + for (const block of blocks) { + result = (await store.addProposedBlock(block, opts)) && result; + } + return result; +} + describe('KVArchiverDataStore', () => { let store: KVArchiverDataStore; let publishedCheckpoints: PublishedCheckpoint[]; @@ -388,7 +399,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: lastBlockArchive, }); - await store.addProposedBlocks([block2]); + await store.addProposedBlock(block2); // Verify state: checkpoint 1 exists, block 2 exists but is orphaned (no checkpoint 2) expect(await store.getSynchedCheckpointNumber()).toBe(1); @@ -431,7 +442,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(2), lastArchive: block3.archive, }); - await store.addProposedBlocks([block2, block3, block4]); + await addProposedBlocks(store, [block2, block3, block4]); expect(await store.getSynchedCheckpointNumber()).toBe(1); expect(await store.getLatestBlockNumber()).toBe(4); @@ -727,7 +738,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block5.archive, }); - await store.addProposedBlocks([block4, block5, block6]); + await addProposedBlocks(store, [block4, block5, block6]); // Checkpoint number should still be 1 (no new checkpoint added) expect(await store.getSynchedCheckpointNumber()).toBe(1); @@ -755,7 +766,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: block3.archive, }); - await store.addProposedBlocks([block3, block4]); + await 
addProposedBlocks(store, [block3, block4]); // getBlock should work for both checkpointed and uncheckpointed blocks expect((await store.getBlock(BlockNumber(1)))?.number).toBe(1); @@ -769,7 +780,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(2), lastArchive: block4.archive, }); - await store.addProposedBlocks([block5]); + await store.addProposedBlock(block5); // Verify the uncheckpointed blocks have correct data const retrieved3 = await store.getBlock(BlockNumber(3)); @@ -794,7 +805,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: block1.archive, }); - await store.addProposedBlocks([block1, block2]); + await addProposedBlocks(store, [block1, block2]); // getBlockByHash should work for uncheckpointed blocks const hash1 = await block1.header.hash(); @@ -818,7 +829,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: block1.archive, }); - await store.addProposedBlocks([block1, block2]); + await addProposedBlocks(store, [block1, block2]); // getBlockByArchive should work for uncheckpointed blocks const archive1 = block1.archive.root; @@ -851,7 +862,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: block3.archive, }); - await store.addProposedBlocks([block3, block4]); + await addProposedBlocks(store, [block3, block4]); // getCheckpointedBlock should work for checkpointed blocks expect((await store.getCheckpointedBlock(BlockNumber(1)))?.block.number).toBe(1); @@ -872,7 +883,7 @@ describe('KVArchiverDataStore', () => { checkpointNumber: CheckpointNumber(1), indexWithinCheckpoint: IndexWithinCheckpoint(0), }); - await store.addProposedBlocks([block1]); + await store.addProposedBlock(block1); const hash = await block1.header.hash(); @@ -889,7 +900,7 @@ describe('KVArchiverDataStore', () => { checkpointNumber: CheckpointNumber(1), indexWithinCheckpoint: 
IndexWithinCheckpoint(0), }); - await store.addProposedBlocks([block1]); + await store.addProposedBlock(block1); const archive = block1.archive.root; @@ -916,7 +927,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(2), lastArchive: block2.archive, }); - await store.addProposedBlocks([block1, block2, block3]); + await addProposedBlocks(store, [block1, block2, block3]); expect(await store.getSynchedCheckpointNumber()).toBe(0); expect(await store.getLatestBlockNumber()).toBe(3); @@ -976,7 +987,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(2), lastArchive: block4.archive, }); - await store.addProposedBlocks([block3, block4, block5]); + await addProposedBlocks(store, [block3, block4, block5]); expect(await store.getSynchedCheckpointNumber()).toBe(1); expect(await store.getLatestBlockNumber()).toBe(5); @@ -1035,7 +1046,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: block3.archive, }); - await store.addProposedBlocks([block3, block4]); + await addProposedBlocks(store, [block3, block4]); // getBlocks should retrieve all blocks const allBlocks = await store.getBlocks(BlockNumber(1), 10); @@ -1044,32 +1055,7 @@ describe('KVArchiverDataStore', () => { }); }); - describe('addProposedBlocks validation', () => { - it('throws if blocks have different checkpoint numbers', async () => { - // First, establish checkpoint 1 with blocks 1-2 - const checkpoint1 = makePublishedCheckpoint( - await Checkpoint.random(CheckpointNumber(1), { numBlocks: 2, startBlockNumber: 1 }), - 10, - ); - await store.addCheckpoints([checkpoint1]); - - // Try to add blocks 3 and 4 with different checkpoint numbers - // Chain archives correctly to test the checkpoint number validation - const lastBlockArchive = checkpoint1.checkpoint.blocks.at(-1)!.archive; - const block3 = await L2Block.random(BlockNumber(3), { - checkpointNumber: CheckpointNumber(2), - 
indexWithinCheckpoint: IndexWithinCheckpoint(0), - lastArchive: lastBlockArchive, - }); - const block4 = await L2Block.random(BlockNumber(4), { - checkpointNumber: CheckpointNumber(3), - indexWithinCheckpoint: IndexWithinCheckpoint(1), - lastArchive: block3.archive, - }); - - await expect(store.addProposedBlocks([block3, block4])).rejects.toThrow(CheckpointNumberNotConsistentError); - }); - + describe('addProposedBlock validation', () => { it('throws if checkpoint number is not the current checkpoint', async () => { // First, establish checkpoint 1 with blocks 1-2 const checkpoint1 = makePublishedCheckpoint( @@ -1078,19 +1064,13 @@ describe('KVArchiverDataStore', () => { ); await store.addCheckpoints([checkpoint1]); - // Try to add blocks for checkpoint 3 (skipping checkpoint 2) + // Try to add a block for checkpoint 3 (skipping checkpoint 2) const block3 = await L2Block.random(BlockNumber(3), { checkpointNumber: CheckpointNumber(3), indexWithinCheckpoint: IndexWithinCheckpoint(0), }); - const block4 = await L2Block.random(BlockNumber(4), { - checkpointNumber: CheckpointNumber(3), - indexWithinCheckpoint: IndexWithinCheckpoint(1), - }); - await expect(store.addProposedBlocks([block3, block4])).rejects.toThrow( - InitialCheckpointNumberNotSequentialError, - ); + await expect(store.addProposedBlock(block3)).rejects.toThrow(CheckpointNumberNotSequentialError); }); it('allows blocks with the same checkpoint number for the current checkpoint', async () => { @@ -1114,7 +1094,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block3.archive, }); - await expect(store.addProposedBlocks([block3, block4])).resolves.toBe(true); + await expect(addProposedBlocks(store, [block3, block4])).resolves.toBe(true); // Verify blocks were added expect((await store.getBlock(BlockNumber(3)))?.equals(block3)).toBe(true); @@ -1133,7 +1113,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block1.archive, }); - await expect(store.addProposedBlocks([block1, 
block2])).resolves.toBe(true); + await expect(addProposedBlocks(store, [block1, block2])).resolves.toBe(true); // Verify blocks were added expect((await store.getBlock(BlockNumber(1)))?.equals(block1)).toBe(true); @@ -1152,24 +1132,18 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), }); - await expect(store.addProposedBlocks([block1])).resolves.toBe(true); - await expect(store.addProposedBlocks([block2])).rejects.toThrow(InitialBlockNumberNotSequentialError); + await expect(store.addProposedBlock(block1)).resolves.toBe(true); + await expect(store.addProposedBlock(block2)).rejects.toThrow(BlockNumberNotSequentialError); }); it('throws if first block has wrong checkpoint number when store is empty', async () => { - // Try to add blocks for checkpoint 2 when store is empty (should start at 1) + // Try to add a block for checkpoint 2 when store is empty (should start at 1) const block1 = await L2Block.random(BlockNumber(1), { checkpointNumber: CheckpointNumber(2), indexWithinCheckpoint: IndexWithinCheckpoint(0), }); - const block2 = await L2Block.random(BlockNumber(2), { - checkpointNumber: CheckpointNumber(2), - indexWithinCheckpoint: IndexWithinCheckpoint(1), - }); - await expect(store.addProposedBlocks([block1, block2])).rejects.toThrow( - InitialCheckpointNumberNotSequentialError, - ); + await expect(store.addProposedBlock(block1)).rejects.toThrow(CheckpointNumberNotSequentialError); }); it('allows adding more blocks to the same checkpoint in separate calls', async () => { @@ -1187,7 +1161,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: lastBlockArchive, }); - await expect(store.addProposedBlocks([block3])).resolves.toBe(true); + await expect(store.addProposedBlock(block3)).resolves.toBe(true); // Add block 4 for the same checkpoint 2 in a separate call const block4 = await L2Block.random(BlockNumber(4), { @@ -1195,7 +1169,7 @@ describe('KVArchiverDataStore', () => 
{ indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: block3.archive, }); - await expect(store.addProposedBlocks([block4])).resolves.toBe(true); + await expect(store.addProposedBlock(block4)).resolves.toBe(true); expect(await store.getLatestBlockNumber()).toBe(4); }); @@ -1215,7 +1189,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: lastBlockArchive, }); - await expect(store.addProposedBlocks([block3])).resolves.toBe(true); + await expect(store.addProposedBlock(block3)).resolves.toBe(true); // Add block 4 for the same checkpoint 2 in a separate call but with a missing index const block4 = await L2Block.random(BlockNumber(4), { @@ -1223,7 +1197,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(2), lastArchive: block3.archive, }); - await expect(store.addProposedBlocks([block4])).rejects.toThrow(BlockIndexNotSequentialError); + await expect(store.addProposedBlock(block4)).rejects.toThrow(BlockIndexNotSequentialError); expect(await store.getLatestBlockNumber()).toBe(3); }); @@ -1243,7 +1217,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: lastBlockArchive, }); - await store.addProposedBlocks([block3]); + await store.addProposedBlock(block3); // Try to add block 4 for checkpoint 3 (should fail because current checkpoint is still 2) const block4 = await L2Block.random(BlockNumber(4), { @@ -1251,7 +1225,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: block3.archive, }); - await expect(store.addProposedBlocks([block4])).rejects.toThrow(InitialCheckpointNumberNotSequentialError); + await expect(store.addProposedBlock(block4)).rejects.toThrow(CheckpointNumberNotSequentialError); }); it('force option bypasses checkpoint number validation', async () => { @@ -1275,7 +1249,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block3.archive, }); - await 
expect(store.addProposedBlocks([block3, block4], { force: true })).resolves.toBe(true); + await expect(addProposedBlocks(store, [block3, block4], { force: true })).resolves.toBe(true); }); it('force option bypasses blockindex number validation', async () => { @@ -1299,7 +1273,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block3.archive, }); - await expect(store.addProposedBlocks([block3, block4], { force: true })).resolves.toBe(true); + await expect(addProposedBlocks(store, [block3, block4], { force: true })).resolves.toBe(true); }); it('throws if adding blocks with non-consecutive archives', async () => { @@ -1315,7 +1289,7 @@ describe('KVArchiverDataStore', () => { checkpointNumber: CheckpointNumber(2), indexWithinCheckpoint: IndexWithinCheckpoint(0), }); - await expect(store.addProposedBlocks([block3])).rejects.toThrow(BlockArchiveNotConsistentError); + await expect(store.addProposedBlock(block3)).rejects.toThrow(BlockArchiveNotConsistentError); expect(await store.getLatestBlockNumber()).toBe(2); }); @@ -1335,7 +1309,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: lastBlockArchive, }); - await expect(store.addProposedBlocks([block3])).resolves.toBe(true); + await expect(store.addProposedBlock(block3)).resolves.toBe(true); // Add block 4 with incorrect archive (should fail) const block4 = await L2Block.random(BlockNumber(4), { @@ -1343,7 +1317,7 @@ describe('KVArchiverDataStore', () => { indexWithinCheckpoint: IndexWithinCheckpoint(1), lastArchive: AppendOnlyTreeSnapshot.random(), }); - await expect(store.addProposedBlocks([block4])).rejects.toThrow(BlockArchiveNotConsistentError); + await expect(store.addProposedBlock(block4)).rejects.toThrow(BlockArchiveNotConsistentError); expect(await store.getLatestBlockNumber()).toBe(3); }); @@ -1363,14 +1337,26 @@ describe('KVArchiverDataStore', () => { checkpointNumber: CheckpointNumber(1), indexWithinCheckpoint: IndexWithinCheckpoint(1), }); - await 
expect(store.addProposedBlocks([block2])).rejects.toThrow(CannotOverwriteCheckpointedBlockError); + await expect(store.addProposedBlock(block2)).rejects.toThrow(CannotOverwriteCheckpointedBlockError); // Try to add a block that would overwrite checkpointed block 1 const block1 = await L2Block.random(BlockNumber(1), { checkpointNumber: CheckpointNumber(1), indexWithinCheckpoint: IndexWithinCheckpoint(0), }); - await expect(store.addProposedBlocks([block1])).rejects.toThrow(CannotOverwriteCheckpointedBlockError); + await expect(store.addProposedBlock(block1)).rejects.toThrow(CannotOverwriteCheckpointedBlockError); + }); + + it('throws BlockAlreadyCheckpointedError if proposed block matches the checkpointed one', async () => { + const checkpoint1 = makePublishedCheckpoint( + await Checkpoint.random(CheckpointNumber(1), { numBlocks: 2, startBlockNumber: 1 }), + 10, + ); + await store.addCheckpoints([checkpoint1]); + + // Re-propose the same block that was already checkpointed + const checkpointedBlock = checkpoint1.checkpoint.blocks[1]; + await expect(store.addProposedBlock(checkpointedBlock)).rejects.toThrow(BlockAlreadyCheckpointedError); }); }); @@ -1801,7 +1787,7 @@ describe('KVArchiverDataStore', () => { it('deleteLogs', async () => { const block = publishedCheckpoints[0].checkpoint.blocks[0]; - await store.addProposedBlocks([block]); + await store.addProposedBlock(block); await expect(store.addLogs([block])).resolves.toEqual(true); expect((await store.getPublicLogs({ fromBlock: BlockNumber(1) })).logs.length).toEqual( @@ -3052,7 +3038,7 @@ describe('KVArchiverDataStore', () => { }); describe('idempotency', () => { - it('handles adding blocks via addProposedBlocks then same blocks via addCheckpoints', async () => { + it('handles adding blocks via addProposedBlock then same blocks via addCheckpoints', async () => { // First add checkpoint 1 to establish a base const checkpoint1 = makePublishedCheckpoint( await Checkpoint.random(CheckpointNumber(1), { numBlocks: 1, 
startBlockNumber: 1 }), @@ -3060,13 +3046,13 @@ describe('KVArchiverDataStore', () => { ); await store.addCheckpoints([checkpoint1]); - // Add provisional block 2 via addProposedBlocks + // Add provisional block 2 via addProposedBlock const provisionalBlock = await L2Block.random(BlockNumber(2), { checkpointNumber: CheckpointNumber(2), indexWithinCheckpoint: IndexWithinCheckpoint(0), lastArchive: checkpoint1.checkpoint.blocks[0].archive, }); - await store.addProposedBlocks([provisionalBlock]); + await store.addProposedBlock(provisionalBlock); // Now add checkpoint 2 containing the same block via addCheckpoints const checkpoint2 = new Checkpoint( @@ -3172,7 +3158,7 @@ describe('KVArchiverDataStore', () => { slotNumber: SlotNumber(101), // Different slot number }); - await store.addProposedBlocks([block1, block2, block3]); + await addProposedBlocks(store, [block1, block2, block3]); const blocksForSlot100 = await store.getBlocksForSlot(SlotNumber(100)); expect(blocksForSlot100.length).toBe(2); @@ -3192,7 +3178,7 @@ describe('KVArchiverDataStore', () => { slotNumber: SlotNumber(100), }); - await store.addProposedBlocks([block1]); + await store.addProposedBlock(block1); const blocksForSlot999 = await store.getBlocksForSlot(SlotNumber(999)); expect(blocksForSlot999).toEqual([]); @@ -3223,7 +3209,7 @@ describe('KVArchiverDataStore', () => { slotNumber: SlotNumber(50), }); - await store.addProposedBlocks([block1, block2, block3]); + await addProposedBlocks(store, [block1, block2, block3]); const blocksForSlot = await store.getBlocksForSlot(SlotNumber(50)); expect(blocksForSlot.length).toBe(3); @@ -3256,7 +3242,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block3.archive, }); - await store.addProposedBlocks([block1, block2, block3, block4]); + await addProposedBlocks(store, [block1, block2, block3, block4]); expect(await store.getLatestBlockNumber()).toBe(4); // Remove blocks after block 2 @@ -3285,7 +3271,7 @@ describe('KVArchiverDataStore', () => { 
lastArchive: block2.archive, }); - await store.addProposedBlocks([block1, block2, block3]); + await addProposedBlocks(store, [block1, block2, block3]); // Remove blocks after block 1 const removedBlocks = await store.removeBlocksAfter(BlockNumber(1)); @@ -3306,7 +3292,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block1.archive, }); - await store.addProposedBlocks([block1, block2]); + await addProposedBlocks(store, [block1, block2]); // Remove blocks after block 2 (none to remove) const removedBlocks = await store.removeBlocksAfter(BlockNumber(2)); @@ -3334,7 +3320,7 @@ describe('KVArchiverDataStore', () => { txsPerBlock: 2, }); - await store.addProposedBlocks([block1, block2]); + await addProposedBlocks(store, [block1, block2]); // Verify block2 is retrievable by hash and archive before removal const block2Hash = await block2.header.hash(); @@ -3386,7 +3372,7 @@ describe('KVArchiverDataStore', () => { lastArchive: block1.archive, }); - await store.addProposedBlocks([block1, block2]); + await addProposedBlocks(store, [block1, block2]); const removedBlocks = await store.removeBlocksAfter(BlockNumber(0)); diff --git a/yarn-project/archiver/src/store/kv_archiver_store.ts b/yarn-project/archiver/src/store/kv_archiver_store.ts index d46075e2a588..25efd120f66f 100644 --- a/yarn-project/archiver/src/store/kv_archiver_store.ts +++ b/yarn-project/archiver/src/store/kv_archiver_store.ts @@ -246,14 +246,14 @@ export class KVArchiverDataStore implements ContractDataSource { } /** - * Append new proposed blocks to the store's list. - * These are uncheckpointed blocks that have been proposed by the sequencer but not yet included in a checkpoint on L1. + * Append a new proposed block to the store. + * This is an uncheckpointed block that has been proposed by the sequencer but not yet included in a checkpoint on L1. * For checkpointed blocks (already published to L1), use addCheckpoints() instead. - * @param blocks - The proposed L2 blocks to be added to the store. 
+ * @param block - The proposed L2 block to be added to the store. * @returns True if the operation is successful. */ - addProposedBlocks(blocks: L2Block[], opts: { force?: boolean; checkpointNumber?: number } = {}): Promise { - return this.#blockStore.addProposedBlocks(blocks, opts); + addProposedBlock(block: L2Block, opts: { force?: boolean } = {}): Promise { + return this.#blockStore.addProposedBlock(block, opts); } /** diff --git a/yarn-project/aztec-node/src/aztec-node/server.test.ts b/yarn-project/aztec-node/src/aztec-node/server.test.ts index 96119e91cbdc..1a32af54415c 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.test.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.test.ts @@ -1,7 +1,7 @@ import { TestCircuitVerifier } from '@aztec/bb-prover'; import { EpochCache } from '@aztec/epoch-cache'; import type { RollupContract } from '@aztec/ethereum/contracts'; -import { BlockNumber } from '@aztec/foundation/branded-types'; +import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { Fr } from '@aztec/foundation/curves/bn254'; import { EthAddress } from '@aztec/foundation/eth-address'; import { BadRequestError } from '@aztec/foundation/json-rpc'; @@ -16,7 +16,8 @@ import { computeFeePayerBalanceLeafSlot } from '@aztec/protocol-contracts/fee-ju import type { GlobalVariableBuilder, SequencerClient } from '@aztec/sequencer-client'; import type { SlasherClientInterface } from '@aztec/slasher'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; -import { BlockHash, L2Block, type L2BlockSource } from '@aztec/stdlib/block'; +import { BlockHash, type BlockParameter, CheckpointedL2Block, L2Block, type L2BlockSource } from '@aztec/stdlib/block'; +import { L1PublishedData } from '@aztec/stdlib/checkpoint'; import type { ContractDataSource } from '@aztec/stdlib/contract'; import { EmptyL1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import { GasFees } from '@aztec/stdlib/gas'; @@ 
-35,6 +36,7 @@ import { TX_ERROR_INVALID_EXPIRATION_TIMESTAMP, TX_ERROR_SIZE_ABOVE_LIMIT, Tx, + TxEffect, } from '@aztec/stdlib/tx'; import { getPackageVersion } from '@aztec/stdlib/update-checker'; import type { ValidatorClient } from '@aztec/validator-client'; @@ -62,13 +64,20 @@ class MockDateProvider extends DateProvider { } } +class TestAztecNodeService extends AztecNodeService { + public override getWorldState(block: BlockParameter) { + return super.getWorldState(block); + } +} + describe('aztec node', () => { let p2p: MockProxy; let globalVariablesBuilder: MockProxy; let merkleTreeOps: MockProxy; + let worldState: MockProxy; let l2BlockSource: MockProxy; let lastBlockNumber: BlockNumber; - let node: AztecNodeService; + let node: TestAztecNodeService; let feePayer: AztecAddress; let epochCache: EpochCache; let nodeConfig: AztecNodeConfig; @@ -130,9 +139,10 @@ describe('aztec node', () => { } }); - const worldState = mock({ + worldState = mock({ getCommitted: () => merkleTreeOps, }); + worldState.syncImmediate.mockImplementation(() => Promise.resolve(lastBlockNumber)); l2BlockSource = mock(); l2BlockSource.getBlockNumber.mockImplementation(() => Promise.resolve(lastBlockNumber)); @@ -170,7 +180,7 @@ describe('aztec node', () => { new MockDateProvider(), ); - node = new AztecNodeService( + node = new TestAztecNodeService( nodeConfig, p2p, l2BlockSource, @@ -515,6 +525,89 @@ describe('aztec node', () => { ); }); }); + + describe('getWorldState', () => { + let snapshotMerkleTreeOps: MockProxy; + let initialHeader: BlockHeader; + + beforeEach(() => { + lastBlockNumber = BlockNumber(5); + initialHeader = BlockHeader.empty({ + globalVariables: GlobalVariables.empty({ blockNumber: BlockNumber.ZERO }), + }); + merkleTreeOps.getInitialHeader.mockReturnValue(initialHeader); + snapshotMerkleTreeOps = mock(); + worldState.getSnapshot.mockReturnValue(snapshotMerkleTreeOps); + }); + + it('returns committed db for latest', async () => { + const result = await 
node.getWorldState('latest'); + expect(result).toBe(merkleTreeOps); + expect(worldState.getSnapshot).not.toHaveBeenCalled(); + }); + + it('returns snapshot for a block number within sync range', async () => { + const result = await node.getWorldState(BlockNumber(3)); + expect(result).toBe(snapshotMerkleTreeOps); + expect(worldState.getSnapshot).toHaveBeenCalledWith(BlockNumber(3)); + }); + + it('throws for a block number beyond sync range', async () => { + await expect(node.getWorldState(BlockNumber(10))).rejects.toThrow(/not yet synced/); + }); + + it('throws for a block hash whose block number is beyond sync range', async () => { + const blockHash = BlockHash.random(); + const header = BlockHeader.empty({ + globalVariables: GlobalVariables.empty({ blockNumber: BlockNumber(10) }), + }); + l2BlockSource.getBlockHeaderByHash.mockResolvedValue(header); + + await expect(node.getWorldState(blockHash)).rejects.toThrow(/not yet synced/); + }); + + it('resolves block hash to block number via archiver and returns snapshot', async () => { + const blockHash = BlockHash.random(); + const header = BlockHeader.empty({ + globalVariables: GlobalVariables.empty({ blockNumber: BlockNumber(3) }), + }); + l2BlockSource.getBlockHeaderByHash.mockResolvedValue(header); + snapshotMerkleTreeOps.getLeafValue.mockResolvedValue(blockHash); + + const result = await node.getWorldState(blockHash); + expect(result).toBe(snapshotMerkleTreeOps); + expect(worldState.getSnapshot).toHaveBeenCalledWith(BlockNumber(3)); + }); + + it('throws when block hash is not found in archiver', async () => { + const blockHash = BlockHash.random(); + l2BlockSource.getBlockHeaderByHash.mockResolvedValue(undefined); + + await expect(node.getWorldState(blockHash)).rejects.toThrow(/not found when querying world state/); + }); + + it('throws when world-state block hash does not match requested hash (reorg)', async () => { + const blockHash = BlockHash.random(); + const differentHash = BlockHash.random(); + const header 
= BlockHeader.empty({ + globalVariables: GlobalVariables.empty({ blockNumber: BlockNumber(3) }), + }); + l2BlockSource.getBlockHeaderByHash.mockResolvedValue(header); + // World state returns a different hash for the same block number + snapshotMerkleTreeOps.getLeafValue.mockResolvedValue(differentHash); + + await expect(node.getWorldState(blockHash)).rejects.toThrow(/not found in world state at block number/); + }); + + it('returns snapshot at block 0 for initial header hash', async () => { + const initialHash = await initialHeader.hash(); + const initialBlockHash = new BlockHash(initialHash); + + const result = await node.getWorldState(initialBlockHash); + expect(worldState.getSnapshot).toHaveBeenCalledWith(BlockNumber.ZERO); + expect(result).toBe(snapshotMerkleTreeOps); + }); + }); }); describe('simulatePublicCalls', () => { @@ -804,4 +897,54 @@ describe('aztec node', () => { }); }); }); + + describe('getL2ToL1Messages', () => { + const makeCheckpointedBlock = (slotNumber: number, l2ToL1MsgsByTx: Fr[][]): CheckpointedL2Block => { + const block = L2Block.empty( + BlockHeader.empty({ + globalVariables: GlobalVariables.empty({ slotNumber: SlotNumber(slotNumber) }), + }), + ); + // Override the body's txEffects with our custom l2ToL1Msgs + unfreeze(block.body).txEffects = l2ToL1MsgsByTx.map(msgs => ({ l2ToL1Msgs: msgs }) as TxEffect); + return new CheckpointedL2Block(CheckpointNumber(0), block, new L1PublishedData(0n, 0n, '0x0'), []); + }; + + it('groups blocks by slot number into checkpoints', async () => { + const msg1 = Fr.random(); + const msg2 = Fr.random(); + const msg3 = Fr.random(); + + // Two blocks in slot 1, one block in slot 2 + const blocks = [ + makeCheckpointedBlock(1, [[msg1]]), + makeCheckpointedBlock(1, [[msg2]]), + makeCheckpointedBlock(2, [[msg3]]), + ]; + + l2BlockSource.getCheckpointedBlocksForEpoch.mockResolvedValue(blocks); + + const result = await node.getL2ToL1Messages(EpochNumber(0)); + + // First checkpoint (slot 1): 2 blocks, each with 1 
tx with 1 message + // Second checkpoint (slot 2): 1 block with 1 tx with 1 message + expect(result).toEqual([[[[msg1]], [[msg2]]], [[[msg3]]]]); + }); + + it('correctly includes blocks in slot zero', async () => { + const msg1 = Fr.random(); + const msg2 = Fr.random(); + + // Block in slot 0, block in slot 1 + const blocks = [makeCheckpointedBlock(0, [[msg1]]), makeCheckpointedBlock(1, [[msg2]])]; + + l2BlockSource.getCheckpointedBlocksForEpoch.mockResolvedValue(blocks); + + const result = await node.getL2ToL1Messages(EpochNumber(0)); + + // First checkpoint (slot 0): 1 block with 1 tx with 1 message + // Second checkpoint (slot 1): 1 block with 1 tx with 1 message + expect(result).toEqual([[[[msg1]]], [[[msg2]]]]); + }); + }); }); diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 242c8204f744..da94fd8fb1c5 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -9,7 +9,7 @@ import { getPublicClient } from '@aztec/ethereum/client'; import { RegistryContract, RollupContract } from '@aztec/ethereum/contracts'; import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses'; import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; -import { compactArray, pick, unique } from '@aztec/foundation/collection'; +import { chunkBy, compactArray, pick, unique } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; import { EthAddress } from '@aztec/foundation/eth-address'; import { BadRequestError } from '@aztec/foundation/json-rpc'; @@ -966,7 +966,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { treeId: MerkleTreeId, leafValues: Fr[], ): Promise<(DataInBlock | undefined)[]> { - const committedDb = await this.#getWorldState(referenceBlock); + const committedDb = await this.getWorldState(referenceBlock); const 
maybeIndices = await committedDb.findLeafIndices( treeId, leafValues.map(x => x.toBuffer()), @@ -1034,7 +1034,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { referenceBlock: BlockParameter, blockHash: BlockHash, ): Promise | undefined> { - const committedDb = await this.#getWorldState(referenceBlock); + const committedDb = await this.getWorldState(referenceBlock); const [pathAndIndex] = await committedDb.findSiblingPaths(MerkleTreeId.ARCHIVE, [blockHash]); return pathAndIndex === undefined ? undefined @@ -1045,7 +1045,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { referenceBlock: BlockParameter, noteHash: Fr, ): Promise | undefined> { - const committedDb = await this.#getWorldState(referenceBlock); + const committedDb = await this.getWorldState(referenceBlock); const [pathAndIndex] = await committedDb.findSiblingPaths( MerkleTreeId.NOTE_HASH_TREE, [noteHash], @@ -1059,7 +1059,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { referenceBlock: BlockParameter, l1ToL2Message: Fr, ): Promise<[bigint, SiblingPath] | undefined> { - const db = await this.#getWorldState(referenceBlock); + const db = await this.getWorldState(referenceBlock); const [witness] = await db.findSiblingPaths(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, [l1ToL2Message]); if (!witness) { return undefined; @@ -1092,19 +1092,9 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { public async getL2ToL1Messages(epoch: EpochNumber): Promise { // Assumes `getCheckpointedBlocksForEpoch` returns blocks in ascending order of block number. 
const checkpointedBlocks = await this.blockSource.getCheckpointedBlocksForEpoch(epoch); - const blocksInCheckpoints: L2Block[][] = []; - let previousSlotNumber = SlotNumber.ZERO; - let checkpointIndex = -1; - for (const checkpointedBlock of checkpointedBlocks) { - const block = checkpointedBlock.block; - const slotNumber = block.header.globalVariables.slotNumber; - if (slotNumber !== previousSlotNumber) { - checkpointIndex++; - blocksInCheckpoints.push([]); - previousSlotNumber = slotNumber; - } - blocksInCheckpoints[checkpointIndex].push(block); - } + const blocksInCheckpoints = chunkBy(checkpointedBlocks, cb => cb.block.header.globalVariables.slotNumber).map( + group => group.map(cb => cb.block), + ); return blocksInCheckpoints.map(blocks => blocks.map(block => block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs)), ); @@ -1114,7 +1104,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { referenceBlock: BlockParameter, nullifier: Fr, ): Promise { - const db = await this.#getWorldState(referenceBlock); + const db = await this.getWorldState(referenceBlock); const [witness] = await db.findSiblingPaths(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]); if (!witness) { return undefined; @@ -1148,7 +1138,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { referenceBlock: BlockParameter, nullifier: Fr, ): Promise { - const committedDb = await this.#getWorldState(referenceBlock); + const committedDb = await this.getWorldState(referenceBlock); const findResult = await committedDb.getPreviousValueIndex(MerkleTreeId.NULLIFIER_TREE, nullifier.toBigInt()); if (!findResult) { return undefined; @@ -1164,7 +1154,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { } async getPublicDataWitness(referenceBlock: BlockParameter, leafSlot: Fr): Promise { - const committedDb = await this.#getWorldState(referenceBlock); + const committedDb = await this.getWorldState(referenceBlock); 
const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt()); if (!lowLeafResult) { return undefined; } @@ -1179,7 +1169,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { } public async getPublicStorageAt(referenceBlock: BlockParameter, contract: AztecAddress, slot: Fr): Promise { - const committedDb = await this.#getWorldState(referenceBlock); + const committedDb = await this.getWorldState(referenceBlock); const leafSlot = await computePublicDataTreeLeafSlot(contract, slot); const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt()); @@ -1610,7 +1600,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { * @param block - The block parameter (block number, block hash, or 'latest') at which to get the data. * @returns An instance of a committed MerkleTreeOperations */ - async #getWorldState(block: BlockParameter) { + protected async getWorldState(block: BlockParameter) { let blockSyncedTo: BlockNumber = BlockNumber.ZERO; try { // Attempt to sync the world state if necessary @@ -1624,6 +1614,8 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { return this.worldStateSynchronizer.getCommitted(); } + // Get the block number, either directly from the parameter or by querying the archiver with the block hash + let blockNumber: BlockNumber; if (BlockHash.isBlockHash(block)) { const initialBlockHash = await this.#getInitialHeaderHash(); if (block.equals(initialBlockHash)) { @@ -1637,22 +1629,31 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { `Block hash ${block.toString()} not found when querying world state. 
If the node API has been queried with anchor block hash possibly a reorg has occurred.`, ); } - const blockNumber = header.getBlockNumber(); - this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`); - return this.worldStateSynchronizer.getSnapshot(blockNumber); + + blockNumber = header.getBlockNumber(); + } else { + blockNumber = block as BlockNumber; + } + + // Check it's within world state sync range + if (blockNumber > blockSyncedTo) { + throw new Error(`Queried block ${block} not yet synced by the node (node is synced upto ${blockSyncedTo}).`); } + this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`); - // Block number provided - { - const blockNumber = block as BlockNumber; + const snapshot = this.worldStateSynchronizer.getSnapshot(blockNumber); - if (blockNumber > blockSyncedTo) { - throw new Error(`Queried block ${block} not yet synced by the node (node is synced upto ${blockSyncedTo}).`); + // Double-check world-state synced to the same block hash as was requested + if (BlockHash.isBlockHash(block)) { + const blockHash = await snapshot.getLeafValue(MerkleTreeId.ARCHIVE, BigInt(blockNumber)); + if (!blockHash || !new BlockHash(blockHash).equals(block)) { + throw new Error( + `Block hash ${block.toString()} not found in world state at block number ${blockNumber}. 
If the node API has been queried with anchor block hash possibly a reorg has occurred.`, + ); } - - this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`); - return this.worldStateSynchronizer.getSnapshot(blockNumber); } + + return snapshot; } /** diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index a547219e839c..44fc28ecc8ef 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -209,6 +209,7 @@ export type EnvVar = | 'SEQ_MAX_DA_BLOCK_GAS' | 'SEQ_MAX_L2_BLOCK_GAS' | 'SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER' + | 'SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET' | 'SEQ_PUBLISHER_PRIVATE_KEY' | 'SEQ_PUBLISHER_PRIVATE_KEYS' | 'SEQ_PUBLISHER_ADDRESSES' diff --git a/yarn-project/sequencer-client/src/client/sequencer-client.test.ts b/yarn-project/sequencer-client/src/client/sequencer-client.test.ts index e325cefca47d..f2eb896e4746 100644 --- a/yarn-project/sequencer-client/src/client/sequencer-client.test.ts +++ b/yarn-project/sequencer-client/src/client/sequencer-client.test.ts @@ -22,8 +22,8 @@ describe('computeBlockLimits', () => { describe('L2 gas', () => { it('derives maxL2BlockGas from rollupManaLimit when not explicitly set', () => { const rollupManaLimit = 1_000_000; - // Single block mode (maxNumberOfBlocks=1), default multiplier=2: - // min(1_000_000, ceil(1_000_000 / 1 * 2)) = min(1_000_000, 2_000_000) = 1_000_000 + // Single block mode (maxNumberOfBlocks=1), default multiplier=1.2: + // min(1_000_000, ceil(1_000_000 / 1 * 1.2)) = min(1_000_000, 1_200_000) = 1_000_000 const result = computeBlockLimits(makeConfig(), rollupManaLimit, 12, log); expect(result.maxL2BlockGas).toBe(rollupManaLimit); }); @@ -43,8 +43,8 @@ describe('computeBlockLimits', () => { const daLimit = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT; it('derives maxDABlockGas from DA checkpoint limit when not explicitly set', () => { - // Single block mode 
(maxNumberOfBlocks=1), default multiplier=2: - // min(daLimit, ceil(daLimit / 1 * 2)) = min(daLimit, daLimit * 2) = daLimit + // Single block mode (maxNumberOfBlocks=1), default multiplier=1.2: + // min(daLimit, ceil(daLimit / 1 * 1.2)) = min(daLimit, daLimit * 1.2) = daLimit const result = computeBlockLimits(makeConfig(), 1_000_000, 12, log); expect(result.maxDABlockGas).toBe(daLimit); }); @@ -78,14 +78,14 @@ describe('computeBlockLimits', () => { }); it('derives maxTxsPerBlock from maxTxsPerCheckpoint when per-block not set', () => { - // Multi-block mode with maxNumberOfBlocks=5, multiplier=2: - // min(100, ceil(100 / 5 * 2)) = min(100, 40) = 40 + // Multi-block mode with maxNumberOfBlocks=5, multiplier=1.2: + // min(100, ceil(100 / 5 * 1.2)) = min(100, 24) = 24 const config = makeConfig({ maxTxsPerCheckpoint: 100, blockDurationMs: 8000, }); const result = computeBlockLimits(config, 1_000_000, 12, log); - expect(result.maxTxsPerBlock).toBe(40); + expect(result.maxTxsPerBlock).toBe(24); }); }); @@ -97,14 +97,20 @@ describe('computeBlockLimits', () => { // timeReservedAtEnd = 8 + 19 = 27 // timeAvailableForBlocks = 72 - 1 - 27 = 44 // maxNumberOfBlocks = floor(44 / 8) = 5 - // With multiplier=2 and rollupManaLimit=1_000_000: - // maxL2BlockGas = min(1_000_000, ceil(1_000_000 / 5 * 2)) = min(1_000_000, 400_000) = 400_000 + // With multiplier=1.2 and rollupManaLimit=1_000_000: + // maxL2BlockGas = min(1_000_000, ceil(1_000_000 / 5 * 1.2)) = min(1_000_000, 240_000) = 240_000 const config = makeConfig({ blockDurationMs: 8000 }); const result = computeBlockLimits(config, 1_000_000, 12, log); - expect(result.maxL2BlockGas).toBe(400_000); + expect(result.maxL2BlockGas).toBe(240_000); const daLimit = MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT; - expect(result.maxDABlockGas).toBe(Math.min(daLimit, Math.ceil((daLimit / 5) * 2))); + expect(result.maxDABlockGas).toBe(Math.min(daLimit, Math.ceil((daLimit / 5) * 1.2))); + }); + + it('returns maxBlocksPerCheckpoint from timetable', 
() => { + const config = makeConfig({ blockDurationMs: 8000 }); + const result = computeBlockLimits(config, 1_000_000, 12, log); + expect(result.maxBlocksPerCheckpoint).toBe(5); }); }); }); diff --git a/yarn-project/sequencer-client/src/client/sequencer-client.ts b/yarn-project/sequencer-client/src/client/sequencer-client.ts index 22e1be967576..0efeafb01f10 100644 --- a/yarn-project/sequencer-client/src/client/sequencer-client.ts +++ b/yarn-project/sequencer-client/src/client/sequencer-client.ts @@ -160,7 +160,7 @@ export class SequencerClient { const l1PublishingTimeBasedOnChain = isAnvilTestChain(config.l1ChainId) ? 1 : ethereumSlotDuration; const l1PublishingTime = config.l1PublishingTime ?? l1PublishingTimeBasedOnChain; - const { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock } = computeBlockLimits( + const { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock, maxBlocksPerCheckpoint } = computeBlockLimits( config, rollupManaLimit, l1PublishingTime, @@ -183,7 +183,7 @@ export class SequencerClient { deps.dateProvider, epochCache, rollupContract, - { ...config, l1PublishingTime, maxL2BlockGas, maxDABlockGas, maxTxsPerBlock }, + { ...config, l1PublishingTime, maxL2BlockGas, maxDABlockGas, maxTxsPerBlock, maxBlocksPerCheckpoint }, telemetryClient, log, ); @@ -257,7 +257,7 @@ export function computeBlockLimits( rollupManaLimit: number, l1PublishingTime: number, log: ReturnType, -): { maxL2BlockGas: number; maxDABlockGas: number; maxTxsPerBlock: number } { +): { maxL2BlockGas: number; maxDABlockGas: number; maxTxsPerBlock: number; maxBlocksPerCheckpoint: number } { const maxNumberOfBlocks = new SequencerTimetable({ ethereumSlotDuration: config.ethereumSlotDuration, aztecSlotDuration: config.aztecSlotDuration, @@ -331,5 +331,5 @@ export function computeBlockLimits( multiplier, }); - return { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock }; + return { maxL2BlockGas, maxDABlockGas, maxTxsPerBlock, maxBlocksPerCheckpoint: maxNumberOfBlocks }; } diff --git 
a/yarn-project/sequencer-client/src/config.ts b/yarn-project/sequencer-client/src/config.ts index 117839911491..e0ce28583791 100644 --- a/yarn-project/sequencer-client/src/config.ts +++ b/yarn-project/sequencer-client/src/config.ts @@ -40,7 +40,8 @@ export const DefaultSequencerConfig = { minTxsPerBlock: 1, buildCheckpointIfEmpty: false, publishTxsWithProposals: false, - perBlockAllocationMultiplier: 2, + perBlockAllocationMultiplier: 1.2, + redistributeCheckpointBudget: true, enforceTimeTable: true, attestationPropagationTime: DEFAULT_P2P_PROPAGATION_TIME, secondsBeforeInvalidatingBlockAsCommitteeMember: 144, // 12 L1 blocks @@ -112,6 +113,15 @@ export const sequencerConfigMappings: ConfigMappingsType = { ' Values greater than one allow early blocks to use more than their even share, relying on checkpoint-level capping for later blocks.', ...numberConfigHelper(DefaultSequencerConfig.perBlockAllocationMultiplier), }, + redistributeCheckpointBudget: { + env: 'SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET', + description: + 'Redistribute remaining checkpoint budget evenly across remaining blocks instead of allowing a single block to consume the entire remaining budget.', + ...booleanConfigHelper(DefaultSequencerConfig.redistributeCheckpointBudget), + }, + maxBlocksPerCheckpoint: { + description: 'Computed max number of blocks per checkpoint from timetable.', + }, coinbase: { env: 'COINBASE', parseEnv: (val: string) => (val ? 
EthAddress.fromString(val) : undefined), diff --git a/yarn-project/stdlib/src/interfaces/block-builder.ts b/yarn-project/stdlib/src/interfaces/block-builder.ts index 7b79c7134760..8cd272141823 100644 --- a/yarn-project/stdlib/src/interfaces/block-builder.ts +++ b/yarn-project/stdlib/src/interfaces/block-builder.ts @@ -64,6 +64,9 @@ export type FullNodeBlockBuilderConfig = Pick; export const FullNodeBlockBuilderConfigKeys: (keyof FullNodeBlockBuilderConfig)[] = [ @@ -79,6 +82,9 @@ export const FullNodeBlockBuilderConfigKeys: (keyof FullNodeBlockBuilderConfig)[ 'maxL2BlockGas', 'maxDABlockGas', 'rollupManaLimit', + 'redistributeCheckpointBudget', + 'perBlockAllocationMultiplier', + 'maxBlocksPerCheckpoint', ] as const; /** Thrown when no valid transactions are available to include in a block after processing, and this is not the first block in a checkpoint. */ diff --git a/yarn-project/stdlib/src/interfaces/configs.ts b/yarn-project/stdlib/src/interfaces/configs.ts index a6009002575f..b986445a4c6a 100644 --- a/yarn-project/stdlib/src/interfaces/configs.ts +++ b/yarn-project/stdlib/src/interfaces/configs.ts @@ -27,6 +27,10 @@ export interface SequencerConfig { maxDABlockGas?: number; /** Per-block gas budget multiplier for both L2 and DA gas. Budget = (checkpointLimit / maxBlocks) * multiplier. */ perBlockAllocationMultiplier?: number; + /** Redistribute remaining checkpoint budget evenly across remaining blocks instead of allowing a single block to consume the entire remaining budget. */ + redistributeCheckpointBudget?: boolean; + /** Computed max number of blocks per checkpoint from timetable. */ + maxBlocksPerCheckpoint?: number; /** Recipient of block reward. */ coinbase?: EthAddress; /** Address to receive fees. 
*/ @@ -94,6 +98,8 @@ export const SequencerConfigSchema = zodFor()( publishTxsWithProposals: z.boolean().optional(), maxDABlockGas: z.number().optional(), perBlockAllocationMultiplier: z.number().optional(), + redistributeCheckpointBudget: z.boolean().optional(), + maxBlocksPerCheckpoint: z.number().optional(), coinbase: schemas.EthAddress.optional(), feeRecipient: schemas.AztecAddress.optional(), acvmWorkingDirectory: z.string().optional(), @@ -142,7 +148,9 @@ type SequencerConfigOptionalKeys = | 'maxTxsPerCheckpoint' | 'maxL2BlockGas' | 'maxDABlockGas' - | 'perBlockAllocationMultiplier'; + | 'perBlockAllocationMultiplier' + | 'redistributeCheckpointBudget' + | 'maxBlocksPerCheckpoint'; export type ResolvedSequencerConfig = Prettify< Required> & Pick diff --git a/yarn-project/validator-client/README.md b/yarn-project/validator-client/README.md index 4c475117d3d1..0974b95f94b1 100644 --- a/yarn-project/validator-client/README.md +++ b/yarn-project/validator-client/README.md @@ -239,11 +239,11 @@ L1 enforces gas and blob capacity per checkpoint. The node enforces these during Per-block budgets prevent one block from consuming the entire checkpoint budget. -**Proposer**: `computeBlockLimits()` derives budgets at startup as `min(checkpointLimit, ceil(checkpointLimit / maxBlocks * multiplier))`, where `maxBlocks` comes from the timetable and `multiplier` defaults to 2. The multiplier greater than 1 allows early blocks to use more than their even share of the checkpoint budget, since different blocks hit different limit dimensions (L2 gas, DA gas, blob fields) — a strict even split would waste capacity. Operators can override via `SEQ_MAX_L2_BLOCK_GAS` / `SEQ_MAX_DA_BLOCK_GAS` / `SEQ_MAX_TX_PER_BLOCK` (capped at checkpoint limits). Per-block TX limits follow the same derivation pattern when `SEQ_MAX_TX_PER_CHECKPOINT` is set. 
+**Proposer**: `computeBlockLimits()` derives budgets at startup as `min(checkpointLimit, ceil(checkpointLimit / maxBlocks * multiplier))`, where `maxBlocks` comes from the timetable and `multiplier` defaults to 1.2. The multiplier greater than 1 allows early blocks to use more than their even share of the checkpoint budget, since different blocks hit different limit dimensions (L2 gas, DA gas, blob fields) — a strict even split would waste capacity. Operators can override via `SEQ_MAX_L2_BLOCK_GAS` / `SEQ_MAX_DA_BLOCK_GAS` / `SEQ_MAX_TX_PER_BLOCK` (capped at checkpoint limits). Per-block TX limits follow the same derivation pattern when `SEQ_MAX_TX_PER_CHECKPOINT` is set. **Validator**: Optionally enforces per-block limits via `VALIDATOR_MAX_L2_BLOCK_GAS`, `VALIDATOR_MAX_DA_BLOCK_GAS`, and `VALIDATOR_MAX_TX_PER_BLOCK`. When set, these are passed to `buildBlock` during re-execution and to `validateCheckpoint` for final validation. When unset, no per-block limit is enforced for that dimension (checkpoint-level protocol limits still apply). These are independent of the `SEQ_` vars so operators can tune proposer and validation limits separately. -**Checkpoint-level capping**: `CheckpointBuilder.capLimitsByCheckpointBudgets()` always runs before tx processing, capping per-block limits by `checkpointBudget - sum(used by prior blocks)` for all three gas dimensions and for transaction count (when `SEQ_MAX_TX_PER_CHECKPOINT` is set). This applies to both proposer and validator paths. +**Checkpoint-level capping**: `CheckpointBuilder.capLimitsByCheckpointBudgets()` always runs before tx processing, capping per-block limits by the remaining checkpoint budget. When `SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET` is enabled (default: true), the remaining budget is distributed evenly across remaining blocks with the multiplier applied: `min(perBlockLimit, ceil(remainingBudget / remainingBlocks * multiplier), remainingBudget)`. 
This prevents early blocks from consuming the entire checkpoint budget, producing smoother distribution. When disabled, each block can consume up to the full remaining budget, i.e. it caps by `checkpointBudget - sum(used by prior blocks)`. This applies to all four dimensions (L2 gas, DA gas, blob fields, transaction count). Validators always cap by the total remaining. ### Per-transaction enforcement @@ -259,7 +259,8 @@ Per-block budgets prevent one block from consuming the entire checkpoint budget. | `SEQ_MAX_L2_BLOCK_GAS` | *auto* | Per-block DA gas. Auto-derived from checkpoint DA limit / maxBlocks * multiplier. | | `SEQ_MAX_TX_PER_BLOCK` | *none* | Per-block tx count. If `SEQ_MAX_TX_PER_CHECKPOINT` is set and per-block is not, derived as `ceil(checkpointLimit / maxBlocks * multiplier)`. | | `SEQ_MAX_TX_PER_CHECKPOINT` | *none* | Total txs across all blocks in a checkpoint. When set, per-block tx limit is derived from it (unless explicitly overridden) and checkpoint-level capping is enforced. | -| `SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER` | 2 | Multiplier for per-block budget computation. | +| `SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER` | 1.2 | Multiplier for per-block budget computation. | +| `SEQ_REDISTRIBUTE_CHECKPOINT_BUDGET` | true | Redistribute remaining checkpoint budget evenly across remaining blocks instead of allowing one block to consume it all. | | `VALIDATOR_MAX_L2_BLOCK_GAS` | *none* | Per-block L2 gas limit for validation. Proposals exceeding this are rejected. | | `VALIDATOR_MAX_DA_BLOCK_GAS` | *none* | Per-block DA gas limit for validation. Proposals exceeding this are rejected. | | `VALIDATOR_MAX_TX_PER_BLOCK` | *none* | Per-block tx count limit for validation. Proposals exceeding this are rejected. 
| diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 0d9cf8ae6959..3ba838a6c205 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -85,6 +85,7 @@ describe('CheckpointBuilder', () => { l1ChainId: 1, rollupVersion: 1, rollupManaLimit: 200_000_000, + redistributeCheckpointBudget: false, ...overrideConfig, }; @@ -416,4 +417,127 @@ describe('CheckpointBuilder', () => { expect(capped.maxTransactions).toBeUndefined(); }); }); + + describe('redistributeCheckpointBudget', () => { + it('evenly splits budget with multiplier=1', () => { + const rollupManaLimit = 1_000_000; + setupBuilder({ + redistributeCheckpointBudget: true, + perBlockAllocationMultiplier: 1, + maxBlocksPerCheckpoint: 5, + rollupManaLimit, + }); + + lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); + + const opts: PublicProcessorLimits = {}; + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + + // Fair share = ceil(1_000_000 / 5 * 1) = 200_000 + expect(capped.maxBlockGas!.l2Gas).toBe(200_000); + }); + + it('computes fair share with multiplier=1.2, 5 max blocks, 2 existing', () => { + const rollupManaLimit = 1_000_000; + setupBuilder({ + redistributeCheckpointBudget: true, + perBlockAllocationMultiplier: 1.2, + maxBlocksPerCheckpoint: 5, + rollupManaLimit, + }); + + // 2 existing blocks used 400_000 mana total + lightweightCheckpointBuilder.getBlocks.mockReturnValue([ + createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + ]); + + const opts: PublicProcessorLimits = {}; + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + + // remainingMana = 600_000, remainingBlocks = 3, multiplier = 1.2 + // fairShare = ceil(600_000 / 3 * 1.2) = 
ceil(240_000) = 240_000 + expect(capped.maxBlockGas!.l2Gas).toBe(240_000); + }); + + it('gives all remaining budget to last block (remainingBlocks=1)', () => { + const rollupManaLimit = 1_000_000; + setupBuilder({ + redistributeCheckpointBudget: true, + perBlockAllocationMultiplier: 1.2, + maxBlocksPerCheckpoint: 3, + rollupManaLimit, + }); + + // 2 existing blocks used 800_000 total + lightweightCheckpointBuilder.getBlocks.mockReturnValue([ + createMockBlock({ manaUsed: 400_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + createMockBlock({ manaUsed: 400_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + ]); + + const opts: PublicProcessorLimits = {}; + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + + // remainingMana = 200_000, remainingBlocks = 1, multiplier = 1.2 + // fairShare = ceil(200_000 / 1 * 1.2) = 240_000. min(200_000, 240_000, 200_000) = 200_000 + expect(capped.maxBlockGas!.l2Gas).toBe(200_000); + }); + + it('uses old behavior when redistributeCheckpointBudget is false', () => { + const rollupManaLimit = 1_000_000; + setupBuilder({ + redistributeCheckpointBudget: false, + maxBlocksPerCheckpoint: 5, + rollupManaLimit, + }); + + lightweightCheckpointBuilder.getBlocks.mockReturnValue([ + createMockBlock({ manaUsed: 200_000, txBlobFields: [10], blockBlobFieldCount: 20 }), + ]); + + const opts: PublicProcessorLimits = {}; + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + + // Old behavior: no fair share, just remaining budget = 800_000 + expect(capped.maxBlockGas!.l2Gas).toBe(800_000); + }); + + it('redistributes DA gas across remaining blocks', () => { + setupBuilder({ + redistributeCheckpointBudget: true, + perBlockAllocationMultiplier: 1, + maxBlocksPerCheckpoint: 4, + }); + + lightweightCheckpointBuilder.getBlocks.mockReturnValue([]); + + const opts: PublicProcessorLimits = {}; + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + + // 
fairShareDA = ceil(MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT / 4 * 1) + const expectedDA = Math.ceil(MAX_PROCESSABLE_DA_GAS_PER_CHECKPOINT / 4); + expect(capped.maxBlockGas!.daGas).toBe(expectedDA); + }); + + it('redistributes tx count across remaining blocks', () => { + setupBuilder({ + redistributeCheckpointBudget: true, + perBlockAllocationMultiplier: 1, + maxBlocksPerCheckpoint: 4, + maxTxsPerCheckpoint: 100, + }); + + // 1 existing block with 10 txs + lightweightCheckpointBuilder.getBlocks.mockReturnValue([ + createMockBlock({ manaUsed: 0, txBlobFields: new Array(10).fill(1), blockBlobFieldCount: 20 }), + ]); + + const opts: PublicProcessorLimits = {}; + const capped = (checkpointBuilder as TestCheckpointBuilder).testCapLimits(opts); + + // remainingTxs = 90, remainingBlocks = 3, multiplier = 1 + // fairShareTxs = ceil(90 / 3 * 1) = 30 + expect(capped.maxTransactions).toBe(30); + }); + }); }); diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index a80b3d2697b1..e6c00d525868 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -178,23 +178,31 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { const blockEndOverhead = getNumBlockEndBlobFields(isFirstBlock); const maxBlobFieldsForTxs = totalBlobCapacity - usedBlobFields - blockEndOverhead; - // Cap L2 gas by remaining checkpoint mana - const cappedL2Gas = Math.min(opts.maxBlockGas?.l2Gas ?? remainingMana, remainingMana); + // When redistributeCheckpointBudget is enabled (default), compute a fair share of remaining budget + // across remaining blocks scaled by the multiplier, instead of letting one block consume it all. + const redistribute = this.config.redistributeCheckpointBudget !== false; + const remainingBlocks = Math.max(1, (this.config.maxBlocksPerCheckpoint ?? 
1) - existingBlocks.length); + const multiplier = this.config.perBlockAllocationMultiplier ?? 1.2; - // Cap DA gas by remaining checkpoint DA gas budget - const cappedDAGas = Math.min(opts.maxBlockGas?.daGas ?? remainingDAGas, remainingDAGas); + // Cap L2 gas by remaining checkpoint mana (with fair share when redistributing) + const fairShareL2 = redistribute ? Math.ceil((remainingMana / remainingBlocks) * multiplier) : Infinity; + const cappedL2Gas = Math.min(opts.maxBlockGas?.l2Gas ?? Infinity, fairShareL2, remainingMana); - // Cap blob fields by remaining checkpoint blob capacity - const cappedBlobFields = - opts.maxBlobFields !== undefined ? Math.min(opts.maxBlobFields, maxBlobFieldsForTxs) : maxBlobFieldsForTxs; + // Cap DA gas by remaining checkpoint DA gas budget (with fair share when redistributing) + const fairShareDA = redistribute ? Math.ceil((remainingDAGas / remainingBlocks) * multiplier) : Infinity; + const cappedDAGas = Math.min(opts.maxBlockGas?.daGas ?? remainingDAGas, fairShareDA, remainingDAGas); - // Cap transaction count by remaining checkpoint tx budget + // Cap blob fields by remaining checkpoint blob capacity (with fair share when redistributing) + const fairShareBlobs = redistribute ? Math.ceil((maxBlobFieldsForTxs / remainingBlocks) * multiplier) : Infinity; + const cappedBlobFields = Math.min(opts.maxBlobFields ?? Infinity, fairShareBlobs, maxBlobFieldsForTxs); + + // Cap transaction count by remaining checkpoint tx budget (with fair share when redistributing) let cappedMaxTransactions: number | undefined; if (this.config.maxTxsPerCheckpoint !== undefined) { const usedTxs = sum(existingBlocks.map(b => b.body.txEffects.length)); const remainingTxs = Math.max(0, this.config.maxTxsPerCheckpoint - usedTxs); - cappedMaxTransactions = - opts.maxTransactions !== undefined ? Math.min(opts.maxTransactions, remainingTxs) : remainingTxs; + const fairShareTxs = redistribute ? 
Math.ceil((remainingTxs / remainingBlocks) * multiplier) : Infinity; + cappedMaxTransactions = Math.min(opts.maxTransactions ?? Infinity, fairShareTxs, remainingTxs); } else { cappedMaxTransactions = opts.maxTransactions; }