diff --git a/noir-projects/noir-contracts/contracts/spam_contract/src/main.nr b/noir-projects/noir-contracts/contracts/spam_contract/src/main.nr index e6206501feee..6e3f3e9601fd 100644 --- a/noir-projects/noir-contracts/contracts/spam_contract/src/main.nr +++ b/noir-projects/noir-contracts/contracts/spam_contract/src/main.nr @@ -6,6 +6,8 @@ use dep::aztec::macros::aztec; #[aztec] contract Spam { + global MAX_EXTERNAL_CALL_SPAM = 4; + use dep::aztec::{ prelude::{Map, AztecAddress, PublicMutable}, encrypted_logs::{encrypted_note_emission::encode_and_encrypt_note_unconstrained}, @@ -29,7 +31,12 @@ contract Spam { } #[private] - fn spam(nullifier_seed: Field, nullifier_count: u32, call_public: bool) { + fn spam( + nullifier_seed: Field, + nullifier_count: u32, + call_public: bool, + call_recursive_spam: bool + ) { let caller = context.msg_sender(); let caller_keys = get_public_keys(caller); let amount = U128::from_integer(1); @@ -58,6 +65,10 @@ contract Spam { MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX ).enqueue(&mut context); } + + if (call_recursive_spam) { + Spam::at(context.this_address()).public_spam_with_external_call(0, MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_CALL).enqueue(&mut context); + } } #[public] @@ -69,4 +80,28 @@ contract Spam { storage.public_balances.at(i as Field).write(prev + one); } } + + // TODO(md): add something to the public spam such that we can test further enqueued calls + #[public] + #[internal] + fn public_spam_with_external_call(start: u32, end: u32) { + let one = U128::from_integer(1); + for i in start..end { + let prev = storage.public_balances.at(i as Field).read(); + storage.public_balances.at(i as Field).write(prev + one); + } + + Spam::at(context.this_address()).recursive_spam(0).call(&mut context); + } + + #[public] + fn recursive_spam(counter: u32) { + let one = U128::from_integer(1); + if (counter < MAX_EXTERNAL_CALL_SPAM) { + let prev = storage.public_balances.at(counter as Field).read(); + storage.public_balances.at(counter as 
Field).write(prev + one); + + Spam::at(context.this_address()).recursive_spam(counter + 1).call(&mut context); + } + } } diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 83860d56b893..d509407c4bfb 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -166,7 +166,7 @@ export class AztecNodeService implements AztecNode { const simulationProvider = await createSimulationProvider(config, log); - const validatorClient = createValidatorClient(config, p2pClient); + const validatorClient = createValidatorClient(config, p2pClient, worldStateSynchronizer, archiver, telemetry); // now create the sequencer const sequencer = config.disableSequencer diff --git a/yarn-project/circuits.js/src/hash/hash.ts b/yarn-project/circuits.js/src/hash/hash.ts index 6083676209d9..eab83b28f0ba 100644 --- a/yarn-project/circuits.js/src/hash/hash.ts +++ b/yarn-project/circuits.js/src/hash/hash.ts @@ -4,7 +4,8 @@ import { Fr } from '@aztec/foundation/fields'; import { numToUInt8, numToUInt16BE, numToUInt32BE } from '@aztec/foundation/serialize'; import { GeneratorIndex } from '../constants.gen.js'; -import { type ScopedL2ToL1Message, VerificationKey } from '../structs/index.js'; +import { type ScopedL2ToL1Message } from '../structs/l2_to_l1_message.js'; +import { VerificationKey } from '../structs/verification_key.js'; /** * Computes a hash of a given verification key. 
diff --git a/yarn-project/circuits.js/src/structs/public_data_update_request.ts b/yarn-project/circuits.js/src/structs/public_data_update_request.ts index d048a1100a45..fb4a17e21c52 100644 --- a/yarn-project/circuits.js/src/structs/public_data_update_request.ts +++ b/yarn-project/circuits.js/src/structs/public_data_update_request.ts @@ -1,8 +1,12 @@ +import { type AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr } from '@aztec/foundation/fields'; import { BufferReader, FieldReader, serializeToBuffer } from '@aztec/foundation/serialize'; import { inspect } from 'util'; +import { computePublicDataTreeLeafSlot } from '../hash/hash.js'; +import { type ContractStorageUpdateRequest } from './contract_storage_update_request.js'; + /** * Write operations on the public data tree including the previous value. */ @@ -74,6 +78,12 @@ export class PublicDataUpdateRequest { return new PublicDataUpdateRequest(Fr.fromBuffer(reader), Fr.fromBuffer(reader), reader.readNumber()); } + static fromContractStorageUpdateRequest(contractAddress: AztecAddress, updateRequest: ContractStorageUpdateRequest) { + const leafSlot = computePublicDataTreeLeafSlot(contractAddress, updateRequest.storageSlot); + + return new PublicDataUpdateRequest(leafSlot, updateRequest.newValue, updateRequest.counter); + } + static empty() { return new PublicDataUpdateRequest(Fr.ZERO, Fr.ZERO, 0); } diff --git a/yarn-project/end-to-end/Earthfile b/yarn-project/end-to-end/Earthfile index de86cd9f3eb0..a8346f103d88 100644 --- a/yarn-project/end-to-end/Earthfile +++ b/yarn-project/end-to-end/Earthfile @@ -2,7 +2,7 @@ VERSION 0.8 e2e-p2p: LOCALLY - RUN ./scripts/e2e_test.sh ./src/e2e_p2p_network.test.ts + RUN ./scripts/e2e_test.sh ./src/e2e_p2p/ --runInBand e2e-l1-with-wall-time: LOCALLY diff --git a/yarn-project/end-to-end/scripts/e2e_test.sh b/yarn-project/end-to-end/scripts/e2e_test.sh index f826a164000f..3dcf49edfacc 100755 --- a/yarn-project/end-to-end/scripts/e2e_test.sh +++ 
b/yarn-project/end-to-end/scripts/e2e_test.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Usage: ./e2e_test.sh +# Usage: ./e2e_test.sh <...extra_args> # Optional environment variables: # HARDWARE_CONCURRENCY (default: "") @@ -8,6 +8,8 @@ set -eu # Main positional parameter TEST="$1" +shift + # Default values for environment variables HARDWARE_CONCURRENCY="${HARDWARE_CONCURRENCY:-}" FAKE_PROOFS="${FAKE_PROOFS:-}" @@ -18,4 +20,4 @@ if ! docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q "aztecprotoco exit 1 fi -docker run -e HARDWARE_CONCURRENCY="$HARDWARE_CONCURRENCY" -e FAKE_PROOFS="$FAKE_PROOFS" --rm aztecprotocol/end-to-end:$AZTEC_DOCKER_TAG "$TEST" +docker run -e HARDWARE_CONCURRENCY="$HARDWARE_CONCURRENCY" -e FAKE_PROOFS="$FAKE_PROOFS" --rm aztecprotocol/end-to-end:$AZTEC_DOCKER_TAG "$TEST" $@ diff --git a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts new file mode 100644 index 000000000000..2520cd225b49 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts @@ -0,0 +1,76 @@ +import { type AztecNodeService } from '@aztec/aztec-node'; +import { sleep } from '@aztec/aztec.js'; + +import fs from 'fs'; + +import { type NodeContext, createNodes } from '../fixtures/setup_p2p_test.js'; +import { P2PNetworkTest, WAIT_FOR_TX_TIMEOUT } from './p2p_network.js'; +import { createPXEServiceAndSubmitTransactions } from './shared.js'; + +// Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds +const NUM_NODES = 4; +const NUM_TXS_PER_NODE = 2; +const BOOT_NODE_UDP_PORT = 40600; + +const DATA_DIR = './data/gossip'; + +describe('e2e_p2p_network', () => { + let t: P2PNetworkTest; + let nodes: AztecNodeService[]; + + beforeEach(async () => { + t = await P2PNetworkTest.create('e2e_p2p_network', NUM_NODES, BOOT_NODE_UDP_PORT); + await t.applyBaseSnapshots(); + await t.setup(); + }); + + afterEach(async () => { + await 
t.stopNodes(nodes); + await t.teardown(); + for (let i = 0; i < NUM_NODES; i++) { + fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true }); + } + }); + + it('should rollup txs from all peers', async () => { + // create the bootstrap node for the network + if (!t.bootstrapNodeEnr) { + throw new Error('Bootstrap node ENR is not available'); + } + // create our network of nodes and submit txs into each of them + // the number of txs per node and the number of txs per rollup + // should be set so that the only way for rollups to be built + // is if the txs are successfully gossiped around the nodes. + const contexts: NodeContext[] = []; + t.logger.info('Creating nodes'); + nodes = await createNodes( + t.ctx.aztecNodeConfig, + t.peerIdPrivateKeys, + t.bootstrapNodeEnr, + NUM_NODES, + BOOT_NODE_UDP_PORT, + DATA_DIR, + ); + + // wait a bit for peers to discover each other + await sleep(4000); + + t.logger.info('Submitting transactions'); + for (const node of nodes) { + const context = await createPXEServiceAndSubmitTransactions(t.logger, node, NUM_TXS_PER_NODE); + contexts.push(context); + } + + t.logger.info('Waiting for transactions to be mined'); + // now ensure that all txs were successfully mined + await Promise.all( + contexts.flatMap((context, i) => + context.txs.map(async (tx, j) => { + t.logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`); + return tx.wait({ timeout: WAIT_FOR_TX_TIMEOUT }); + }), + ), + ); + t.logger.info('All transactions mined'); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts b/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts new file mode 100644 index 000000000000..9f6f9f3c7ac3 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts @@ -0,0 +1,198 @@ +import { getSchnorrAccount } from '@aztec/accounts/schnorr'; +import { type AztecNodeConfig, type AztecNodeService } from '@aztec/aztec-node'; +import { type AccountWalletWithSecretKey, EthCheatCodes } from 
'@aztec/aztec.js'; +import { AZTEC_SLOT_DURATION, ETHEREUM_SLOT_DURATION, EthAddress } from '@aztec/circuits.js'; +import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log'; +import { RollupAbi } from '@aztec/l1-artifacts'; +import { SpamContract } from '@aztec/noir-contracts.js'; +import { type BootstrapNode } from '@aztec/p2p'; + +import getPort from 'get-port'; +import { getContract } from 'viem'; +import { privateKeyToAccount } from 'viem/accounts'; + +import { + createBootstrapNodeFromPrivateKey, + createValidatorConfig, + generateNodePrivateKeys, + generatePeerIdPrivateKeys, +} from '../fixtures/setup_p2p_test.js'; +import { + type ISnapshotManager, + type SubsystemsContext, + addAccounts, + createSnapshotManager, +} from '../fixtures/snapshot_manager.js'; +import { getPrivateKeyFromIndex } from '../fixtures/utils.js'; + +// Use a fixed bootstrap node private key so that we can re-use the same snapshot and the nodes can find each other +const BOOTSTRAP_NODE_PRIVATE_KEY = '080212208f988fc0899e4a73a5aee4d271a5f20670603a756ad8d84f2c94263a6427c591'; +export const WAIT_FOR_TX_TIMEOUT = AZTEC_SLOT_DURATION * 3; + +export class P2PNetworkTest { + private snapshotManager: ISnapshotManager; + private baseAccount; + + public logger: DebugLogger; + + public ctx!: SubsystemsContext; + public nodePrivateKeys: `0x${string}`[] = []; + public peerIdPrivateKeys: string[] = []; + + public bootstrapNodeEnr: string = ''; + + // The re-execution test needs a wallet and a spam contract + public wallet?: AccountWalletWithSecretKey; + public spamContract?: SpamContract; + + constructor( + testName: string, + public bootstrapNode: BootstrapNode, + public bootNodePort: number, + private numberOfNodes: number, + initialValidatorAddress: string, + initialValidatorConfig: AztecNodeConfig, + ) { + this.logger = createDebugLogger(`aztec:e2e_p2p:${testName}`); + + // Set up the base account and node private keys for the initial network deployment + this.baseAccount = 
privateKeyToAccount(`0x${getPrivateKeyFromIndex(0)!.toString('hex')}`); + this.nodePrivateKeys = generateNodePrivateKeys(1, numberOfNodes); + this.peerIdPrivateKeys = generatePeerIdPrivateKeys(numberOfNodes); + + this.bootstrapNodeEnr = bootstrapNode.getENR().encodeTxt(); + + const initialValidators = [EthAddress.fromString(initialValidatorAddress)]; + + this.snapshotManager = createSnapshotManager(`e2e_p2p_network/${testName}`, process.env.E2E_DATA_PATH, { + ...initialValidatorConfig, + l1BlockTime: ETHEREUM_SLOT_DURATION, + salt: 420, + initialValidators, + }); + } + + static async create(testName: string, numberOfNodes: number, basePort?: number) { + const port = basePort || (await getPort()); + + const bootstrapNode = await createBootstrapNodeFromPrivateKey(BOOTSTRAP_NODE_PRIVATE_KEY, port); + const bootstrapNodeEnr = bootstrapNode.getENR().encodeTxt(); + + const initialValidatorConfig = await createValidatorConfig({} as AztecNodeConfig, bootstrapNodeEnr); + const intiailValidatorAddress = privateKeyToAccount(initialValidatorConfig.publisherPrivateKey).address; + + return new P2PNetworkTest( + testName, + bootstrapNode, + port, + numberOfNodes, + intiailValidatorAddress, + initialValidatorConfig, + ); + } + + async applyBaseSnapshots() { + await this.snapshotManager.snapshot('add-validators', async ({ deployL1ContractsValues, aztecNodeConfig }) => { + const rollup = getContract({ + address: deployL1ContractsValues.l1ContractAddresses.rollupAddress.toString(), + abi: RollupAbi, + client: deployL1ContractsValues.walletClient, + }); + + this.logger.verbose(`Adding ${this.numberOfNodes} validators`); + + const txHashes: `0x${string}`[] = []; + for (let i = 0; i < this.numberOfNodes; i++) { + const account = privateKeyToAccount(this.nodePrivateKeys[i]!); + const txHash = await rollup.write.addValidator([account.address]); + txHashes.push(txHash); + + this.logger.debug(`Adding ${account.address} as validator`); + } + + // Wait for all the transactions adding 
validators to be mined + await Promise.all( + txHashes.map(txHash => + deployL1ContractsValues.publicClient.waitForTransactionReceipt({ + hash: txHash, + }), + ), + ); + + //@note Now we jump ahead to the next epoch such that the validator committee is picked + // INTERVAL MINING: If we are using anvil interval mining this will NOT progress the time! + // Which means that the validator set will still be empty! So anyone can propose. + const slotsInEpoch = await rollup.read.EPOCH_DURATION(); + const timestamp = await rollup.read.getTimestampForSlot([slotsInEpoch]); + const cheatCodes = new EthCheatCodes(aztecNodeConfig.l1RpcUrl); + try { + await cheatCodes.warp(Number(timestamp)); + } catch (err) { + this.logger.debug('Warp failed, time already satisfied'); + } + + // Send and await a tx to make sure we mine a block for the warp to correctly progress. + await deployL1ContractsValues.publicClient.waitForTransactionReceipt({ + hash: await deployL1ContractsValues.walletClient.sendTransaction({ + to: this.baseAccount.address, + value: 1n, + account: this.baseAccount, + }), + }); + }); + } + + async setupAccount() { + await this.snapshotManager.snapshot( + 'setup-account', + addAccounts(1, this.logger, false), + async ({ accountKeys }, ctx) => { + const accountManagers = accountKeys.map(ak => getSchnorrAccount(ctx.pxe, ak[0], ak[1], 1)); + await Promise.all(accountManagers.map(a => a.register())); + const wallets = await Promise.all(accountManagers.map(a => a.getWallet())); + this.wallet = wallets[0]; + }, + ); + } + + async deploySpamContract() { + await this.snapshotManager.snapshot( + 'add-spam-contract', + async () => { + if (!this.wallet) { + throw new Error('Call snapshot t.setupAccount before deploying account contract'); + } + + const spamContract = await SpamContract.deploy(this.wallet).send().deployed(); + return { contractAddress: spamContract.address }; + }, + async ({ contractAddress }) => { + if (!this.wallet) { + throw new Error('Call snapshot 
t.setupAccount before deploying account contract'); + } + this.spamContract = await SpamContract.at(contractAddress, this.wallet); + }, + ); + } + + async setup() { + this.ctx = await this.snapshotManager.setup(); + + // TODO(md): make it such that the test can set these up + this.ctx.aztecNodeConfig.minTxsPerBlock = 4; + this.ctx.aztecNodeConfig.maxTxsPerBlock = 4; + } + + async stopNodes(nodes: AztecNodeService[]) { + this.logger.info('Stopping nodes'); + for (const node of nodes) { + await node.stop(); + } + await this.bootstrapNode.stop(); + this.logger.info('Nodes stopped'); + } + + async teardown() { + await this.snapshotManager.teardown(); + } +} diff --git a/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts b/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts new file mode 100644 index 000000000000..51c222e238ad --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts @@ -0,0 +1,94 @@ +import { type AztecNodeService } from '@aztec/aztec-node'; +import { sleep } from '@aztec/aztec.js'; + +import fs from 'fs'; + +import { type NodeContext, createNode, createNodes } from '../fixtures/setup_p2p_test.js'; +import { P2PNetworkTest, WAIT_FOR_TX_TIMEOUT } from './p2p_network.js'; +import { createPXEServiceAndSubmitTransactions } from './shared.js'; + +// Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds +const NUM_NODES = 4; +const NUM_TXS_PER_NODE = 2; +const BOOT_NODE_UDP_PORT = 40400; + +const DATA_DIR = './data/rediscovery'; + +describe('e2e_p2p_rediscovery', () => { + let t: P2PNetworkTest; + let nodes: AztecNodeService[]; + + beforeEach(async () => { + t = await P2PNetworkTest.create('e2e_p2p_rediscovery', NUM_NODES, BOOT_NODE_UDP_PORT); + await t.applyBaseSnapshots(); + await t.setup(); + }); + + afterEach(async () => { + await t.stopNodes(nodes); + await t.teardown(); + for (let i = 0; i < NUM_NODES; i++) { + fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, 
force: true }); + } + }); + + it('should re-discover stored peers without bootstrap node', async () => { + const contexts: NodeContext[] = []; + nodes = await createNodes( + t.ctx.aztecNodeConfig, + t.peerIdPrivateKeys, + t.bootstrapNodeEnr, + NUM_NODES, + BOOT_NODE_UDP_PORT, + DATA_DIR, + ); + + // wait a bit for peers to discover each other + await sleep(3000); + + // stop bootstrap node + await t.bootstrapNode.stop(); + + // create new nodes from datadir + const newNodes: AztecNodeService[] = []; + + // stop all nodes + for (let i = 0; i < NUM_NODES; i++) { + const node = nodes[i]; + await node.stop(); + t.logger.info(`Node ${i} stopped`); + await sleep(1200); + + const newNode = await createNode( + t.ctx.aztecNodeConfig, + t.peerIdPrivateKeys[i], + i + 1 + BOOT_NODE_UDP_PORT, + undefined, + i, + `${DATA_DIR}-${i}`, + ); + t.logger.info(`Node ${i} restarted`); + newNodes.push(newNode); + } + nodes = newNodes; + + // wait a bit for peers to discover each other + await sleep(2000); + + for (const node of newNodes) { + const context = await createPXEServiceAndSubmitTransactions(t.logger, node, NUM_TXS_PER_NODE); + contexts.push(context); + } + + // now ensure that all txs were successfully mined + + await Promise.all( + contexts.flatMap((context, i) => + context.txs.map(async (tx, j) => { + t.logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`); + return tx.wait({ timeout: WAIT_FOR_TX_TIMEOUT }); + }), + ), + ); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts b/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts new file mode 100644 index 000000000000..a3f79c4bcbeb --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts @@ -0,0 +1,81 @@ +import { type AztecNodeService } from '@aztec/aztec-node'; +import { sleep } from '@aztec/aztec.js'; + +import { beforeAll, describe, it } from '@jest/globals'; +import fs from 'fs'; + +import { createNodes } from '../fixtures/setup_p2p_test.js'; +import { P2PNetworkTest, 
WAIT_FOR_TX_TIMEOUT } from './p2p_network.js'; +import { submitComplexTxsTo } from './shared.js'; + +const NUM_NODES = 4; +const NUM_TXS_PER_NODE = 1; +const BOOT_NODE_UDP_PORT = 41000; + +const DATA_DIR = './data/re-ex'; + +describe('e2e_p2p_reex', () => { + let t: P2PNetworkTest; + beforeAll(async () => { + t = await P2PNetworkTest.create('e2e_p2p_reex', NUM_NODES, BOOT_NODE_UDP_PORT); + + t.logger.verbose('Setup account'); + await t.setupAccount(); + + t.logger.verbose('Deploy spam contract'); + await t.deploySpamContract(); + + t.logger.verbose('Apply base snapshots'); + await t.applyBaseSnapshots(); + + t.logger.verbose('Setup nodes'); + await t.setup(); + }); + + afterAll(async () => { + await t.teardown(); + for (let i = 0; i < NUM_NODES; i++) { + fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true }); + } + }); + + it('validators should re-execute transactions before attesting', async () => { + // create the bootstrap node for the network + if (!t.bootstrapNodeEnr) { + throw new Error('Bootstrap node ENR is not available'); + } + + t.ctx.aztecNodeConfig.validatorReEx = true; + + const nodes: AztecNodeService[] = await createNodes( + t.ctx.aztecNodeConfig, + t.peerIdPrivateKeys, + t.bootstrapNodeEnr, + NUM_NODES, + BOOT_NODE_UDP_PORT, + ); + + // wait a bit for peers to discover each other + await sleep(4000); + + // tODO: use a tx with nested calls + nodes.forEach(node => { + node.getSequencer()?.updateSequencerConfig({ + minTxsPerBlock: NUM_TXS_PER_NODE, + maxTxsPerBlock: NUM_TXS_PER_NODE, + }); + }); + const txs = await submitComplexTxsTo(t.logger, t.spamContract!, NUM_TXS_PER_NODE); + + // now ensure that all txs were successfully mined + await Promise.all( + txs.map(async (tx, i) => { + t.logger.info(`Waiting for tx ${i}: ${await tx.getTxHash()} to be mined`); + return tx.wait({ timeout: WAIT_FOR_TX_TIMEOUT }); + }), + ); + + // shutdown all nodes. 
+ await t.stopNodes(nodes); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_p2p/reqresp_tx.test.ts b/yarn-project/end-to-end/src/e2e_p2p/reqresp_tx.test.ts new file mode 100644 index 000000000000..8000c72bc099 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/reqresp_tx.test.ts @@ -0,0 +1,106 @@ +import { type AztecNodeService } from '@aztec/aztec-node'; +import { sleep } from '@aztec/aztec.js'; + +import { jest } from '@jest/globals'; +import fs from 'fs'; + +import { type NodeContext, createNodes } from '../fixtures/setup_p2p_test.js'; +import { P2PNetworkTest, WAIT_FOR_TX_TIMEOUT } from './p2p_network.js'; +import { createPXEServiceAndSubmitTransactions } from './shared.js'; + +// Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds +const NUM_NODES = 4; +const NUM_TXS_PER_NODE = 2; +const BOOT_NODE_UDP_PORT = 40800; + +const DATA_DIR = './data/data-reqresp'; + +describe('e2e_p2p_reqresp_tx', () => { + let t: P2PNetworkTest; + let nodes: AztecNodeService[]; + + beforeEach(async () => { + t = await P2PNetworkTest.create('e2e_p2p_reqresp_tx', NUM_NODES, BOOT_NODE_UDP_PORT); + await t.applyBaseSnapshots(); + await t.setup(); + }); + + afterEach(async () => { + await t.stopNodes(nodes); + await t.teardown(); + for (let i = 0; i < NUM_NODES; i++) { + fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true }); + } + }); + + // NOTE: If this test fails in a PR where the shuffling algorithm is changed, then it is failing as the node with + // the mocked p2p layer is being picked as the sequencer, and it does not have any transactions in it's mempool. + // If this is the case, then we should update the test to switch off the mempool of a different node. + // adjust `nodeToTurnOffTxGossip` in the test below. + it('should produce an attestation by requesting tx data over the p2p network', async () => { + /** + * Birds eye overview of the test + * 1. We spin up x nodes + * 2. 
We turn off receiving a tx via gossip from two of the nodes + * 3. We send a transactions and gossip it to other nodes + * 4. The disabled nodes will receive an attestation that it does not have the data for + * 5. They will request this data over the p2p layer + * 6. We receive all of the attestations that we need and we produce the block + * + * Note: we do not attempt to let this node produce a block, as it will not have received any transactions + * from the other pxes. + */ + + if (!t.bootstrapNodeEnr) { + throw new Error('Bootstrap node ENR is not available'); + } + const contexts: NodeContext[] = []; + + t.logger.info('Creating nodes'); + nodes = await createNodes( + t.ctx.aztecNodeConfig, + t.peerIdPrivateKeys, + t.bootstrapNodeEnr, + NUM_NODES, + BOOT_NODE_UDP_PORT, + DATA_DIR, + ); + + // wait a bit for peers to discover each other + await sleep(4000); + + t.logger.info('Turning off tx gossip'); + // Replace the p2p node implementation of some of the nodes with a spy such that it does not store transactions that are gossiped to it + // Original implementation of `processTxFromPeer` will store received transactions in the tx pool. + // We have chosen nodes 0,3 as they do not get chosen to be the sequencer in this test. + const nodeToTurnOffTxGossip = [0, 3]; + for (const nodeIndex of nodeToTurnOffTxGossip) { + jest + .spyOn((nodes[nodeIndex] as any).p2pClient.p2pService, 'processTxFromPeer') + .mockImplementation((): Promise => { + return Promise.resolve(); + }); + } + + t.logger.info('Submitting transactions'); + // Only submit transactions to the first two nodes, so that we avoid our sequencer with a mocked p2p layer being picked to produce a block. + // If the shuffling algorithm changes, then this will need to be updated. 
+ for (let i = 1; i < 3; i++) { + const context = await createPXEServiceAndSubmitTransactions(t.logger, nodes[i], NUM_TXS_PER_NODE); + contexts.push(context); + } + + t.logger.info('Waiting for transactions to be mined'); + await Promise.all( + contexts.flatMap((context, i) => + context.txs.map(async (tx, j) => { + t.logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`); + await tx.wait({ timeout: WAIT_FOR_TX_TIMEOUT }); + t.logger.info(`Tx ${i}-${j}: ${await tx.getTxHash()} has been mined`); + return await tx.getTxHash(); + }), + ), + ); + t.logger.info('All transactions mined'); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_p2p/shared.ts b/yarn-project/end-to-end/src/e2e_p2p/shared.ts new file mode 100644 index 000000000000..5685f4b26bd3 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/shared.ts @@ -0,0 +1,92 @@ +import { getSchnorrAccount } from '@aztec/accounts/schnorr'; +import { type AztecNodeService } from '@aztec/aztec-node'; +import { type DebugLogger, type SentTx } from '@aztec/aztec.js'; +import { CompleteAddress, TxStatus } from '@aztec/aztec.js'; +import { Fr, GrumpkinScalar } from '@aztec/foundation/fields'; +import { type SpamContract } from '@aztec/noir-contracts.js'; +import { type PXEService, createPXEService, getPXEServiceConfig as getRpcConfig } from '@aztec/pxe'; + +import { expect } from '@jest/globals'; + +import { type NodeContext } from '../fixtures/setup_p2p_test.js'; + +// submits a set of transactions to the provided Private eXecution Environment (PXE) +export const submitComplexTxsTo = async (logger: DebugLogger, spamContract: SpamContract, numTxs: number) => { + const txs: SentTx[] = []; + + const seed = 1234n; + const spamCount = 15; + for (let i = 0; i < numTxs; i++) { + // TODO: check out batch call for deployments + + // Send a public mint tx - this will be minted from the token contract to the pxe account + // const tx = token.methods.mint_public(accountManager.getCompleteAddress().address, 
1n).send() + const tx = spamContract.methods.spam(seed + BigInt(i * spamCount), spamCount, false, true).send(); + const txHash = await tx.getTxHash(); + + logger.info(`Tx sent with hash ${txHash}`); + const receipt = await tx.getReceipt(); + expect(receipt).toEqual( + expect.objectContaining({ + status: TxStatus.PENDING, + error: '', + }), + ); + logger.info(`Receipt received for ${txHash}`); + txs.push(tx); + } + return txs; +}; + +// creates an instance of the PXE and submit a given number of transactions to it. +export const createPXEServiceAndSubmitTransactions = async ( + logger: DebugLogger, + node: AztecNodeService, + numTxs: number, +): Promise => { + const rpcConfig = getRpcConfig(); + const pxeService = await createPXEService(node, rpcConfig, true); + + const secretKey = Fr.random(); + const completeAddress = CompleteAddress.fromSecretKeyAndPartialAddress(secretKey, Fr.random()); + await pxeService.registerAccount(secretKey, completeAddress.partialAddress); + + const txs = await submitTxsTo(logger, pxeService, numTxs); + return { + txs, + account: completeAddress.address, + pxeService, + node, + }; +}; + +// submits a set of transactions to the provided Private eXecution Environment (PXE) +const submitTxsTo = async (logger: DebugLogger, pxe: PXEService, numTxs: number) => { + const txs: SentTx[] = []; + for (let i = 0; i < numTxs; i++) { + const accountManager = getSchnorrAccount(pxe, Fr.random(), GrumpkinScalar.random(), Fr.random()); + const deployMethod = await accountManager.getDeployMethod(); + await deployMethod.create({ + contractAddressSalt: accountManager.salt, + skipClassRegistration: true, + skipPublicDeployment: true, + universalDeploy: true, + }); + await deployMethod.prove({}); + const tx = deployMethod.send(); + + const txHash = await tx.getTxHash(); + + logger.info(`Tx sent with hash ${txHash}`); + const receipt = await tx.getReceipt(); + expect(receipt).toEqual( + expect.objectContaining({ + status: TxStatus.PENDING, + error: '', + }), + 
); + logger.info(`Receipt received for ${txHash}`); + txs.push(tx); + } + return txs; +}; diff --git a/yarn-project/end-to-end/src/e2e_p2p_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p_network.test.ts deleted file mode 100644 index 6caf8f6bec4a..000000000000 --- a/yarn-project/end-to-end/src/e2e_p2p_network.test.ts +++ /dev/null @@ -1,345 +0,0 @@ -import { getSchnorrAccount } from '@aztec/accounts/schnorr'; -import { type AztecNodeConfig, type AztecNodeService } from '@aztec/aztec-node'; -import { - CompleteAddress, - type DebugLogger, - type DeployL1Contracts, - EthCheatCodes, - Fr, - GrumpkinScalar, - type SentTx, - TxStatus, - sleep, -} from '@aztec/aztec.js'; -import { ETHEREUM_SLOT_DURATION, EthAddress } from '@aztec/circuits.js'; -import { RollupAbi } from '@aztec/l1-artifacts'; -import { type BootstrapNode } from '@aztec/p2p'; -import { type PXEService, createPXEService, getPXEServiceConfig as getRpcConfig } from '@aztec/pxe'; - -import { jest } from '@jest/globals'; -import fs from 'fs'; -import { getContract } from 'viem'; -import { privateKeyToAccount } from 'viem/accounts'; - -import { - type NodeContext, - createBootstrapNode, - createNode, - createNodes, - generatePeerIdPrivateKeys, -} from './fixtures/setup_p2p_test.js'; -import { getPrivateKeyFromIndex, setup } from './fixtures/utils.js'; - -// Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds -const NUM_NODES = 4; -const NUM_TXS_PER_BLOCK = 4; -const NUM_TXS_PER_NODE = 2; -const BOOT_NODE_UDP_PORT = 40400; - -const PEER_ID_PRIVATE_KEYS = generatePeerIdPrivateKeys(NUM_NODES); - -describe('e2e_p2p_network', () => { - let config: AztecNodeConfig; - let logger: DebugLogger; - let teardown: () => Promise; - let bootstrapNode: BootstrapNode; - let bootstrapNodeEnr: string; - let deployL1ContractsValues: DeployL1Contracts; - - beforeEach(async () => { - // If we want to test with interval mining, we can use the local host and start 
`anvil --block-time 12` - const useLocalHost = false; - if (useLocalHost) { - jest.setTimeout(300_000); - } - const options = useLocalHost ? { l1RpcUrl: 'http://127.0.0.1:8545' } : {}; - - // We need the very first node to be the sequencer for this is the one doing everything throughout the setup. - // Without it we will wait forever. - const account = privateKeyToAccount(`0x${getPrivateKeyFromIndex(0)!.toString('hex')}`); - - const initialValidators = [EthAddress.fromString(account.address)]; - - ({ teardown, config, logger, deployL1ContractsValues } = await setup(0, { - initialValidators, - l1BlockTime: ETHEREUM_SLOT_DURATION, - salt: 420, - ...options, - })); - - bootstrapNode = await createBootstrapNode(BOOT_NODE_UDP_PORT); - bootstrapNodeEnr = bootstrapNode.getENR().encodeTxt(); - - config.minTxsPerBlock = NUM_TXS_PER_BLOCK; - config.maxTxsPerBlock = NUM_TXS_PER_BLOCK; - - const rollup = getContract({ - address: deployL1ContractsValues.l1ContractAddresses.rollupAddress.toString(), - abi: RollupAbi, - client: deployL1ContractsValues.walletClient, - }); - - for (let i = 0; i < NUM_NODES; i++) { - const account = privateKeyToAccount(`0x${getPrivateKeyFromIndex(i + 1)!.toString('hex')}`); - await rollup.write.addValidator([account.address]); - logger.debug(`Adding ${account.address} as validator`); - } - - // Remove the initial sequencer from the set! This was the sequencer we used for perform the setup. - logger.debug(`Removing ${account.address} as validator`); - const txHash = await rollup.write.removeValidator([account.address]); - - await deployL1ContractsValues.publicClient.waitForTransactionReceipt({ hash: txHash }); - - //@note Now we jump ahead to the next epoch such that the validator committee is picked - // INTERVAL MINING: If we are using anvil interval mining this will NOT progress the time! - // Which means that the validator set will still be empty! So anyone can propose. 
- const slotsInEpoch = await rollup.read.EPOCH_DURATION(); - const timestamp = await rollup.read.getTimestampForSlot([slotsInEpoch]); - const cheatCodes = new EthCheatCodes(config.l1RpcUrl); - try { - await cheatCodes.warp(Number(timestamp)); - } catch (err) { - logger.debug('Warp failed, time already satisfied'); - } - - // Send and await a tx to make sure we mine a block for the warp to correctly progress. - await deployL1ContractsValues.publicClient.waitForTransactionReceipt({ - hash: await deployL1ContractsValues.walletClient.sendTransaction({ to: account.address, value: 1n, account }), - }); - }); - - const stopNodes = async (bootstrap: BootstrapNode, nodes: AztecNodeService[]) => { - for (const node of nodes) { - await node.stop(); - } - await bootstrap.stop(); - }; - - afterEach(() => teardown()); - - afterAll(() => { - for (let i = 0; i < NUM_NODES; i++) { - fs.rmSync(`./data-${i}`, { recursive: true, force: true }); - } - }); - - it('should rollup txs from all peers', async () => { - // create the bootstrap node for the network - if (!bootstrapNodeEnr) { - throw new Error('Bootstrap node ENR is not available'); - } - // create our network of nodes and submit txs into each of them - // the number of txs per node and the number of txs per rollup - // should be set so that the only way for rollups to be built - // is if the txs are successfully gossiped around the nodes. 
- const contexts: NodeContext[] = []; - const nodes: AztecNodeService[] = await createNodes( - config, - PEER_ID_PRIVATE_KEYS, - bootstrapNodeEnr, - NUM_NODES, - BOOT_NODE_UDP_PORT, - ); - - // wait a bit for peers to discover each other - await sleep(4000); - - for (const node of nodes) { - const context = await createPXEServiceAndSubmitTransactions(node, NUM_TXS_PER_NODE); - contexts.push(context); - } - - // now ensure that all txs were successfully mined - await Promise.all( - contexts.flatMap((context, i) => - context.txs.map(async (tx, j) => { - logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`); - return tx.wait(); - }), - ), - ); - - // shutdown all nodes. - await stopNodes(bootstrapNode, nodes); - }); - - // NOTE: If this test fails in a PR where the shuffling algorithm is changed, then it is failing as the node with - // the mocked p2p layer is being picked as the sequencer, and it does not have any transactions in it's mempool. - // If this is the case, then we should update the test to switch off the mempool of a different node. - // adjust `nodeToTurnOffTxGossip` in the test below. - it('should produce an attestation by requesting tx data over the p2p network', async () => { - /** - * Birds eye overview of the test - * 1. We spin up x nodes - * 2. We turn off receiving a tx via gossip from two of the nodes - * 3. We send a transactions and gossip it to other nodes - * 4. The disabled nodes will receive an attestation that it does not have the data for - * 5. They will request this data over the p2p layer - * 6. We receive all of the attestations that we need and we produce the block - * - * Note: we do not attempt to let this node produce a block, as it will not have received any transactions - * from the other pxes. 
- */ - - if (!bootstrapNodeEnr) { - throw new Error('Bootstrap node ENR is not available'); - } - const contexts: NodeContext[] = []; - const nodes: AztecNodeService[] = await createNodes( - config, - PEER_ID_PRIVATE_KEYS, - bootstrapNodeEnr, - NUM_NODES, - BOOT_NODE_UDP_PORT, - ); - - // wait a bit for peers to discover each other - await sleep(4000); - - // Replace the p2p node implementation of some of the nodes with a spy such that it does not store transactions that are gossiped to it - // Original implementation of `processTxFromPeer` will store received transactions in the tx pool. - // We have chosen nodes 0,3 as they do not get chosen to be the sequencer in this test. - const nodeToTurnOffTxGossip = [0, 3]; - for (const nodeIndex of nodeToTurnOffTxGossip) { - jest - .spyOn((nodes[nodeIndex] as any).p2pClient.p2pService, 'processTxFromPeer') - .mockImplementation((): Promise => { - return Promise.resolve(); - }); - } - - // Only submit transactions to the first two nodes, so that we avoid our sequencer with a mocked p2p layer being picked to produce a block. - // If the shuffling algorithm changes, then this will need to be updated. 
- for (let i = 0; i < 2; i++) { - const context = await createPXEServiceAndSubmitTransactions(nodes[i], NUM_TXS_PER_NODE); - contexts.push(context); - } - - await Promise.all( - contexts.flatMap((context, i) => - context.txs.map(async (tx, j) => { - logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`); - await tx.wait(); - logger.info(`Tx ${i}-${j}: ${await tx.getTxHash()} has been mined`); - return await tx.getTxHash(); - }), - ), - ); - - await stopNodes(bootstrapNode, nodes); - }); - - it('should re-discover stored peers without bootstrap node', async () => { - const contexts: NodeContext[] = []; - const nodes: AztecNodeService[] = await createNodes( - config, - PEER_ID_PRIVATE_KEYS, - bootstrapNodeEnr, - NUM_NODES, - BOOT_NODE_UDP_PORT, - ); - - // wait a bit for peers to discover each other - await sleep(3000); - - // stop bootstrap node - await bootstrapNode.stop(); - - // create new nodes from datadir - const newNodes: AztecNodeService[] = []; - - // stop all nodes - for (let i = 0; i < NUM_NODES; i++) { - const node = nodes[i]; - await node.stop(); - logger.info(`Node ${i} stopped`); - await sleep(1200); - // TODO: make a restart nodes function - const newNode = await createNode( - config, - PEER_ID_PRIVATE_KEYS[i], - i + 1 + BOOT_NODE_UDP_PORT, - undefined, - i, - `./data-${i}`, - ); - logger.info(`Node ${i} restarted`); - newNodes.push(newNode); - } - - // wait a bit for peers to discover each other - await sleep(2000); - - for (const node of newNodes) { - const context = await createPXEServiceAndSubmitTransactions(node, NUM_TXS_PER_NODE); - contexts.push(context); - } - - // now ensure that all txs were successfully mined - await Promise.all( - contexts.flatMap((context, i) => - context.txs.map(async (tx, j) => { - logger.info(`Waiting for tx ${i}-${j}: ${await tx.getTxHash()} to be mined`); - return tx.wait(); - }), - ), - ); - - // shutdown all nodes. 
- await stopNodes(bootstrapNode, newNodes); - }); - - // creates an instance of the PXE and submit a given number of transactions to it. - const createPXEServiceAndSubmitTransactions = async ( - node: AztecNodeService, - numTxs: number, - ): Promise => { - const rpcConfig = getRpcConfig(); - const pxeService = await createPXEService(node, rpcConfig, true); - - const secretKey = Fr.random(); - const completeAddress = CompleteAddress.fromSecretKeyAndPartialAddress(secretKey, Fr.random()); - await pxeService.registerAccount(secretKey, completeAddress.partialAddress); - - const txs = await submitTxsTo(pxeService, numTxs); - return { - txs, - account: completeAddress.address, - pxeService, - node, - }; - }; - - // submits a set of transactions to the provided Private eXecution Environment (PXE) - const submitTxsTo = async (pxe: PXEService, numTxs: number) => { - const txs: SentTx[] = []; - for (let i = 0; i < numTxs; i++) { - // const tx = getSchnorrAccount(pxe, Fr.random(), GrumpkinScalar.random(), Fr.random()).deploy(); - const accountManager = getSchnorrAccount(pxe, Fr.random(), GrumpkinScalar.random(), Fr.random()); - const deployMethod = await accountManager.getDeployMethod(); - await deployMethod.create({ - contractAddressSalt: accountManager.salt, - skipClassRegistration: true, - skipPublicDeployment: true, - universalDeploy: true, - }); - await deployMethod.prove({}); - const tx = deployMethod.send(); - - const txHash = await tx.getTxHash(); - - logger.info(`Tx sent with hash ${txHash}`); - const receipt = await tx.getReceipt(); - expect(receipt).toEqual( - expect.objectContaining({ - status: TxStatus.PENDING, - error: '', - }), - ); - logger.info(`Receipt received for ${txHash}`); - txs.push(tx); - } - return txs; - }; -}); diff --git a/yarn-project/end-to-end/src/e2e_synching.test.ts b/yarn-project/end-to-end/src/e2e_synching.test.ts index 18b1f7b4285d..e70b6dbddfe2 100644 --- a/yarn-project/end-to-end/src/e2e_synching.test.ts +++ 
b/yarn-project/end-to-end/src/e2e_synching.test.ts @@ -11,7 +11,6 @@ * * To run the Setup run with the `AZTEC_GENERATE_TEST_DATA=1` flag. Without * this flag, we will run in execution. - * * There is functionality to store the `stats` of a sync, but currently we * will simply be writing it to the log instead. * @@ -277,10 +276,10 @@ class TestVariant { const txs = []; for (let i = 0; i < this.txCount; i++) { const batch = new BatchCall(this.wallets[i], [ - this.spam.methods.spam(this.seed, 16, false).request(), - this.spam.methods.spam(this.seed + 16n, 16, false).request(), - this.spam.methods.spam(this.seed + 32n, 16, false).request(), - this.spam.methods.spam(this.seed + 48n, 15, true).request(), + this.spam.methods.spam(this.seed, 16, false, false).request(), + this.spam.methods.spam(this.seed + 16n, 16, false, false).request(), + this.spam.methods.spam(this.seed + 32n, 16, false, false).request(), + this.spam.methods.spam(this.seed + 48n, 15, true, false).request(), ]); this.seed += 100n; diff --git a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts index 560677f6bee0..659443a14457 100644 --- a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts +++ b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts @@ -8,6 +8,7 @@ import { type BootnodeConfig, BootstrapNode, createLibP2PPeerId } from '@aztec/p import { type PXEService } from '@aztec/pxe'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; +import getPort from 'get-port'; import { generatePrivateKey } from 'viem/accounts'; import { getPrivateKeyFromIndex } from './utils.js'; @@ -19,54 +20,96 @@ export interface NodeContext { account: AztecAddress; } +export function generateNodePrivateKeys(startIndex: number, numberOfNodes: number): `0x${string}`[] { + const nodePrivateKeys: `0x${string}`[] = []; + // Do not start from 0 as it is used during setup + for (let i = startIndex; i < startIndex + numberOfNodes; i++) { + 
nodePrivateKeys.push(`0x${getPrivateKeyFromIndex(i)!.toString('hex')}`); + } + return nodePrivateKeys; +} + +export function generatePeerIdPrivateKey(): string { + // magic number is multiaddr prefix: https://multiformats.io/multiaddr/ for secp256k1 + return '08021220' + generatePrivateKey().substr(2, 66); +} + export function generatePeerIdPrivateKeys(numberOfPeers: number): string[] { const peerIdPrivateKeys = []; for (let i = 0; i < numberOfPeers; i++) { - // magic number is multiaddr prefix: https://multiformats.io/multiaddr/ - peerIdPrivateKeys.push('08021220' + generatePrivateKey().substr(2, 66)); + peerIdPrivateKeys.push(generatePeerIdPrivateKey()); } return peerIdPrivateKeys; } -export async function createNodes( +export function createNodes( config: AztecNodeConfig, peerIdPrivateKeys: string[], bootstrapNodeEnr: string, numNodes: number, bootNodePort: number, + dataDirectory?: string, ): Promise { - const nodes = []; + const nodePromises = []; for (let i = 0; i < numNodes; i++) { - const node = await createNode(config, peerIdPrivateKeys[i], i + 1 + bootNodePort, bootstrapNodeEnr, i); - nodes.push(node); + // We run on ports from the bootnode upwards if a port if provided, otherwise we get a random port + const port = bootNodePort + i + 1; + + const dataDir = dataDirectory ? `${dataDirectory}-${i}` : undefined; + const nodePromise = createNode(config, peerIdPrivateKeys[i], port, bootstrapNodeEnr, i, dataDir); + nodePromises.push(nodePromise); } - return nodes; + return Promise.all(nodePromises); } // creates a P2P enabled instance of Aztec Node Service export async function createNode( config: AztecNodeConfig, peerIdPrivateKey: string, - tcpListenPort: number, + tcpPort: number, bootstrapNode: string | undefined, publisherAddressIndex: number, dataDirectory?: string, ) { - // We use different L1 publisher accounts in order to avoid duplicate tx nonces. 
We start from - // publisherAddressIndex + 1 because index 0 was already used during test environment setup. - const publisherPrivKey = getPrivateKeyFromIndex(publisherAddressIndex + 1); - config.publisherPrivateKey = `0x${publisherPrivKey!.toString('hex')}`; + const validatorConfig = await createValidatorConfig( + config, + bootstrapNode, + tcpPort, + peerIdPrivateKey, + publisherAddressIndex, + dataDirectory, + ); + return await AztecNodeService.createAndSync( + validatorConfig, + new NoopTelemetryClient(), + createDebugLogger(`aztec:node-${tcpPort}`), + ); +} + +export async function createValidatorConfig( + config: AztecNodeConfig, + bootstrapNodeEnr?: string, + port?: number, + peerIdPrivateKey?: string, + accountIndex: number = 0, + dataDirectory?: string, +) { + peerIdPrivateKey = peerIdPrivateKey ?? generatePeerIdPrivateKey(); + port = port ?? (await getPort()); + + const privateKey = getPrivateKeyFromIndex(accountIndex); + const privateKeyHex: `0x${string}` = `0x${privateKey!.toString('hex')}`; - const validatorPrivKey = getPrivateKeyFromIndex(1 + publisherAddressIndex); - config.validatorPrivateKey = `0x${validatorPrivKey!.toString('hex')}`; + config.publisherPrivateKey = privateKeyHex; + config.validatorPrivateKey = privateKeyHex; - const newConfig: AztecNodeConfig = { + const nodeConfig: AztecNodeConfig = { ...config, peerIdPrivateKey: peerIdPrivateKey, - udpListenAddress: `0.0.0.0:${tcpListenPort}`, - tcpListenAddress: `0.0.0.0:${tcpListenPort}`, - tcpAnnounceAddress: `127.0.0.1:${tcpListenPort}`, - udpAnnounceAddress: `127.0.0.1:${tcpListenPort}`, + udpListenAddress: `0.0.0.0:${port}`, + tcpListenAddress: `0.0.0.0:${port}`, + tcpAnnounceAddress: `127.0.0.1:${port}`, + udpAnnounceAddress: `127.0.0.1:${port}`, minTxsPerBlock: config.minTxsPerBlock, maxTxsPerBlock: config.maxTxsPerBlock, p2pEnabled: true, @@ -74,26 +117,36 @@ export async function createNode( l2QueueSize: 1, transactionProtocol: '', dataDirectory, - bootstrapNodes: bootstrapNode ? 
[bootstrapNode] : [], + bootstrapNodes: bootstrapNodeEnr ? [bootstrapNodeEnr] : [], }; - return await AztecNodeService.createAndSync( - newConfig, - new NoopTelemetryClient(), - createDebugLogger(`aztec:node-${tcpListenPort}`), - ); + + return nodeConfig; } -export async function createBootstrapNode(port: number) { - const peerId = await createLibP2PPeerId(); - const bootstrapNode = new BootstrapNode(); - const config: BootnodeConfig = { +export function createBootstrapNodeConfig(privateKey: string, port: number): BootnodeConfig { + return { udpListenAddress: `0.0.0.0:${port}`, udpAnnounceAddress: `127.0.0.1:${port}`, - peerIdPrivateKey: Buffer.from(peerId.privateKey!).toString('hex'), + peerIdPrivateKey: privateKey, minPeerCount: 10, maxPeerCount: 100, }; - await bootstrapNode.start(config); +} + +export function createBootstrapNodeFromPrivateKey(privateKey: string, port: number): Promise { + const config = createBootstrapNodeConfig(privateKey, port); + return startBootstrapNode(config); +} + +export async function createBootstrapNode(port: number): Promise { + const peerId = await createLibP2PPeerId(); + const config = createBootstrapNodeConfig(Buffer.from(peerId.privateKey!).toString('hex'), port); + return startBootstrapNode(config); +} + +async function startBootstrapNode(config: BootnodeConfig) { + const bootstrapNode = new BootstrapNode(); + await bootstrapNode.start(config); return bootstrapNode; } diff --git a/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts b/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts index 1972e17960e3..ad0bea2cb2d3 100644 --- a/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts +++ b/yarn-project/end-to-end/src/fixtures/snapshot_manager.ts @@ -22,8 +22,8 @@ import { DefaultMultiCallEntrypoint } from '@aztec/aztec.js/entrypoint'; import { type DeployL1ContractsArgs, createL1Clients } from '@aztec/ethereum'; import { asyncMap } from '@aztec/foundation/async-map'; import { type Logger, createDebugLogger } from 
'@aztec/foundation/log'; -import { makeBackoff, retry } from '@aztec/foundation/retry'; import { resolver, reviver } from '@aztec/foundation/serialize'; +import { RollupAbi } from '@aztec/l1-artifacts'; import { type ProverNode, type ProverNodeConfig, createProverNode } from '@aztec/prover-node'; import { type PXEService, createPXEService, getPXEServiceConfig } from '@aztec/pxe'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; @@ -34,14 +34,20 @@ import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs'; import { copySync, removeSync } from 'fs-extra/esm'; import getPort from 'get-port'; import { join } from 'path'; -import { type Hex } from 'viem'; +import { type Hex, getContract } from 'viem'; import { mnemonicToAccount } from 'viem/accounts'; import { MNEMONIC } from './fixtures.js'; import { getACVMConfig } from './get_acvm_config.js'; import { getBBConfig } from './get_bb_config.js'; import { setupL1Contracts } from './setup_l1_contracts.js'; -import { deployCanonicalAuthRegistry, deployCanonicalRouter, getPrivateKeyFromIndex } from './utils.js'; +import { + type SetupOptions, + deployCanonicalAuthRegistry, + deployCanonicalRouter, + getPrivateKeyFromIndex, + startAnvil, +} from './utils.js'; export type SubsystemsContext = { anvil: Anvil; @@ -66,8 +72,11 @@ type SnapshotEntry = { export function createSnapshotManager( testName: string, dataPath?: string, - config: Partial = {}, - deployL1ContractsArgs: Partial = { assumeProvenThrough: Number.MAX_SAFE_INTEGER }, + config: Partial = {}, + deployL1ContractsArgs: Partial = { + assumeProvenThrough: Number.MAX_SAFE_INTEGER, + initialValidators: [], + }, ) { return dataPath ? 
new SnapshotManager(testName, dataPath, config, deployL1ContractsArgs) @@ -141,7 +150,7 @@ class SnapshotManager implements ISnapshotManager { constructor( testName: string, private dataPath: string, - private config: Partial = {}, + private config: Partial = {}, private deployL1ContractsArgs: Partial = { assumeProvenThrough: Number.MAX_SAFE_INTEGER }, ) { this.livePath = join(this.dataPath, 'live', testName); @@ -290,31 +299,24 @@ export async function createAndSyncProverNode( async function setupFromFresh( statePath: string | undefined, logger: Logger, - config: Partial = {}, + opts: SetupOptions = {}, deployL1ContractsArgs: Partial = { assumeProvenThrough: Number.MAX_SAFE_INTEGER, + initialValidators: [], }, ): Promise { logger.verbose(`Initializing state...`); // Fetch the AztecNode config. // TODO: For some reason this is currently the union of a bunch of subsystems. That needs fixing. - const aztecNodeConfig: AztecNodeConfig = { ...getConfigEnvVars(), ...config }; + const aztecNodeConfig: AztecNodeConfig = { ...getConfigEnvVars(), ...opts }; aztecNodeConfig.dataDirectory = statePath; // Start anvil. We go via a wrapper script to ensure if the parent dies, anvil dies. logger.verbose('Starting anvil...'); - const anvil = await retry( - async () => { - const ethereumHostPort = await getPort(); - aztecNodeConfig.l1RpcUrl = `http://127.0.0.1:${ethereumHostPort}`; - const anvil = createAnvil({ anvilBinary: './scripts/anvil_kill_wrapper.sh', port: ethereumHostPort }); - await anvil.start(); - return anvil; - }, - 'Start anvil', - makeBackoff([5, 5, 5]), - ); + const res = await startAnvil(opts.l1BlockTime); + const anvil = res.anvil; + aztecNodeConfig.l1RpcUrl = res.rpcUrl; // Deploy our L1 contracts. logger.verbose('Deploying L1 contracts...'); @@ -322,18 +324,23 @@ async function setupFromFresh( const publisherPrivKeyRaw = hdAccount.getHdKey().privateKey; const publisherPrivKey = publisherPrivKeyRaw === null ? 
null : Buffer.from(publisherPrivKeyRaw); - const validatorPrivKey = getPrivateKeyFromIndex(1); - const proverNodePrivateKey = getPrivateKeyFromIndex(2); + const validatorPrivKey = getPrivateKeyFromIndex(0); + const proverNodePrivateKey = getPrivateKeyFromIndex(0); aztecNodeConfig.publisherPrivateKey = `0x${publisherPrivKey!.toString('hex')}`; aztecNodeConfig.validatorPrivateKey = `0x${validatorPrivKey!.toString('hex')}`; - const deployL1ContractsValues = await setupL1Contracts( - aztecNodeConfig.l1RpcUrl, - hdAccount, - logger, - deployL1ContractsArgs, - ); + const ethCheatCodes = new EthCheatCodes(aztecNodeConfig.l1RpcUrl); + + if (opts.l1StartTime) { + await ethCheatCodes.warp(opts.l1StartTime); + } + + const deployL1ContractsValues = await setupL1Contracts(aztecNodeConfig.l1RpcUrl, hdAccount, logger, { + salt: opts.salt, + initialValidators: opts.initialValidators, + ...deployL1ContractsArgs, + }); aztecNodeConfig.l1Contracts = deployL1ContractsValues.l1ContractAddresses; aztecNodeConfig.l1PublishRetryIntervalMS = 100; @@ -387,6 +394,19 @@ async function setupFromFresh( writeFileSync(`${statePath}/aztec_node_config.json`, JSON.stringify(aztecNodeConfig)); } + // If initial validators are provided, we need to remove them from the node. 
+ if (deployL1ContractsArgs.initialValidators && deployL1ContractsArgs.initialValidators?.length > 0) { + const rollup = getContract({ + address: deployL1ContractsValues.l1ContractAddresses.rollupAddress.toString(), + abi: RollupAbi, + client: deployL1ContractsValues.walletClient, + }); + + logger.debug(`Removing ${deployL1ContractsArgs.initialValidators[0].toString()} as validator`); + const txHash = await rollup.write.removeValidator([deployL1ContractsArgs.initialValidators[0].toString()]); + await deployL1ContractsValues.publicClient.waitForTransactionReceipt({ hash: txHash }); + } + return { aztecNodeConfig, anvil, @@ -407,7 +427,6 @@ async function setupFromFresh( async function setupFromState(statePath: string, logger: Logger): Promise { logger.verbose(`Initializing with saved state at ${statePath}...`); - // Load config. // TODO: For some reason this is currently the union of a bunch of subsystems. That needs fixing. const aztecNodeConfig: AztecNodeConfig = JSON.parse( readFileSync(`${statePath}/aztec_node_config.json`, 'utf-8'), diff --git a/yarn-project/end-to-end/src/fixtures/utils.ts b/yarn-project/end-to-end/src/fixtures/utils.ts index d13ba870ca2a..db4431949968 100644 --- a/yarn-project/end-to-end/src/fixtures/utils.ts +++ b/yarn-project/end-to-end/src/fixtures/utils.ts @@ -284,7 +284,7 @@ async function setupWithRemoteEnvironment( } /** Options for the e2e tests setup */ -type SetupOptions = { +export type SetupOptions = { /** State load */ stateLoad?: string; /** Previously deployed contracts on L1 */ diff --git a/yarn-project/foundation/src/collection/index.ts b/yarn-project/foundation/src/collection/index.ts index 00f8115dd60b..d315cf2142f7 100644 --- a/yarn-project/foundation/src/collection/index.ts +++ b/yarn-project/foundation/src/collection/index.ts @@ -1,2 +1,3 @@ export * from './array.js'; export * from './object.js'; +export * from './ordered_map.js'; diff --git a/yarn-project/foundation/src/collection/ordered_map.test.ts 
b/yarn-project/foundation/src/collection/ordered_map.test.ts new file mode 100644 index 000000000000..611dd6534ba5 --- /dev/null +++ b/yarn-project/foundation/src/collection/ordered_map.test.ts @@ -0,0 +1,101 @@ +import { OrderedMap } from './ordered_map.js'; + +describe('OrderedMap', () => { + let orderedMap: OrderedMap; + + beforeEach(() => { + orderedMap = new OrderedMap(); + }); + + it('set should add new key-value pairs', () => { + orderedMap.set('a', 1); + expect(orderedMap.get('a')).toBe(1); + expect(orderedMap.has('a')).toBe(true); + }); + + it('set should update existing keys and move them to the end', () => { + orderedMap.set('a', 1); + orderedMap.set('b', 2); + orderedMap.set('a', 3); + expect(Array.from(orderedMap.values())).toEqual([2, 3]); + }); + + it('get should retrieve values for existing keys', () => { + orderedMap.set('a', 1); + expect(orderedMap.get('a')).toBe(1); + }); + + it('get should return undefined for non-existent keys', () => { + expect(orderedMap.get('a')).toBeUndefined(); + }); + + it('has should return true for existing keys', () => { + orderedMap.set('a', 1); + expect(orderedMap.has('a')).toBe(true); + }); + + it('has should return false for non-existent keys', () => { + expect(orderedMap.has('a')).toBe(false); + }); + + it('delete should remove existing keys and return true', () => { + orderedMap.set('a', 1); + expect(orderedMap.delete('a')).toBe(true); + expect(orderedMap.has('a')).toBe(false); + expect(Array.from(orderedMap.values())).toEqual([]); + }); + + test('delete should return false for non-existent keys', () => { + expect(orderedMap.delete('a')).toBe(false); + }); + + it('values should return an array of values in insertion order', () => { + orderedMap.set('a', 1); + orderedMap.set('b', 2); + orderedMap.set('c', 3); + expect(Array.from(orderedMap.values())).toEqual([1, 2, 3]); + }); + + it('values should reflect the updated order after re-insertion', () => { + orderedMap.set('a', 1); + orderedMap.set('b', 2); + 
orderedMap.set('a', 3); + expect(Array.from(orderedMap.values())).toEqual([2, 3]); + }); + + it('iterator should yield key-value pairs in insertion order', () => { + orderedMap.set('a', 1); + orderedMap.set('b', 2); + orderedMap.set('c', 3); + expect(Array.from(orderedMap)).toEqual([ + ['a', 1], + ['b', 2], + ['c', 3], + ]); + }); + + it('iterator should reflect the updated order after re-insertion', () => { + orderedMap.set('a', 1); + orderedMap.set('b', 2); + orderedMap.set('a', 3); + expect(Array.from(orderedMap)).toEqual([ + ['b', 2], + ['a', 3], + ]); + }); + + it('multiple operations should maintain correct order and values', () => { + orderedMap.set('a', 1); + orderedMap.set('b', 2); + orderedMap.set('c', 3); + orderedMap.delete('b'); + orderedMap.set('d', 4); + orderedMap.set('a', 5); + expect(Array.from(orderedMap.values())).toEqual([3, 4, 5]); + expect(Array.from(orderedMap)).toEqual([ + ['c', 3], + ['d', 4], + ['a', 5], + ]); + }); +}); diff --git a/yarn-project/foundation/src/collection/ordered_map.ts b/yarn-project/foundation/src/collection/ordered_map.ts new file mode 100644 index 000000000000..8cb7fef18918 --- /dev/null +++ b/yarn-project/foundation/src/collection/ordered_map.ts @@ -0,0 +1,49 @@ +// This exists for public state squashing +// +// It has advantages over an traditional map as when we delete an item, +// we can insert to the back of the map +// This filtering is inefficient and a bottleneck over large lists +// Additionally getting values implements a copy +export class OrderedMap { + map = new Map(); + keys: K[] = []; + + constructor() {} + + set(key: K, value: V) { + if (this.map.has(key)) { + // Remove the key from the keys array + this.keys = this.keys.filter(k => k !== key); + } + // Add the key to the end of the keys array + this.keys.push(key); + // Set the value in the map + this.map.set(key, value); + } + + get(key: K) { + return this.map.get(key); + } + + has(key: K) { + return this.map.has(key); + } + + delete(key: K) { + if 
(this.map.delete(key)) { + this.keys = this.keys.filter(k => k !== key); + return true; + } + return false; + } + + values() { + return this.keys.map(key => this.map.get(key)!); + } + + *[Symbol.iterator]() { + for (const key of this.keys) { + yield [key, this.map.get(key)]; + } + } +} diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 146a4724b007..06ed30f66681 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -128,6 +128,7 @@ export type EnvVar = | 'VALIDATOR_ATTESTATIONS_WAIT_TIMEOUT_MS' | 'VALIDATOR_DISABLED' | 'VALIDATOR_PRIVATE_KEY' + | 'VALIDATOR_RE_EX' | 'VERSION' | 'WS_BLOCK_CHECK_INTERVAL_MS' | 'WS_L2_BLOCK_QUEUE_SIZE' diff --git a/yarn-project/p2p/src/service/libp2p_service.ts b/yarn-project/p2p/src/service/libp2p_service.ts index 2c6778eb8fb0..755439b5f2e3 100644 --- a/yarn-project/p2p/src/service/libp2p_service.ts +++ b/yarn-project/p2p/src/service/libp2p_service.ts @@ -182,12 +182,12 @@ export class LibP2PService implements P2PService { await this.discoveryRunningPromise?.stop(); this.logger.debug('Stopping peer discovery service...'); await this.peerDiscoveryService.stop(); + this.logger.debug('Request response service stopped...'); + await this.reqresp.stop(); this.logger.debug('Stopping LibP2P...'); await this.stopLibP2P(); this.logger.info('LibP2P service stopped'); this.logger.debug('Stopping request response service...'); - await this.reqresp.stop(); - this.logger.debug('Request response service stopped...'); } /** diff --git a/yarn-project/p2p/src/service/reqresp/reqresp.ts b/yarn-project/p2p/src/service/reqresp/reqresp.ts index 41a9fd97d1ea..a2249015c2f0 100644 --- a/yarn-project/p2p/src/service/reqresp/reqresp.ts +++ b/yarn-project/p2p/src/service/reqresp/reqresp.ts @@ -36,8 +36,6 @@ import { RequestResponseRateLimiter } from './rate_limiter/rate_limiter.js'; export class ReqResp { protected readonly logger: Logger; - 
private abortController: AbortController = new AbortController(); - private overallRequestTimeoutMs: number; private individualRequestTimeoutMs: number; @@ -78,9 +76,16 @@ export class ReqResp { for (const protocol of Object.keys(this.subProtocolHandlers)) { await this.libp2p.unhandle(protocol); } + + // Close all active connections + const closeStreamPromises = this.libp2p.getConnections().map(connection => connection.close()); + await Promise.all(closeStreamPromises); + this.logger.debug('ReqResp: All active streams closed'); + this.rateLimiter.stop(); - await this.libp2p.stop(); - this.abortController.abort(); + this.logger.debug('ReqResp: Rate limiter stopped'); + + // NOTE: We assume libp2p instance is managed by the caller } /** @@ -187,7 +192,6 @@ export class ReqResp { let stream: Stream | undefined; try { stream = await this.libp2p.dialProtocol(peerId, subProtocol); - this.logger.debug(`Stream opened with ${peerId.toString()} for ${subProtocol}`); // Open the stream with a timeout diff --git a/yarn-project/prover-client/src/orchestrator/block-building-helpers.ts b/yarn-project/prover-client/src/orchestrator/block-building-helpers.ts index dd798f38b359..045f098b4722 100644 --- a/yarn-project/prover-client/src/orchestrator/block-building-helpers.ts +++ b/yarn-project/prover-client/src/orchestrator/block-building-helpers.ts @@ -123,6 +123,7 @@ export async function buildBaseRollupInput( tx.data.end.nullifiers.map(n => n.toBuffer()), NULLIFIER_SUBTREE_HEIGHT, ); + if (nullifierWitnessLeaves === undefined) { throw new Error(`Could not craft nullifier batch insertion proofs`); } @@ -424,6 +425,7 @@ export async function processPublicDataUpdateRequests(tx: ProcessedTx, db: Merkl const allPublicDataWrites = allPublicDataUpdateRequests.map( ({ leafSlot, newValue }) => new PublicDataTreeLeaf(leafSlot, newValue), ); + const { lowLeavesWitnessData, newSubtreeSiblingPath, sortedNewLeaves, sortedNewLeavesIndexes } = await db.batchInsert( MerkleTreeId.PUBLIC_DATA_TREE, 
allPublicDataWrites.map(x => x.toBuffer()), diff --git a/yarn-project/sequencer-client/src/block_builder/light.ts b/yarn-project/sequencer-client/src/block_builder/light.ts index 58c741faec0c..d27b69cf367a 100644 --- a/yarn-project/sequencer-client/src/block_builder/light.ts +++ b/yarn-project/sequencer-client/src/block_builder/light.ts @@ -39,7 +39,7 @@ export class LightweightBlockBuilder implements BlockBuilder { constructor(private db: MerkleTreeOperations, private telemetry: TelemetryClient) {} async startNewBlock(numTxs: number, globalVariables: GlobalVariables, l1ToL2Messages: Fr[]): Promise { - this.logger.verbose('Starting new block', { numTxs, globalVariables, l1ToL2Messages }); + this.logger.verbose('Starting new block', { numTxs, globalVariables: globalVariables.toJSON(), l1ToL2Messages }); this.numTxs = numTxs; this.globalVariables = globalVariables; this.l1ToL2Messages = padArrayEnd(l1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP); diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.ts index 29de94931086..65a55e6e484b 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.ts @@ -437,9 +437,12 @@ export class Sequencer { const blockBuilder = this.blockBuilderFactory.create(this.worldState.getLatest()); await blockBuilder.startNewBlock(blockSize, newGlobalVariables, l1ToL2Messages); + const timer = new Timer(); const [publicProcessorDuration, [processedTxs, failedTxs]] = await elapsed(() => processor.process(validTxs, blockSize, blockBuilder, this.txValidatorFactory.validatorForProcessedTxs()), ); + this.log.info(`Public processor duration ${timer.ms()}ms`); + if (failedTxs.length > 0) { const failedTxData = failedTxs.map(fail => fail.tx); this.log.debug(`Dropping failed txs ${Tx.getHashes(failedTxData).join(', ')}`); @@ -520,7 +523,7 @@ export class Sequencer { this.log.debug(`Attesting 
committee length ${committee.length}`); if (committee.length === 0) { - this.log.debug(`Attesting committee length is 0, skipping`); + this.log.verbose(`Attesting committee length is 0, skipping`); return undefined; } diff --git a/yarn-project/simulator/src/public/executor.ts b/yarn-project/simulator/src/public/executor.ts index 52f664df4361..f6e1e7a421e9 100644 --- a/yarn-project/simulator/src/public/executor.ts +++ b/yarn-project/simulator/src/public/executor.ts @@ -43,6 +43,7 @@ export class PublicExecutor { globalVariables: GlobalVariables, availableGas: Gas, _txContext: TxContext, + // TODO(md): this will be shared? Why do we need to pass it everywhere? pendingSiloedNullifiers: Nullifier[], transactionFee: Fr = Fr.ZERO, startSideEffectCounter: number = 0, diff --git a/yarn-project/simulator/src/public/index.ts b/yarn-project/simulator/src/public/index.ts index 5f744c3211fc..5bc58ac6e62e 100644 --- a/yarn-project/simulator/src/public/index.ts +++ b/yarn-project/simulator/src/public/index.ts @@ -8,4 +8,5 @@ export * from './public_db_sources.js'; export * from './public_kernel.js'; export * from './public_kernel_circuit_simulator.js'; export { PublicProcessor, PublicProcessorFactory } from './public_processor.js'; +export { LightPublicProcessor } from './light_public_processor.js'; export { PublicSideEffectTrace } from './side_effect_trace.js'; diff --git a/yarn-project/simulator/src/public/light_public_processor.test.ts b/yarn-project/simulator/src/public/light_public_processor.test.ts new file mode 100644 index 000000000000..5fe782e15999 --- /dev/null +++ b/yarn-project/simulator/src/public/light_public_processor.test.ts @@ -0,0 +1,697 @@ +import { + type PublicExecutionRequest, + SimulationError, + type TreeInfo, + type TxValidator, + mockTx, +} from '@aztec/circuit-types'; +import { + AppendOnlyTreeSnapshot, + AztecAddress, + ContractStorageRead, + ContractStorageUpdateRequest, + Fr, + Gas, + GasFees, + GasSettings, + GlobalVariables, + Header, + 
MAX_NOTE_HASHES_PER_TX, + MAX_NULLIFIERS_PER_TX, + PUBLIC_DATA_TREE_HEIGHT, + PartialStateReference, + PublicAccumulatedDataBuilder, + PublicDataTreeLeafPreimage, + PublicDataUpdateRequest, + StateReference, +} from '@aztec/circuits.js'; +import { fr, makeSelector } from '@aztec/circuits.js/testing'; +import { padArrayEnd } from '@aztec/foundation/collection'; +import { type FieldsOf } from '@aztec/foundation/types'; +import { openTmpStore } from '@aztec/kv-store/utils'; +import { type AppendOnlyTree, Poseidon, StandardTree, newTree } from '@aztec/merkle-tree'; +import { LightPublicProcessor, type PublicExecutionResult, type WorldStateDB } from '@aztec/simulator'; +import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; +import { type MerkleTreeOperations } from '@aztec/world-state'; + +import { jest } from '@jest/globals'; +import { type MockProxy, mock } from 'jest-mock-extended'; + +import { PublicExecutionResultBuilder, makeFunctionCall } from '../mocks/fixtures.js'; +import { type NewStateUpdates } from './light_public_processor.js'; + +describe('public_processor', () => { + let db: MockProxy; + let worldStateDB: MockProxy; + let txValidator: MockProxy>; + let publicExecutorSpy: any; + + let root: Buffer; + + let processor: LightPublicProcessor; + + beforeEach(() => { + db = mock(); + worldStateDB = mock(); + txValidator = mock>(); + + root = Buffer.alloc(32, 5); + + db.getTreeInfo.mockResolvedValue({ root } as TreeInfo); + worldStateDB.storageRead.mockResolvedValue(Fr.ZERO); + + // Always return true for validation + txValidator.validateTxs.mockImplementation((txs: any[]) => { + return Promise.resolve([txs, []]); + }); + }); + + describe('Light Public Processor', () => { + let publicDataTree: AppendOnlyTree; + + beforeAll(async () => { + publicDataTree = await newTree( + StandardTree, + openTmpStore(), + new Poseidon(), + 'PublicData', + Fr, + PUBLIC_DATA_TREE_HEIGHT, + 1, // Add a default low leaf for the public data hints to be proved against. 
+ ); + }); + + beforeEach(() => { + const snap = new AppendOnlyTreeSnapshot( + Fr.fromBuffer(publicDataTree.getRoot(true)), + Number(publicDataTree.getNumLeaves(true)), + ); + + const header = Header.empty(); + const stateReference = new StateReference( + header.state.l1ToL2MessageTree, + new PartialStateReference(header.state.partial.noteHashTree, header.state.partial.nullifierTree, snap), + ); + // Clone the whole state because somewhere down the line (AbstractPhaseManager) the public data root is modified in the referenced header directly :/ + header.state = StateReference.fromBuffer(stateReference.toBuffer()); + + db.getStateReference.mockResolvedValue(stateReference); + db.getSiblingPath.mockResolvedValue(publicDataTree.getSiblingPath(0n, false)); + db.getPreviousValueIndex.mockResolvedValue({ index: 0n, alreadyPresent: true }); + db.getLeafPreimage.mockResolvedValue(new PublicDataTreeLeafPreimage(new Fr(0), new Fr(0), new Fr(0), 0n)); + + processor = new LightPublicProcessor( + db, + worldStateDB, + GlobalVariables.from({ ...GlobalVariables.empty(), gasFees: GasFees.default() }), + header, + txValidator, + new NoopTelemetryClient(), + ); + + publicExecutorSpy = jest.spyOn(processor.publicExecutor, 'simulate'); + + publicExecutorSpy.mockImplementation((req: PublicExecutionRequest) => { + const result = PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: req as PublicExecutionRequest, + }).build(); + return Promise.resolve(result); + }); + }); + + it('rolls back app logic db updates on failed public execution, but persists setup', async function () { + const tx = mockTx(1, { + hasLogs: true, + numberOfNonRevertiblePublicCallRequests: 1, + numberOfRevertiblePublicCallRequests: 1, + hasPublicTeardownCallRequest: true, + }); + + const nonRevertibleRequests = tx.getNonRevertiblePublicExecutionRequests(); + const revertibleRequests = tx.getRevertiblePublicExecutionRequests(); + const teardownRequest = tx.getPublicTeardownExecutionRequest()!; + + 
const teardownGas = tx.data.constants.txContext.gasSettings.getTeardownLimits(); + const teardownResultSettings = { startGasLeft: teardownGas, endGasLeft: teardownGas }; + + const nestedContractAddress = AztecAddress.fromBigInt(112233n); + const contractSlotA = fr(0x100); + const contractSlotB = fr(0x150); + const contractSlotC = fr(0x200); + const contractSlotD = fr(0x250); + const contractSlotE = fr(0x300); + const contractSlotF = fr(0x350); + + let simulatorCallCount = 0; + const simulatorResults: PublicExecutionResult[] = [ + // Setup + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: nonRevertibleRequests[0], + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotA, fr(0x101), 11)], + }).build(), + + // App Logic + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: revertibleRequests[0], + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: revertibleRequests[0].callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x102), 13), + new ContractStorageUpdateRequest(contractSlotB, fr(0x151), 14), + new ContractStorageUpdateRequest(contractSlotC, fr(0x200), 15), + ], + }).build(), + PublicExecutionResultBuilder.fromFunctionCall({ + from: revertibleRequests[0].contractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + revertReason: new SimulationError('Simulation Failed', []), + }).build(), + ], + }).build(), + + // Teardown + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: teardownRequest, + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotC, fr(0x201), 16), + new 
ContractStorageUpdateRequest(contractSlotD, fr(0x251), 17), + new ContractStorageUpdateRequest(contractSlotE, fr(0x301), 18), + new ContractStorageUpdateRequest(contractSlotF, fr(0x351), 19), + ], + }).build(teardownResultSettings), + ], + }).build(teardownResultSettings), + ]; + + jest.spyOn((processor as any).publicExecutor, 'simulate').mockImplementation(execution => { + if (simulatorCallCount < simulatorResults.length) { + return Promise.resolve(simulatorResults[simulatorCallCount++]); + } else { + throw new Error(`Unexpected execution request: ${execution}, call count: ${simulatorCallCount}`); + } + }); + + await processor.process([tx]); + + expect(processor.publicExecutor.simulate).toHaveBeenCalledTimes(3); + expect(worldStateDB.checkpoint).toHaveBeenCalledTimes(1); + // We should not need to roll back to the checkpoint, the nested call reverting should not + // mean that the parent call should revert! + expect(worldStateDB.rollbackToCheckpoint).toHaveBeenCalledTimes(1); + expect(worldStateDB.commit).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCommit).toHaveBeenCalledTimes(0); + }); + + it('fails a transaction that reverts in setup', async function () { + const tx = mockTx(1, { + numberOfNonRevertiblePublicCallRequests: 1, + numberOfRevertiblePublicCallRequests: 1, + hasPublicTeardownCallRequest: true, + }); + + const nonRevertibleRequests = tx.getNonRevertiblePublicExecutionRequests(); + const revertibleRequests = tx.getRevertiblePublicExecutionRequests(); + const teardownRequest = tx.getPublicTeardownExecutionRequest()!; + + const nestedContractAddress = AztecAddress.fromBigInt(112233n); + const contractSlotA = fr(0x100); + const contractSlotB = fr(0x150); + const contractSlotC = fr(0x200); + + let simulatorCallCount = 0; + const simulatorResults: PublicExecutionResult[] = [ + // Setup + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: nonRevertibleRequests[0], + contractStorageUpdateRequests: [new 
ContractStorageUpdateRequest(contractSlotA, fr(0x101), 11)], + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: nonRevertibleRequests[0].callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x102), 12), + new ContractStorageUpdateRequest(contractSlotB, fr(0x151), 13), + ], + }).build(), + PublicExecutionResultBuilder.fromFunctionCall({ + from: nonRevertibleRequests[0].callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + revertReason: new SimulationError('Simulation Failed', []), + }).build(), + ], + }).build(), + + // App Logic + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: revertibleRequests[0], + }).build(), + + // Teardown + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: teardownRequest, + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotC, fr(0x202), 16)], + }).build(), + ], + }).build(), + ]; + + jest.spyOn((processor as any).publicExecutor, 'simulate').mockImplementation(execution => { + if (simulatorCallCount < simulatorResults.length) { + return Promise.resolve(simulatorResults[simulatorCallCount++]); + } else { + throw new Error(`Unexpected execution request: ${execution}, call count: ${simulatorCallCount}`); + } + }); + + await expect(async () => { + await processor.process([tx]); + }).rejects.toThrow('Reverted in setup'); + + expect(processor.publicExecutor.simulate).toHaveBeenCalledTimes(1); + + expect(worldStateDB.checkpoint).toHaveBeenCalledTimes(0); + expect(worldStateDB.rollbackToCheckpoint).toHaveBeenCalledTimes(0); + 
expect(worldStateDB.commit).toHaveBeenCalledTimes(0); + expect(worldStateDB.rollbackToCommit).toHaveBeenCalledTimes(1); + }); + + it('includes a transaction that reverts in teardown', async function () { + const tx = mockTx(1, { + hasLogs: true, + numberOfNonRevertiblePublicCallRequests: 1, + numberOfRevertiblePublicCallRequests: 1, + hasPublicTeardownCallRequest: true, + }); + + const nonRevertibleRequests = tx.getNonRevertiblePublicExecutionRequests(); + const revertibleRequests = tx.getRevertiblePublicExecutionRequests(); + const teardownRequest = tx.getPublicTeardownExecutionRequest()!; + + const teardownGas = tx.data.constants.txContext.gasSettings.getTeardownLimits(); + const teardownResultSettings = { startGasLeft: teardownGas, endGasLeft: teardownGas }; + + const nestedContractAddress = AztecAddress.fromBigInt(112233n); + const contractSlotA = fr(0x100); + const contractSlotB = fr(0x150); + const contractSlotC = fr(0x200); + + let simulatorCallCount = 0; + const simulatorResults: PublicExecutionResult[] = [ + // Setup + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: nonRevertibleRequests[0], + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotA, fr(0x101), 11)], + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: nonRevertibleRequests[0].callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x102), 12), + new ContractStorageUpdateRequest(contractSlotB, fr(0x151), 13), + ], + }).build(), + ], + }).build(), + + // App Logic + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: revertibleRequests[0], + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotB, fr(0x152), 14), + new ContractStorageUpdateRequest(contractSlotC, fr(0x201), 15), + ], + }).build(), + + // Teardown + 
PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: teardownRequest, + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotC, fr(0x202), 16)], + }).build(teardownResultSettings), + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotC, fr(0x202), 17)], + revertReason: new SimulationError('Simulation Failed', []), + }).build(teardownResultSettings), + ], + }).build(teardownResultSettings), + ]; + + jest.spyOn(processor.publicExecutor, 'simulate').mockImplementation(execution => { + if (simulatorCallCount < simulatorResults.length) { + return Promise.resolve(simulatorResults[simulatorCallCount++]); + } else { + throw new Error(`Unexpected execution request: ${execution}, call count: ${simulatorCallCount}`); + } + }); + + await processor.process([tx]); + + expect(processor.publicExecutor.simulate).toHaveBeenCalledTimes(3); + expect(worldStateDB.checkpoint).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCheckpoint).toHaveBeenCalledTimes(1); + expect(worldStateDB.commit).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCommit).toHaveBeenCalledTimes(0); + }); + + it('includes a transaction that reverts in app logic and teardown', async function () { + const tx = mockTx(1, { + hasLogs: true, + numberOfNonRevertiblePublicCallRequests: 1, + numberOfRevertiblePublicCallRequests: 1, + hasPublicTeardownCallRequest: true, + }); + + const nonRevertibleRequests = tx.getNonRevertiblePublicExecutionRequests(); + const revertibleRequests = tx.getRevertiblePublicExecutionRequests(); + const teardownRequest = 
tx.getPublicTeardownExecutionRequest()!; + + const teardownGas = tx.data.constants.txContext.gasSettings.getTeardownLimits(); + const teardownResultSettings = { startGasLeft: teardownGas, endGasLeft: teardownGas }; + + const nestedContractAddress = AztecAddress.fromBigInt(112233n); + const contractSlotA = fr(0x100); + const contractSlotB = fr(0x150); + const contractSlotC = fr(0x200); + + let simulatorCallCount = 0; + const simulatorResults: PublicExecutionResult[] = [ + // Setup + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: nonRevertibleRequests[0], + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotA, fr(0x101), 11)], + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: nonRevertibleRequests[0].callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x102), 12), + new ContractStorageUpdateRequest(contractSlotB, fr(0x151), 13), + ], + }).build(), + ], + }).build(), + + // App Logic + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: revertibleRequests[0], + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotB, fr(0x152), 14), + new ContractStorageUpdateRequest(contractSlotC, fr(0x201), 15), + ], + revertReason: new SimulationError('Simulation Failed', []), + }).build(), + + // Teardown + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: teardownRequest, + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotC, fr(0x202), 16)], + }).build(teardownResultSettings), + PublicExecutionResultBuilder.fromFunctionCall({ + from: 
teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [new ContractStorageUpdateRequest(contractSlotC, fr(0x202), 16)], + revertReason: new SimulationError('Simulation Failed', []), + }).build(teardownResultSettings), + ], + }).build(teardownResultSettings), + ]; + + jest.spyOn(processor.publicExecutor, 'simulate').mockImplementation(execution => { + if (simulatorCallCount < simulatorResults.length) { + return Promise.resolve(simulatorResults[simulatorCallCount++]); + } else { + throw new Error(`Unexpected execution request: ${execution}, call count: ${simulatorCallCount}`); + } + }); + + const stateUpdateSpy = jest.spyOn(processor as any, 'writeStateUpdates'); + + await processor.process([tx]); + + expect(worldStateDB.checkpoint).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCheckpoint).toHaveBeenCalledTimes(2); + expect(worldStateDB.commit).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCommit).toHaveBeenCalledTimes(0); + + const nestContractAddress = simulatorResults[0].nestedExecutions[0].executionRequest.contractAddress; + const expectedPublicDataWrites = [ + PublicDataUpdateRequest.fromContractStorageUpdateRequest( + nonRevertibleRequests[0].callContext.storageContractAddress, + new ContractStorageUpdateRequest(contractSlotA, fr(0x101), 11), + ), + PublicDataUpdateRequest.fromContractStorageUpdateRequest( + nestContractAddress, + new ContractStorageUpdateRequest(contractSlotA, fr(0x102), 12), + ), + PublicDataUpdateRequest.fromContractStorageUpdateRequest( + nestContractAddress, + new ContractStorageUpdateRequest(contractSlotB, fr(0x151), 13), + ), + ]; + // Tx hash + state updates + const expectedStateUpdatesOpject: NewStateUpdates = { + nullifiers: padArrayEnd([tx.data.getNonEmptyNullifiers()[0]], Fr.ZERO, MAX_NULLIFIERS_PER_TX), + noteHashes: padArrayEnd([], Fr.ZERO, MAX_NOTE_HASHES_PER_TX), + publicDataWrites: expectedPublicDataWrites, 
+ }; + expect(stateUpdateSpy).toHaveBeenCalledWith(expectedStateUpdatesOpject); + }); + + it('runs a tx with all phases', async function () { + const tx = mockTx(1, { + numberOfNonRevertiblePublicCallRequests: 1, + numberOfRevertiblePublicCallRequests: 1, + hasPublicTeardownCallRequest: true, + }); + + const nonRevertibleRequests = tx.getNonRevertiblePublicExecutionRequests(); + const revertibleRequests = tx.getRevertiblePublicExecutionRequests(); + const teardownRequest = tx.getPublicTeardownExecutionRequest()!; + + // TODO(md): gas + const gasLimits = Gas.from({ l2Gas: 1e9, daGas: 1e9 }); + const teardownGas = Gas.from({ l2Gas: 1e7, daGas: 1e7 }); + tx.data.constants.txContext.gasSettings = GasSettings.from({ + gasLimits: gasLimits, + teardownGasLimits: teardownGas, + inclusionFee: new Fr(1e4), + maxFeesPerGas: { feePerDaGas: new Fr(10), feePerL2Gas: new Fr(10) }, + }); + + // Private kernel tail to public pushes teardown gas allocation into revertible gas used + tx.data.forPublic!.end = PublicAccumulatedDataBuilder.fromPublicAccumulatedData(tx.data.forPublic!.end) + .withGasUsed(teardownGas) + .build(); + tx.data.forPublic!.endNonRevertibleData = PublicAccumulatedDataBuilder.fromPublicAccumulatedData( + tx.data.forPublic!.endNonRevertibleData, + ) + .withGasUsed(Gas.empty()) + .build(); + + const nestedContractAddress = revertibleRequests[0].callContext.storageContractAddress; + const contractSlotA = fr(0x100); + const contractSlotB = fr(0x150); + const contractSlotC = fr(0x200); + + let simulatorCallCount = 0; + + const initialGas = gasLimits.sub(teardownGas); + const setupGasUsed = Gas.from({ l2Gas: 1e6 }); + const appGasUsed = Gas.from({ l2Gas: 2e6, daGas: 2e6 }); + const teardownGasUsed = Gas.from({ l2Gas: 3e6, daGas: 3e6 }); + const afterSetupGas = initialGas.sub(setupGasUsed); + const afterAppGas = afterSetupGas.sub(appGasUsed); + const afterTeardownGas = teardownGas.sub(teardownGasUsed); + + // Inclusion fee plus block gas fees times total gas used + 
const expectedTxFee = 1e4 + (1e7 + 1e6 + 2e6) * 1 + (1e7 + 2e6) * 1; + const transactionFee = new Fr(expectedTxFee); + + const simulatorResults: PublicExecutionResult[] = [ + // Setup + PublicExecutionResultBuilder.fromPublicExecutionRequest({ request: nonRevertibleRequests[0] }).build({ + startGasLeft: initialGas, + endGasLeft: afterSetupGas, + }), + + // App Logic + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: revertibleRequests[0], + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x101), 10), + new ContractStorageUpdateRequest(contractSlotB, fr(0x151), 11), + ], + contractStorageReads: [new ContractStorageRead(contractSlotA, fr(0x100), 19)], + }).build({ + startGasLeft: afterSetupGas, + endGasLeft: afterAppGas, + }), + + // Teardown + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: teardownRequest, + nestedExecutions: [ + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x103), 16), + new ContractStorageUpdateRequest(contractSlotC, fr(0x201), 17), + ], + contractStorageReads: [new ContractStorageRead(contractSlotA, fr(0x102), 15)], + }).build({ startGasLeft: teardownGas, endGasLeft: teardownGas, transactionFee }), + PublicExecutionResultBuilder.fromFunctionCall({ + from: teardownRequest.callContext.storageContractAddress, + tx: makeFunctionCall('', nestedContractAddress, makeSelector(5)), + contractStorageUpdateRequests: [ + new ContractStorageUpdateRequest(contractSlotA, fr(0x102), 13), + new ContractStorageUpdateRequest(contractSlotB, fr(0x152), 14), + ], + contractStorageReads: [new ContractStorageRead(contractSlotA, fr(0x101), 12)], + }).build({ startGasLeft: teardownGas, endGasLeft: teardownGas, transactionFee }), + ], + }).build({ + startGasLeft: teardownGas, + 
endGasLeft: afterTeardownGas, + transactionFee, + }), + ]; + + jest.spyOn(processor.publicExecutor, 'simulate').mockImplementation(execution => { + if (simulatorCallCount < simulatorResults.length) { + const result = simulatorResults[simulatorCallCount++]; + return Promise.resolve(result); + } else { + throw new Error(`Unexpected execution request: ${execution}, call count: ${simulatorCallCount}`); + } + }); + + const stateUpdateSpy = jest.spyOn(processor as any, 'writeStateUpdates'); + + await processor.process([tx]); + + const expectedSimulateCall = (availableGas: Partial>, _txFee: number) => [ + expect.anything(), // PublicExecution + expect.anything(), // GlobalVariables + Gas.from(availableGas), + expect.anything(), // TxContext + expect.anything(), // pendingNullifiers + new Fr(0), + // new Fr(txFee), + expect.anything(), // SideEffectCounter + ]; + + expect(processor.publicExecutor.simulate).toHaveBeenCalledTimes(3); + expect(processor.publicExecutor.simulate).toHaveBeenNthCalledWith(1, ...expectedSimulateCall(initialGas, 0)); + expect(processor.publicExecutor.simulate).toHaveBeenNthCalledWith(2, ...expectedSimulateCall(afterSetupGas, 0)); + expect(processor.publicExecutor.simulate).toHaveBeenNthCalledWith( + 3, + ...expectedSimulateCall(teardownGas, expectedTxFee), + ); + + expect(worldStateDB.checkpoint).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCheckpoint).toHaveBeenCalledTimes(0); + expect(worldStateDB.commit).toHaveBeenCalledTimes(1); + expect(worldStateDB.rollbackToCommit).toHaveBeenCalledTimes(0); + + const expectedPublicDataWrites = [ + PublicDataUpdateRequest.fromContractStorageUpdateRequest( + nestedContractAddress, + new ContractStorageUpdateRequest(contractSlotA, fr(0x103), 16), + ), + PublicDataUpdateRequest.fromContractStorageUpdateRequest( + nestedContractAddress, + new ContractStorageUpdateRequest(contractSlotC, fr(0x201), 17), + ), + PublicDataUpdateRequest.fromContractStorageUpdateRequest( + nestedContractAddress, + new 
ContractStorageUpdateRequest(contractSlotB, fr(0x152), 14), + ), + ]; + // Tx hash + state updates + const expectedStateUpdatesOpject: NewStateUpdates = { + nullifiers: padArrayEnd([tx.data.getNonEmptyNullifiers()[0]], Fr.ZERO, MAX_NULLIFIERS_PER_TX), + noteHashes: padArrayEnd([], Fr.ZERO, MAX_NOTE_HASHES_PER_TX), + publicDataWrites: expectedPublicDataWrites, + }; + expect(stateUpdateSpy).toHaveBeenCalledWith(expectedStateUpdatesOpject); + }); + + it('runs a tx with only teardown', async function () { + const tx = mockTx(1, { + numberOfNonRevertiblePublicCallRequests: 0, + numberOfRevertiblePublicCallRequests: 0, + hasPublicTeardownCallRequest: true, + }); + + const teardownRequest = tx.getPublicTeardownExecutionRequest()!; + + const gasLimits = Gas.from({ l2Gas: 1e9, daGas: 1e9 }); + const teardownGas = Gas.from({ l2Gas: 1e7, daGas: 1e7 }); + tx.data.constants.txContext.gasSettings = GasSettings.from({ + gasLimits: gasLimits, + teardownGasLimits: teardownGas, + inclusionFee: new Fr(1e4), + maxFeesPerGas: { feePerDaGas: new Fr(10), feePerL2Gas: new Fr(10) }, + }); + + // Private kernel tail to public pushes teardown gas allocation into revertible gas used + tx.data.forPublic!.end = PublicAccumulatedDataBuilder.fromPublicAccumulatedData(tx.data.forPublic!.end) + .withGasUsed(teardownGas) + .build(); + tx.data.forPublic!.endNonRevertibleData = PublicAccumulatedDataBuilder.fromPublicAccumulatedData( + tx.data.forPublic!.endNonRevertibleData, + ) + .withGasUsed(Gas.empty()) + .build(); + + let simulatorCallCount = 0; + const txOverhead = 1e4; + const expectedTxFee = txOverhead + teardownGas.l2Gas * 1 + teardownGas.daGas * 1; + const transactionFee = new Fr(expectedTxFee); + const teardownGasUsed = Gas.from({ l2Gas: 1e6, daGas: 1e6 }); + + const simulatorResults: PublicExecutionResult[] = [ + // Teardown + PublicExecutionResultBuilder.fromPublicExecutionRequest({ + request: teardownRequest, + nestedExecutions: [], + }).build({ + startGasLeft: teardownGas, + endGasLeft: 
teardownGas.sub(teardownGasUsed), + transactionFee, + }), + ]; + + jest.spyOn(processor.publicExecutor, 'simulate').mockImplementation(execution => { + if (simulatorCallCount < simulatorResults.length) { + const result = simulatorResults[simulatorCallCount++]; + return Promise.resolve(result); + } else { + throw new Error(`Unexpected execution request: ${execution}, call count: ${simulatorCallCount}`); + } + }); + + await processor.process([tx]); + }); + }); +}); diff --git a/yarn-project/simulator/src/public/light_public_processor.ts b/yarn-project/simulator/src/public/light_public_processor.ts new file mode 100644 index 000000000000..da2637675d7f --- /dev/null +++ b/yarn-project/simulator/src/public/light_public_processor.ts @@ -0,0 +1,458 @@ +// A minimal version of the public processor - that does not have the fluff +import { MerkleTreeId, Tx, type TxValidator } from '@aztec/circuit-types'; +import { + Gas, + type GlobalVariables, + type Header, + MAX_NOTE_HASHES_PER_TX, + MAX_NULLIFIERS_PER_TX, + MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, + NULLIFIER_SUBTREE_HEIGHT, + type Nullifier, + PUBLIC_DATA_SUBTREE_HEIGHT, + PublicDataTreeLeaf, + PublicDataUpdateRequest, + type ScopedNoteHash, + type ScopedNullifier, + getNonEmptyItems, +} from '@aztec/circuits.js'; +import { siloNoteHash, siloNullifier } from '@aztec/circuits.js/hash'; +import { makeTuple } from '@aztec/foundation/array'; +import { OrderedMap, padArrayEnd } from '@aztec/foundation/collection'; +import { Fr } from '@aztec/foundation/fields'; +import { type TelemetryClient } from '@aztec/telemetry-client'; +import { type MerkleTreeOperations } from '@aztec/world-state'; + +import { type PublicExecutionResult } from './execution.js'; +import { PublicExecutor } from './executor.js'; +import { type WorldStateDB } from './public_db_sources.js'; + +export class InvalidTransactionsFound extends Error { + constructor() { + super('Double spend tx found'); + } +} + +export type NewStateUpdates = { + 
nullifiers: Fr[]; + noteHashes: Fr[]; + publicDataWrites: PublicDataUpdateRequest[]; +}; + +/** + * A variant of the public processor that does not run the kernel circuits + * + * TODO(make issues): + * - Gas accounting is not complete - https://github.com/AztecProtocol/aztec-packages/issues/8962 + * - Calculating the state root (archive) is not complete - https://github.com/AztecProtocol/aztec-packages/issues/8961 + */ +export class LightPublicProcessor { + public publicExecutor: PublicExecutor; + + // State + private pendingNullifiers: Nullifier[]; + + constructor( + private merkleTrees: MerkleTreeOperations, + private worldStateDB: WorldStateDB, + private globalVariables: GlobalVariables, + historicalHeader: Header, + private txValidator: TxValidator, + telemetryClient: TelemetryClient, + ) { + this.publicExecutor = new PublicExecutor(worldStateDB, historicalHeader, telemetryClient); + this.pendingNullifiers = []; + } + + addNullifiers(nullifiers: Nullifier[]) { + this.pendingNullifiers.push(...nullifiers); + } + + public getTreeSnapshots() { + return Promise.all([ + this.merkleTrees.getTreeInfo(MerkleTreeId.NULLIFIER_TREE), + this.merkleTrees.getTreeInfo(MerkleTreeId.NOTE_HASH_TREE), + this.merkleTrees.getTreeInfo(MerkleTreeId.PUBLIC_DATA_TREE), + this.merkleTrees.getTreeInfo(MerkleTreeId.L1_TO_L2_MESSAGE_TREE), + this.merkleTrees.getTreeInfo(MerkleTreeId.ARCHIVE), + ]); + } + + /** + * Process a list of transactions + * + * If any of the transactions are invalid, then we throw an error + * @param txs - The transactions to process + */ + public async process(txs: Tx[]) { + // TODO(md): do we need dummy transactions? + txs = txs.map(tx => Tx.clone(tx)); + + await this.validateTransactions(txs); + + for (const tx of txs) { + if (tx.hasPublicCalls()) { + await this.executeEnqueuedCallsAndApplyStateUpdates(tx); + } else { + await this.applyPrivateStateUpdates(tx); + + // TODO(md): do i need to do this? 
+ // Apply empty public data writes + const emptyPublicDataWrites = makeTuple(MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, () => + PublicDataTreeLeaf.empty(), + ); + await this.merkleTrees.batchInsert( + MerkleTreeId.PUBLIC_DATA_TREE, + emptyPublicDataWrites.map(x => x.toBuffer()), + PUBLIC_DATA_SUBTREE_HEIGHT, + ); + } + } + } + + /** + * Validate the transactions + * + * If any of the transactions are invalid, we throw an error + * Halting block validation + * @param txs - The transactions to validate + */ + async validateTransactions(txs: Tx[]) { + const [_, invalidTxs] = await this.txValidator.validateTxs(txs); + + if (invalidTxs.length > 0) { + throw new InvalidTransactionsFound(); + } + } + + /** + * Execute the public calls and apply the state updates + * + * If the transaction has reverted in setup, we throw an error + * @param tx - The transaction to execute + */ + async executeEnqueuedCallsAndApplyStateUpdates(tx: Tx) { + const publicExecutionResults = await this.executePublicCalls(tx); + + // The transaction has reverted in setup - the block should fail + if (!publicExecutionResults) { + throw new Error('Reverted in setup'); + } + + if (tx.data.forPublic) { + const stateUpdates = this.accumulateTransactionAndExecutedStateUpdates(tx, publicExecutionResults); + await this.writeStateUpdates(stateUpdates); + } else { + throw new Error('Public transaction did not have public data'); + } + } + + /** + * Take the state updates from each of the transactions and merge them together + * + * 1. Non Revertible calls come first + * 2. Private updates come second + * 3. 
Public updates come third + */ + accumulateTransactionAndExecutedStateUpdates(tx: Tx, publicExecutionResults: NewStateUpdates) { + const { nullifiers, noteHashes, publicDataWrites } = publicExecutionResults; + const { + nullifiers: nonRevertibleNullifiers, + noteHashes: nonRevertibleNoteHashes, + publicDataUpdateRequests: nonRevertiblePublicDataUpdateRequests, + } = tx.data.forPublic!.endNonRevertibleData; + const { + nullifiers: txNullifiers, + noteHashes: txNoteHashes, + publicDataUpdateRequests: txPublicDataUpdateRequests, + } = tx.data.forPublic!.end; + + const nonEmptyNonRevertibleNullifiers = getNonEmptyItems(nonRevertibleNullifiers).map(n => n.value); + const nonEmptyTxNullifiers = getNonEmptyItems(txNullifiers).map(n => n.value); + const publicNullifiers = getNonEmptyItems(nullifiers); + + const nonEmptyNonRevertibleNoteHashes = getNonEmptyItems(nonRevertibleNoteHashes).map(n => n.value); + const nonEmptyTxNoteHashes = getNonEmptyItems(txNoteHashes).map(n => n.value); + const publicNoteHashes = getNonEmptyItems(noteHashes); + + const nonEmptyTxPublicDataUpdateRequests = txPublicDataUpdateRequests.filter(p => !p.isEmpty()); + const nonEmptyNonRevertiblePublicDataUpdateRequests = nonRevertiblePublicDataUpdateRequests.filter( + p => !p.isEmpty(), + ); + const nonEmptyPublicDataWrites = publicDataWrites.filter(p => !p.isEmpty()); + + const allNullifiers = padArrayEnd( + [...nonEmptyNonRevertibleNullifiers, ...nonEmptyTxNullifiers, ...publicNullifiers], + Fr.ZERO, + MAX_NULLIFIERS_PER_TX, + ); + const allNoteHashes = padArrayEnd( + [...nonEmptyNonRevertibleNoteHashes, ...nonEmptyTxNoteHashes, ...publicNoteHashes], + Fr.ZERO, + MAX_NOTE_HASHES_PER_TX, + ); + const allPublicDataUpdateRequests = [ + ...nonEmptyNonRevertiblePublicDataUpdateRequests, + ...nonEmptyTxPublicDataUpdateRequests, + ...nonEmptyPublicDataWrites, + ]; + + return { + nullifiers: allNullifiers, + noteHashes: allNoteHashes, + publicDataWrites: allPublicDataUpdateRequests, + }; + } + + async 
writeStateUpdates(stateUpdates: NewStateUpdates) { + const { nullifiers, noteHashes, publicDataWrites } = stateUpdates; + + // Convert public state to the tree leaves + const allPublicDataWrites = publicDataWrites.map( + ({ leafSlot, newValue }) => new PublicDataTreeLeaf(leafSlot, newValue), + ); + + await this.merkleTrees.appendLeaves(MerkleTreeId.NOTE_HASH_TREE, noteHashes); + await this.merkleTrees.batchInsert( + MerkleTreeId.NULLIFIER_TREE, + nullifiers.map(n => n.toBuffer()), + NULLIFIER_SUBTREE_HEIGHT, + ); + await this.merkleTrees.batchInsert( + MerkleTreeId.PUBLIC_DATA_TREE, + allPublicDataWrites.map(x => x.toBuffer()), + PUBLIC_DATA_SUBTREE_HEIGHT, + ); + } + + public async executePublicCalls(tx: Tx) { + // Transactions are split up into a number of parts + // 1. Non revertible calls - these run with the public kernel setup + // - This includes fee payment transactions + // 2. Public Calls - These are the noir code that are enqueued in the tx by the users + // - This runs with the public kernel app logic + // 3. 
Teardown Call - This is the public teardown call that does fee refunds + // - This runs with the public kernel teardown + await this.worldStateDB.addNewContracts(tx); + + // Call stacks + const nonRevertibleCalls = tx.getNonRevertiblePublicExecutionRequests(); + const publicCalls = tx.getRevertiblePublicExecutionRequests(); + const teardownCall = tx.getPublicTeardownExecutionRequest()!; + + // Gas + const teardownGasLimit = tx.data.constants.txContext.gasSettings.getTeardownLimits(); + let gasRemaining = tx.data.constants.txContext.gasSettings.getLimits().sub(teardownGasLimit); + + // Store the successful results for db insertions + const nonRevertiblePublicExecutionResults: PublicExecutionResult[] = []; + const publicExecutionResults: PublicExecutionResult[] = []; + + // Get side effect counters + const publicKernelOutput = tx.data.toPublicKernelCircuitPublicInputs(); + let startSideEffectCounter = publicKernelOutput.endSideEffectCounter + 1; + + // Execute the non revertible calls + for (const call of nonRevertibleCalls) { + const res = await this.publicExecutor.simulate( + call, + this.globalVariables, + gasRemaining, + tx.data.constants.txContext, + this.pendingNullifiers, + Fr.ZERO, + startSideEffectCounter, + ); + startSideEffectCounter = Number(res.endSideEffectCounter.toBigInt()) + 1; + gasRemaining = Gas.from(res.endGasLeft); + + if (!this.temporaryDidCallOrNestedCallRevert(res)) { + this.addNullifiers(res.nullifiers); + nonRevertiblePublicExecutionResults.push(res); + await this.worldStateDB.checkpoint(); + } else { + await this.worldStateDB.removeNewContracts(tx); + await this.worldStateDB.rollbackToCommit(); + + return; + } + } + + for (const call of publicCalls) { + // Execute the revertible public calls + const res = await this.publicExecutor.simulate( + call, + this.globalVariables, + gasRemaining, + tx.data.constants.txContext, + this.pendingNullifiers, + Fr.ZERO, + startSideEffectCounter, + ); + startSideEffectCounter = 
Number(res.endSideEffectCounter.toBigInt()) + 1; + gasRemaining = Gas.from(res.endGasLeft); + + if (!this.temporaryDidCallOrNestedCallRevert(res)) { + this.addNullifiers(res.nullifiers); + publicExecutionResults.push(res); + } else { + // Similarly, if a public call fails, it will revert + // back to the setup state + await this.worldStateDB.removeNewContracts(tx); + await this.worldStateDB.rollbackToCheckpoint(); + } + } + + if (teardownCall) { + const res = await this.publicExecutor.simulate( + teardownCall, + this.globalVariables, + teardownGasLimit, + tx.data.constants.txContext, + this.pendingNullifiers, + Fr.ZERO, + startSideEffectCounter, + ); + + if (!this.temporaryDidCallOrNestedCallRevert(res)) { + this.addNullifiers(res.nullifiers); + publicExecutionResults.push(res); + } else { + // Similarly, if the teardown call fails, it will revert + // back to the setup state + await this.worldStateDB.removeNewContracts(tx); + await this.worldStateDB.rollbackToCheckpoint(); + } + } + + await this.worldStateDB.commit(); + + // Sweep up performed state updates + return this.aggregateResults(nonRevertiblePublicExecutionResults, publicExecutionResults); + } + + aggregateResults(nonRevertibleResults: PublicExecutionResult[], revertibleResults: PublicExecutionResult[]) { + const nonRevertibleNestedExecutions = nonRevertibleResults.flatMap(res => this.collectNestedExecutions(res)); + const nonRevertible = this.aggregatePublicExecutionResults(nonRevertibleNestedExecutions); + + const revertibleNestedExecutions = revertibleResults.flatMap(res => this.collectNestedExecutions(res)); + const revertible = this.aggregatePublicExecutionResults(revertibleNestedExecutions); + + return { + nullifiers: [...nonRevertible.nullifiers, ...revertible.nullifiers], + noteHashes: [...nonRevertible.newNoteHashes, ...revertible.newNoteHashes], + publicDataWrites: [...nonRevertible.publicDataWrites, ...revertible.publicDataWrites], + }; + } + + aggregatePublicExecutionResults(results: 
PublicExecutionResult[]) { + const txCallNewNullifiers: ScopedNullifier[][] = []; + const txCallNewNoteHashes: ScopedNoteHash[][] = []; + const txCallPublicDataWrites: PublicDataUpdateRequest[][] = []; + + for (const res of results) { + // Scope the nullifiers, note hashes and public data writes to the contract address + txCallNewNullifiers.push( + getNonEmptyItems(res.nullifiers).map(n => n.scope(res.executionRequest.contractAddress)), + ); + txCallNewNoteHashes.push( + getNonEmptyItems(res.noteHashes).map(n => n.scope(res.executionRequest.contractAddress)), + ); + txCallPublicDataWrites.push( + getNonEmptyItems(res.contractStorageUpdateRequests).map(req => + PublicDataUpdateRequest.fromContractStorageUpdateRequest(res.executionRequest.contractAddress, req), + ), + ); + } + + // Reverse + const newNullifiers = txCallNewNullifiers.flat(); + const newNoteHashes = txCallNewNoteHashes.flat(); + const newPublicDataWrites = txCallPublicDataWrites.flat(); + + // Squash data writes + const uniquePublicDataWrites = this.removeDuplicatesFromStart(newPublicDataWrites); + + const returning = { + nullifiers: newNullifiers.map(n => siloNullifier(n.contractAddress, n.value)), + newNoteHashes: newNoteHashes.map(n => siloNoteHash(n.contractAddress, n.value)), + publicDataWrites: uniquePublicDataWrites, + }; + return returning; + } + + collectNestedExecutions(result: PublicExecutionResult) { + const nestedExecutions: PublicExecutionResult[] = []; + for (const res of result.nestedExecutions) { + nestedExecutions.push(...this.collectNestedExecutions(res)); + } + return [result, ...nestedExecutions]; + } + + // There is an assumption based on how the block builder works, that the transactions + // provided here CANNOT revert, else they are not added to the block as the kernels + // will fail + // This will change whenever the vm is changed to be able to revert + public async applyPrivateStateUpdates(tx: Tx) { + const insertionPromises: Promise[] = []; + if (tx.data.forRollup) { + 
const { nullifiers, noteHashes } = tx.data.forRollup.end; + if (nullifiers) { + insertionPromises.push( + this.merkleTrees.batchInsert( + MerkleTreeId.NULLIFIER_TREE, + nullifiers.map(n => n.toBuffer()), + NULLIFIER_SUBTREE_HEIGHT, + ), + ); + } + + if (noteHashes) { + insertionPromises.push(this.merkleTrees.appendLeaves(MerkleTreeId.NOTE_HASH_TREE, noteHashes)); + } + } + + await Promise.all(insertionPromises); + } + + /** + * Remove duplicates keeping the oldest item, where duplicates are defined by the leaf slot and side effect counter + * @param items + * @returns + */ + removeDuplicatesFromStart(items: PublicDataUpdateRequest[]) { + const slotMap: OrderedMap = new OrderedMap(); + + for (const obj of items) { + const { leafSlot, sideEffectCounter } = obj; + + if (!slotMap.has(leafSlot.toBigInt())) { + slotMap.set(leafSlot.toBigInt(), obj); + } + if (sideEffectCounter > slotMap.get(leafSlot.toBigInt())!.sideEffectCounter) { + // Remove the first instance + slotMap.delete(leafSlot.toBigInt()); + slotMap.set(leafSlot.toBigInt(), obj); + } + } + + return Array.from(slotMap.values()); + } + + // This is a temporary method, in our current kernel model, + // nested calls will trigger an entire execution reversion + // if any nested call reverts + temporaryDidCallOrNestedCallRevert(result: PublicExecutionResult) { + if (result.revertReason) { + return true; + } + if (result.nestedExecutions.length > 0) { + for (const nested of result.nestedExecutions) { + if (this.temporaryDidCallOrNestedCallRevert(nested)) { + return true; + } + } + } + return false; + } +} diff --git a/yarn-project/validator-client/package.json b/yarn-project/validator-client/package.json index 23ca3b591068..ced03c4c44f2 100644 --- a/yarn-project/validator-client/package.json +++ b/yarn-project/validator-client/package.json @@ -61,6 +61,7 @@ "@aztec/ethereum": "workspace:^", "@aztec/foundation": "workspace:^", "@aztec/p2p": "workspace:^", + "@aztec/simulator": "workspace:^", "@aztec/types": 
"workspace:^", "koa": "^2.14.2", "koa-router": "^12.0.0", diff --git a/yarn-project/validator-client/src/config.ts b/yarn-project/validator-client/src/config.ts index 241bffbfda19..6eac49903a7c 100644 --- a/yarn-project/validator-client/src/config.ts +++ b/yarn-project/validator-client/src/config.ts @@ -22,6 +22,9 @@ export interface ValidatorClientConfig { /** Wait for attestations timeout */ attestationWaitTimeoutMs: number; + + /** Re-execute transactions before attesting */ + validatorReEx: boolean; } export const validatorClientConfigMappings: ConfigMappingsType = { @@ -45,6 +48,11 @@ export const validatorClientConfigMappings: ConfigMappingsType[] = [ + new DataTxValidator(), + new MetadataTxValidator(globalVariables.chainId, globalVariables.blockNumber), + new DoubleSpendTxValidator(worldStateDB), + ]; + const txValidator: TxValidator = new AggregateTxValidator(...txValidaors); + + return new LightPublicProcessor( + merkleTrees, + worldStateDB, + globalVariables, + historicalHeader, + txValidator, + this.telemetryClient, + ); + } +} diff --git a/yarn-project/validator-client/src/errors/validator.error.ts b/yarn-project/validator-client/src/errors/validator.error.ts index 73c3eaf27296..0c1bac26ead2 100644 --- a/yarn-project/validator-client/src/errors/validator.error.ts +++ b/yarn-project/validator-client/src/errors/validator.error.ts @@ -23,3 +23,21 @@ export class TransactionsNotAvailableError extends ValidatorError { super(`Transactions not available: ${txHashes.join(', ')}`); } } + +export class FailedToReExecuteTransactionsError extends ValidatorError { + constructor(txHashes: TxHash[]) { + super(`Failed to re-execute transactions: ${txHashes.join(', ')}`); + } +} + +export class ReExStateMismatchError extends ValidatorError { + constructor() { + super('Re-execution state mismatch'); + } +} + +export class PublicProcessorNotProvidedError extends ValidatorError { + constructor() { + super('Public processor not provided'); + } +} diff --git 
a/yarn-project/validator-client/src/factory.ts b/yarn-project/validator-client/src/factory.ts index 1438448d8509..47e5cac0666b 100644 --- a/yarn-project/validator-client/src/factory.ts +++ b/yarn-project/validator-client/src/factory.ts @@ -1,17 +1,33 @@ +import { type ArchiveSource } from '@aztec/archiver'; +import { type WorldStateSynchronizer } from '@aztec/circuit-types'; import { type P2P } from '@aztec/p2p'; import { generatePrivateKey } from 'viem/accounts'; +import { type TelemetryClient } from '../../telemetry-client/src/telemetry.js'; import { type ValidatorClientConfig } from './config.js'; +import { LightPublicProcessorFactory } from './duties/light_public_processor_factory.js'; import { ValidatorClient } from './validator.js'; -export function createValidatorClient(config: ValidatorClientConfig, p2pClient: P2P) { +export function createValidatorClient( + config: ValidatorClientConfig, + p2pClient: P2P, + worldStateSynchronizer: WorldStateSynchronizer, + archiver: ArchiveSource, + telemetry: TelemetryClient, +) { if (config.disableValidator) { return undefined; } - // TODO: should this be exposed via a flag? 
if (config.validatorPrivateKey === undefined || config.validatorPrivateKey === '') { config.validatorPrivateKey = generatePrivateKey(); } + + // We only craete a public processor factory if re-execution is enabled + if (config.validatorReEx) { + const publicProcessorFactory = new LightPublicProcessorFactory(worldStateSynchronizer, archiver, telemetry); + return ValidatorClient.new(config, p2pClient, publicProcessorFactory); + } + return ValidatorClient.new(config, p2pClient); } diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index fc870184fecd..077c29ba261f 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -6,6 +6,7 @@ import { makeHeader } from '@aztec/circuits.js/testing'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; import { type P2P } from '@aztec/p2p'; +import { type LightPublicProcessor } from '@aztec/simulator'; import { describe, expect, it } from '@jest/globals'; import { type MockProxy, mock } from 'jest-mock-extended'; @@ -13,9 +14,11 @@ import { type PrivateKeyAccount, generatePrivateKey, privateKeyToAccount } from import { makeBlockProposal } from '../../circuit-types/src/p2p/mocks.js'; import { type ValidatorClientConfig } from './config.js'; +import { type LightPublicProcessorFactory } from './duties/light_public_processor_factory.js'; import { AttestationTimeoutError, InvalidValidatorPrivateKeyError, + PublicProcessorNotProvidedError, TransactionsNotAvailableError, } from './errors/validator.error.js'; import { ValidatorClient } from './validator.js'; @@ -25,11 +28,18 @@ describe('ValidationService', () => { let validatorClient: ValidatorClient; let p2pClient: MockProxy; let validatorAccount: PrivateKeyAccount; + let lightPublicProcessorFactory: MockProxy; + let lightPublicProcessor: MockProxy; beforeEach(() => { p2pClient = mock(); 
p2pClient.getAttestationsForSlot.mockImplementation(() => Promise.resolve([])); + lightPublicProcessorFactory = mock(); + lightPublicProcessor = mock(); + + lightPublicProcessorFactory.createWithSyncedState.mockImplementation(() => Promise.resolve(lightPublicProcessor)); + const validatorPrivateKey = generatePrivateKey(); validatorAccount = privateKeyToAccount(validatorPrivateKey); @@ -38,6 +48,7 @@ describe('ValidationService', () => { attestationPoolingIntervalMs: 1000, attestationWaitTimeoutMs: 1000, disableValidator: false, + validatorReEx: false, }; validatorClient = ValidatorClient.new(config, p2pClient); }); @@ -47,6 +58,11 @@ describe('ValidationService', () => { expect(() => ValidatorClient.new(config, p2pClient)).toThrow(InvalidValidatorPrivateKeyError); }); + it('Should throw an error if re-execution is enabled but no public processor is provided', () => { + config.validatorReEx = true; + expect(() => ValidatorClient.new(config, p2pClient)).toThrow(PublicProcessorNotProvidedError); + }); + it('Should create a valid block proposal', async () => { const header = makeHeader(); const archive = Fr.random(); @@ -78,4 +94,19 @@ describe('ValidationService', () => { TransactionsNotAvailableError, ); }); + + it('Should not return an attestation if re-execution fails', async () => { + const proposal = makeBlockProposal(); + + // mock the p2pClient.getTxStatus to return undefined for all transactions + p2pClient.getTxStatus.mockImplementation(() => undefined); + + const val = ValidatorClient.new(config, p2pClient, lightPublicProcessorFactory); + lightPublicProcessor.process.mockImplementation(() => { + throw new Error(); + }); + + const attestation = await val.attestToProposal(proposal); + expect(attestation).toBeUndefined(); + }); }); diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index 7e96391197fc..b518b46f3a9d 100644 --- a/yarn-project/validator-client/src/validator.ts +++ 
b/yarn-project/validator-client/src/validator.ts @@ -1,16 +1,21 @@ -import { type BlockAttestation, type BlockProposal, type TxHash } from '@aztec/circuit-types'; +import { type BlockAttestation, type BlockProposal, type Tx, type TxHash } from '@aztec/circuit-types'; import { type Header } from '@aztec/circuits.js'; import { Buffer32 } from '@aztec/foundation/buffer'; import { type Fr } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; +import { Timer } from '@aztec/foundation/timer'; import { type P2P } from '@aztec/p2p'; +import { type LightPublicProcessor } from '@aztec/simulator'; import { type ValidatorClientConfig } from './config.js'; +import { type LightPublicProcessorFactory } from './duties/light_public_processor_factory.js'; import { ValidationService } from './duties/validation_service.js'; import { AttestationTimeoutError, InvalidValidatorPrivateKeyError, + PublicProcessorNotProvidedError, + ReExStateMismatchError, TransactionsNotAvailableError, } from './errors/validator.error.js'; import { type ValidatorKeyStore } from './key_store/interface.js'; @@ -36,30 +41,32 @@ export class ValidatorClient implements Validator { constructor( keyStore: ValidatorKeyStore, private p2pClient: P2P, - private attestationPoolingIntervalMs: number, - private attestationWaitTimeoutMs: number, + private config: ValidatorClientConfig, + private lightPublicProcessorFactory?: LightPublicProcessorFactory | undefined, private log = createDebugLogger('aztec:validator'), ) { - //TODO: We need to setup and store all of the currently active validators https://github.com/AztecProtocol/aztec-packages/issues/7962 - this.validationService = new ValidationService(keyStore); this.log.verbose('Initialized validator'); } - static new(config: ValidatorClientConfig, p2pClient: P2P) { + static new( + config: ValidatorClientConfig, + p2pClient: P2P, + publicProcessorFactory?: LightPublicProcessorFactory 
| undefined, + ) { if (!config.validatorPrivateKey) { throw new InvalidValidatorPrivateKeyError(); } + //TODO: We need to setup and store all of the currently active validators https://github.com/AztecProtocol/aztec-packages/issues/7962 + if (config.validatorReEx && publicProcessorFactory === undefined) { + throw new PublicProcessorNotProvidedError(); + } + const privateKey = validatePrivateKey(config.validatorPrivateKey); const localKeyStore = new LocalKeyStore(privateKey); - const validator = new ValidatorClient( - localKeyStore, - p2pClient, - config.attestationPoolingIntervalMs, - config.attestationWaitTimeoutMs, - ); + const validator = new ValidatorClient(localKeyStore, p2pClient, config, publicProcessorFactory); validator.registerBlockProposalHandler(); return validator; } @@ -83,9 +90,17 @@ export class ValidatorClient implements Validator { // Check that all of the tranasctions in the proposal are available in the tx pool before attesting try { await this.ensureTransactionsAreAvailable(proposal); + + if (this.config.validatorReEx) { + this.log.verbose(`Re-executing transactions in the proposal before attesting`); + await this.reExecuteTransactions(proposal); + } } catch (error: any) { if (error instanceof TransactionsNotAvailableError) { this.log.error(`Transactions not available, skipping attestation ${error.message}`); + } else { + // Catch all error handler + this.log.error(`Failed to attest to proposal: ${error.message}`); } return undefined; } @@ -97,6 +112,84 @@ export class ValidatorClient implements Validator { return this.validationService.attestToProposal(proposal); } + /** + * Re-execute the transactions in the proposal and check that the state updates match the header state + * @param proposal - The proposal to re-execute + */ + async reExecuteTransactions(proposal: BlockProposal) { + const { header, txHashes } = proposal.payload; + + const txs = (await Promise.all(txHashes.map(tx => this.p2pClient.getTxByHash(tx)))).filter(tx => tx !== 
undefined); + + // If we cannot request all of the transactions, then we should fail + if (txs.length !== txHashes.length) { + this.log.error(`Failed to get transactions from the network: ${txHashes.join(', ')}`); + throw new TransactionsNotAvailableError(txHashes); + } + + // Assertion: This check will fail if re-execution is not enabled + if (!this.lightPublicProcessorFactory) { + throw new PublicProcessorNotProvidedError(); + } + + // We sync the state to the previous block as the public processor will not process the current block + const targetBlockNumber = header.globalVariables.blockNumber.toNumber() - 1; + this.log.verbose(`Re-ex: Syncing state to block number ${targetBlockNumber}`); + + const lightProcessor = await this.lightPublicProcessorFactory.createWithSyncedState( + targetBlockNumber, + undefined, + header.globalVariables, + ); + + this.log.verbose(`Re-ex: Re-executing transactions`); + const timer = new Timer(); + await lightProcessor.process(txs as Tx[]); + this.log.verbose(`Re-ex: Re-execution complete ${timer.ms()}ms`); + + // This function will throw an error if state updates do not match + await this.checkReExecutedStateRoots(header, lightProcessor); + } + + /** + * Check that the state updates match the header state + * + * TODO(md): + * - Check archive + * - Check l1 to l2 messages + * + * @param header - The header to check + * @param lightProcessor - The light processor to check + */ + private async checkReExecutedStateRoots(header: Header, lightProcessor: LightPublicProcessor) { + const [newNullifierTree, newNoteHashTree, newPublicDataTree] = await lightProcessor.getTreeSnapshots(); + + if (!header.state.partial.nullifierTree.root.toBuffer().equals(newNullifierTree.root)) { + this.log.error( + `Re-ex: nullifierTree does not match, ${header.state.partial.nullifierTree.root + .toBuffer() + .toString('hex')} !== ${newNullifierTree.root.toString('hex')}`, + ); + throw new ReExStateMismatchError(); + } + if 
(!header.state.partial.noteHashTree.root.toBuffer().equals(newNoteHashTree.root)) { + this.log.error( + `Re-ex: noteHashTree does not match, ${header.state.partial.noteHashTree.root + .toBuffer() + .toString('hex')} !== ${newNoteHashTree.root.toString('hex')}`, + ); + throw new ReExStateMismatchError(); + } + if (!header.state.partial.publicDataTree.root.toBuffer().equals(newPublicDataTree.root)) { + this.log.error( + `Re-ex: publicDataTree does not match, ${header.state.partial.publicDataTree.root + .toBuffer() + .toString('hex')} !== ${newPublicDataTree.root.toString('hex')}`, + ); + throw new ReExStateMismatchError(); + } + } + /** * Ensure that all of the transactions in the proposal are available in the tx pool before attesting * @@ -155,15 +248,15 @@ export class ValidatorClient implements Validator { } const elapsedTime = Date.now() - startTime; - if (elapsedTime > this.attestationWaitTimeoutMs) { + if (elapsedTime > this.config.attestationWaitTimeoutMs) { this.log.error(`Timeout waiting for ${numberOfRequiredAttestations} attestations for slot, ${slot}`); throw new AttestationTimeoutError(numberOfRequiredAttestations, slot); } this.log.verbose( - `Collected ${attestations.length} attestations so far, waiting ${this.attestationPoolingIntervalMs}ms for more...`, + `Collected ${attestations.length} attestations so far, waiting ${this.config.attestationPoolingIntervalMs}ms for more...`, ); - await sleep(this.attestationPoolingIntervalMs); + await sleep(this.config.attestationPoolingIntervalMs); } } } diff --git a/yarn-project/validator-client/tsconfig.json b/yarn-project/validator-client/tsconfig.json index a4198a7f0c2f..f1ed8204d2e8 100644 --- a/yarn-project/validator-client/tsconfig.json +++ b/yarn-project/validator-client/tsconfig.json @@ -21,6 +21,9 @@ { "path": "../p2p" }, + { + "path": "../simulator" + }, { "path": "../types" } diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index 0c725d90abe1..19f2a201cdf1 100644 --- a/yarn-project/yarn.lock 
+++ b/yarn-project/yarn.lock @@ -1227,6 +1227,7 @@ __metadata: "@aztec/ethereum": "workspace:^" "@aztec/foundation": "workspace:^" "@aztec/p2p": "workspace:^" + "@aztec/simulator": "workspace:^" "@aztec/types": "workspace:^" "@jest/globals": ^29.5.0 "@types/jest": ^29.5.0