diff --git a/.claude/agents/analyze-logs.md b/.claude/agents/analyze-logs.md index baed73996667..3d9b4d3387f5 100644 --- a/.claude/agents/analyze-logs.md +++ b/.claude/agents/analyze-logs.md @@ -39,14 +39,14 @@ Return a condensed summary: ## Key Events Timeline -**IMPORTANT**: Include BOTH absolute timestamps (for referencing original logs) AND relative offsets from test start (for easier understanding and cross-run comparison). +**IMPORTANT**: Include BOTH absolute timestamps (for referencing original logs) AND relative offsets from test start (for easier understanding and cross-run comparison). Include the **actor** column when logs come from multi-node tests. -| Time | Offset | Level | Module | Event | -|------|--------|-------|--------|-------| -| 11:18:42 | +0.0s | INFO | e2e | Running test my_test | -| 11:18:44 | +2.1s | INFO | sequencer | Building block | -| 11:18:47 | +5.3s | ERROR | sequencer | Failed to build block | -| 11:18:50 | +8.0s | WARN | p2p | Connection timeout | +| Time | Offset | Actor | Level | Module | Event | +|------|--------|-------|-------|--------|-------| +| 11:18:42 | +0.0s | | INFO | e2e | Running test my_test | +| 11:18:44 | +2.1s | validator-0 | INFO | sequencer | Building block | +| 11:18:47 | +5.3s | validator-0 | ERROR | sequencer | Failed to build block | +| 11:18:50 | +8.0s | node-0 | WARN | p2p | Connection timeout | (Offset = seconds since "Running test" marker. Makes it easy to compare timing between failed and successful runs.) @@ -75,15 +75,27 @@ Return a condensed summary: Aztec logs follow this format: ``` -HH:MM:SS [HH:MM:SS.mmm] LEVEL: module:submodule Message {optional json} +HH:MM:SS [HH:MM:SS.mmm] LEVEL: module [actor] [instanceId] Message {optional json} ``` -Example: +- **module**: Always present. The component emitting the log (colon-separated hierarchy). +- **actor**: Optional. Identifies *which process/node* is emitting the log in multi-node tests (e.g., `validator-0`, `node-1`, `prover-0`). 
Only set in e2e tests that spin up multiple nodes. +- **instanceId**: Optional. Identifies *which instance* of a component within a single process (e.g., `checkpoint-5`, `epoch-3`). Used when multiple instances of the same component exist concurrently (e.g., one epoch-proving-job per epoch). + +Both `actor` and `instanceId` appear between the module name and the message in pretty-printed output. + +Examples: ``` 11:18:42 [11:18:42.518] WARN: node:blob-client:client No L1 consensus host urls configured 11:18:42 [11:18:42.562] INFO: world_state Created world state synchroniser with block history of 2 +11:18:44 [11:18:44.100] INFO: sequencer validator-0 Building block 1 +11:18:45 [11:18:45.200] INFO: checkpoint-builder validator-1 checkpoint-5 Building block 1 ``` +In the last two examples: +- `validator-0` / `validator-1` is the **actor** (which node) +- `checkpoint-5` is the **instanceId** (which checkpoint instance) + **Levels** (in priority order for investigation): 1. `ERROR` - Always investigate 2. `WARN` - Often important @@ -165,16 +177,25 @@ FAIL src/e2e_something.test.ts ## Multi-Actor Awareness Tests may have multiple actors running concurrently: -- Multiple nodes -- Sequencers -- Validators -- Provers +- Multiple nodes (`node-0`, `node-1`, ...) +- Validators (`validator-0`, `validator-1`, ...) +- Provers (`prover-0`, `prover-1`, ...) + +The **actor** field in logs identifies which node/process emitted a log line. It appears between the module name and the message: +``` +INFO: sequencer validator-0 Building block 1 # Sequencer on validator-0 +INFO: sequencer validator-1 Building block 1 # Sequencer on validator-1 +INFO: p2p node-0 Received block proposal # P2P on node-0 +``` + +**Filtering by actor**: When investigating a specific node's behavior, filter logs by the actor name (e.g., grep for `validator-0`) to isolate that node's log stream. -Log entries belong to different actors, sometimes (but not always) identified by a keyword or number in the module. 
Example: +**Instance IDs**: Within a single actor, the **instanceId** field distinguishes multiple instances of the same component. For example, epoch-proving-jobs spawn one per epoch: ``` -INFO: node:MAIN-aztec-node Starting... # Main node -INFO: node:SECONDARY-aztec-node Starting... # Secondary node +INFO: epoch-proving-job prover-0 epoch-3 Starting proof generation +INFO: epoch-proving-job prover-0 epoch-4 Starting proof generation ``` +Here `prover-0` is the actor and `epoch-3`/`epoch-4` are instance IDs. ## High-Value Log Sources diff --git a/.github/workflows/publish-misc-pages.yml b/.github/workflows/publish-misc-pages.yml new file mode 100644 index 000000000000..3b2e362e2b5c --- /dev/null +++ b/.github/workflows/publish-misc-pages.yml @@ -0,0 +1,36 @@ +name: Publish misc GitHub Pages +on: + push: + branches: + - next + paths: + - "yarn-project/scripts/latency-explorer/**" + +jobs: + publish: + runs-on: ubuntu-latest + steps: + - name: Checkout source repo + uses: actions/checkout@v4 + + - name: Checkout benchmark-page-data repo + uses: actions/checkout@v4 + with: + repository: AztecProtocol/benchmark-page-data + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + path: benchmark-page-data + + - name: Copy latency explorer + run: | + mkdir -p benchmark-page-data/misc/tx-latency-explorer + cp yarn-project/scripts/latency-explorer/index.html benchmark-page-data/misc/tx-latency-explorer/index.html + + - name: Push changes + working-directory: benchmark-page-data + run: | + git config user.name "AztecBot" + git config user.email "tech@aztecprotocol.com" + git add . 
+ git diff --cached --quiet && echo "No changes to publish" && exit 0 + git commit -m "Update tx-latency-explorer from ${{ github.sha }}" + git push diff --git a/ci3/run_test_cmd b/ci3/run_test_cmd index 61d28160f859..1586444573f7 100755 --- a/ci3/run_test_cmd +++ b/ci3/run_test_cmd @@ -47,8 +47,9 @@ if [ "$CI" -eq 1 ]; then allow_flakes=1 track_test_fail=1 - # Track the test in the history tracker if this is in merge queue or targeting a version branch. - if [[ "$is_merge_queue" -eq 1 || ("${TARGET_BRANCH:-}" =~ ^v[0-9]) ]]; then + # Track the test in the history tracker if this is in merge queue, targeting a version branch, or + # targeting a merge-train. + if [[ "$is_merge_queue" -eq 1 || ("${TARGET_BRANCH:-}" =~ ^v[0-9]) || ("${TARGET_BRANCH:-}" == merge-train/*) ]]; then track_test_history=1 fi diff --git a/spartan/aztec-node/templates/_pod-template.yaml b/spartan/aztec-node/templates/_pod-template.yaml index 2011f6a609a8..b6989b8543ba 100644 --- a/spartan/aztec-node/templates/_pod-template.yaml +++ b/spartan/aztec-node/templates/_pod-template.yaml @@ -248,6 +248,10 @@ spec: - name: SLASH_DUPLICATE_PROPOSAL_PENALTY value: {{ .Values.node.slash.duplicateProposalPenalty | quote }} {{- end }} + {{- if .Values.node.slash.duplicateAttestationPenalty }} + - name: SLASH_DUPLICATE_ATTESTATION_PENALTY + value: {{ .Values.node.slash.duplicateAttestationPenalty | quote }} + {{- end }} {{- if .Values.node.slash.attestDescendantOfInvalidPenalty }} - name: SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY value: {{ .Values.node.slash.attestDescendantOfInvalidPenalty | quote }} diff --git a/spartan/bootstrap.sh b/spartan/bootstrap.sh index f0889e796d67..d769085d45ad 100755 --- a/spartan/bootstrap.sh +++ b/spartan/bootstrap.sh @@ -153,6 +153,7 @@ function network_bench { echo_header "spartan bench" gcp_auth + export K8S_ENRICHER=${K8S_ENRICHER:-1} network_bench_cmds | parallelize 1 } @@ -165,6 +166,7 @@ function proving_bench { echo_header "spartan proving bench" gcp_auth + export 
K8S_ENRICHER=${K8S_ENRICHER:-1} proving_bench_cmds | parallelize 1 } @@ -219,6 +221,7 @@ case "$cmd" in # Run the network deploy script DENOISE=1 denoise "./scripts/network_deploy.sh $env_file" + export K8S_ENRICHER=${K8S_ENRICHER:-1} if [[ "${RUN_TESTS:-}" == "true" ]]; then if [[ -n "$test_set" ]]; then network_tests_$test_set "$env_file" @@ -281,8 +284,8 @@ case "$cmd" in source scripts/source_network_env.sh source_network_env ${KIND_ENV:-kind-provers} namespace="upgrade-rollup-version${NAME_POSTFIX:-}" - INSTALL_METRICS=false \ - ./scripts/test_kind.sh src/spartan/upgrade_rollup_version.test.ts "$namespace" + export K8S_ENRICHER=${K8S_ENRICHER:-1} + ./scripts/test_kind.sh src/spartan/upgrade_rollup_version.test.ts "$namespace" ;; "network_teardown") env_file="$1" diff --git a/spartan/environments/mbps-net.env b/spartan/environments/mbps-net.env new file mode 100644 index 000000000000..083e25e74bee --- /dev/null +++ b/spartan/environments/mbps-net.env @@ -0,0 +1,60 @@ +CREATE_ETH_DEVNET=false +GCP_REGION=us-west1-a +CLUSTER=aztec-gke-private +NETWORK=next-net +NAMESPACE=mbps-net +DESTROY_NAMESPACE=true +ETHEREUM_CHAIN_ID=11155111 +ETHEREUM_RPC_URLS=REPLACE_WITH_GCP_SECRET +ETHEREUM_CONSENSUS_HOST_URLS=REPLACE_WITH_GCP_SECRET +ETHEREUM_CONSENSUS_HOST_API_KEYS=REPLACE_WITH_GCP_SECRET +ETHEREUM_CONSENSUS_HOST_API_KEY_HEADERS=REPLACE_WITH_GCP_SECRET +FUNDING_PRIVATE_KEY=REPLACE_WITH_GCP_SECRET +LABS_INFRA_MNEMONIC=REPLACE_WITH_GCP_SECRET +ROLLUP_DEPLOYMENT_PRIVATE_KEY=REPLACE_WITH_GCP_SECRET +OTEL_COLLECTOR_ENDPOINT=REPLACE_WITH_GCP_SECRET +VERIFY_CONTRACTS=false +ETHERSCAN_API_KEY=REPLACE_WITH_GCP_SECRET +DEPLOY_INTERNAL_BOOTNODE=true +STORE_SNAPSHOT_URL= +BLOB_BUCKET_DIRECTORY=${BLOB_BUCKET_DIRECTORY:-next-net/blobs} +R2_ACCESS_KEY_ID=REPLACE_WITH_GCP_SECRET +R2_SECRET_ACCESS_KEY=REPLACE_WITH_GCP_SECRET +PROVER_FAILED_PROOF_STORE=gs://aztec-develop/next-net/failed-proofs +TEST_ACCOUNTS=true +SPONSORED_FPC=true +SEQ_MIN_TX_PER_BLOCK=0 +SEQ_MAX_TX_PER_BLOCK=8 
+AZTEC_EPOCH_DURATION=32 +REAL_VERIFIER=false +PROVER_REAL_PROOFS=false + +SEQ_BUILD_CHECKPOINT_IF_EMPTY=true +SEQ_BLOCK_DURATION_MS=6000 +LOG_LEVEL=verbose + +AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET=2 +AZTEC_LAG_IN_EPOCHS_FOR_RANDAO=2 + +VALIDATOR_REPLICAS=4 +VALIDATORS_PER_NODE=12 +PUBLISHERS_PER_VALIDATOR_KEY=2 +VALIDATOR_PUBLISHER_MNEMONIC_START_INDEX=5000 + +PUBLISHERS_PER_PROVER=2 +PROVER_PUBLISHER_MNEMONIC_START_INDEX=8000 + +BOT_TRANSFERS_REPLICAS=1 +BOT_TRANSFERS_TX_INTERVAL_SECONDS=8 +BOT_TRANSFERS_FOLLOW_CHAIN=PENDING + +BOT_SWAPS_REPLICAS=1 +BOT_SWAPS_FOLLOW_CHAIN=PENDING +BOT_SWAPS_TX_INTERVAL_SECONDS=8 + +REDEPLOY_ROLLUP_CONTRACTS=true + +DEBUG_P2P_INSTRUMENT_MESSAGES=true + +VALIDATOR_HA_REPLICAS=1 +VALIDATOR_RESOURCE_PROFILE="prod-spot" \ No newline at end of file diff --git a/spartan/environments/network-defaults.yml b/spartan/environments/network-defaults.yml index 4a93c5aef734..9291bc82795c 100644 --- a/spartan/environments/network-defaults.yml +++ b/spartan/environments/network-defaults.yml @@ -139,6 +139,8 @@ slasher: &slasher SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY: 10e18 # Penalty for proposing two different block or checkpoint proposal for the same position. SLASH_DUPLICATE_PROPOSAL_PENALTY: 10e18 + # Penalty for signing attestations for different proposals at the same slot. + SLASH_DUPLICATE_ATTESTATION_PENALTY: 10e18 # Penalty for unknown offenses. SLASH_UNKNOWN_PENALTY: 10e18 # Penalty for broadcasting an invalid block. 
@@ -240,6 +242,7 @@ networks: SLASH_INACTIVITY_PENALTY: 10e18 SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY: 10e18 SLASH_DUPLICATE_PROPOSAL_PENALTY: 10e18 + SLASH_DUPLICATE_ATTESTATION_PENALTY: 10e18 SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY: 10e18 SLASH_UNKNOWN_PENALTY: 10e18 SLASH_INVALID_BLOCK_PENALTY: 10e18 @@ -278,6 +281,7 @@ networks: SLASH_INACTIVITY_PENALTY: 10e18 SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY: 10e18 SLASH_DUPLICATE_PROPOSAL_PENALTY: 10e18 + SLASH_DUPLICATE_ATTESTATION_PENALTY: 10e18 SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY: 10e18 SLASH_UNKNOWN_PENALTY: 10e18 SLASH_INVALID_BLOCK_PENALTY: 10e18 @@ -334,6 +338,7 @@ networks: SLASH_INACTIVITY_PENALTY: 2000e18 SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY: 2000e18 SLASH_DUPLICATE_PROPOSAL_PENALTY: 2000e18 + SLASH_DUPLICATE_ATTESTATION_PENALTY: 2000e18 SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY: 2000e18 SLASH_UNKNOWN_PENALTY: 2000e18 SLASH_INVALID_BLOCK_PENALTY: 2000e18 diff --git a/spartan/environments/testnet-canary.env b/spartan/environments/testnet-canary.env deleted file mode 100644 index 5a643cf29ace..000000000000 --- a/spartan/environments/testnet-canary.env +++ /dev/null @@ -1,46 +0,0 @@ -CREATE_ETH_DEVNET=false -GCP_REGION=us-west1-a -CLUSTER=aztec-gke-public - -NAMESPACE=${NAMESPACE:-testnet} -NETWORK=testnet -TEST_ACCOUNTS=false -SPONSORED_FPC=true -SEQ_MIN_TX_PER_BLOCK=0 -SEQ_MAX_TX_PER_BLOCK=0 -TRANSACTIONS_DISABLED=true - -ETHEREUM_CHAIN_ID=11155111 -ETHEREUM_RPC_URLS=REPLACE_WITH_GCP_SECRET -ETHEREUM_CONSENSUS_HOST_URLS=REPLACE_WITH_GCP_SECRET -ETHEREUM_CONSENSUS_HOST_API_KEYS=REPLACE_WITH_GCP_SECRET -ETHEREUM_CONSENSUS_HOST_API_KEY_HEADERS=REPLACE_WITH_GCP_SECRET -FUNDING_PRIVATE_KEY=REPLACE_WITH_GCP_SECRET -LABS_INFRA_MNEMONIC=REPLACE_WITH_GCP_SECRET -ROLLUP_DEPLOYMENT_PRIVATE_KEY=REPLACE_WITH_GCP_SECRET -OTEL_COLLECTOR_ENDPOINT=REPLACE_WITH_GCP_SECRET -VERIFY_CONTRACTS=true -ETHERSCAN_API_KEY=REPLACE_WITH_GCP_SECRET - -SNAPSHOT_BUCKET_DIRECTORY=${SNAPSHOT_BUCKET_DIRECTORY:-testnet} 
-BLOB_BUCKET_DIRECTORY=${BLOB_BUCKET_DIRECTORY:-testnet/blobs} -R2_ACCESS_KEY_ID=REPLACE_WITH_GCP_SECRET -R2_SECRET_ACCESS_KEY=REPLACE_WITH_GCP_SECRET -DEPLOY_INTERNAL_BOOTNODE=false -BOT_TRANSFERS_REPLICAS=0 -BOT_SWAPS_REPLICAS=0 -FLUSH_ENTRY_QUEUE=false - -P2P_TX_POOL_DELETE_TXS_AFTER_REORG=true - -DEPLOY_ARCHIVAL_NODE=false - -RPC_INGRESS_ENABLED=false - -VALIDATOR_REPLICAS=4 -VALIDATORS_PER_NODE=12 -PUBLISHERS_PER_VALIDATOR_KEY=2 -VALIDATOR_PUBLISHER_MNEMONIC_START_INDEX=5000 - -PUBLISHERS_PER_PROVER=2 -PROVER_PUBLISHER_MNEMONIC_START_INDEX=8000 diff --git a/spartan/scripts/deploy_network.sh b/spartan/scripts/deploy_network.sh index cb7d3449dfb3..e6007c3fd3f4 100755 --- a/spartan/scripts/deploy_network.sh +++ b/spartan/scripts/deploy_network.sh @@ -484,6 +484,7 @@ SLASH_PRUNE_PENALTY = ${SLASH_PRUNE_PENALTY:-null} SLASH_DATA_WITHHOLDING_PENALTY = ${SLASH_DATA_WITHHOLDING_PENALTY:-null} SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY = ${SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY:-null} SLASH_DUPLICATE_PROPOSAL_PENALTY = ${SLASH_DUPLICATE_PROPOSAL_PENALTY:-null} +SLASH_DUPLICATE_ATTESTATION_PENALTY = ${SLASH_DUPLICATE_ATTESTATION_PENALTY:-null} SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY = ${SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY:-null} SLASH_UNKNOWN_PENALTY = ${SLASH_UNKNOWN_PENALTY:-null} SLASH_INVALID_BLOCK_PENALTY = ${SLASH_INVALID_BLOCK_PENALTY:-null} diff --git a/spartan/scripts/k8s_enricher.ts b/spartan/scripts/k8s_enricher.ts index 127e35072f66..529803c6a4fc 100755 --- a/spartan/scripts/k8s_enricher.ts +++ b/spartan/scripts/k8s_enricher.ts @@ -35,6 +35,12 @@ if (cacheLogMode) { const namespace = args[0]; +// Previously this also ran, then Alex got bombarded running hundreds of prover agents. +// This is now set on the relevant tests. 
+if (process.env.K8S_ENRICHER !== '1') { + process.exit(0); +} + if (!namespace) { console.error('Usage: k8s_enricher.ts [--cache-log]'); process.exit(1); diff --git a/spartan/terraform/cloudflare/main.tf b/spartan/terraform/cloudflare/main.tf index 469f7f93afd1..4925a3ae535c 100644 --- a/spartan/terraform/cloudflare/main.tf +++ b/spartan/terraform/cloudflare/main.tf @@ -37,24 +37,42 @@ resource "cloudflare_r2_custom_domain" "aztec_labs_snapshots_com" { enabled = true } +locals { + top_level_folders = toset([ + "devnet", + "ignition-sepolia", + "next-net", + "staging-ignition", + "staging-public", + ]) +} + # Lifecycle rules to automatically delete old objects resource "cloudflare_r2_bucket_lifecycle" "cleanup" { account_id = var.R2_ACCOUNT_ID bucket_name = cloudflare_r2_bucket.bucket.name - rules = [ - { - id = "delete-snapshot-files" - enabled = true - conditions = { suffix = ".db" } - delete_objects_transition = { days = var.SNAPSHOT_RETENTION_DAYS } - }, - { - id = "delete-blob-files" - enabled = true - conditions = { suffix = ".data" } - delete_objects_transition = { days = var.BLOB_RETENTION_DAYS } - }, - ] + rules = flatten([ + for folder in local.top_level_folders : [ + { + id = "delete-snapshots-${folder}" + enabled = true + conditions = { prefix = "${folder}/aztec" } + delete_objects_transition = { days = var.SNAPSHOT_RETENTION_DAYS } + }, + { + id = "delete-blobs-${folder}" + enabled = true + conditions = { prefix = "${folder}/blobs" } + delete_objects_transition = { days = var.BLOB_RETENTION_DAYS } + }, + { + id = "delete-txs-${folder}" + enabled = true + conditions = { prefix = "${folder}/txs" } + delete_objects_transition = { days = var.TX_RETENTION_DAYS } + }, + ] + ]) } diff --git a/spartan/terraform/cloudflare/variables.tf b/spartan/terraform/cloudflare/variables.tf index 32910b8304dd..0b2ab062cf0d 100644 --- a/spartan/terraform/cloudflare/variables.tf +++ b/spartan/terraform/cloudflare/variables.tf @@ -36,3 +36,9 @@ variable 
"SNAPSHOT_RETENTION_DAYS" { default = 7 description = "Number of days to retain snapshots" } + +variable "TX_RETENTION_DAYS" { + type = number + default = 1 + description = "Number of days to retain txs" +} diff --git a/spartan/terraform/deploy-aztec-infra/main.tf b/spartan/terraform/deploy-aztec-infra/main.tf index 8f98ebd35d07..1222f613773f 100644 --- a/spartan/terraform/deploy-aztec-infra/main.tf +++ b/spartan/terraform/deploy-aztec-infra/main.tf @@ -180,6 +180,7 @@ locals { "validator.slash.dataWithholdingPenalty" = var.SLASH_DATA_WITHHOLDING_PENALTY "validator.slash.proposeInvalidAttestationsPenalty" = var.SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY "validator.slash.duplicateProposalPenalty" = var.SLASH_DUPLICATE_PROPOSAL_PENALTY + "validator.slash.duplicateAttestationPenalty" = var.SLASH_DUPLICATE_ATTESTATION_PENALTY "validator.slash.attestDescendantOfInvalidPenalty" = var.SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY "validator.slash.unknownPenalty" = var.SLASH_UNKNOWN_PENALTY "validator.slash.invalidBlockPenalty" = var.SLASH_INVALID_BLOCK_PENALTY diff --git a/spartan/terraform/deploy-aztec-infra/variables.tf b/spartan/terraform/deploy-aztec-infra/variables.tf index 3edf41f7bcbe..d5d412557818 100644 --- a/spartan/terraform/deploy-aztec-infra/variables.tf +++ b/spartan/terraform/deploy-aztec-infra/variables.tf @@ -405,6 +405,12 @@ variable "SLASH_DUPLICATE_PROPOSAL_PENALTY" { nullable = true } +variable "SLASH_DUPLICATE_ATTESTATION_PENALTY" { + description = "The slash duplicate attestation penalty" + type = string + nullable = true +} + variable "SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY" { description = "The slash attest descendant of invalid penalty" type = string diff --git a/yarn-project/.claude/rules/typescript-style.md b/yarn-project/.claude/rules/typescript-style.md index f6819c771fd4..5ea500e2e736 100644 --- a/yarn-project/.claude/rules/typescript-style.md +++ b/yarn-project/.claude/rules/typescript-style.md @@ -1,3 +1,7 @@ +--- +globs: 
"*.ts,*.tsx,*.mts,*.cts" +--- + # TypeScript Code Style ## Type Safety diff --git a/yarn-project/CLAUDE.md b/yarn-project/CLAUDE.md index 79c8fd9b0dd3..e36bea16575c 100644 --- a/yarn-project/CLAUDE.md +++ b/yarn-project/CLAUDE.md @@ -75,7 +75,7 @@ For long-running tests or verbose output, redirect to a temp file and use native yarn workspace @aztec/ test src/file.test.ts > /tmp/test-output.log 2>&1 ``` -Then use **Read** or **Grep** to examine `/tmp/test-output.log`. Never use `| tail` or `| head` to limit output—use native tools instead. +Then use **Read** or **Grep** to examine `/tmp/test-output.log`. Never use `| tail` or `| head` to limit output—use native tools instead. Never append `; echo "EXIT: $?"` or similar—the Bash tool already reports exit codes directly. ### End-to-End Tests diff --git a/yarn-project/end-to-end/src/bench/client_flows/client_flows_benchmark.ts b/yarn-project/end-to-end/src/bench/client_flows/client_flows_benchmark.ts index 8626179a25eb..2b516a862e8e 100644 --- a/yarn-project/end-to-end/src/bench/client_flows/client_flows_benchmark.ts +++ b/yarn-project/end-to-end/src/bench/client_flows/client_flows_benchmark.ts @@ -138,10 +138,10 @@ export class ClientFlowsBenchmark { }); await this.applyBaseSetup(); - await this.context.aztecNodeService!.setConfig({ feeRecipient: this.sequencerAddress, coinbase: this.coinbase }); + await this.context.aztecNodeService.setConfig({ feeRecipient: this.sequencerAddress, coinbase: this.coinbase }); const rollupContract = RollupContract.getFromConfig(this.context.config); - this.chainMonitor = new ChainMonitor(rollupContract, this.context.dateProvider!, this.logger, 200).start(); + this.chainMonitor = new ChainMonitor(rollupContract, this.context.dateProvider, this.logger, 200).start(); return this; } @@ -207,7 +207,7 @@ export class ClientFlowsBenchmark { const [{ address: adminAddress }, { address: sequencerAddress }] = deployedAccounts; this.adminWallet = this.context.wallet; - this.aztecNode = 
this.context.aztecNodeService!; + this.aztecNode = this.context.aztecNodeService; this.cheatCodes = this.context.cheatCodes; this.adminAddress = adminAddress; @@ -235,8 +235,8 @@ export class ClientFlowsBenchmark { this.feeJuiceContract = FeeJuiceContract.at(ProtocolContractAddress.FeeJuice, this.adminWallet); this.feeJuiceBridgeTestHarness = await FeeJuicePortalTestingHarnessFactory.create({ - aztecNode: this.context.aztecNodeService!, - aztecNodeAdmin: this.context.aztecNodeService!, + aztecNode: this.context.aztecNodeService, + aztecNodeAdmin: this.context.aztecNodeService, l1Client: this.context.deployL1ContractsValues.l1Client, wallet: this.adminWallet, logger: this.logger, diff --git a/yarn-project/end-to-end/src/bench/utils.ts b/yarn-project/end-to-end/src/bench/utils.ts index e79f893be745..370baefc8b6c 100644 --- a/yarn-project/end-to-end/src/bench/utils.ts +++ b/yarn-project/end-to-end/src/bench/utils.ts @@ -28,7 +28,7 @@ export async function benchmarkSetup( const contract = await BenchmarkingContract.deploy(context.wallet).send({ from: defaultAccountAddress }); context.logger.info(`Deployed benchmarking contract at ${contract.address}`); const sequencer = (context.aztecNode as AztecNodeService).getSequencer()!; - const telemetry = context.telemetryClient! 
as BenchmarkTelemetryClient; + const telemetry = context.telemetryClient as BenchmarkTelemetryClient; context.logger.warn(`Cleared benchmark data points from setup`); telemetry.clear(); const origTeardown = context.teardown.bind(context); diff --git a/yarn-project/end-to-end/src/composed/ha/e2e_ha_full.test.ts b/yarn-project/end-to-end/src/composed/ha/e2e_ha_full.test.ts index ff374b8f4a40..15de724cf9b9 100644 --- a/yarn-project/end-to-end/src/composed/ha/e2e_ha_full.test.ts +++ b/yarn-project/end-to-end/src/composed/ha/e2e_ha_full.test.ts @@ -374,14 +374,26 @@ describe('HA Full Setup', () => { }); it('should distribute work across multiple HA nodes', async () => { - logger.info('Testing that multiple HA nodes are participating and work is distributed'); + logger.info('Testing HA resilience by killing nodes after they produce blocks'); - // Deploy multiple contracts to generate several blocks - // We send transactions sequentially and wait for each to be mined to ensure we get distinct blocks - const blockCount = 5; + // We'll produce NODE_COUNT blocks (5 total with NODE_COUNT=5) + // Each node produces exactly 1 block, and we kill it after it produces + // The last remaining node will produce the final block + const blockCount = NODE_COUNT; const receipts = []; + const killedNodes: number[] = []; // Track indices of killed nodes + const blockProducers = new Map(); // Map block index to node ID + let previousBlockNumber: number | undefined; + + const nodeIds: string[] = []; + for (const service of haNodeServices) { + nodeIds.push((await service.getConfig()).nodeId); + } for (let i = 0; i < blockCount; i++) { + logger.info(`\n=== Producing block ${i + 1}/${blockCount} ===`); + logger.info(`Active nodes: ${haNodeServices.length - killedNodes.length}/${NODE_COUNT}`); + const deployer = new ContractDeployer(StatefulTestContractArtifact, wallet); const receipt = await deployer.deploy(ownerAddress, ownerAddress, i + 100).send({ from: ownerAddress, @@ -393,18 +405,77 @@ 
describe('HA Full Setup', () => { expect(receipt.blockNumber).toBeDefined(); + // Verify this transaction is in a different block than the previous one + if (previousBlockNumber !== undefined) { + expect(receipt.blockNumber).toBeGreaterThan(previousBlockNumber); + } + + previousBlockNumber = receipt.blockNumber; receipts.push(receipt); - logger.info(`Block ${i + 1}/${blockCount} created: ${receipt.blockNumber}`); + + // Find which node produced this block + const [block] = await aztecNode.getCheckpointedBlocks(receipt.blockNumber!, 1); + if (!block) { + throw new Error(`Block ${receipt.blockNumber} not found`); + } + const slotNumber = BigInt(block.block.header.globalVariables.slotNumber); + const duties = await getValidatorDuties(mainPool, slotNumber); + const blockProposalDuty = duties.find(d => d.dutyType === 'BLOCK_PROPOSAL'); + + if (!blockProposalDuty) { + throw new Error(`No block proposal duty found for slot ${slotNumber}`); + } + + blockProducers.set(i, blockProposalDuty.nodeId); + logger.info(`Block ${receipt.blockNumber} produced by node ${blockProposalDuty.nodeId}`); + + // Kill the node that produced this block, unless it's the last block + if (i < blockCount - 1) { + const producerNodeId = blockProposalDuty.nodeId; + const nodeIndexToKill = nodeIds.findIndex(nodeId => nodeId === producerNodeId); + + if (nodeIndexToKill === -1) { + throw new Error(`Could not find active node with ID ${producerNodeId}`); + } + + logger.info(`Killing node ${producerNodeId} that produced this block`); + await haNodeServices[nodeIndexToKill].stop(); + killedNodes.push(nodeIndexToKill); + } else { + logger.info(`Last block produced.`); + } + + logger.info(`Block ${i + 1}/${blockCount} completed. 
Killed nodes: ${killedNodes.length}/${NODE_COUNT}`); } - // Verify we actually got 5 distinct blocks + // Verify we got the expected number of distinct blocks const blockNumbers = receipts.map(r => r.blockNumber!).sort((a, b) => a - b); const uniqueBlockNumbers = new Set(blockNumbers); expect(uniqueBlockNumbers.size).toBe(blockCount); logger.info(`Created ${uniqueBlockNumbers.size} distinct blocks: ${Array.from(uniqueBlockNumbers).join(', ')}`); - const quorum = Math.floor((COMMITTEE_SIZE * 2) / 3) + 1; + // Verify each node produced at least 1 block + const nodeBlockCounts = new Map(); + for (const nodeId of blockProducers.values()) { + const count = nodeBlockCounts.get(nodeId) || 0; + nodeBlockCounts.set(nodeId, count + 1); + } + + logger.info(`Block production by node: ${JSON.stringify(Array.from(nodeBlockCounts.entries()))}`); + + // Verify: each node should have produced at least 1 block + // (there may be empty blocks produced during node transitions) + for (const [nodeId, count] of nodeBlockCounts.entries()) { + expect(count).toBeGreaterThanOrEqual(1); + logger.info(`Node ${nodeId} produced ${count} block(s) as expected`); + } + + // Verify all nodes participated (NODE_COUNT nodes total) + expect(nodeBlockCounts.size).toBe(NODE_COUNT); + logger.info(`All ${NODE_COUNT} nodes participated in block production`); + // Verify no double-signing occurred across all blocks + const quorum = Math.floor((COMMITTEE_SIZE * 2) / 3) + 1; for (const receipt of receipts) { const [block] = await aztecNode.getCheckpointedBlocks(receipt.blockNumber!, 1); if (!block) { @@ -424,7 +495,16 @@ describe('HA Full Setup', () => { ); // P2P LAYER CHECK: Verify only one attestation per validator was sent over P2P - const p2pNode = haNodeServices[0]; + // Find first active node for P2P check + let p2pNodeIndex = 0; + for (let idx = 0; idx < haNodeServices.length; idx++) { + if (!killedNodes.includes(idx)) { + p2pNodeIndex = idx; + break; + } + } + + const p2pNode = 
haNodeServices[p2pNodeIndex]; const p2p = p2pNode.getP2P(); const slot = SlotNumber(Number(slotNumber)); diff --git a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts index 249568f38980..ab5821667a1c 100644 --- a/yarn-project/end-to-end/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +++ b/yarn-project/end-to-end/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts @@ -92,7 +92,7 @@ export class BlacklistTokenContractTest { }); this.cheatCodes = this.context.cheatCodes; - this.aztecNode = this.context.aztecNodeService!; + this.aztecNode = this.context.aztecNodeService; this.sequencer = this.context.sequencer!; this.wallet = this.context.wallet; this.adminAddress = deployedAccounts[0].address; diff --git a/yarn-project/end-to-end/src/e2e_block_building.test.ts b/yarn-project/end-to-end/src/e2e_block_building.test.ts index c9650b63bff4..24db07503c15 100644 --- a/yarn-project/end-to-end/src/e2e_block_building.test.ts +++ b/yarn-project/end-to-end/src/e2e_block_building.test.ts @@ -38,9 +38,9 @@ describe('e2e_block_building', () => { let minterAddress: AztecAddress; let aztecNode: AztecNode; - let aztecNodeAdmin: AztecNodeAdmin | undefined; + let aztecNodeAdmin: AztecNodeAdmin; let sequencer: TestSequencerClient; - let watcher: AnvilTestWatcher | undefined; + let watcher: AnvilTestWatcher; let teardown: () => Promise; afterEach(() => { @@ -68,11 +68,11 @@ describe('e2e_block_building', () => { }); beforeEach(async () => { - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: 1 }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: 1 }); }); afterEach(async () => { - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: 1 }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: 1 }); // Clean up any mocks jest.restoreAllMocks(); }); @@ -90,7 +90,7 @@ describe('e2e_block_building', () => { // We add a delay to every 
public tx processing logger.info(`Updating aztec node config`); - await aztecNodeAdmin!.setConfig({ + await aztecNodeAdmin.setConfig({ fakeProcessingDelayPerTxMs: 300, minTxsPerBlock: 1, maxTxsPerBlock: TX_COUNT, @@ -126,7 +126,7 @@ describe('e2e_block_building', () => { // Assemble N contract deployment txs // We need to create them sequentially since we cannot have parallel calls to a circuit const TX_COUNT = 8; - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: TX_COUNT }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: TX_COUNT }); // Need to have value > 0, so adding + 1 // We need to do so, because noir currently will fail if the multiscalarmul is in an `if` @@ -171,7 +171,7 @@ describe('e2e_block_building', () => { // Assemble N contract deployment txs // We need to create them sequentially since we cannot have parallel calls to a circuit const TX_COUNT = 4; - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: TX_COUNT }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: TX_COUNT }); const methods = times(TX_COUNT, i => contract.methods.increment_public_value(ownerAddress, i)); const provenTxs = []; @@ -198,7 +198,7 @@ describe('e2e_block_building', () => { const contract = await StatefulTestContract.deploy(wallet, ownerAddress, 1).send({ from: ownerAddress }); const another = await TestContract.deploy(wallet).send({ from: ownerAddress }); - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: 16, maxTxsPerBlock: 16 }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: 16, maxTxsPerBlock: 16 }); // Flood nullifiers to grow the size of the nullifier tree. 
// Can probably do this more efficiently by batching multiple emit_nullifier calls @@ -211,7 +211,7 @@ describe('e2e_block_building', () => { await Promise.all(sentNullifierTxs); logger.info(`Nullifier txs sent`); - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: 4, maxTxsPerBlock: 4 }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: 4, maxTxsPerBlock: 4 }); // Now send public functions const TX_COUNT = 128; @@ -228,7 +228,7 @@ describe('e2e_block_building', () => { it.skip('can call public function from different tx in same block as deployed', async () => { // Ensure both txs will land on the same block - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: 2 }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: 2 }); // Deploy a contract in the first transaction // In the same block, call a public method on the contract @@ -501,7 +501,7 @@ describe('e2e_block_building', () => { }); logger.info('Updating txs per block to 4'); - await aztecNodeAdmin!.setConfig({ minTxsPerBlock: 4, maxTxsPerBlock: 4 }); + await aztecNodeAdmin.setConfig({ minTxsPerBlock: 4, maxTxsPerBlock: 4 }); logger.info('Spamming the network with public txs'); const txs = []; @@ -593,7 +593,7 @@ describe('e2e_block_building', () => { await sleep(1000); } - watcher!.setIsMarkingAsProven(false); + watcher.setIsMarkingAsProven(false); }); afterEach(() => teardown()); diff --git a/yarn-project/end-to-end/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts b/yarn-project/end-to-end/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts index da544278e802..320e0ee819f0 100644 --- a/yarn-project/end-to-end/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +++ b/yarn-project/end-to-end/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts @@ -109,16 +109,16 @@ export class CrossChainMessagingTest { async applyBaseSetup() { // Set up base context fields - this.aztecNode = this.context.aztecNodeService!; + this.aztecNode = this.context.aztecNodeService; this.wallet = 
this.context.wallet; this.aztecNodeConfig = this.context.config; this.cheatCodes = this.context.cheatCodes; this.deployL1ContractsValues = this.context.deployL1ContractsValues; - this.aztecNodeAdmin = this.context.aztecNodeService!; + this.aztecNodeAdmin = this.context.aztecNodeService; if (this.requireEpochProven) { // Turn off the watcher to prevent it from keep marking blocks as proven. - this.context.watcher!.setIsMarkingAsProven(false); + this.context.watcher.setIsMarkingAsProven(false); } // Deploy 3 accounts diff --git a/yarn-project/end-to-end/src/e2e_cross_chain_messaging/l1_to_l2.test.ts b/yarn-project/end-to-end/src/e2e_cross_chain_messaging/l1_to_l2.test.ts index 8011c01637a5..e83b195c5f84 100644 --- a/yarn-project/end-to-end/src/e2e_cross_chain_messaging/l1_to_l2.test.ts +++ b/yarn-project/end-to-end/src/e2e_cross_chain_messaging/l1_to_l2.test.ts @@ -172,7 +172,7 @@ describe('e2e_cross_chain_messaging l1_to_l2', () => { // Stop proving const lastProven = await aztecNode.getBlockNumber(); log.warn(`Stopping proof submission at block ${lastProven} to allow drift`); - t.context.watcher!.setIsMarkingAsProven(false); + t.context.watcher.setIsMarkingAsProven(false); // Mine several blocks to ensure drift log.warn(`Mining blocks to allow drift`); @@ -214,14 +214,14 @@ describe('e2e_cross_chain_messaging l1_to_l2', () => { // On private, we simulate the tx locally and check that we get a missing message error, then we advance to the next block await expect(() => consume().simulate({ from: user1Address })).rejects.toThrow(/No L1 to L2 message found/); await tryAdvanceBlock(); - await t.context.watcher!.markAsProven(); + await t.context.watcher.markAsProven(); } else { // On public, we actually send the tx and check that it reverts due to the missing message. // This advances the block too as a side-effect. 
Note that we do not rely on a simulation since the cross chain messages // do not get added at the beginning of the block during node_simulatePublicCalls (maybe they should?). const receipt = await consume().send({ from: user1Address, wait: { dontThrowOnRevert: true } }); expect(receipt.executionResult).toEqual(TxExecutionResult.APP_LOGIC_REVERTED); - await t.context.watcher!.markAsProven(); + await t.context.watcher.markAsProven(); } }); diff --git a/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_test.ts b/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_test.ts index 6cc58dff15ba..7ff44d95f17e 100644 --- a/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_test.ts +++ b/yarn-project/end-to-end/src/e2e_deploy_contract/deploy_test.ts @@ -30,9 +30,9 @@ export class DeployTest { fundSponsoredFPC: true, skipAccountDeployment: true, }); - this.aztecNode = this.context.aztecNodeService!; + this.aztecNode = this.context.aztecNodeService; this.wallet = this.context.wallet; - this.aztecNodeAdmin = this.context.aztecNodeService!; + this.aztecNodeAdmin = this.context.aztecNodeService; await this.applyInitialAccount(); return this; } diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts index abbee51678c3..01ac7e29aec0 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_invalidate_block.parallel.test.ts @@ -172,7 +172,7 @@ describe('e2e_epochs/epochs_invalidate_block', () => { logger.warn(`Transaction included in block ${receipt.blockNumber}`); // Check that we have tagged an offense for that - const offenses = await context.aztecNodeAdmin!.getSlashOffenses('all'); + const offenses = await context.aztecNodeAdmin.getSlashOffenses('all'); expect(offenses.length).toBeGreaterThan(0); const invalidBlockOffense = offenses.find(o => o.offenseType === 
OffenseType.PROPOSED_INSUFFICIENT_ATTESTATIONS); expect(invalidBlockOffense).toBeDefined(); diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_l1_reorgs.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_l1_reorgs.test.ts index 080dec8d4103..58bbab1d6806 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_l1_reorgs.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_l1_reorgs.test.ts @@ -220,7 +220,7 @@ describe('e2e_epochs/epochs_l1_reorgs', () => { await retryUntil(() => getCheckpointNumber(node).then(b => b === CHECKPOINT_NUMBER), 'node sync', 10, 0.1); logger.warn(`Reached checkpoint ${CHECKPOINT_NUMBER}. Stopping block production.`); - await context.aztecNodeAdmin!.setConfig({ minTxsPerBlock: 100 }); + await context.aztecNodeAdmin.setConfig({ minTxsPerBlock: 100 }); // Remove the L2 block from L1 const l1BlocksToReorg = monitor.l1BlockNumber - l1BlockNumber + 1; @@ -248,7 +248,7 @@ describe('e2e_epochs/epochs_l1_reorgs', () => { sequencerDelayer.cancelNextTx(); await retryUntil(() => sequencerDelayer.getCancelledTxs().length, 'next block', L2_SLOT_DURATION_IN_S * 2, 0.1); const [l2BlockTx] = sequencerDelayer.getCancelledTxs(); - await context.aztecNodeAdmin!.setConfig({ minTxsPerBlock: 100 }); + await context.aztecNodeAdmin.setConfig({ minTxsPerBlock: 100 }); // Save the L1 block number when the L2 block would have been mined const l1BlockNumber = monitor.l1BlockNumber; diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_manual_rollback.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_manual_rollback.test.ts index e546a3554fdd..5110d74959fc 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_manual_rollback.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_manual_rollback.test.ts @@ -43,7 +43,7 @@ describe('e2e_epochs/manual_rollback', () => { await retryUntil(async () => await node.getBlockNumber().then(b => b >= 4), 'sync to 4', 10, 0.1); logger.info(`Synced to checkpoint 4. 
Pausing syncing and rolling back the chain.`); - await context.aztecNodeAdmin!.pauseSync(); + await context.aztecNodeAdmin.pauseSync(); context.sequencer?.updateConfig({ minTxsPerBlock: 100 }); // Ensure no new blocks are produced await context.cheatCodes.eth.reorg(2); const checkpointAfterReorg = await rollup.getCheckpointNumber(); @@ -52,7 +52,7 @@ describe('e2e_epochs/manual_rollback', () => { logger.info(`Manually rolling back node to ${checkpointAfterReorg - 1}.`); const blockAfterReorg = Number(checkpointAfterReorg - 1); - await context.aztecNodeAdmin!.rollbackTo(blockAfterReorg); + await context.aztecNodeAdmin.rollbackTo(blockAfterReorg); expect(await node.getBlockNumber()).toEqual(blockAfterReorg); logger.info(`Waiting for node to re-sync to ${blockAfterReorg}.`); diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_proof_public_cross_chain.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_proof_public_cross_chain.test.ts index 80bc26693e5e..9fbd0a9a4fda 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_proof_public_cross_chain.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_proof_public_cross_chain.test.ts @@ -40,7 +40,7 @@ describe('e2e_epochs/epochs_proof_public_cross_chain', () => { it('submits proof with a tx with public l1-to-l2 message claim', async () => { // Deploy a contract that consumes L1 to L2 messages - await context.aztecNodeAdmin!.setConfig({ minTxsPerBlock: 0 }); + await context.aztecNodeAdmin.setConfig({ minTxsPerBlock: 0 }); logger.warn(`Deploying test contract`); const testContract = await TestContract.deploy(context.wallet).send({ from: context.accounts[0] }); logger.warn(`Test contract deployed at ${testContract.address}`); diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts index 894e488e0802..2c4d7458d7c6 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts 
@@ -405,7 +405,7 @@ export class EpochsTestContext { privateKeyToAccount(this.getNextPrivateKey()), this.l1Client.chain, ), - this.context.dateProvider!, + this.context.dateProvider, { ethereumSlotDuration: this.L1_BLOCK_TIME_IN_S }, ); expect(await client.getBalance({ address: client.account.address })).toBeGreaterThan(0n); diff --git a/yarn-project/end-to-end/src/e2e_fees/failures.test.ts b/yarn-project/end-to-end/src/e2e_fees/failures.test.ts index 42394548cda7..0c4c54c5032c 100644 --- a/yarn-project/end-to-end/src/e2e_fees/failures.test.ts +++ b/yarn-project/end-to-end/src/e2e_fees/failures.test.ts @@ -34,7 +34,7 @@ describe('e2e_fees failures', () => { // Prove up until the current state by just marking it as proven. // Then turn off the watcher to prevent it from keep proving - await t.context.watcher!.trigger(); + await t.context.watcher.trigger(); await t.cheatCodes.rollup.advanceToNextEpoch(); await t.catchUpProvenChain(); t.setIsMarkingAsProven(false); @@ -78,7 +78,7 @@ describe('e2e_fees failures', () => { await expectMapping(t.getGasBalanceFn, [aliceAddress, bananaFPC.address], [initialAliceGas, initialFPCGas]); // We wait until the proven chain is caught up so all previous fees are paid out. - await t.context.watcher!.trigger(); + await t.context.watcher.trigger(); await t.cheatCodes.rollup.advanceToNextEpoch(); await t.catchUpProvenChain(); @@ -100,7 +100,7 @@ describe('e2e_fees failures', () => { // @note There is a potential race condition here if other tests send transactions that get into the same // epoch and thereby pays out fees at the same time (when proven). 
- await t.context.watcher!.trigger(); + await t.context.watcher.trigger(); await t.cheatCodes.rollup.advanceToNextEpoch(); await t.catchUpProvenChain(); diff --git a/yarn-project/end-to-end/src/e2e_fees/fees_test.ts b/yarn-project/end-to-end/src/e2e_fees/fees_test.ts index 835e8e828e47..0b32970c3541 100644 --- a/yarn-project/end-to-end/src/e2e_fees/fees_test.ts +++ b/yarn-project/end-to-end/src/e2e_fees/fees_test.ts @@ -113,7 +113,7 @@ export class FeesTest { }); this.rollupContract = RollupContract.getFromConfig(this.context.config); - this.chainMonitor = new ChainMonitor(this.rollupContract, this.context.dateProvider!, this.logger, 200).start(); + this.chainMonitor = new ChainMonitor(this.rollupContract, this.context.dateProvider, this.logger, 200).start(); await this.applyBaseSetup(); @@ -126,7 +126,7 @@ export class FeesTest { } setIsMarkingAsProven(b: boolean) { - this.context.watcher!.setIsMarkingAsProven(b); + this.context.watcher.setIsMarkingAsProven(b); } async catchUpProvenChain() { @@ -188,8 +188,8 @@ export class FeesTest { }); this.wallet = this.context.wallet; - this.aztecNode = this.context.aztecNodeService!; - this.aztecNodeAdmin = this.context.aztecNodeService!; + this.aztecNode = this.context.aztecNodeService; + this.aztecNodeAdmin = this.context.aztecNodeService; this.gasSettings = GasSettings.default({ maxFeesPerGas: (await this.aztecNode.getCurrentMinFees()).mul(2) }); this.cheatCodes = this.context.cheatCodes; this.accounts = deployedAccounts.map(a => a.address); @@ -221,8 +221,8 @@ export class FeesTest { ); this.feeJuiceBridgeTestHarness = await FeeJuicePortalTestingHarnessFactory.create({ - aztecNode: this.context.aztecNodeService!, - aztecNodeAdmin: this.context.aztecNodeService!, + aztecNode: this.context.aztecNodeService, + aztecNodeAdmin: this.context.aztecNodeService, l1Client: this.context.deployL1ContractsValues.l1Client, wallet: this.wallet, logger: this.logger, diff --git 
a/yarn-project/end-to-end/src/e2e_nested_contract/nested_contract_test.ts b/yarn-project/end-to-end/src/e2e_nested_contract/nested_contract_test.ts index e60e6187d756..5632daf6c133 100644 --- a/yarn-project/end-to-end/src/e2e_nested_contract/nested_contract_test.ts +++ b/yarn-project/end-to-end/src/e2e_nested_contract/nested_contract_test.ts @@ -44,7 +44,7 @@ export class NestedContractTest { }); this.wallet = this.context.wallet; [{ address: this.defaultAccountAddress }] = deployedAccounts; - this.aztecNode = this.context.aztecNodeService!; + this.aztecNode = this.context.aztecNodeService; this.logger.info('Public deploy accounts'); await publicDeployAccounts(this.wallet, [this.defaultAccountAddress]); diff --git a/yarn-project/end-to-end/src/e2e_p2p/add_rollup.test.ts b/yarn-project/end-to-end/src/e2e_p2p/add_rollup.test.ts index 97cc8688614f..24f282843330 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/add_rollup.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/add_rollup.test.ts @@ -90,7 +90,7 @@ describe('e2e_p2p_add_rollup', () => { l1TxUtils = createL1TxUtilsFromViemWallet(t.ctx.deployL1ContractsValues.l1Client); - t.ctx.watcher!.setIsMarkingAsProven(false); + t.ctx.watcher.setIsMarkingAsProven(false); }); afterAll(async () => { @@ -235,7 +235,7 @@ describe('e2e_p2p_add_rollup', () => { t.logger.info('Creating nodes'); nodes = await createNodes( { ...t.ctx.aztecNodeConfig, governanceProposerPayload: newPayloadAddress }, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, @@ -251,7 +251,7 @@ describe('e2e_p2p_add_rollup', () => { BOOT_NODE_UDP_PORT + NUM_VALIDATORS + 1, t.bootstrapNodeEnr, ATTESTER_PRIVATE_KEYS_START_INDEX + NUM_VALIDATORS + 1, - { dateProvider: t.ctx.dateProvider! 
}, + { dateProvider: t.ctx.dateProvider }, t.prefilledPublicData, `${DATA_DIR}-prover`, shouldCollectMetrics(), @@ -357,7 +357,7 @@ describe('e2e_p2p_add_rollup', () => { const leafId = getL2ToL1MessageLeafId(l2ToL1MessageResult); // We need to advance to the next epoch so that the out hash will be set to outbox when the epoch is proven. - const cheatcodes = RollupCheatCodes.create(l1RpcUrls, l1ContractAddresses, t.ctx.dateProvider!); + const cheatcodes = RollupCheatCodes.create(l1RpcUrls, l1ContractAddresses, t.ctx.dateProvider); await cheatcodes.advanceToEpoch(EpochNumber(epoch + 1)); await waitForProven(node, l2OutgoingReceipt, { provenTimeout: 300 }); @@ -551,7 +551,7 @@ describe('e2e_p2p_add_rollup', () => { nodes = await createNodes( newConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, @@ -566,7 +566,7 @@ describe('e2e_p2p_add_rollup', () => { BOOT_NODE_UDP_PORT + NUM_VALIDATORS + 1, t.bootstrapNodeEnr, ATTESTER_PRIVATE_KEYS_START_INDEX + NUM_VALIDATORS + 1, - { dateProvider: t.ctx.dateProvider! 
}, + { dateProvider: t.ctx.dateProvider }, prefilledPublicData, `${DATA_DIR_NEW}-prover`, shouldCollectMetrics(), diff --git a/yarn-project/end-to-end/src/e2e_p2p/broadcasted_invalid_block_proposal_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/broadcasted_invalid_block_proposal_slash.test.ts index fc904fffddc5..ed3b62b21c65 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/broadcasted_invalid_block_proposal_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/broadcasted_invalid_block_proposal_slash.test.ts @@ -118,7 +118,7 @@ describe('e2e_p2p_broadcasted_invalid_block_proposal_slash', () => { }; const invalidProposerNodes = await createNodes( invalidProposerConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, 1, BOOT_NODE_UDP_PORT, @@ -134,7 +134,7 @@ describe('e2e_p2p_broadcasted_invalid_block_proposal_slash', () => { // Create remaining honest nodes const honestNodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS - 1, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts index 371067f8d77d..917dba22df30 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts @@ -120,7 +120,7 @@ describe('e2e_p2p_data_withholding_slash', () => { t.logger.warn('Creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, @@ -165,7 +165,7 @@ describe('e2e_p2p_data_withholding_slash', () => { t.logger.warn('Re-creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts 
b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts new file mode 100644 index 000000000000..2f68d908d458 --- /dev/null +++ b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts @@ -0,0 +1,249 @@ +import type { AztecNodeService } from '@aztec/aztec-node'; +import type { TestAztecNodeService } from '@aztec/aztec-node/test'; +import { EthAddress } from '@aztec/aztec.js/addresses'; +import { EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; +import { bufferToHex } from '@aztec/foundation/string'; +import { OffenseType } from '@aztec/slasher'; + +import { jest } from '@jest/globals'; +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import { privateKeyToAccount } from 'viem/accounts'; + +import { shouldCollectMetrics } from '../fixtures/fixtures.js'; +import { ATTESTER_PRIVATE_KEYS_START_INDEX, createNode } from '../fixtures/setup_p2p_test.js'; +import { getPrivateKeyFromIndex } from '../fixtures/utils.js'; +import { P2PNetworkTest } from './p2p_network.js'; +import { awaitCommitteeExists, awaitOffenseDetected } from './shared.js'; + +const TEST_TIMEOUT = 600_000; // 10 minutes + +jest.setTimeout(TEST_TIMEOUT); + +const NUM_VALIDATORS = 4; +const BOOT_NODE_UDP_PORT = 4600; +const COMMITTEE_SIZE = NUM_VALIDATORS; +const ETHEREUM_SLOT_DURATION = 8; +const AZTEC_SLOT_DURATION = ETHEREUM_SLOT_DURATION * 3; +const BLOCK_DURATION = 4; + +const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'duplicate-attestation-slash-')); + +/** + * Test that slashing occurs when a validator sends duplicate attestations (equivocation). + * + * The setup of the test is as follows: + * 1. 
Create 4 validator nodes total: + * - 2 honest validators with unique keys + * - 2 "malicious proposer" validators that share the SAME validator key but have DIFFERENT coinbase addresses + * (these will create duplicate proposals for the same slot) + * - The malicious proposer validators also have `attestToEquivocatedProposals: true` which makes them attest + * to BOTH proposals when they receive them - this is the attestation equivocation we want to detect + * 2. The two nodes with the same proposer key will both detect they are proposers for the same slot and race to propose + * 3. Since they have different coinbase addresses, their proposals will have different archives (different content) + * 4. The malicious attester nodes (with attestToEquivocatedProposals enabled) will attest to BOTH proposals + * 5. Honest validators will detect the duplicate attestations and emit a slash event + * + * NOTE: This test triggers BOTH duplicate proposal (from malicious proposers sharing a key) AND duplicate attestation + * (from the malicious proposers attesting to multiple proposals). We verify specifically that the duplicate + * attestation offense is recorded. 
+ */ +describe('e2e_p2p_duplicate_attestation_slash', () => { + let t: P2PNetworkTest; + let nodes: AztecNodeService[]; + + // Small slashing unit so we don't kick anyone out + const slashingUnit = BigInt(1e14); + const slashingQuorum = 3; + const slashingRoundSize = 4; + const aztecEpochDuration = 2; + + beforeEach(async () => { + t = await P2PNetworkTest.create({ + testName: 'e2e_p2p_duplicate_attestation_slash', + numberOfNodes: 0, + numberOfValidators: NUM_VALIDATORS, + basePort: BOOT_NODE_UDP_PORT, + metricsPort: shouldCollectMetrics(), + initialConfig: { + listenAddress: '127.0.0.1', + aztecEpochDuration, + ethereumSlotDuration: ETHEREUM_SLOT_DURATION, + aztecSlotDuration: AZTEC_SLOT_DURATION, + aztecTargetCommitteeSize: COMMITTEE_SIZE, + aztecProofSubmissionEpochs: 1024, // effectively do not reorg + slashInactivityConsecutiveEpochThreshold: 32, // effectively do not slash for inactivity + minTxsPerBlock: 0, // always be building + mockGossipSubNetwork: true, // do not worry about p2p connectivity issues + slashingQuorum, + slashingRoundSizeInEpochs: slashingRoundSize / aztecEpochDuration, + slashAmountSmall: slashingUnit, + slashAmountMedium: slashingUnit * 2n, + slashAmountLarge: slashingUnit * 3n, + enforceTimeTable: true, + blockDurationMs: BLOCK_DURATION * 1000, + slashDuplicateProposalPenalty: slashingUnit, + slashDuplicateAttestationPenalty: slashingUnit, + slashingOffsetInRounds: 1, + }, + }); + + await t.setup(); + await t.applyBaseSetup(); + }); + + afterEach(async () => { + await t.stopNodes(nodes); + await t.teardown(); + for (let i = 0; i < NUM_VALIDATORS; i++) { + fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true, maxRetries: 3 }); + } + }); + + const debugRollup = async () => { + await t.ctx.cheatCodes.rollup.debugRollup(); + }; + + it('slashes validator who sends duplicate attestations', async () => { + const { rollup } = await t.getContracts(); + + // Jump forward to an epoch in the future such that the validator set is not empty 
+ await t.ctx.cheatCodes.rollup.advanceToEpoch(EpochNumber(4)); + await debugRollup(); + + t.logger.warn('Creating nodes'); + + // Get the attester private key that will be shared between two malicious proposer nodes + // We'll use validator index 0 for the "malicious" proposer validator key + const maliciousProposerIndex = 0; + const maliciousProposerPrivateKey = getPrivateKeyFromIndex( + ATTESTER_PRIVATE_KEYS_START_INDEX + maliciousProposerIndex, + )!; + const maliciousProposerAddress = EthAddress.fromString( + privateKeyToAccount(`0x${maliciousProposerPrivateKey.toString('hex')}`).address, + ); + + t.logger.warn(`Malicious proposer address: ${maliciousProposerAddress.toString()}`); + + // Create two nodes with the SAME validator key but DIFFERENT coinbase addresses + // This will cause them to create proposals with different content for the same slot + // Additionally, enable attestToEquivocatedProposals so they will attest to BOTH proposals + const maliciousProposerPrivateKeyHex = bufferToHex(maliciousProposerPrivateKey); + const coinbase1 = EthAddress.random(); + const coinbase2 = EthAddress.random(); + + t.logger.warn(`Creating malicious proposer node 1 with coinbase ${coinbase1.toString()}`); + const maliciousNode1 = await createNode( + { + ...t.ctx.aztecNodeConfig, + validatorPrivateKey: maliciousProposerPrivateKeyHex, + coinbase: coinbase1, + attestToEquivocatedProposals: true, // Attest to all proposals - creates duplicate attestations + broadcastEquivocatedProposals: true, // Don't abort checkpoint building on duplicate block proposals + }, + t.ctx.dateProvider!, + BOOT_NODE_UDP_PORT + 1, + t.bootstrapNodeEnr, + maliciousProposerIndex, + t.prefilledPublicData, + `${DATA_DIR}-0`, + shouldCollectMetrics(), + ); + + t.logger.warn(`Creating malicious proposer node 2 with coinbase ${coinbase2.toString()}`); + const maliciousNode2 = await createNode( + { + ...t.ctx.aztecNodeConfig, + validatorPrivateKey: maliciousProposerPrivateKeyHex, + coinbase: coinbase2, + 
attestToEquivocatedProposals: true, // Attest to all proposals - creates duplicate attestations + broadcastEquivocatedProposals: true, // Don't abort checkpoint building on duplicate block proposals + }, + t.ctx.dateProvider!, + BOOT_NODE_UDP_PORT + 2, + t.bootstrapNodeEnr, + maliciousProposerIndex, + t.prefilledPublicData, + `${DATA_DIR}-1`, + shouldCollectMetrics(), + ); + + // Create honest nodes with unique validator keys (indices 1 and 2) + t.logger.warn('Creating honest nodes'); + const honestNode1 = await createNode( + t.ctx.aztecNodeConfig, + t.ctx.dateProvider!, + BOOT_NODE_UDP_PORT + 3, + t.bootstrapNodeEnr, + 1, + t.prefilledPublicData, + `${DATA_DIR}-2`, + shouldCollectMetrics(), + ); + const honestNode2 = await createNode( + t.ctx.aztecNodeConfig, + t.ctx.dateProvider!, + BOOT_NODE_UDP_PORT + 4, + t.bootstrapNodeEnr, + 2, + t.prefilledPublicData, + `${DATA_DIR}-3`, + shouldCollectMetrics(), + ); + + nodes = [maliciousNode1, maliciousNode2, honestNode1, honestNode2]; + + // Wait for P2P mesh and the committee to be fully formed before proceeding + await t.waitForP2PMeshConnectivity(nodes, NUM_VALIDATORS); + await awaitCommitteeExists({ rollup, logger: t.logger }); + + // Wait for offenses to be detected + // We expect BOTH duplicate proposal AND duplicate attestation offenses + // The malicious proposer nodes create duplicate proposals (same key, different coinbase) + // The malicious proposer nodes also create duplicate attestations (attestToEquivocatedProposals enabled) + t.logger.warn('Waiting for duplicate attestation offense to be detected...'); + const offenses = await awaitOffenseDetected({ + epochDuration: t.ctx.aztecNodeConfig.aztecEpochDuration, + logger: t.logger, + nodeAdmin: honestNode1, // Use honest node to check for offenses + slashingRoundSize, + waitUntilOffenseCount: 2, // Wait for both duplicate proposal and duplicate attestation + timeoutSeconds: AZTEC_SLOT_DURATION * 16, + }); + + t.logger.warn(`Collected offenses`, { offenses }); 
+ + // Verify we have detected the duplicate attestation offense + const duplicateAttestationOffenses = offenses.filter( + offense => offense.offenseType === OffenseType.DUPLICATE_ATTESTATION, + ); + const duplicateProposalOffenses = offenses.filter( + offense => offense.offenseType === OffenseType.DUPLICATE_PROPOSAL, + ); + + t.logger.info(`Found ${duplicateAttestationOffenses.length} duplicate attestation offenses`); + t.logger.info(`Found ${duplicateProposalOffenses.length} duplicate proposal offenses`); + + // We should have at least one duplicate attestation offense + expect(duplicateAttestationOffenses.length).toBeGreaterThan(0); + + // Verify the duplicate attestation offense is from the malicious proposer address + // (since they are the ones with attestToEquivocatedProposals enabled) + for (const offense of duplicateAttestationOffenses) { + expect(offense.offenseType).toEqual(OffenseType.DUPLICATE_ATTESTATION); + expect(offense.validator.toString()).toEqual(maliciousProposerAddress.toString()); + } + + // Verify that for each duplicate attestation offense, the attester for that slot is the malicious validator + const epochCache = (honestNode1 as TestAztecNodeService).epochCache; + for (const offense of duplicateAttestationOffenses) { + const offenseSlot = SlotNumber(Number(offense.epochOrSlot)); + const committeeInfo = await epochCache.getCommittee(offenseSlot); + t.logger.info(`Offense slot ${offenseSlot}: committee includes attester ${maliciousProposerAddress.toString()}`); + expect(committeeInfo.committee?.map(addr => addr.toString())).toContain(maliciousProposerAddress.toString()); + } + + t.logger.warn('Duplicate attestation offense correctly detected and recorded'); + }); +}); diff --git a/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts index 50479bf4ce50..374e4527d4ef 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts +++ 
b/yarn-project/end-to-end/src/e2e_p2p/duplicate_proposal_slash.test.ts @@ -125,8 +125,13 @@ describe('e2e_p2p_duplicate_proposal_slash', () => { t.logger.warn(`Creating malicious node 1 with coinbase ${coinbase1.toString()}`); const maliciousNode1 = await createNode( - { ...t.ctx.aztecNodeConfig, validatorPrivateKey: maliciousPrivateKeyHex, coinbase: coinbase1 }, - t.ctx.dateProvider!, + { + ...t.ctx.aztecNodeConfig, + validatorPrivateKey: maliciousPrivateKeyHex, + coinbase: coinbase1, + broadcastEquivocatedProposals: true, + }, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 1, t.bootstrapNodeEnr, maliciousValidatorIndex, @@ -137,8 +142,13 @@ describe('e2e_p2p_duplicate_proposal_slash', () => { t.logger.warn(`Creating malicious node 2 with coinbase ${coinbase2.toString()}`); const maliciousNode2 = await createNode( - { ...t.ctx.aztecNodeConfig, validatorPrivateKey: maliciousPrivateKeyHex, coinbase: coinbase2 }, - t.ctx.dateProvider!, + { + ...t.ctx.aztecNodeConfig, + validatorPrivateKey: maliciousPrivateKeyHex, + coinbase: coinbase2, + broadcastEquivocatedProposals: true, + }, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 2, t.bootstrapNodeEnr, maliciousValidatorIndex, @@ -151,7 +161,7 @@ describe('e2e_p2p_duplicate_proposal_slash', () => { t.logger.warn('Creating honest nodes'); const honestNode1 = await createNode( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 3, t.bootstrapNodeEnr, 1, @@ -161,7 +171,7 @@ describe('e2e_p2p_duplicate_proposal_slash', () => { ); const honestNode2 = await createNode( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 4, t.bootstrapNodeEnr, 2, diff --git a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts index 11b1be6a9ab5..1978c217722b 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts @@ -107,7 +107,7 @@ 
describe('e2e_p2p_network', () => { t.logger.info('Creating validator nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, @@ -124,7 +124,7 @@ describe('e2e_p2p_network', () => { BOOT_NODE_UDP_PORT + NUM_VALIDATORS + 1, t.bootstrapNodeEnr, ATTESTER_PRIVATE_KEYS_START_INDEX + NUM_VALIDATORS + 1, - { dateProvider: t.ctx.dateProvider! }, + { dateProvider: t.ctx.dateProvider }, t.prefilledPublicData, `${DATA_DIR}-prover`, shouldCollectMetrics(), @@ -135,7 +135,7 @@ describe('e2e_p2p_network', () => { const monitoringNodeConfig: AztecNodeConfig = { ...t.ctx.aztecNodeConfig, alwaysReexecuteBlockProposals: true }; monitoringNode = await createNonValidatorNode( monitoringNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + NUM_VALIDATORS + 2, t.bootstrapNodeEnr, t.prefilledPublicData, diff --git a/yarn-project/end-to-end/src/e2e_p2p/gossip_network_no_cheat.test.ts b/yarn-project/end-to-end/src/e2e_p2p/gossip_network_no_cheat.test.ts index 44d25d9cc016..40abce5a9573 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/gossip_network_no_cheat.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/gossip_network_no_cheat.test.ts @@ -175,7 +175,7 @@ describe('e2e_p2p_network', () => { // Set the system time in the node, only after we have warped the time and waited for a block // Time is only set in the NEXT block - t.ctx.dateProvider!.setTime(Number(timestamp) * 1000); + t.ctx.dateProvider.setTime(Number(timestamp) * 1000); // create our network of nodes and submit txs into each of them // the number of txs per node and the number of txs per rollup @@ -185,7 +185,7 @@ describe('e2e_p2p_network', () => { t.logger.info('Creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, diff --git 
a/yarn-project/end-to-end/src/e2e_p2p/inactivity_slash_test.ts b/yarn-project/end-to-end/src/e2e_p2p/inactivity_slash_test.ts index b5b2da8438e8..7897fb1269ad 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/inactivity_slash_test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/inactivity_slash_test.ts @@ -98,13 +98,13 @@ export class P2PInactivityTest { this.rollup = rollup; if (!this.keepInitialNode) { - await this.test.ctx.aztecNodeService!.stop(); + await this.test.ctx.aztecNodeService.stop(); } // Create all active nodes this.activeNodes = await createNodes( this.test.ctx.aztecNodeConfig, - this.test.ctx.dateProvider!, + this.test.ctx.dateProvider, this.test.bootstrapNodeEnr, NUM_NODES - this.inactiveNodeCount - Number(this.keepInitialNode), BOOT_NODE_UDP_PORT, @@ -118,7 +118,7 @@ export class P2PInactivityTest { const inactiveConfig = { ...this.test.ctx.aztecNodeConfig, dontStartSequencer: true }; this.inactiveNodes = await createNodes( inactiveConfig, - this.test.ctx.dateProvider!, + this.test.ctx.dateProvider, this.test.bootstrapNodeEnr, this.inactiveNodeCount, BOOT_NODE_UDP_PORT, @@ -129,7 +129,7 @@ export class P2PInactivityTest { ); this.nodes = [ - ...(this.keepInitialNode ? [this.test.ctx.aztecNodeService!] : []), + ...(this.keepInitialNode ? 
[this.test.ctx.aztecNodeService] : []), ...this.activeNodes, ...this.inactiveNodes, ]; diff --git a/yarn-project/end-to-end/src/e2e_p2p/multiple_validators_sentinel.parallel.test.ts b/yarn-project/end-to-end/src/e2e_p2p/multiple_validators_sentinel.parallel.test.ts index 68b70d7966e7..94e33fc0d53e 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/multiple_validators_sentinel.parallel.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/multiple_validators_sentinel.parallel.test.ts @@ -64,7 +64,7 @@ describe('e2e_p2p_multiple_validators_sentinel', () => { nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, @@ -77,7 +77,7 @@ describe('e2e_p2p_multiple_validators_sentinel', () => { sentinel = await createNonValidatorNode( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + 1 + NUM_NODES, t.bootstrapNodeEnr, t.prefilledPublicData, diff --git a/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts b/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts index 4f81c9913126..4dd5cefce8fd 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/p2p_network.ts @@ -333,9 +333,9 @@ export class P2PNetworkTest { const block = await this.context.deployL1ContractsValues.l1Client.getBlock({ blockNumber: receipt.blockNumber, }); - this.context.dateProvider!.setTime(Number(block.timestamp) * 1000); + this.context.dateProvider.setTime(Number(block.timestamp) * 1000); - await this.context.aztecNodeService!.stop(); + await this.context.aztecNodeService.stop(); } async sendDummyTx() { @@ -374,8 +374,8 @@ export class P2PNetworkTest { this.prefilledPublicData = prefilledPublicData; const rollupContract = RollupContract.getFromL1ContractsValues(this.context.deployL1ContractsValues); - this.monitor = new ChainMonitor(rollupContract, this.context.dateProvider!).start(); - this.monitor.on('l1-block', ({ timestamp }) => 
this.context.dateProvider!.setTime(Number(timestamp) * 1000)); + this.monitor = new ChainMonitor(rollupContract, this.context.dateProvider).start(); + this.monitor.on('l1-block', ({ timestamp }) => this.context.dateProvider.setTime(Number(timestamp) * 1000)); } async stopNodes(nodes: AztecNodeService[]) { diff --git a/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts index 5453d47ebcce..5693c9da05bd 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts @@ -149,7 +149,7 @@ describe('e2e_p2p_preferred_network', () => { }); afterEach(async () => { - await t.stopNodes([t.ctx.aztecNodeService!].concat(nodes).concat(validators).concat(preferredNodes)); + await t.stopNodes([t.ctx.aztecNodeService].concat(nodes).concat(validators).concat(preferredNodes)); await t.teardown(); for (let i = 0; i < NUM_NODES + NUM_VALIDATORS + NUM_PREFERRED_NODES; i++) { fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true, maxRetries: 3 }); @@ -190,7 +190,7 @@ describe('e2e_p2p_preferred_network', () => { preferredNodes = await createNodes( preferredNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_PREFERRED_NODES, BOOT_NODE_UDP_PORT, @@ -224,7 +224,7 @@ describe('e2e_p2p_preferred_network', () => { t.logger.info('Creating nodes'); nodes = await createNodes( nodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, BOOT_NODE_UDP_PORT, @@ -247,7 +247,7 @@ describe('e2e_p2p_preferred_network', () => { validators = await createNodes( validatorConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS - 1, BOOT_NODE_UDP_PORT, @@ -271,7 +271,7 @@ describe('e2e_p2p_preferred_network', () => { const noDiscoveryValidators = await createNodes( lastValidatorConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, 
t.bootstrapNodeEnr, 1, BOOT_NODE_UDP_PORT, @@ -282,7 +282,7 @@ describe('e2e_p2p_preferred_network', () => { indexOffset, ); - const allNodes = [...nodes, ...preferredNodes, ...validators, ...noDiscoveryValidators, t.ctx.aztecNodeService!]; + const allNodes = [...nodes, ...preferredNodes, ...validators, ...noDiscoveryValidators, t.ctx.aztecNodeService]; const identifiers = nodes .map((_, i) => `Node ${i + 1}`) .concat(preferredNodes.map((_, i) => `Preferred Node ${i + 1}`)) diff --git a/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts b/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts index 54a6dec3c628..b445b909f419 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/rediscovery.test.ts @@ -54,7 +54,7 @@ describe('e2e_p2p_rediscovery', () => { const txsSentViaDifferentNodes: TxHash[][] = []; nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, @@ -87,7 +87,7 @@ describe('e2e_p2p_rediscovery', () => { const newNode = await createNode( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, i + 1 + BOOT_NODE_UDP_PORT, undefined, i, diff --git a/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts b/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts index f72c8bfd5941..74a22d9cc457 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/reex.test.ts @@ -56,7 +56,7 @@ describe('e2e_p2p_reex', () => { await t.applyBaseSetup(); t.logger.info('Stopping main node sequencer'); - await t.ctx.aztecNodeService!.getSequencer()?.stop(); + await t.ctx.aztecNodeService.getSequencer()?.stop(); if (!t.bootstrapNodeEnr) { throw new Error('Bootstrap node ENR is not available'); @@ -70,7 +70,7 @@ describe('e2e_p2p_reex', () => { minTxsPerBlock: 1, maxTxsPerBlock: NUM_TXS_PER_NODE, }, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, 
BASE_BOOT_NODE_UDP_PORT, @@ -230,7 +230,7 @@ describe('e2e_p2p_reex', () => { // Start a fresh slot and resume proposals const [ts] = await t.ctx.cheatCodes.rollup.advanceToNextSlot(); - t.ctx.dateProvider!.setTime(Number(ts) * 1000); + t.ctx.dateProvider.setTime(Number(ts) * 1000); await resumeProposals(); diff --git a/yarn-project/end-to-end/src/e2e_p2p/reqresp/utils.ts b/yarn-project/end-to-end/src/e2e_p2p/reqresp/utils.ts index 310443fafafa..b5860060ba5f 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/reqresp/utils.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/reqresp/utils.ts @@ -81,7 +81,7 @@ export async function runReqrespTxTest(params: { const nodes = await createNodes( aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, @@ -95,7 +95,7 @@ export async function runReqrespTxTest(params: { await t.setupAccount(); - const targetBlockNumber = await t.ctx.aztecNodeService!.getBlockNumber(); + const targetBlockNumber = await t.ctx.aztecNodeService.getBlockNumber(); await retryUntil( async () => { const blockNumbers = await Promise.all(nodes.map(node => node.getBlockNumber())); @@ -108,7 +108,7 @@ export async function runReqrespTxTest(params: { t.logger.info('Preparing transactions to send'); const txBatches = await timesAsync(2, () => - prepareTransactions(t.logger, t.ctx.aztecNodeService!, NUM_TXS_PER_NODE, t.fundedAccount), + prepareTransactions(t.logger, t.ctx.aztecNodeService, NUM_TXS_PER_NODE, t.fundedAccount), ); t.logger.info('Removing initial node'); @@ -116,7 +116,7 @@ export async function runReqrespTxTest(params: { t.logger.info('Starting fresh slot'); const [timestamp] = await t.ctx.cheatCodes.rollup.advanceToNextSlot(); - t.ctx.dateProvider!.setTime(Number(timestamp) * 1000); + t.ctx.dateProvider.setTime(Number(timestamp) * 1000); const startSlotTimestamp = BigInt(timestamp); const { proposerIndexes, nodesToTurnOffTxGossip } = await getProposerIndexes(t, startSlotTimestamp); diff 
--git a/yarn-project/end-to-end/src/e2e_p2p/slash_veto_demo.test.ts b/yarn-project/end-to-end/src/e2e_p2p/slash_veto_demo.test.ts index 452d20d10610..f7cd63e5bfc0 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/slash_veto_demo.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/slash_veto_demo.test.ts @@ -102,7 +102,7 @@ describe('veto slash', () => { nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, // Note we do not create the last validator yet, so it shows as offline BOOT_NODE_UDP_PORT, @@ -117,7 +117,7 @@ describe('veto slash', () => { ); vetoerL1TxUtils = createL1TxUtilsFromViemWallet(vetoerL1Client, { logger: t.logger, - dateProvider: t.ctx.dateProvider!, + dateProvider: t.ctx.dateProvider, }); ({ rollup } = await t.getContracts()); @@ -201,7 +201,7 @@ describe('veto slash', () => { debugLogger.info(`\n\ninitializing slasher with proposer: ${proposer}\n\n`); const txUtils = createL1TxUtilsFromViemWallet(deployerClient, { logger: t.logger, - dateProvider: t.ctx.dateProvider!, + dateProvider: t.ctx.dateProvider, }); await txUtils.sendAndMonitorTransaction({ to: slasher.toString(), diff --git a/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts b/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts index 505de78b8068..0eed2f499a95 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/upgrade_governance_proposer.test.ts @@ -148,7 +148,7 @@ describe('e2e_p2p_governance_proposer', () => { t.logger.info('Creating nodes'); nodes = await createNodes( { ...t.ctx.aztecNodeConfig, governanceProposerPayload: newPayloadAddress }, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts index 
b55b0cb0ce59..6349efaecb31 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts @@ -113,7 +113,7 @@ describe('e2e_p2p_valid_epoch_pruned_slash', () => { t.logger.warn(`Creating ${NUM_VALIDATORS} new nodes`); nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_VALIDATORS, BOOT_NODE_UDP_PORT, diff --git a/yarn-project/end-to-end/src/e2e_p2p/validators_sentinel.test.ts b/yarn-project/end-to-end/src/e2e_p2p/validators_sentinel.test.ts index 0d1a43d0564b..0b62ad197753 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/validators_sentinel.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/validators_sentinel.test.ts @@ -58,7 +58,7 @@ describe('e2e_p2p_validators_sentinel', () => { nodes = await createNodes( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, t.bootstrapNodeEnr, NUM_NODES, // Note we do not create the last validator yet, so it shows as offline BOOT_NODE_UDP_PORT, @@ -158,7 +158,7 @@ describe('e2e_p2p_validators_sentinel', () => { const nodeIndex = NUM_NODES + 1; const newNode = await createNode( t.ctx.aztecNodeConfig, - t.ctx.dateProvider!, + t.ctx.dateProvider, BOOT_NODE_UDP_PORT + nodeIndex + 1, t.bootstrapNodeEnr!, nodeIndex, diff --git a/yarn-project/end-to-end/src/e2e_snapshot_sync.test.ts b/yarn-project/end-to-end/src/e2e_snapshot_sync.test.ts index 4a92e5f9f338..cc77061328af 100644 --- a/yarn-project/end-to-end/src/e2e_snapshot_sync.test.ts +++ b/yarn-project/end-to-end/src/e2e_snapshot_sync.test.ts @@ -95,7 +95,7 @@ describe('e2e_snapshot_sync', () => { it('creates a snapshot', async () => { log.warn(`Creating snapshot`); - await context.aztecNodeAdmin!.startSnapshotUpload(snapshotLocation); + await context.aztecNodeAdmin.startSnapshotUpload(snapshotLocation); await retryUntil(() => readdir(snapshotDir).then(files => files.length > 0), 'snapshot-created', 90, 1); 
log.warn(`Snapshot created`); }); diff --git a/yarn-project/end-to-end/src/e2e_synching.test.ts b/yarn-project/end-to-end/src/e2e_synching.test.ts index b0f763a1bcfb..d81037b0a638 100644 --- a/yarn-project/end-to-end/src/e2e_synching.test.ts +++ b/yarn-project/end-to-end/src/e2e_synching.test.ts @@ -403,7 +403,7 @@ describe('e2e_synching', () => { await (aztecNode as any).stop(); await (sequencer as any).stop(); - await watcher?.stop(); + await watcher.stop(); const blobClient = await createBlobClientWithFileStores(config, createLogger('test:blob-client:client')); @@ -411,7 +411,7 @@ describe('e2e_synching', () => { const l1TxUtils = createL1TxUtilsWithBlobsFromViemWallet( deployL1ContractsValues.l1Client, - { logger, dateProvider: dateProvider! }, + { logger, dateProvider }, config, ); const rollupAddress = deployL1ContractsValues.l1ContractAddresses.rollupAddress.toString(); @@ -450,7 +450,7 @@ describe('e2e_synching', () => { slashingProposerContract, slashFactoryContract, epochCache, - dateProvider: dateProvider!, + dateProvider, metrics: sequencerPublisherMetrics, lastActions: {}, }, diff --git a/yarn-project/end-to-end/src/e2e_token_contract/token_contract_test.ts b/yarn-project/end-to-end/src/e2e_token_contract/token_contract_test.ts index 3ab11d635292..ddc2ffd4ce78 100644 --- a/yarn-project/end-to-end/src/e2e_token_contract/token_contract_test.ts +++ b/yarn-project/end-to-end/src/e2e_token_contract/token_contract_test.ts @@ -70,7 +70,7 @@ export class TokenContractTest { initialFundedAccounts: this.context.initialFundedAccounts, }); - this.node = this.context.aztecNodeService!; + this.node = this.context.aztecNodeService; this.wallet = this.context.wallet; [this.adminAddress, this.account1Address, this.account2Address] = deployedAccounts.map(acc => acc.address); diff --git a/yarn-project/end-to-end/src/fixtures/e2e_prover_test.ts b/yarn-project/end-to-end/src/fixtures/e2e_prover_test.ts index 2cd582cf2e4b..004c672d2afb 100644 --- 
a/yarn-project/end-to-end/src/fixtures/e2e_prover_test.ts +++ b/yarn-project/end-to-end/src/fixtures/e2e_prover_test.ts @@ -144,7 +144,7 @@ export class FullProverTest { this.logger.info(`Enabling proving`, { realProofs: this.realProofs }); // We don't wish to mark as proven automatically, so we set the flag to false - this.context.watcher!.setIsMarkingAsProven(false); + this.context.watcher.setIsMarkingAsProven(false); this.simulatedProverNode = this.context.proverNode!; ({ @@ -152,7 +152,7 @@ export class FullProverTest { deployL1ContractsValues: this.l1Contracts, cheatCodes: this.cheatCodes, } = this.context); - this.aztecNodeAdmin = this.context.aztecNodeService!; + this.aztecNodeAdmin = this.context.aztecNodeService; const config = this.context.aztecNodeConfig; const blobClient = await createBlobClientWithFileStores(config, this.logger); @@ -225,7 +225,7 @@ export class FullProverTest { this.logger.verbose('Starting archiver for new prover node'); const archiver = await createArchiver( { ...this.context.aztecNodeConfig, dataDirectory: undefined }, - { blobClient, dateProvider: this.context.dateProvider! 
}, + { blobClient, dateProvider: this.context.dateProvider }, { blockUntilSync: true }, ); diff --git a/yarn-project/end-to-end/src/fixtures/setup.ts b/yarn-project/end-to-end/src/fixtures/setup.ts index 41cdb60c49b0..32d4255595b2 100644 --- a/yarn-project/end-to-end/src/fixtures/setup.ts +++ b/yarn-project/end-to-end/src/fixtures/setup.ts @@ -1,5 +1,5 @@ import { SchnorrAccountContractArtifact } from '@aztec/accounts/schnorr'; -import { type InitialAccountData, generateSchnorrAccounts, getInitialTestAccountsData } from '@aztec/accounts/testing'; +import { type InitialAccountData, generateSchnorrAccounts } from '@aztec/accounts/testing'; import { type Archiver, createArchiver } from '@aztec/archiver'; import { type AztecNodeConfig, AztecNodeService, getConfigEnvVars } from '@aztec/aztec-node'; import { AztecAddress, EthAddress } from '@aztec/aztec.js/addresses'; @@ -13,7 +13,7 @@ import { import { publishContractClass, publishInstance } from '@aztec/aztec.js/deployment'; import { Fr } from '@aztec/aztec.js/fields'; import { type Logger, createLogger } from '@aztec/aztec.js/log'; -import { type AztecNode, createAztecNodeClient, waitForNode } from '@aztec/aztec.js/node'; +import type { AztecNode } from '@aztec/aztec.js/node'; import type { Wallet } from '@aztec/aztec.js/wallet'; import { AnvilTestWatcher, CheatCodes } from '@aztec/aztec/testing'; import { createBlobClientWithFileStores } from '@aztec/blob-client/client'; @@ -92,9 +92,6 @@ import { getEndToEndTestTelemetryClient } from './with_telemetry_utils.js'; export { startAnvil }; -const { AZTEC_NODE_URL = '' } = process.env; -const getAztecUrl = () => AZTEC_NODE_URL; - let telemetry: TelemetryClient | undefined = undefined; async function getTelemetryClient(partialConfig: Partial & { benchmark?: boolean } = {}) { if (!telemetry) { @@ -217,13 +214,13 @@ export type EndToEndContext = { anvil: Anvil | undefined; /** The Aztec Node service or client a connected to it. 
*/ aztecNode: AztecNode; - /** The Aztec Node as a service (only set if running locally). */ - aztecNodeService: AztecNodeService | undefined; - /** Client to the Aztec Node admin interface (undefined if connected to remote environment) */ - aztecNodeAdmin: AztecNodeAdmin | undefined; + /** The Aztec Node as a service. */ + aztecNodeService: AztecNodeService; + /** Client to the Aztec Node admin interface. */ + aztecNodeAdmin: AztecNodeAdmin; /** The prover node service (only set if startProverNode is true) */ proverNode: ProverNode | undefined; - /** A client to the sequencer service (undefined if connected to remote environment) */ + /** A client to the sequencer service. */ sequencer: SequencerClient | undefined; /** Return values from deployAztecL1Contracts function. */ deployL1ContractsValues: DeployAztecL1ContractsReturnType; @@ -243,12 +240,12 @@ export type EndToEndContext = { cheatCodes: CheatCodes; /** The cheat codes for L1 */ ethCheatCodes: EthCheatCodes; - /** The anvil test watcher (undefined if connected to remote environment) */ - watcher: AnvilTestWatcher | undefined; - /** Allows tweaking current system time, used by the epoch cache only (undefined if connected to remote environment) */ - dateProvider: TestDateProvider | undefined; + /** The anvil test watcher. */ + watcher: AnvilTestWatcher; + /** Allows tweaking current system time, used by the epoch cache only. */ + dateProvider: TestDateProvider; /** Telemetry client */ - telemetryClient: TelemetryClient | undefined; + telemetryClient: TelemetryClient; /** Mock gossip sub network used for gossipping messages (only if mockGossipSubNetwork was set to true in opts) */ mockGossipSubNetwork: MockGossipSubNetwork | undefined; /** Prefilled public data used for setting up nodes. */ @@ -258,87 +255,11 @@ export type EndToEndContext = { /** BB config (only set if running locally). */ bbConfig: Awaited>; /** Directory to cleanup on teardown. 
*/ - directoryToCleanup: string | undefined; + directoryToCleanup: string; /** Function to stop the started services. */ teardown: () => Promise; }; -/** - * Function to setup the test against a remote deployment. It is assumed that L1 contract are already deployed - */ -async function setupWithRemoteEnvironment( - account: HDAccount | PrivateKeyAccount, - config: AztecNodeConfig & SetupOptions, - logger: Logger, - numberOfAccounts: number, -): Promise { - const aztecNodeUrl = getAztecUrl(); - logger.verbose(`Creating Aztec Node client to remote host ${aztecNodeUrl}`); - const aztecNode = createAztecNodeClient(aztecNodeUrl); - await waitForNode(aztecNode, logger); - logger.verbose('JSON RPC client connected to Aztec Node'); - logger.verbose(`Retrieving contract addresses from ${aztecNodeUrl}`); - const { l1ContractAddresses, rollupVersion } = await aztecNode.getNodeInfo(); - - const l1Client = createExtendedL1Client(config.l1RpcUrls, account, foundry); - - const deployL1ContractsValues: DeployAztecL1ContractsReturnType = { - l1ContractAddresses, - l1Client, - rollupVersion, - }; - const ethCheatCodes = new EthCheatCodes(config.l1RpcUrls, new DateProvider()); - const wallet = await TestWallet.create(aztecNode); - - if (config.walletMinFeePadding !== undefined) { - wallet.setMinFeePadding(config.walletMinFeePadding); - } - - const cheatCodes = await CheatCodes.create(config.l1RpcUrls, aztecNode, new DateProvider()); - const teardown = () => Promise.resolve(); - - logger.verbose('Populating wallet from already registered accounts...'); - const initialFundedAccounts = await getInitialTestAccountsData(); - - if (initialFundedAccounts.length < numberOfAccounts) { - throw new Error(`Required ${numberOfAccounts} accounts. 
Found ${initialFundedAccounts.length}.`); - } - - const testAccounts = await Promise.all( - initialFundedAccounts.slice(0, numberOfAccounts).map(async account => { - const accountManager = await wallet.createSchnorrAccount(account.secret, account.salt, account.signingKey); - return accountManager.address; - }), - ); - - return { - anvil: undefined, - aztecNode, - aztecNodeService: undefined, - aztecNodeAdmin: undefined, - sequencer: undefined, - proverNode: undefined, - deployL1ContractsValues, - config, - aztecNodeConfig: config, - initialFundedAccounts, - wallet, - accounts: testAccounts, - logger, - cheatCodes, - ethCheatCodes, - prefilledPublicData: undefined, - mockGossipSubNetwork: undefined, - watcher: undefined, - dateProvider: undefined, - telemetryClient: undefined, - acvmConfig: undefined, - bbConfig: undefined, - directoryToCleanup: undefined, - teardown, - }; -} - /** * Sets up the environment for the end-to-end tests. * @param numberOfAccounts - The number of new accounts to be created once the PXE is initiated. @@ -381,12 +302,6 @@ export async function setup( if (!isAnvilTestChain(chain.id)) { throw new Error(`No ETHEREUM_HOSTS set but non anvil chain requested`); } - if (AZTEC_NODE_URL) { - throw new Error( - `AZTEC_NODE_URL provided but no ETHEREUM_HOSTS set. Refusing to run, please set both variables so tests can deploy L1 contracts to the same Anvil instance`, - ); - } - const res = await startAnvil({ l1BlockTime: opts.ethereumSlotDuration, accounts: opts.anvilAccounts, @@ -441,11 +356,6 @@ export async function setup( config.coinbase = EthAddress.fromString(publisherHdAccount.address); } - if (AZTEC_NODE_URL) { - // we are setting up against a remote environment, l1 contracts are assumed to already be deployed - return await setupWithRemoteEnvironment(publisherHdAccount!, config, logger, numberOfAccounts); - } - // Determine which addresses to fund in genesis const initialFundedAccounts = opts.initialFundedAccounts ?? 
@@ -683,7 +593,7 @@ export async function setup( logger.error(`Error during e2e test teardown`, err); } finally { try { - await telemetryClient?.stop(); + await telemetryClient.stop(); } catch (err) { logger.error(`Error during telemetry client stop`, err); } diff --git a/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts b/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts index b52c71f5bd30..e2802f3b7afe 100644 --- a/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts +++ b/yarn-project/end-to-end/src/shared/uniswap_l1_l2.ts @@ -84,7 +84,7 @@ export const uniswapL1L2TestSuite = ( l1Client = deployL1ContractsValues.l1Client; - t.watcher?.setIsMarkingAsProven(false); + t.watcher.setIsMarkingAsProven(false); if (Number(await l1Client.getBlockNumber()) < expectedForkBlockNumber) { throw new Error('This test must be run on a fork of mainnet with the expected fork block'); diff --git a/yarn-project/end-to-end/src/spartan/upgrade_rollup_version.test.ts b/yarn-project/end-to-end/src/spartan/upgrade_rollup_version.test.ts index 117842042bc7..bec7c674e878 100644 --- a/yarn-project/end-to-end/src/spartan/upgrade_rollup_version.test.ts +++ b/yarn-project/end-to-end/src/spartan/upgrade_rollup_version.test.ts @@ -10,7 +10,6 @@ import { retryUntil } from '@aztec/foundation/retry'; import { GSEAbi, GovernanceAbi, TestERC20Abi } from '@aztec/l1-artifacts'; import { jest } from '@jest/globals'; -import type { ChildProcess } from 'child_process'; import fs from 'fs'; import omit from 'lodash.omit'; import path from 'path'; @@ -19,12 +18,13 @@ import { mnemonicToAccount } from 'viem/accounts'; import { MNEMONIC } from '../fixtures/fixtures.js'; import { + type ServiceEndpoint, + getEthereumEndpoint, getGitProjectRoot, + getRPCEndpoint, rollAztecPods, runProjectScript, setupEnvironment, - startPortForwardForEthereum, - startPortForwardForRPC, updateSequencersConfig, waitForResourceByLabel, } from './utils.js'; @@ -109,25 +109,21 @@ describe('spartan_upgrade_rollup_version', () => { 
let nodeInfo: NodeInfo; let ETHEREUM_HOSTS: string[]; let originalL1ContractAddresses: L1ContractAddresses; - const forwardProcesses: ChildProcess[] = []; + const endpoints: ServiceEndpoint[] = []; jest.setTimeout(3 * 60 * 60 * 1000); // Governance flow can take a while afterAll(() => { - forwardProcesses.forEach(p => p.kill()); + endpoints.forEach(e => e.process?.kill()); }); beforeAll(async () => { - const { process: aztecRpcProcess, port: aztecRpcPort } = await startPortForwardForRPC(config.NAMESPACE); - const { process: ethereumProcess, port: ethereumPort } = await startPortForwardForEthereum(config.NAMESPACE); - forwardProcesses.push(aztecRpcProcess); - forwardProcesses.push(ethereumProcess); + const rpcEndpoint = await getRPCEndpoint(config.NAMESPACE); + const ethEndpoint = await getEthereumEndpoint(config.NAMESPACE); + endpoints.push(rpcEndpoint, ethEndpoint); - const nodeUrl = `http://127.0.0.1:${aztecRpcPort}`; - const ethereumUrl = `http://127.0.0.1:${ethereumPort}`; - - aztecNode = createAztecNodeClient(nodeUrl); + aztecNode = createAztecNodeClient(rpcEndpoint.url); nodeInfo = await aztecNode.getNodeInfo(); - ETHEREUM_HOSTS = [ethereumUrl]; + ETHEREUM_HOSTS = [ethEndpoint.url]; originalL1ContractAddresses = omit(nodeInfo.l1ContractAddresses, [ 'slashFactoryAddress', @@ -601,10 +597,9 @@ describe('spartan_upgrade_rollup_version', () => { } // Reconnect to the node via RPC after pods restart - const { process: aztecRpcProcess2, port: aztecRpcPort2 } = await startPortForwardForRPC(config.NAMESPACE); - forwardProcesses.push(aztecRpcProcess2); - const nodeUrl2 = `http://127.0.0.1:${aztecRpcPort2}`; - aztecNode = createAztecNodeClient(nodeUrl2); + const rpcEndpoint2 = await getRPCEndpoint(config.NAMESPACE); + endpoints.push(rpcEndpoint2); + aztecNode = createAztecNodeClient(rpcEndpoint2.url); const newNodeInfo = await aztecNode.getNodeInfo(); diff --git a/yarn-project/foundation/src/array/sorted_array.test.ts 
b/yarn-project/foundation/src/array/sorted_array.test.ts index 0e2417458e5b..0e6b0ad378a4 100644 --- a/yarn-project/foundation/src/array/sorted_array.test.ts +++ b/yarn-project/foundation/src/array/sorted_array.test.ts @@ -2,6 +2,7 @@ import { dedupeSortedArray, findInSortedArray, findIndexInSortedArray, + findInsertionIndexInSortedArray, insertIntoSortedArray, merge, removeAnyOf, @@ -125,6 +126,55 @@ describe('sorted_array', () => { } }); + describe('findInsertionIndexInSortedArray', () => { + it('returns 0 for empty array', () => { + expect(findInsertionIndexInSortedArray([], 1, cmp)).toBe(0); + }); + + it('returns count of elements <= needle', () => { + const tests: [number[], number, number][] = [ + [[5], 3, 0], + [[5], 5, 1], + [[5], 7, 1], + + [[1, 3, 5, 7], 0, 0], + [[1, 3, 5, 7], 1, 1], + [[1, 3, 5, 7], 2, 1], + [[1, 3, 5, 7], 3, 2], + [[1, 3, 5, 7], 4, 2], + [[1, 3, 5, 7], 5, 3], + [[1, 3, 5, 7], 6, 3], + [[1, 3, 5, 7], 7, 4], + [[1, 3, 5, 7], 8, 4], + ]; + for (const [arr, needle, expected] of tests) { + expect(findInsertionIndexInSortedArray(arr, needle, cmp)).toBe(expected); + } + }); + + it('handles duplicates by returning index after all equal elements', () => { + expect(findInsertionIndexInSortedArray([1, 2, 2, 2, 3], 2, cmp)).toBe(4); + expect(findInsertionIndexInSortedArray([2, 2, 2], 2, cmp)).toBe(3); + expect(findInsertionIndexInSortedArray([1, 1, 1, 2], 1, cmp)).toBe(3); + }); + + it('works with heterogeneous types', () => { + type Timer = { deadline: number; callback: () => void }; + const arr: Timer[] = [ + { deadline: 100, callback: () => {} }, + { deadline: 300, callback: () => {} }, + { deadline: 500, callback: () => {} }, + ]; + const cmpByDeadline = (timer: Timer, needle: { deadline: number }) => cmp(timer.deadline, needle.deadline); + + expect(findInsertionIndexInSortedArray(arr, { deadline: 0 }, cmpByDeadline)).toBe(0); + expect(findInsertionIndexInSortedArray(arr, { deadline: 100 }, cmpByDeadline)).toBe(1); + 
expect(findInsertionIndexInSortedArray(arr, { deadline: 200 }, cmpByDeadline)).toBe(1); + expect(findInsertionIndexInSortedArray(arr, { deadline: 300 }, cmpByDeadline)).toBe(2); + expect(findInsertionIndexInSortedArray(arr, { deadline: 600 }, cmpByDeadline)).toBe(3); + }); + }); + it('findIndexInSortedArray with duplicates returns any valid occurrence', () => { // Binary search doesn't guarantee first occurrence, just any valid occurrence const arr = [1, 2, 2, 2, 3]; diff --git a/yarn-project/foundation/src/array/sorted_array.ts b/yarn-project/foundation/src/array/sorted_array.ts index a622661024ae..d3ba1e678ce2 100644 --- a/yarn-project/foundation/src/array/sorted_array.ts +++ b/yarn-project/foundation/src/array/sorted_array.ts @@ -21,34 +21,39 @@ export function dedupeSortedArray(arr: T[], cmp: Cmp): void { } export function insertIntoSortedArray(arr: T[], item: T, cmp: Cmp, allowDuplicates = true): boolean { + const index = findInsertionIndexInSortedArray(arr, item, cmp); + + if (!allowDuplicates) { + // Check element before insertion point (upper bound returns index after equal elements) + if (index > 0 && cmp(arr[index - 1], item) === 0) { + return false; + } + } + + arr.splice(index, 0, item); + return true; +} + +/** + * Finds the index where needle would be inserted to maintain sorted order. + * Returns the count of elements less than or equal to needle. 
+ */ +export function findInsertionIndexInSortedArray(values: T[], needle: N, cmp: (a: T, b: N) => number): number { let start = 0; - let end = arr.length; + let end = values.length; while (start < end) { const mid = start + (((end - start) / 2) | 0); - const comparison = cmp(arr[mid], item); + const comparison = cmp(values[mid], needle); - if (comparison < 0) { + if (comparison <= 0) { start = mid + 1; } else { end = mid; } } - if (!allowDuplicates) { - // Check element at insertion point - if (start < arr.length && cmp(arr[start], item) === 0) { - return false; - } - - // Check element before insertion point (in case we landed after duplicates) - if (start > 0 && cmp(arr[start - 1], item) === 0) { - return false; - } - } - - arr.splice(start, 0, item); - return true; + return start; } export function findIndexInSortedArray(values: T[], needle: N, cmp: (a: T, b: N) => number): number { diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index fbc42a161bdb..7d3d4bcf32d1 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -221,6 +221,7 @@ export type EnvVar = | 'SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD' | 'SLASH_INVALID_BLOCK_PENALTY' | 'SLASH_DUPLICATE_PROPOSAL_PENALTY' + | 'SLASH_DUPLICATE_ATTESTATION_PENALTY' | 'SLASH_OVERRIDE_PAYLOAD' | 'SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY' | 'SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY' @@ -246,8 +247,10 @@ export type EnvVar = | 'TX_COLLECTION_NODE_RPC_MAX_BATCH_SIZE' | 'TX_COLLECTION_NODE_RPC_URLS' | 'TX_COLLECTION_MISSING_TXS_COLLECTOR_TYPE' + | 'TX_COLLECTION_FILE_STORE_URLS' + | 'TX_COLLECTION_FILE_STORE_SLOW_DELAY_MS' + | 'TX_COLLECTION_FILE_STORE_FAST_DELAY_MS' | 'TX_FILE_STORE_URL' - | 'TX_FILE_STORE_DOWNLOAD_URL' | 'TX_FILE_STORE_UPLOAD_CONCURRENCY' | 'TX_FILE_STORE_MAX_QUEUE_SIZE' | 'TX_FILE_STORE_ENABLED' diff --git a/yarn-project/foundation/src/queue/base_memory_queue.ts 
b/yarn-project/foundation/src/queue/base_memory_queue.ts index 8446e9adf04a..6c99f7619abb 100644 --- a/yarn-project/foundation/src/queue/base_memory_queue.ts +++ b/yarn-project/foundation/src/queue/base_memory_queue.ts @@ -122,7 +122,7 @@ export abstract class BaseMemoryQueue { * @param handler - A function that takes an item of type T and returns a Promise after processing the item. * @returns A Promise that resolves when the queue is finished processing. */ - public async process(handler: (item: T) => Promise) { + public async process(handler: (item: T) => Promise | void) { try { while (true) { const item = await this.get(); diff --git a/yarn-project/foundation/src/timer/date.test.ts b/yarn-project/foundation/src/timer/date.test.ts index 55aeac96d550..b3746dc21ebc 100644 --- a/yarn-project/foundation/src/timer/date.test.ts +++ b/yarn-project/foundation/src/timer/date.test.ts @@ -3,31 +3,34 @@ import { TestDateProvider } from './date.js'; describe('TestDateProvider', () => { let dateProvider: TestDateProvider; + beforeEach(() => { dateProvider = new TestDateProvider(); }); - it('should return the current datetime', () => { - const currentTime = Date.now(); - const result = dateProvider.now(); - expect(result).toBeGreaterThanOrEqual(currentTime); - expect(result).toBeLessThan(currentTime + 100); - }); + describe('now', () => { + it('should return the current datetime', () => { + const currentTime = Date.now(); + const result = dateProvider.now(); + expect(result).toBeGreaterThanOrEqual(currentTime); + expect(result).toBeLessThan(currentTime + 100); + }); - it('should return the overridden datetime', () => { - const overriddenTime = Date.now() + 1000; - dateProvider.setTime(overriddenTime); - const result = dateProvider.now(); - expect(result).toBeGreaterThanOrEqual(overriddenTime); - expect(result).toBeLessThan(overriddenTime + 100); - }); + it('should return the overridden datetime', () => { + const overriddenTime = Date.now() + 1000; + 
dateProvider.setTime(overriddenTime); + const result = dateProvider.now(); + expect(result).toBeGreaterThanOrEqual(overriddenTime); + expect(result).toBeLessThan(overriddenTime + 100); + }); - it('should keep ticking after overriding', async () => { - const overriddenTime = Date.now() + 1000; - dateProvider.setTime(overriddenTime); - await sleep(510); - const result = dateProvider.now(); - expect(result).toBeGreaterThanOrEqual(overriddenTime + 500); - expect(result).toBeLessThan(overriddenTime + 600); + it('should keep ticking after overriding', async () => { + const overriddenTime = Date.now() + 1000; + dateProvider.setTime(overriddenTime); + await sleep(510); + const result = dateProvider.now(); + expect(result).toBeGreaterThanOrEqual(overriddenTime + 500); + expect(result).toBeLessThan(overriddenTime + 600); + }); }); }); diff --git a/yarn-project/p2p/src/client/factory.ts b/yarn-project/p2p/src/client/factory.ts index 14c10d1ee77e..a1646d91549f 100644 --- a/yarn-project/p2p/src/client/factory.ts +++ b/yarn-project/p2p/src/client/factory.ts @@ -18,6 +18,7 @@ import type { MemPools } from '../mem_pools/interface.js'; import { AztecKVTxPool, type TxPool } from '../mem_pools/tx_pool/index.js'; import { DummyP2PService } from '../services/dummy_service.js'; import { LibP2PService } from '../services/index.js'; +import { createFileStoreTxSources } from '../services/tx_collection/file_store_tx_source.js'; import { TxCollection } from '../services/tx_collection/tx_collection.js'; import { type TxSource, createNodeRpcTxSources } from '../services/tx_collection/tx_source.js'; import { TxFileStore } from '../services/tx_file_store/tx_file_store.js'; @@ -105,12 +106,23 @@ export async function createP2PClient( }); } + const fileStoreSources = await createFileStoreTxSources( + config.txCollectionFileStoreUrls, + logger.createChild('file-store-tx-source'), + ); + if (fileStoreSources.length > 0) { + logger.info(`Using ${fileStoreSources.length} file store sources for tx 
collection.`, { + stores: fileStoreSources.map(s => s.getInfo()), + }); + } + const txCollection = new TxCollection( p2pService.getBatchTxRequesterService(), nodeSources, l1Constants, mempools.txPool, config, + fileStoreSources, dateProvider, telemetry, logger.createChild('tx-collection'), diff --git a/yarn-project/p2p/src/client/interface.ts b/yarn-project/p2p/src/client/interface.ts index c0638acb650e..6489c62773f5 100644 --- a/yarn-project/p2p/src/client/interface.ts +++ b/yarn-project/p2p/src/client/interface.ts @@ -14,6 +14,7 @@ import type { ReqRespSubProtocolValidators, } from '../services/reqresp/interface.js'; import type { + DuplicateAttestationInfo, DuplicateProposalInfo, P2PBlockReceivedCallback, P2PCheckpointReceivedCallback, @@ -90,6 +91,15 @@ export type P2P = P2PApiFull & */ registerDuplicateProposalCallback(callback: (info: DuplicateProposalInfo) => void): void; + /** + * Registers a callback invoked when a duplicate attestation is detected (equivocation). + * A validator signing attestations for different proposals at the same slot. + * The callback is triggered on the first duplicate (when count goes from 1 to 2). + * + * @param callback - Function called with info about the duplicate attestation + */ + registerDuplicateAttestationCallback(callback: (info: DuplicateAttestationInfo) => void): void; + /** * Request a list of transactions from another peer by their tx hashes. * @param txHashes - Hashes of the txs to query. 
diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index 0552af7ab4cd..3afbcf88dfe4 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -40,6 +40,7 @@ import { } from '../services/reqresp/interface.js'; import { chunkTxHashesRequest } from '../services/reqresp/protocols/tx.js'; import type { + DuplicateAttestationInfo, DuplicateProposalInfo, P2PBlockReceivedCallback, P2PCheckpointReceivedCallback, @@ -339,9 +340,17 @@ export class P2PClient public async broadcastProposal(proposal: BlockProposal): Promise { this.log.verbose(`Broadcasting proposal for slot ${proposal.slotNumber} to peers`); // Store our own proposal so we can respond to req/resp requests for it - const { totalForPosition } = await this.attestationPool.tryAddBlockProposal(proposal); - if (totalForPosition > 1) { - throw new Error(`Attempted to broadcast a duplicate block proposal for slot ${proposal.slotNumber}`); + const { count } = await this.attestationPool.tryAddBlockProposal(proposal); + if (count > 1) { + if (this.config.broadcastEquivocatedProposals) { + this.log.warn(`Broadcasting equivocated block proposal for slot ${proposal.slotNumber}`, { + slot: proposal.slotNumber, + archive: proposal.archive.toString(), + count, + }); + } else { + throw new Error(`Attempted to broadcast a duplicate block proposal for slot ${proposal.slotNumber}`); + } } return this.p2pService.propagate(proposal); } @@ -393,6 +402,10 @@ export class P2PClient this.p2pService.registerDuplicateProposalCallback(callback); } + public registerDuplicateAttestationCallback(callback: (info: DuplicateAttestationInfo) => void): void { + this.p2pService.registerDuplicateAttestationCallback(callback); + } + /** * Uses the batched Request Response protocol to request a set of transactions from the network. 
*/ diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts index b1c8c22a3d83..aa036728e254 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts @@ -80,6 +80,7 @@ describe('p2p client integration message propagation', () => { }); attestationPool.isEmpty.mockResolvedValue(true); + attestationPool.tryAddBlockProposal.mockResolvedValue({ added: true, alreadyExists: false, count: 1 }); worldState.status.mockResolvedValue({ state: mock(), diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index e9b44881d873..ad66cdb570cf 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -184,6 +184,9 @@ export interface P2PConfig /** Whether to run in fisherman mode: validates all proposals and attestations but does not broadcast attestations or participate in consensus */ fishermanMode: boolean; + + /** Broadcast block proposals even when a conflicting proposal for the same slot already exists in the pool (for testing purposes only). 
*/ + broadcastEquivocatedProposals?: boolean; } export const DEFAULT_P2P_PORT = 40400; @@ -448,6 +451,11 @@ export const p2pConfigMappings: ConfigMappingsType = { 'Whether to run in fisherman mode: validates all proposals and attestations but does not broadcast attestations or participate in consensus.', ...booleanConfigHelper(false), }, + broadcastEquivocatedProposals: { + description: + 'Broadcast block proposals even when a conflicting proposal for the same slot already exists in the pool (for testing purposes only).', + ...booleanConfigHelper(false), + }, ...sharedSequencerConfigMappings, ...p2pReqRespConfigMappings, ...batchTxRequesterConfigMappings, diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts index 066e7dd6b9ff..cc7850c1e902 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.test.ts @@ -5,7 +5,7 @@ import type { AztecAsyncKVStore } from '@aztec/kv-store'; import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; import { makeBlockHeader, makeBlockProposal } from '@aztec/stdlib/testing'; -import { ATTESTATION_CAP_BUFFER, AttestationPool } from './attestation_pool.js'; +import { AttestationPool, MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER } from './attestation_pool.js'; import { describeAttestationPool } from './attestation_pool_test_suite.js'; import { mockCheckpointAttestation } from './mocks.js'; @@ -38,38 +38,144 @@ describe('Attestation Pool', () => { }); }); - describe('Checkpoint Attestation cap exceeded', () => { - it('should cap unique checkpoint attestations per (slot, proposalId) at committeeSize + buffer', async () => { + describe('Checkpoint Attestation behavior', () => { + it('should add attestations from multiple signers for the same proposal', async () => { const slotNumber = 100; const archive = Fr.random(); - // Committee size and 
buffer (buffer is enforced inside the pool; here we pass only committeeSize) - const committeeSize = 5; - const buffer = ATTESTATION_CAP_BUFFER; - const limit = committeeSize + buffer; - - // Create 'limit' distinct checkpoint attestations for the same (slot, proposalId) - const signers = Array.from({ length: limit }, () => Secp256k1Signer.random()); + // Create distinct checkpoint attestations for the same (slot, proposalId) from different signers + const numSigners = 10; + const signers = Array.from({ length: numSigners }, () => Secp256k1Signer.random()); const attestations = signers.map(s => mockCheckpointAttestation(s, slotNumber, archive)); // Add each attestation using tryAddCheckpointAttestation + // count is the number of attestations by this signer for this slot for (let i = 0; i < attestations.length; i++) { - const result = await attestationPool.tryAddCheckpointAttestation(attestations[i], committeeSize); + const result = await attestationPool.tryAddCheckpointAttestation(attestations[i]); expect(result.added).toBe(true); - expect(result.totalForPosition).toBe(i + 1); + expect(result.count).toBe(1); // First attestation from this signer for this slot } - // A new attestation from a new signer should not be added (cap reached) - const extra = mockCheckpointAttestation(Secp256k1Signer.random(), slotNumber, archive); - const extraResult = await attestationPool.tryAddCheckpointAttestation(extra, committeeSize); - expect(extraResult.added).toBe(false); - expect(extraResult.alreadyExists).toBe(false); - expect(extraResult.totalForPosition).toBe(limit); - // Re-adding an existing attestation should return alreadyExists - const existingResult = await attestationPool.tryAddCheckpointAttestation(attestations[0], committeeSize); + const existingResult = await attestationPool.tryAddCheckpointAttestation(attestations[0]); expect(existingResult.added).toBe(false); expect(existingResult.alreadyExists).toBe(true); + expect(existingResult.count).toBe(1); // This signer 
has 1 attestation for this slot + }); + }); + + describe('Duplicate attestation detection (equivocation)', () => { + it('should detect duplicate attestations from same signer for same slot but different proposals', async () => { + const slotNumber = 100; + const signer = Secp256k1Signer.random(); + + // First attestation - should succeed with count=1 + const archive1 = Fr.random(); + const attestation1 = mockCheckpointAttestation(signer, slotNumber, archive1); + const result1 = await attestationPool.tryAddCheckpointAttestation(attestation1); + expect(result1.added).toBe(true); + expect(result1.count).toBe(1); // Attestations from this signer + + // Second attestation from same signer for same slot but different proposal (equivocation!) + const archive2 = Fr.random(); + const attestation2 = mockCheckpointAttestation(signer, slotNumber, archive2); + const result2 = await attestationPool.tryAddCheckpointAttestation(attestation2); + expect(result2.added).toBe(true); + expect(result2.count).toBe(2); // This is the first duplicate - triggers slashing + + // Third attestation from same signer (if we want to track more) + const archive3 = Fr.random(); + const attestation3 = mockCheckpointAttestation(signer, slotNumber, archive3); + const result3 = await attestationPool.tryAddCheckpointAttestation(attestation3); + expect(result3.added).toBe(true); + expect(result3.count).toBe(3); // Attestations from this signer + }); + + it('should reject attestations when signer exceeds per-slot cap', async () => { + const slotNumber = 100; + const signer = Secp256k1Signer.random(); + + // Add attestations up to the per-signer-per-slot cap + for (let i = 0; i < MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER; i++) { + const archive = Fr.random(); + const attestation = mockCheckpointAttestation(signer, slotNumber, archive); + const result = await attestationPool.tryAddCheckpointAttestation(attestation); + expect(result.added).toBe(true); + expect(result.count).toBe(i + 1); // Attestations from this 
signer + } + + // One more attestation from the same signer should be rejected + const extraArchive = Fr.random(); + const extraAttestation = mockCheckpointAttestation(signer, slotNumber, extraArchive); + const extraResult = await attestationPool.tryAddCheckpointAttestation(extraAttestation); + expect(extraResult.added).toBe(false); + expect(extraResult.alreadyExists).toBe(false); + expect(extraResult.count).toBe(MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER); // Attestations from this signer + }); + + it('should not detect duplicates for attestations from different signers', async () => { + const slotNumber = 100; + const archive = Fr.random(); + + // First signer + const signer1 = Secp256k1Signer.random(); + const attestation1 = mockCheckpointAttestation(signer1, slotNumber, archive); + const result1 = await attestationPool.tryAddCheckpointAttestation(attestation1); + expect(result1.added).toBe(true); + expect(result1.count).toBe(1); // Attestations from this signer + + // Second signer for same slot and proposal - not a duplicate, just another attestation + const signer2 = Secp256k1Signer.random(); + const attestation2 = mockCheckpointAttestation(signer2, slotNumber, archive); + const result2 = await attestationPool.tryAddCheckpointAttestation(attestation2); + expect(result2.added).toBe(true); + expect(result2.count).toBe(1); // Different signer, so count is 1 + }); + + it('should not detect duplicates for attestations from same signer but different slots', async () => { + const signer = Secp256k1Signer.random(); + const archive = Fr.random(); + + // Attestation for slot 100 + const attestation1 = mockCheckpointAttestation(signer, 100, archive); + const result1 = await attestationPool.tryAddCheckpointAttestation(attestation1); + expect(result1.added).toBe(true); + expect(result1.count).toBe(1); // Attestations from this signer for slot 100 + + // Attestation for slot 101 - different slot, not a duplicate + const attestation2 = mockCheckpointAttestation(signer, 101, 
archive); + const result2 = await attestationPool.tryAddCheckpointAttestation(attestation2); + expect(result2.added).toBe(true); + expect(result2.count).toBe(1); // Different slot, so count is 1 + }); + + it('should clean up per-slot-signer index when deleting old data', async () => { + const signer = Secp256k1Signer.random(); + + // Add attestations for slot 100 (to be deleted) + const attestation1 = mockCheckpointAttestation(signer, 100, Fr.random()); + await attestationPool.tryAddCheckpointAttestation(attestation1); + const attestation2 = mockCheckpointAttestation(signer, 100, Fr.random()); + await attestationPool.tryAddCheckpointAttestation(attestation2); + + // Add attestation for slot 200 (to be kept) + const attestation3 = mockCheckpointAttestation(signer, 200, Fr.random()); + await attestationPool.tryAddCheckpointAttestation(attestation3); + + // Delete data older than slot 150 + await attestationPool.deleteOlderThan(SlotNumber(150)); + + // Now adding attestations for slot 100 should start fresh + const newAttestation = mockCheckpointAttestation(signer, 100, Fr.random()); + const result = await attestationPool.tryAddCheckpointAttestation(newAttestation); + expect(result.added).toBe(true); + expect(result.count).toBe(1); // Attestations from this signer for this slot (index was cleaned up) + + // Slot 200 should still have 1 attestation from this signer + const slotNumber200Attestation = mockCheckpointAttestation(signer, 200, Fr.random()); + const result200 = await attestationPool.tryAddCheckpointAttestation(slotNumber200Attestation); + expect(result200.added).toBe(true); + expect(result200.count).toBe(2); // Original + new from same signer }); }); }); diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts index f1389e5c1be3..7f4626a035c7 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts +++ 
b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts @@ -19,13 +19,17 @@ export type TryAddResult = { added: boolean; /** Whether the exact item already existed */ alreadyExists: boolean; - /** Total items for this position - used for duplicate detection */ - totalForPosition: number; + /** Count of items for the position. Meaning varies by method: + * - tryAddBlockProposal: proposals at (slot, indexWithinCheckpoint) + * - tryAddCheckpointProposal: proposals at slot + * - tryAddCheckpointAttestation: attestations by this signer for this slot */ + count: number; }; -export const MAX_PROPOSALS_PER_SLOT = 5; -export const MAX_PROPOSALS_PER_POSITION = 3; -export const ATTESTATION_CAP_BUFFER = 10; +export const MAX_CHECKPOINT_PROPOSALS_PER_SLOT = 5; +export const MAX_BLOCK_PROPOSALS_PER_POSITION = 3; +/** Maximum attestations a single signer can make per slot before being rejected. */ +export const MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER = 3; /** Public API interface for attestation pools. Used for typing mocks and test implementations. 
*/ export type AttestationPoolApi = Pick< @@ -69,6 +73,10 @@ export class AttestationPool { // Key: (slot << 10) | indexWithinCheckpoint, Value: archive string private blockProposalsForSlotAndIndex: AztecAsyncMultiMap; + // Checkpoint attestations indexed by (slot, signer) for tracking attestations per (slot, signer) for duplicate detection + // Key: `${Fr(slot).toString()}-${signerAddress}` string (padded for lexicographic ordering), Value: `proposalId` strings + private checkpointAttestationsPerSlotAndSigner: AztecAsyncMultiMap; + constructor( private store: AztecAsyncKVStore, telemetry: TelemetryClient = getTelemetryClient(), @@ -80,6 +88,7 @@ export class AttestationPool { // Initialize checkpoint attestations storage this.checkpointAttestations = store.openMap('checkpoint_attestations'); + this.checkpointAttestationsPerSlotAndSigner = store.openMultiMap('checkpoint_attestations_per_slot_and_signer'); // Initialize checkpoint proposal storage this.checkpointProposals = store.openMap('checkpoint_proposals'); @@ -133,6 +142,12 @@ export class AttestationPool { return { start: `${proposalKey}-`, end: `${proposalKey}-Z` }; } + /** Creates a key for the per-signer-per-slot attestation index. Uses padded slot for lexicographic ordering. */ + private getSlotSignerKey(slot: SlotNumber, signerAddress: string): string { + const slotStr = new Fr(slot).toString(); + return `${slotStr}-${signerAddress}`; + } + /** Number of bits reserved for indexWithinCheckpoint in position keys. */ private static readonly INDEX_BITS = 10; /** Maximum indexWithinCheckpoint value (2^10 - 1 = 1023). 
*/ @@ -166,21 +181,21 @@ export class AttestationPool { // Check if already exists const alreadyExists = await this.blockProposals.hasAsync(proposalId); if (alreadyExists) { - const totalForPosition = await this.getBlockProposalCountForPosition( + const count = await this.getBlockProposalCountForPosition( blockProposal.slotNumber, blockProposal.indexWithinCheckpoint, ); - return { added: false, alreadyExists: true, totalForPosition }; + return { added: false, alreadyExists: true, count }; } // Get current count for position and check cap, do not add if exceeded - const totalForPosition = await this.getBlockProposalCountForPosition( + const count = await this.getBlockProposalCountForPosition( blockProposal.slotNumber, blockProposal.indexWithinCheckpoint, ); - if (totalForPosition >= MAX_PROPOSALS_PER_POSITION) { - return { added: false, alreadyExists: false, totalForPosition }; + if (count >= MAX_BLOCK_PROPOSALS_PER_POSITION) { + return { added: false, alreadyExists: false, count }; } // Add the proposal @@ -195,7 +210,7 @@ export class AttestationPool { }, ); - return { added: true, alreadyExists: false, totalForPosition: totalForPosition + 1 }; + return { added: true, alreadyExists: false, count: count + 1 }; }); } @@ -261,14 +276,14 @@ export class AttestationPool { // Check if already exists const alreadyExists = await this.checkpointProposals.hasAsync(proposalId); if (alreadyExists) { - const totalForPosition = await this.checkpointProposalsForSlot.getValueCountAsync(proposal.slotNumber); - return { added: false, alreadyExists: true, totalForPosition }; + const count = await this.checkpointProposalsForSlot.getValueCountAsync(proposal.slotNumber); + return { added: false, alreadyExists: true, count }; } // Get current count for slot and check cap - const totalForPosition = await this.checkpointProposalsForSlot.getValueCountAsync(proposal.slotNumber); - if (totalForPosition >= MAX_PROPOSALS_PER_SLOT) { - return { added: false, alreadyExists: false, 
totalForPosition }; + const count = await this.checkpointProposalsForSlot.getValueCountAsync(proposal.slotNumber); + if (count >= MAX_CHECKPOINT_PROPOSALS_PER_SLOT) { + return { added: false, alreadyExists: false, count }; } // Add the proposal if cap not exceeded @@ -279,7 +294,7 @@ export class AttestationPool { slotNumber: proposal.slotNumber, }); - return { added: true, alreadyExists: false, totalForPosition: totalForPosition + 1 }; + return { added: true, alreadyExists: false, count: count + 1 }; }); } @@ -409,6 +424,14 @@ export class AttestationPool { numberOfAttestations++; } + // Clean up per-signer-per-slot index. Keys are formatted as `${Fr(slot).toString()}-${signerAddress}`. + // Since Fr pads to fixed-width hex, Fr(oldestSlot) is lexicographically greater than any key with + // a smaller slot (even with the signer suffix), so using it as the exclusive end bound is correct. + const slotSignerEndKey = new Fr(oldestSlot).toString(); + for await (const key of this.checkpointAttestationsPerSlotAndSigner.keysAsync({ end: slotSignerEndKey })) { + await this.checkpointAttestationsPerSlotAndSigner.delete(key); + } + // Delete checkpoint proposals for slots < oldestSlot, using checkpointProposalsForSlot as index for await (const slot of this.checkpointProposalsForSlot.keysAsync({ end: oldestSlot })) { const proposalIds = await toArray(this.checkpointProposalsForSlot.getValuesAsync(slot)); @@ -445,61 +468,81 @@ export class AttestationPool { * * This method performs validation and addition in a single call: * - Checks if the attestation already exists (returns alreadyExists: true if so) - * - Checks if the (slot, proposalId) has reached the attestation cap (returns added: false if so) + * - Checks if this signer has reached the per-signer attestation cap for this slot * - Adds the attestation if validation passes * * @param attestation - The checkpoint attestation to add - * @param committeeSize - Committee size for the attestation's slot - * @returns Result 
indicating whether the attestation was added and existence info + * @returns Result indicating whether the attestation was added, existence info, and count of + * attestations by this signer for this slot (for equivocation detection) */ - public async tryAddCheckpointAttestation( - attestation: CheckpointAttestation, - committeeSize: number, - ): Promise { + public async tryAddCheckpointAttestation(attestation: CheckpointAttestation): Promise { const slotNumber = attestation.payload.header.slotNumber; const proposalId = attestation.archive.toString(); const sender = attestation.getSender(); if (!sender) { - return { added: false, alreadyExists: false, totalForPosition: 0 }; + return { added: false, alreadyExists: false, count: 0 }; } + const signerAddress = sender.toString(); + return await this.store.transactionAsync(async () => { - const key = this.getAttestationKey(slotNumber, proposalId, sender.toString()); + const key = this.getAttestationKey(slotNumber, proposalId, signerAddress); const alreadyExists = await this.checkpointAttestations.hasAsync(key); + // Get count of attestations by this signer for this slot (for duplicate detection) + const signerAttestationCount = await this.getSignerAttestationCountForSlot(slotNumber, signerAddress); + if (alreadyExists) { - const total = await this.getAttestationCount(slotNumber, proposalId); - return { added: false, alreadyExists: true, totalForPosition: total }; + return { + added: false, + alreadyExists: true, + count: signerAttestationCount, + }; } - const limit = committeeSize + ATTESTATION_CAP_BUFFER; - const currentCount = await this.getAttestationCount(slotNumber, proposalId); - - if (currentCount >= limit) { - return { added: false, alreadyExists: false, totalForPosition: currentCount }; + // Check if this signer has exceeded the per-signer cap for this slot + if (signerAttestationCount >= MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER) { + this.log.debug(`Rejecting attestation: signer ${signerAddress} exceeded per-slot 
cap for slot ${slotNumber}`, { + slotNumber, + signerAddress, + proposalId, + signerAttestationCount, + }); + return { + added: false, + alreadyExists: false, + count: signerAttestationCount, + }; } + // Add the attestation await this.checkpointAttestations.set(key, attestation.toBuffer()); - this.log.debug(`Added checkpoint attestation for slot ${slotNumber} from ${sender.toString()}`, { + // Track this attestation in the per-signer-per-slot index for duplicate detection + const slotSignerKey = this.getSlotSignerKey(slotNumber, signerAddress); + await this.checkpointAttestationsPerSlotAndSigner.set(slotSignerKey, proposalId); + + this.log.debug(`Added checkpoint attestation for slot ${slotNumber} from ${signerAddress}`, { signature: attestation.signature.toString(), slotNumber, - address: sender.toString(), + address: signerAddress, proposalId, }); - return { added: true, alreadyExists: false, totalForPosition: currentCount + 1 }; + + // Return the new count + return { + added: true, + alreadyExists: false, + count: signerAttestationCount + 1, + }; }); } - /** Gets the count of attestations for a given (slot, proposalId). */ - private async getAttestationCount(slot: SlotNumber, proposalId: string): Promise { - const range = this.getAttestationKeyRangeForProposal(slot, proposalId); - let count = 0; - for await (const _ of this.checkpointAttestations.keysAsync(range)) { - count++; - } - return count; + /** Gets the count of attestations by a specific signer for a given slot. 
*/ + private async getSignerAttestationCountForSlot(slot: SlotNumber, signerAddress: string): Promise { + const slotSignerKey = this.getSlotSignerKey(slot, signerAddress); + return await this.checkpointAttestationsPerSlotAndSigner.getValueCountAsync(slotSignerKey); } } diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts index 31b6f38fe353..20a198da71a0 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts @@ -10,7 +10,11 @@ import { makeCheckpointProposal, } from '@aztec/stdlib/testing'; -import { type AttestationPool, MAX_PROPOSALS_PER_POSITION, MAX_PROPOSALS_PER_SLOT } from './attestation_pool.js'; +import { + type AttestationPool, + MAX_BLOCK_PROPOSALS_PER_POSITION, + MAX_CHECKPOINT_PROPOSALS_PER_SLOT, +} from './attestation_pool.js'; import { mockCheckpointAttestation } from './mocks.js'; const NUMBER_OF_SIGNERS_PER_TEST = 4; @@ -191,7 +195,7 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result.added).toBe(true); expect(result.alreadyExists).toBe(false); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); const retrievedProposal = await ap.getBlockProposal(proposalId); @@ -258,7 +262,7 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result.added).toBe(true); expect(result.alreadyExists).toBe(false); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); const retrievedProposal = await ap.getCheckpointProposal(proposalId); @@ -324,12 +328,12 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo it('should return added=false when exceeding capacity', async () => { const slotNumber = 420; - // Add MAX_PROPOSALS_PER_SLOT proposals - for (let i = 0; i < 
MAX_PROPOSALS_PER_SLOT; i++) { + // Add MAX_CHECKPOINT_PROPOSALS_PER_SLOT proposals + for (let i = 0; i < MAX_CHECKPOINT_PROPOSALS_PER_SLOT; i++) { const proposal = await mockCheckpointProposalForPool(signers[i % NUMBER_OF_SIGNERS_PER_TEST], slotNumber); const result = await ap.tryAddCheckpointProposal(proposal); expect(result.added).toBe(true); - expect(result.totalForPosition).toBe(i + 1); + expect(result.count).toBe(i + 1); } // The next proposal should not be added @@ -337,7 +341,7 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result = await ap.tryAddCheckpointProposal(extraProposal); expect(result.added).toBe(false); expect(result.alreadyExists).toBe(false); - expect(result.totalForPosition).toBe(MAX_PROPOSALS_PER_SLOT); + expect(result.count).toBe(MAX_CHECKPOINT_PROPOSALS_PER_SLOT); }); }); @@ -358,13 +362,13 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo }; describe('tryAddBlockProposal duplicate detection', () => { - it('should return totalForPosition=1 when pool is empty', async () => { + it('should return count=1 when pool is empty', async () => { const proposal = await mockBlockProposalWithIndex(signers[0], 100, 0); const result = await ap.tryAddBlockProposal(proposal); expect(result.added).toBe(true); expect(result.alreadyExists).toBe(false); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); }); it('should return alreadyExists when same proposal exists', async () => { @@ -375,17 +379,17 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result.added).toBe(false); expect(result.alreadyExists).toBe(true); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); }); - it('should detect duplicate via totalForPosition when different proposal exists at same position', async () => { + it('should detect duplicate via count when different proposal exists at same position', async () => { const 
slotNumber = 100; const indexWithinCheckpoint = 2; // Add first proposal const proposal1 = await mockBlockProposalWithIndex(signers[0], slotNumber, indexWithinCheckpoint); const result1 = await ap.tryAddBlockProposal(proposal1); - expect(result1.totalForPosition).toBe(1); + expect(result1.count).toBe(1); // Add a different proposal at same position - this is a duplicate (equivocation) const proposal2 = await mockBlockProposalWithIndex(signers[1], slotNumber, indexWithinCheckpoint); @@ -393,8 +397,8 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result2.added).toBe(true); expect(result2.alreadyExists).toBe(false); - // totalForPosition >= 2 indicates duplicate detection - expect(result2.totalForPosition).toBe(2); + // count >= 2 indicates duplicate detection + expect(result2.count).toBe(2); }); it('should not detect duplicate for different positions in same slot', async () => { @@ -409,8 +413,8 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result = await ap.tryAddBlockProposal(proposal2); expect(result.added).toBe(true); - // totalForPosition = 1 means no duplicate for this position - expect(result.totalForPosition).toBe(1); + // count = 1 means no duplicate for this position + expect(result.count).toBe(1); }); it('should not detect duplicate for same position in different slots', async () => { @@ -425,37 +429,37 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result = await ap.tryAddBlockProposal(proposal2); expect(result.added).toBe(true); - // totalForPosition = 1 means no duplicate for this position - expect(result.totalForPosition).toBe(1); + // count = 1 means no duplicate for this position + expect(result.count).toBe(1); }); - it('should track multiple duplicates correctly via totalForPosition', async () => { + it('should track multiple duplicates correctly via count', async () => { const slotNumber = 100; const 
indexWithinCheckpoint = 0; // Add multiple proposals for same position const proposal1 = await mockBlockProposalWithIndex(signers[0], slotNumber, indexWithinCheckpoint); const result1 = await ap.tryAddBlockProposal(proposal1); - expect(result1.totalForPosition).toBe(1); + expect(result1.count).toBe(1); const proposal2 = await mockBlockProposalWithIndex(signers[1], slotNumber, indexWithinCheckpoint); const result2 = await ap.tryAddBlockProposal(proposal2); - expect(result2.totalForPosition).toBe(2); + expect(result2.count).toBe(2); // Add a third proposal for same position const proposal3 = await mockBlockProposalWithIndex(signers[2], slotNumber, indexWithinCheckpoint); const result3 = await ap.tryAddBlockProposal(proposal3); expect(result3.added).toBe(true); - expect(result3.totalForPosition).toBe(3); + expect(result3.count).toBe(3); }); it('should return added=false when exceeding capacity', async () => { const slotNumber = 100; const indexWithinCheckpoint = 0; - // Add MAX_PROPOSALS_PER_POSITION proposals - for (let i = 0; i < MAX_PROPOSALS_PER_POSITION; i++) { + // Add MAX_BLOCK_PROPOSALS_PER_POSITION proposals + for (let i = 0; i < MAX_BLOCK_PROPOSALS_PER_POSITION; i++) { const proposal = await mockBlockProposalWithIndex( signers[i % NUMBER_OF_SIGNERS_PER_TEST], slotNumber, @@ -463,7 +467,7 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo ); const result = await ap.tryAddBlockProposal(proposal); expect(result.added).toBe(true); - expect(result.totalForPosition).toBe(i + 1); + expect(result.count).toBe(i + 1); } // The next proposal should not be added @@ -471,7 +475,7 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result = await ap.tryAddBlockProposal(extraProposal); expect(result.added).toBe(false); expect(result.alreadyExists).toBe(false); - expect(result.totalForPosition).toBe(MAX_PROPOSALS_PER_POSITION); + expect(result.count).toBe(MAX_BLOCK_PROPOSALS_PER_POSITION); }); it('should 
clean up block position index when deleting old data', async () => { @@ -482,18 +486,18 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const proposal1 = await mockBlockProposalWithIndex(signers[0], slotNumber, indexWithinCheckpoint); await ap.tryAddBlockProposal(proposal1); - // Verify it's tracked (adding another should show totalForPosition = 2) + // Verify it's tracked (adding another should show count = 2) const proposal2 = await mockBlockProposalWithIndex(signers[1], slotNumber, indexWithinCheckpoint); let result = await ap.tryAddBlockProposal(proposal2); - expect(result.totalForPosition).toBe(2); + expect(result.count).toBe(2); // Delete old data await ap.deleteOlderThan(SlotNumber(slotNumber + 1)); - // Verify position index is cleaned up (totalForPosition should be 1 now) + // Verify position index is cleaned up (count should be 1 now) const proposal3 = await mockBlockProposalWithIndex(signers[2], slotNumber, indexWithinCheckpoint); result = await ap.tryAddBlockProposal(proposal3); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); }); it('should correctly delete block proposals at slot boundary', async () => { @@ -514,16 +518,16 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo // Slot 99 proposals should have their index cleaned up const newProposal99 = await mockBlockProposalWithIndex(signers[0], 99, 0); const result99 = await ap.tryAddBlockProposal(newProposal99); - expect(result99.totalForPosition).toBe(1); // Index was cleaned up + expect(result99.count).toBe(1); // Index was cleaned up // Slot 100 and 101 should still be tracked const newProposal100 = await mockBlockProposalWithIndex(signers[1], 100, 0); const result100 = await ap.tryAddBlockProposal(newProposal100); - expect(result100.totalForPosition).toBe(2); // Still has the original + expect(result100.count).toBe(2); // Still has the original const newProposal101 = await 
mockBlockProposalWithIndex(signers[2], 101, 0); const result101 = await ap.tryAddBlockProposal(newProposal101); - expect(result101.totalForPosition).toBe(2); // Still has the original + expect(result101.count).toBe(2); // Still has the original }); it('should delete all indices for a given slot', async () => { @@ -544,15 +548,15 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo // All indices should be cleaned up const newProposal0 = await mockBlockProposalWithIndex(signers[0], slotNumber, 0); const result0 = await ap.tryAddBlockProposal(newProposal0); - expect(result0.totalForPosition).toBe(1); + expect(result0.count).toBe(1); const newProposal1 = await mockBlockProposalWithIndex(signers[1], slotNumber, 1); const result1 = await ap.tryAddBlockProposal(newProposal1); - expect(result1.totalForPosition).toBe(1); + expect(result1.count).toBe(1); const newProposal2 = await mockBlockProposalWithIndex(signers[2], slotNumber, 2); const result2 = await ap.tryAddBlockProposal(newProposal2); - expect(result2.totalForPosition).toBe(1); + expect(result2.count).toBe(1); }); it('should delete block proposals from storage when deleting old data', async () => { @@ -598,13 +602,13 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo return proposal.toCore(); }; - it('should return totalForPosition=1 when pool is empty', async () => { + it('should return count=1 when pool is empty', async () => { const proposal = await mockCheckpointProposalCoreForPool(signers[0], 100); const result = await ap.tryAddCheckpointProposal(proposal); expect(result.added).toBe(true); expect(result.alreadyExists).toBe(false); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); }); it('should return alreadyExists when same proposal exists', async () => { @@ -615,16 +619,16 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result.added).toBe(false); 
expect(result.alreadyExists).toBe(true); - expect(result.totalForPosition).toBe(1); + expect(result.count).toBe(1); }); - it('should detect duplicate via totalForPosition when different proposal exists for same slot', async () => { + it('should detect duplicate via count when different proposal exists for same slot', async () => { const slotNumber = 100; // Add first proposal const proposal1 = await mockCheckpointProposalCoreForPool(signers[0], slotNumber); const result1 = await ap.tryAddCheckpointProposal(proposal1); - expect(result1.totalForPosition).toBe(1); + expect(result1.count).toBe(1); // Add a different proposal for same slot - this is a duplicate (equivocation) const proposal2 = await mockCheckpointProposalCoreForPool(signers[1], slotNumber); @@ -632,8 +636,8 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result2.added).toBe(true); expect(result2.alreadyExists).toBe(false); - // totalForPosition >= 2 indicates duplicate detection - expect(result2.totalForPosition).toBe(2); + // count >= 2 indicates duplicate detection + expect(result2.count).toBe(2); }); it('should not detect duplicate for different slots', async () => { @@ -646,28 +650,28 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result = await ap.tryAddCheckpointProposal(proposal2); expect(result.added).toBe(true); - // totalForPosition = 1 means no duplicate for this slot - expect(result.totalForPosition).toBe(1); + // count = 1 means no duplicate for this slot + expect(result.count).toBe(1); }); - it('should track multiple duplicates correctly via totalForPosition', async () => { + it('should track multiple duplicates correctly via count', async () => { const slotNumber = 100; // Add multiple proposals for same slot const proposal1 = await mockCheckpointProposalCoreForPool(signers[0], slotNumber); const result1 = await ap.tryAddCheckpointProposal(proposal1); - expect(result1.totalForPosition).toBe(1); + 
expect(result1.count).toBe(1); const proposal2 = await mockCheckpointProposalCoreForPool(signers[1], slotNumber); const result2 = await ap.tryAddCheckpointProposal(proposal2); - expect(result2.totalForPosition).toBe(2); + expect(result2.count).toBe(2); // Add a third proposal for same slot const proposal3 = await mockCheckpointProposalCoreForPool(signers[2], slotNumber); const result3 = await ap.tryAddCheckpointProposal(proposal3); expect(result3.added).toBe(true); - expect(result3.totalForPosition).toBe(3); + expect(result3.count).toBe(3); }); it('should not count attestations as proposals for duplicate detection', async () => { @@ -684,8 +688,8 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo expect(result.added).toBe(true); expect(result.alreadyExists).toBe(false); - // totalForPosition should be 1, NOT 2 - attestations should not count as proposals - expect(result.totalForPosition).toBe(1); + // count should be 1, NOT 2 - attestations should not count as proposals + expect(result.count).toBe(1); }); it('should not count attestations for different proposals as duplicates', async () => { @@ -703,14 +707,14 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo const result1 = await ap.tryAddCheckpointProposal(proposal1); expect(result1.added).toBe(true); - expect(result1.totalForPosition).toBe(1); + expect(result1.count).toBe(1); // Add the second checkpoint proposal - this IS a duplicate (different archive, same slot) const proposal2 = await mockCheckpointProposalCoreForPool(signers[3], slotNumber, archive2); const result2 = await ap.tryAddCheckpointProposal(proposal2); expect(result2.added).toBe(true); - expect(result2.totalForPosition).toBe(2); + expect(result2.count).toBe(2); }); }); }); diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/index.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/index.ts index a9d9b3bf773f..25588c9616b1 100644 --- 
a/yarn-project/p2p/src/mem_pools/attestation_pool/index.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/index.ts @@ -3,7 +3,7 @@ export { type AttestationPoolApi, type TryAddResult, createTestAttestationPool, - MAX_PROPOSALS_PER_SLOT, - MAX_PROPOSALS_PER_POSITION, - ATTESTATION_CAP_BUFFER, + MAX_CHECKPOINT_PROPOSALS_PER_SLOT, + MAX_BLOCK_PROPOSALS_PER_POSITION, + MAX_ATTESTATIONS_PER_SLOT_AND_SIGNER, } from './attestation_pool.js'; diff --git a/yarn-project/p2p/src/services/dummy_service.ts b/yarn-project/p2p/src/services/dummy_service.ts index 0a6f382ac3b8..44e6c4367512 100644 --- a/yarn-project/p2p/src/services/dummy_service.ts +++ b/yarn-project/p2p/src/services/dummy_service.ts @@ -26,6 +26,7 @@ import { ReqRespStatus } from './reqresp/status.js'; import { type P2PBlockReceivedCallback, type P2PCheckpointReceivedCallback, + type P2PDuplicateAttestationCallback, type P2PDuplicateProposalCallback, type P2PService, type PeerDiscoveryService, @@ -88,6 +89,11 @@ export class DummyP2PService implements P2PService { */ public registerDuplicateProposalCallback(_callback: P2PDuplicateProposalCallback): void {} + /** + * Register a callback for when a duplicate attestation is detected + */ + public registerDuplicateAttestationCallback(_callback: P2PDuplicateAttestationCallback): void {} + /** * Sends a request to a peer. * @param _protocol - The protocol to send the request on. 
diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts index d5933de628af..34603f63d97e 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts @@ -28,8 +28,8 @@ import { type MockProxy, mock } from 'jest-mock-extended'; import { type P2PConfig, p2pConfigMappings } from '../../config.js'; import { AttestationPool, - MAX_PROPOSALS_PER_POSITION, - MAX_PROPOSALS_PER_SLOT, + MAX_BLOCK_PROPOSALS_PER_POSITION, + MAX_CHECKPOINT_PROPOSALS_PER_SLOT, } from '../../mem_pools/attestation_pool/attestation_pool.js'; import type { MemPools } from '../../mem_pools/interface.js'; import type { TxPool } from '../../mem_pools/tx_pool/tx_pool.js'; @@ -510,8 +510,8 @@ describe('LibP2PService', () => { const header = makeBlockHeader(1, { slotNumber: currentSlot }); const indexWithinCheckpoint = IndexWithinCheckpoint(0); - // Add MAX_PROPOSALS_PER_POSITION proposals - for (let i = 0; i < MAX_PROPOSALS_PER_POSITION; i++) { + // Add MAX_BLOCK_PROPOSALS_PER_POSITION proposals + for (let i = 0; i < MAX_BLOCK_PROPOSALS_PER_POSITION; i++) { const individualSigner = Secp256k1Signer.random(); mockEpochCache.getProposerAttesterAddressInSlot.mockResolvedValue(individualSigner.address); const proposal = await makeBlockProposal({ @@ -745,8 +745,8 @@ describe('LibP2PService', () => { const checkpointHeader = makeCheckpointHeader(1, { slotNumber: currentSlot }); const blockHeader = makeBlockHeader(1, { slotNumber: currentSlot }); - // Fill checkpoint slot to MAX_PROPOSALS_PER_SLOT - for (let i = 0; i < MAX_PROPOSALS_PER_SLOT; i++) { + // Fill checkpoint slot to MAX_CHECKPOINT_PROPOSALS_PER_SLOT + for (let i = 0; i < MAX_CHECKPOINT_PROPOSALS_PER_SLOT; i++) { const individualSigner = Secp256k1Signer.random(); mockEpochCache.getProposerAttesterAddressInSlot.mockResolvedValue(individualSigner.address); const proposal = await 
makeCheckpointProposal({ diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 255b5206121e..66279ddb0545 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -114,6 +114,7 @@ import { ReqResp } from '../reqresp/reqresp.js'; import type { P2PBlockReceivedCallback, P2PCheckpointReceivedCallback, + P2PDuplicateAttestationCallback, P2PService, PeerDiscoveryService, } from '../service.js'; @@ -156,6 +157,9 @@ export class LibP2PService extends type: 'checkpoint' | 'block'; }) => void; + /** Callback invoked when a duplicate attestation is detected (triggers slashing). */ + private duplicateAttestationCallback?: P2PDuplicateAttestationCallback; + /** * Callback for when a block is received from a peer. * @param block - The block received from the peer. @@ -673,6 +677,15 @@ export class LibP2PService extends this.duplicateProposalCallback = callback; } + /** + * Registers a callback to be invoked when a duplicate attestation is detected. + * A validator signing attestations for different proposals at the same slot. + * This callback is triggered on the first duplicate (when count goes from 1 to 2). + */ + public registerDuplicateAttestationCallback(callback: P2PDuplicateAttestationCallback): void { + this.duplicateAttestationCallback = callback; + } + /** * Subscribes to a topic. * @param topic - The topic to subscribe to. @@ -979,40 +992,53 @@ export class LibP2PService extends return { result: TopicValidatorResult.Ignore, obj: attestation }; } - // Get committee size for the attestation's slot - const slot = attestation.payload.header.slotNumber; - const { committee } = await this.epochCache.getCommittee(slot); - const committeeSize = committee?.length ?? 
0; - // Try to add the attestation: this handles existence check, cap check, and adding in one call - const { added, alreadyExists } = await this.mempools.attestationPool.tryAddCheckpointAttestation( - attestation, - committeeSize, - ); + // count is the number of attestations by this signer for this slot (for duplicate detection) + const slot = attestation.payload.header.slotNumber; + const { added, alreadyExists, count } = + await this.mempools.attestationPool.tryAddCheckpointAttestation(attestation); this.logger.trace(`Validate propagated checkpoint attestation`, { added, alreadyExists, + count, [Attributes.SLOT_NUMBER]: slot.toString(), [Attributes.P2P_ID]: peerId.toString(), }); - // Duplicate attestation received, no need to re-broadcast + // Exact same attestation received, no need to re-broadcast if (alreadyExists) { return { result: TopicValidatorResult.Ignore, obj: attestation }; } - // Could not add (cap reached), no need to re-broadcast + // Could not add (cap reached for signer), no need to re-broadcast if (!added) { - this.logger.warn(`Dropping checkpoint attestation due to per-(slot, proposalId) attestation cap`, { + this.logger.warn(`Dropping checkpoint attestation due to cap`, { slot: slot.toString(), archive: attestation.archive.toString(), source: peerId.toString(), + attester: attestation.getSender()?.toString(), + count, }); return { result: TopicValidatorResult.Ignore, obj: attestation }; } - // Attestation was added successfully + // Check if this is a duplicate attestation (signer attested to a different proposal at the same slot) + // count is the number of attestations by this signer for this slot + if (count === 2) { + const attester = attestation.getSender(); + if (attester) { + this.logger.warn(`Detected duplicate attestation (equivocation) at slot ${slot}`, { + slot: slot.toString(), + archive: attestation.archive.toString(), + source: peerId.toString(), + attester: attester.toString(), + }); + this.duplicateAttestationCallback?.({ 
slot, attester }); + } + } + + // Attestation was added successfully - accept it so other nodes can also detect the equivocation return { result: TopicValidatorResult.Accept, obj: attestation }; } @@ -1058,8 +1084,8 @@ export class LibP2PService extends } // Try to add the proposal: this handles existence check, cap check, and adding in one call - const { added, alreadyExists, totalForPosition } = await this.mempools.attestationPool.tryAddBlockProposal(block); - const isEquivocated = totalForPosition !== undefined && totalForPosition > 1; + const { added, alreadyExists, count } = await this.mempools.attestationPool.tryAddBlockProposal(block); + const isEquivocated = count !== undefined && count > 1; // Duplicate proposal received, no need to re-broadcast if (alreadyExists) { @@ -1078,7 +1104,7 @@ export class LibP2PService extends this.logger.warn(`Penalizing peer for block proposal exceeding per-position cap`, { ...block.toBlockInfo(), indexWithinCheckpoint: block.indexWithinCheckpoint, - totalForPosition, + count, proposer: block.getSender()?.toString(), source: peerId.toString(), }); @@ -1095,7 +1121,7 @@ export class LibP2PService extends proposer: proposer?.toString(), }); // Invoke the duplicate callback on the first duplicate spotted only - if (proposer && totalForPosition === 2) { + if (proposer && count === 2) { this.duplicateProposalCallback?.({ slot: block.slotNumber, proposer, type: 'block' }); } return { result: TopicValidatorResult.Accept, obj: block, metadata: { isEquivocated } }; @@ -1212,8 +1238,8 @@ export class LibP2PService extends // Try to add the checkpoint proposal core: this handles existence check, cap check, and adding in one call const checkpointCore = checkpoint.toCore(); const tryAddResult = await this.mempools.attestationPool.tryAddCheckpointProposal(checkpointCore); - const { added, alreadyExists, totalForPosition } = tryAddResult; - const isEquivocated = totalForPosition !== undefined && totalForPosition > 1; + const { added, 
alreadyExists, count } = tryAddResult; + const isEquivocated = count !== undefined && count > 1; // Duplicate proposal received, do not re-broadcast if (alreadyExists) { @@ -1234,7 +1260,7 @@ export class LibP2PService extends this.peerManager.penalizePeer(peerId, PeerErrorSeverity.HighToleranceError); this.logger.warn(`Penalizing peer for checkpoint proposal exceeding per-slot cap`, { ...checkpoint.toCheckpointInfo(), - totalForPosition, + count, source: peerId.toString(), }); return { result: TopicValidatorResult.Reject, obj: checkpoint, metadata: { isEquivocated, processBlock } }; @@ -1250,7 +1276,7 @@ export class LibP2PService extends proposer: proposer?.toString(), }); // Invoke the duplicate callback on the first duplicate spotted only - if (proposer && totalForPosition === 2) { + if (proposer && count === 2) { this.duplicateProposalCallback?.({ slot: checkpoint.slotNumber, proposer, type: 'checkpoint' }); } return { diff --git a/yarn-project/p2p/src/services/service.ts b/yarn-project/p2p/src/services/service.ts index 14ebff2227ee..b7b3b6fa42d0 100644 --- a/yarn-project/p2p/src/services/service.ts +++ b/yarn-project/p2p/src/services/service.ts @@ -57,6 +57,19 @@ export type DuplicateProposalInfo = { */ export type P2PDuplicateProposalCallback = (info: DuplicateProposalInfo) => void; +/** Minimal info passed to the duplicate attestation callback. */ +export type DuplicateAttestationInfo = { + slot: SlotNumber; + attester: EthAddress; +}; + +/** + * Callback for when a duplicate attestation is detected (equivocation). + * A validator signing attestations for different proposals at the same slot. + * Invoked on the first duplicate (when count goes from 1 to 2). + */ +export type P2PDuplicateAttestationCallback = (info: DuplicateAttestationInfo) => void; + /** * The interface for a P2P service implementation. 
*/ @@ -106,6 +119,13 @@ export interface P2PService { */ registerDuplicateProposalCallback(callback: P2PDuplicateProposalCallback): void; + /** + * Registers a callback invoked when a duplicate attestation is detected (equivocation). + * A validator signing attestations for different proposals at the same slot. + * The callback is triggered on the first duplicate (when count goes from 1 to 2). + */ + registerDuplicateAttestationCallback(callback: P2PDuplicateAttestationCallback): void; + getEnr(): ENR | undefined; getPeers(includePending?: boolean): PeerInfo[]; diff --git a/yarn-project/p2p/src/services/tx_collection/config.ts b/yarn-project/p2p/src/services/tx_collection/config.ts index 8811ef37ffda..2c3d821bb440 100644 --- a/yarn-project/p2p/src/services/tx_collection/config.ts +++ b/yarn-project/p2p/src/services/tx_collection/config.ts @@ -31,6 +31,12 @@ export type TxCollectionConfig = { txCollectionNodeRpcMaxBatchSize: number; /** Which collector implementation to use for missing txs collection */ txCollectionMissingTxsCollectorType: MissingTxsCollectorType; + /** A comma-separated list of file store URLs (s3://, gs://, file://, http://) for tx collection */ + txCollectionFileStoreUrls: string[]; + /** Delay in ms before file store collection starts after slow collection is triggered */ + txCollectionFileStoreSlowDelayMs: number; + /** Delay in ms before file store collection starts after fast collection is triggered */ + txCollectionFileStoreFastDelayMs: number; }; export const txCollectionConfigMappings: ConfigMappingsType = { @@ -95,4 +101,24 @@ export const txCollectionConfigMappings: ConfigMappingsType description: 'Which collector implementation to use for missing txs collection (new or old)', ...enumConfigHelper(['new', 'old'] as const, 'new'), }, + txCollectionFileStoreUrls: { + env: 'TX_COLLECTION_FILE_STORE_URLS', + description: 'A comma-separated list of file store URLs (s3://, gs://, file://, http://) for tx collection', + parseEnv: (val: string) 
=> + val + .split(',') + .map(url => url.trim()) + .filter(url => url.length > 0), + defaultValue: [], + }, + txCollectionFileStoreSlowDelayMs: { + env: 'TX_COLLECTION_FILE_STORE_SLOW_DELAY_MS', + description: 'Delay before file store collection starts after slow collection', + ...numberConfigHelper(24_000), + }, + txCollectionFileStoreFastDelayMs: { + env: 'TX_COLLECTION_FILE_STORE_FAST_DELAY_MS', + description: 'Delay before file store collection starts after fast collection', + ...numberConfigHelper(2_000), + }, }; diff --git a/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts b/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts index 7b2312ccd53d..bbb6c2c54b97 100644 --- a/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts +++ b/yarn-project/p2p/src/services/tx_collection/fast_tx_collection.ts @@ -77,7 +77,7 @@ export class FastTxCollection { // This promise is used to await for the collection to finish during the main collectFast method. // It gets resolved in `foundTxs` when all txs have been collected, or rejected if the request is aborted or hits the deadline. 
const promise = promiseWithResolvers(); - setTimeout(() => promise.reject(new TimeoutError(`Timed out while collecting txs`)), timeout); + const timeoutTimer = setTimeout(() => promise.reject(new TimeoutError(`Timed out while collecting txs`)), timeout); const request: FastCollectionRequest = { ...input, @@ -89,6 +89,7 @@ export class FastTxCollection { }; const [duration] = await elapsed(() => this.collectFast(request, { ...opts })); + clearTimeout(timeoutTimer); this.log.verbose( `Collected ${request.foundTxs.size} txs out of ${txHashes.length} for ${input.type} at slot ${blockInfo.slotNumber}`, diff --git a/yarn-project/p2p/src/services/tx_collection/file_store_tx_collection.test.ts b/yarn-project/p2p/src/services/tx_collection/file_store_tx_collection.test.ts new file mode 100644 index 000000000000..e2f702ea4154 --- /dev/null +++ b/yarn-project/p2p/src/services/tx_collection/file_store_tx_collection.test.ts @@ -0,0 +1,194 @@ +import { createLogger } from '@aztec/foundation/log'; +import { promiseWithResolvers } from '@aztec/foundation/promise'; +import { Tx, TxHash } from '@aztec/stdlib/tx'; +import { getTelemetryClient } from '@aztec/telemetry-client'; + +import { jest } from '@jest/globals'; +import { type MockProxy, mock } from 'jest-mock-extended'; + +import type { TxPool } from '../../mem_pools/index.js'; +import { FileStoreTxCollection } from './file_store_tx_collection.js'; +import type { FileStoreTxSource } from './file_store_tx_source.js'; +import { TxCollectionSink } from './tx_collection_sink.js'; + +describe('FileStoreTxCollection', () => { + let fileStoreCollection: FileStoreTxCollection; + let fileStoreSources: MockProxy[]; + let txCollectionSink: TxCollectionSink; + let txPool: MockProxy; + + let txs: Tx[]; + let txHashes: TxHash[]; + + const makeFileStoreSource = (name: string) => { + const source = mock(); + source.getInfo.mockReturnValue(name); + source.getTxsByHash.mockResolvedValue([]); + return source; + }; + + const makeTx = async () => { 
+ const tx = Tx.random(); + await tx.recomputeHash(); + return tx; + }; + + const setFileStoreTxs = (source: MockProxy, txs: Tx[]) => { + source.getTxsByHash.mockImplementation(hashes => { + return Promise.resolve(hashes.map(h => txs.find(tx => tx.getTxHash().equals(h)))); + }); + }; + + /** Waits for the sink to emit txs-added events for the expected number of txs. */ + const waitForTxsAdded = (expectedCount: number) => { + const { promise, resolve } = promiseWithResolvers(); + let count = 0; + const handler = ({ txs }: { txs: Tx[] }) => { + count += txs.length; + if (count >= expectedCount) { + txCollectionSink.removeListener('txs-added', handler); + resolve(); + } + }; + txCollectionSink.on('txs-added', handler); + return promise; + }; + + beforeEach(async () => { + jest.spyOn(Math, 'random').mockReturnValue(0); + + txPool = mock(); + txPool.addTxs.mockImplementation(txs => Promise.resolve(txs.length)); + + const log = createLogger('test'); + txCollectionSink = new TxCollectionSink(txPool, getTelemetryClient(), log); + + fileStoreSources = [makeFileStoreSource('store1'), makeFileStoreSource('store2')]; + + fileStoreCollection = new FileStoreTxCollection(fileStoreSources, txCollectionSink, log); + + txs = await Promise.all([makeTx(), makeTx(), makeTx()]); + txHashes = txs.map(tx => tx.getTxHash()); + }); + + afterEach(async () => { + await fileStoreCollection.stop(); + jest.restoreAllMocks(); + }); + + it('downloads txs immediately when startCollecting is called', async () => { + setFileStoreTxs(fileStoreSources[0], txs); + + fileStoreCollection.start(); + + // Set up event listener before calling startCollecting + const txsAddedPromise = waitForTxsAdded(txs.length); + + fileStoreCollection.startCollecting(txHashes); + + // Wait for all txs to be processed via events + await txsAddedPromise; + + expect(fileStoreSources[0].getTxsByHash).toHaveBeenCalled(); + expect(txPool.addTxs).toHaveBeenCalledWith(expect.arrayContaining([txs[0]]), { source: 'tx-collection' }); 
+ expect(txPool.addTxs).toHaveBeenCalledWith(expect.arrayContaining([txs[1]]), { source: 'tx-collection' }); + expect(txPool.addTxs).toHaveBeenCalledWith(expect.arrayContaining([txs[2]]), { source: 'tx-collection' }); + }); + + it('skips txs marked as found while queued', async () => { + setFileStoreTxs(fileStoreSources[0], txs); + + fileStoreCollection.start(); + + // Queue all txs, then mark the first as found before workers process it + fileStoreCollection.startCollecting(txHashes); + fileStoreCollection.foundTxs([txs[0]]); + + // Set up event listener - only 2 txs should be downloaded + const txsAddedPromise = waitForTxsAdded(2); + + // Wait for workers to process + await txsAddedPromise; + + // First tx should not have been requested from file store + const allCalls = fileStoreSources.flatMap(s => s.getTxsByHash.mock.calls); + const requestedHashes = allCalls.flat().flat(); + expect(requestedHashes).not.toContainEqual(txHashes[0]); + }); + + it('stops tracking txs when foundTxs is called', async () => { + setFileStoreTxs(fileStoreSources[0], txs); + + fileStoreCollection.start(); + + // Mark first tx as found before queueing + fileStoreCollection.foundTxs([txs[0]]); + + // Set up event listener - only 2 txs should be downloaded + const txsAddedPromise = waitForTxsAdded(2); + + // Queue all txs - but first one was already found + fileStoreCollection.startCollecting(txHashes); + + // Wait for workers to process + await txsAddedPromise; + + // First tx should not have been requested from any file store + const allCalls = fileStoreSources.flatMap(s => s.getTxsByHash.mock.calls); + const requestedHashes = allCalls.flat().flat(); + expect(requestedHashes).not.toContainEqual(txHashes[0]); + + // Verify second and third tx were downloaded + expect(txPool.addTxs).toHaveBeenCalledWith(expect.arrayContaining([txs[1]]), { source: 'tx-collection' }); + expect(txPool.addTxs).toHaveBeenCalledWith(expect.arrayContaining([txs[2]]), { source: 'tx-collection' }); + }); + + 
it('tries multiple file stores when tx not found in first', async () => { + // Only second store has tx[0] + setFileStoreTxs(fileStoreSources[1], [txs[0]]); + + fileStoreCollection.start(); + + // Set up event listener + const txsAddedPromise = waitForTxsAdded(1); + + fileStoreCollection.startCollecting([txHashes[0]]); + await txsAddedPromise; + + // First store was tried but didn't have it + expect(fileStoreSources[0].getTxsByHash).toHaveBeenCalled(); + // Second store was tried and found it + expect(fileStoreSources[1].getTxsByHash).toHaveBeenCalled(); + expect(txPool.addTxs).toHaveBeenCalledWith([txs[0]], { source: 'tx-collection' }); + }); + + it('does not start workers if no file store sources are configured', async () => { + const log = createLogger('test'); + fileStoreCollection = new FileStoreTxCollection([], txCollectionSink, log); + fileStoreCollection.start(); + fileStoreCollection.startCollecting(txHashes); + + // Give some time for potential processing + await new Promise(resolve => setTimeout(resolve, 50)); + + expect(fileStoreSources[0].getTxsByHash).not.toHaveBeenCalled(); + }); + + it('does not re-queue txs that are already pending', async () => { + setFileStoreTxs(fileStoreSources[0], txs); + + fileStoreCollection.start(); + + // Set up event listener + const txsAddedPromise = waitForTxsAdded(txs.length); + + fileStoreCollection.startCollecting(txHashes); + fileStoreCollection.startCollecting(txHashes); // Duplicate call + + await txsAddedPromise; + + // Each tx should only be downloaded once + const allCalls = fileStoreSources.flatMap(s => s.getTxsByHash.mock.calls); + expect(allCalls.length).toBe(txHashes.length); + }); +}); diff --git a/yarn-project/p2p/src/services/tx_collection/file_store_tx_collection.ts b/yarn-project/p2p/src/services/tx_collection/file_store_tx_collection.ts new file mode 100644 index 000000000000..13a2a0b52ca3 --- /dev/null +++ b/yarn-project/p2p/src/services/tx_collection/file_store_tx_collection.ts @@ -0,0 +1,146 @@ 
+import { type Logger, createLogger } from '@aztec/foundation/log'; +import { FifoMemoryQueue } from '@aztec/foundation/queue'; +import { Tx, TxHash } from '@aztec/stdlib/tx'; + +import type { FileStoreTxSource } from './file_store_tx_source.js'; +import type { TxCollectionSink } from './tx_collection_sink.js'; + +// Internal constants (not configurable by node operators) +const FILE_STORE_DOWNLOAD_CONCURRENCY = 5; // Max concurrent downloads + +/** + * Collects txs from file stores as a fallback after P2P methods have been tried. + * Runs in parallel to slow/fast collection. The delay before starting file store + * collection is managed by the TxCollection orchestrator, not this class. + */ +export class FileStoreTxCollection { + /** Set of tx hashes that have been queued for download (prevents duplicate queueing). */ + private pendingTxs = new Set(); + + /** + * Tracks tx hashes found elsewhere, even before startCollecting is called. + * Needed because the orchestrator delays startCollecting via a real sleep, but foundTxs + * may arrive during that delay — before the hashes are added to pendingTxs. + */ + private foundTxHashes = new Set(); + + /** Queue of tx hashes to be downloaded. */ + private downloadQueue = new FifoMemoryQueue(); + + /** Worker promises for concurrent downloads. */ + private workers: Promise[] = []; + + /** Whether the collection has been started. */ + private started = false; + + constructor( + private readonly fileStoreSources: FileStoreTxSource[], + private readonly txCollectionSink: TxCollectionSink, + private readonly log: Logger = createLogger('p2p:file_store_tx_collection'), + ) {} + + /** Starts the file store collection workers. 
*/ + public start() { + if (this.fileStoreSources.length === 0) { + this.log.debug('No file store sources configured, skipping file store collection'); + return; + } + + this.started = true; + this.downloadQueue = new FifoMemoryQueue(); + + // Start concurrent download workers + for (let i = 0; i < FILE_STORE_DOWNLOAD_CONCURRENCY; i++) { + this.workers.push(this.downloadQueue.process(txHash => this.processDownload(txHash))); + } + + this.log.info(`Started file store tx collection with ${this.fileStoreSources.length} sources`, { + sources: this.fileStoreSources.map(s => s.getInfo()), + concurrency: FILE_STORE_DOWNLOAD_CONCURRENCY, + }); + } + + /** Stops all collection activity. */ + public async stop() { + if (!this.started) { + return; + } + this.started = false; + this.downloadQueue.end(); + await Promise.all(this.workers); + this.workers = []; + this.pendingTxs.clear(); + this.foundTxHashes.clear(); + } + + /** Remove the given tx hashes from pending. */ + public stopCollecting(txHashes: TxHash[]) { + for (const txHash of txHashes) { + const hashStr = txHash.toString(); + this.pendingTxs.delete(hashStr); + } + } + + /** Clears all pending state. Items already in the download queue will still be processed but won't be re-queued. */ + public clearPending() { + this.pendingTxs.clear(); + this.foundTxHashes.clear(); + } + + /** Queue the given tx hashes for file store collection. */ + public startCollecting(txHashes: TxHash[]) { + for (const txHash of txHashes) { + const hashStr = txHash.toString(); + if (!this.pendingTxs.has(hashStr) && !this.foundTxHashes.has(hashStr)) { + this.pendingTxs.add(hashStr); + this.downloadQueue.put(txHash); + } + } + } + + /** Stop tracking txs that were found elsewhere. */ + public foundTxs(txs: Tx[]) { + for (const tx of txs) { + const hashStr = tx.getTxHash().toString(); + this.pendingTxs.delete(hashStr); + this.foundTxHashes.add(hashStr); + } + } + + /** Processes a single tx hash from the download queue. 
*/ + private async processDownload(txHash: TxHash) { + const hashStr = txHash.toString(); + + // Skip if already found by another method + if (!this.pendingTxs.has(hashStr)) { + return; + } + + await this.downloadTx(txHash); + this.pendingTxs.delete(hashStr); + } + + /** Attempt to download a tx from file stores (round-robin). */ + private async downloadTx(txHash: TxHash) { + const startIndex = Math.floor(Math.random() * this.fileStoreSources.length); + for (let i = startIndex; i < startIndex + this.fileStoreSources.length; i++) { + const source = this.fileStoreSources[i % this.fileStoreSources.length]; + + try { + const result = await this.txCollectionSink.collect(hashes => source.getTxsByHash(hashes), [txHash], { + description: `file-store ${source.getInfo()}`, + method: 'file-store', + fileStore: source.getInfo(), + }); + + if (result.txs.length > 0) { + return; + } + } catch (err) { + this.log.trace(`Failed to download tx ${txHash} from ${source.getInfo()}`, { err }); + } + } + + this.log.trace(`Tx ${txHash} not found in any file store`); + } +} diff --git a/yarn-project/p2p/src/services/tx_collection/file_store_tx_source.ts b/yarn-project/p2p/src/services/tx_collection/file_store_tx_source.ts new file mode 100644 index 000000000000..b88f6b028ede --- /dev/null +++ b/yarn-project/p2p/src/services/tx_collection/file_store_tx_source.ts @@ -0,0 +1,70 @@ +import { type Logger, createLogger } from '@aztec/foundation/log'; +import { type ReadOnlyFileStore, createReadOnlyFileStore } from '@aztec/stdlib/file-store'; +import { Tx, type TxHash } from '@aztec/stdlib/tx'; + +import type { TxSource } from './tx_source.js'; + +/** TxSource implementation that downloads txs from a file store. */ +export class FileStoreTxSource implements TxSource { + private constructor( + private readonly fileStore: ReadOnlyFileStore, + private readonly baseUrl: string, + private readonly log: Logger, + ) {} + + /** + * Creates a FileStoreTxSource from a URL. 
+ * @param url - The file store URL (s3://, gs://, file://, http://, https://). + * @param log - Optional logger. + * @returns The FileStoreTxSource instance, or undefined if creation fails. + */ + public static async create( + url: string, + log: Logger = createLogger('p2p:file_store_tx_source'), + ): Promise { + try { + const fileStore = await createReadOnlyFileStore(url, log); + if (!fileStore) { + log.warn(`Failed to create file store for URL: ${url}`); + return undefined; + } + return new FileStoreTxSource(fileStore, url, log); + } catch (err) { + log.warn(`Error creating file store for URL: ${url}`, { error: err }); + return undefined; + } + } + + public getInfo(): string { + return `file-store:${this.baseUrl}`; + } + + public getTxsByHash(txHashes: TxHash[]): Promise<(Tx | undefined)[]> { + return Promise.all( + txHashes.map(async txHash => { + const path = `txs/${txHash.toString()}.bin`; + try { + const buffer = await this.fileStore.read(path); + return Tx.fromBuffer(buffer); + } catch { + // Tx not found or error reading - return undefined + return undefined; + } + }), + ); + } +} + +/** + * Creates FileStoreTxSource instances from URLs. + * @param urls - Array of file store URLs. + * @param log - Optional logger. + * @returns Array of successfully created FileStoreTxSource instances. 
+ */ +export async function createFileStoreTxSources( + urls: string[], + log: Logger = createLogger('p2p:file_store_tx_source'), +): Promise { + const sources = await Promise.all(urls.map(url => FileStoreTxSource.create(url, log))); + return sources.filter((s): s is FileStoreTxSource => s !== undefined); +} diff --git a/yarn-project/p2p/src/services/tx_collection/index.ts b/yarn-project/p2p/src/services/tx_collection/index.ts index aa4a01d54e81..9349339ce29b 100644 --- a/yarn-project/p2p/src/services/tx_collection/index.ts +++ b/yarn-project/p2p/src/services/tx_collection/index.ts @@ -5,3 +5,4 @@ export { BatchTxRequesterCollector, SendBatchRequestCollector, } from './proposal_tx_collector.js'; +export { FileStoreTxSource, createFileStoreTxSources } from './file_store_tx_source.js'; diff --git a/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts b/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts index 4453656890e8..4cbb82e43ed5 100644 --- a/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts +++ b/yarn-project/p2p/src/services/tx_collection/tx_collection.test.ts @@ -22,6 +22,7 @@ import { chunkTxHashesRequest } from '../reqresp/protocols/tx.js'; import { ReqRespStatus } from '../reqresp/status.js'; import { type TxCollectionConfig, txCollectionConfigMappings } from './config.js'; import { FastTxCollection } from './fast_tx_collection.js'; +import type { FileStoreTxSource } from './file_store_tx_source.js'; import type { SlowTxCollection } from './slow_tx_collection.js'; import { type FastCollectionRequest, TxCollection } from './tx_collection.js'; import type { TxSource } from './tx_source.js'; @@ -50,8 +51,15 @@ describe('TxCollection', () => { return node; }; + const makeFileStoreSource = (name: string) => { + const source = mock(); + source.getInfo.mockReturnValue(name); + source.getTxsByHash.mockResolvedValue([]); + return source; + }; + const makeTx = async (txHash?: string | TxHash) => { - const tx = Tx.random({ 
txHash }) as Tx; + const tx = Tx.random({ txHash }); await tx.recomputeHash(); return tx; }; @@ -132,6 +140,8 @@ describe('TxCollection', () => { txCollectionFastMaxParallelRequestsPerNode: 2, txCollectionFastNodeIntervalMs: 100, txCollectionMissingTxsCollectorType: 'old', + txCollectionFileStoreSlowDelayMs: 100, + txCollectionFileStoreFastDelayMs: 100, }; txs = await Promise.all([makeTx(), makeTx(), makeTx()]); @@ -140,7 +150,7 @@ describe('TxCollection', () => { deadline = new Date(dateProvider.now() + 60 * 60 * 1000); mockP2PService.reqResp = reqResp; - txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, dateProvider); + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); }); afterEach(async () => { @@ -230,7 +240,7 @@ describe('TxCollection', () => { }); it('collects missing txs directly via reqresp if there are no nodes configured', async () => { - txCollection = new TestTxCollection(mockP2PService, [], constants, txPool, config, dateProvider); + txCollection = new TestTxCollection(mockP2PService, [], constants, txPool, config, [], dateProvider); txCollection.startCollecting(block, txHashes); setReqRespTxs([txs[0]]); @@ -260,7 +270,7 @@ describe('TxCollection', () => { it('does not request missing txs being collected via fast collection', async () => { config = { ...config, txCollectionDisableSlowDuringFastRequests: false }; - txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, dateProvider); + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); const innerCollectFastPromise = promiseWithResolvers(); jest.spyOn(txCollection.fastCollection, 'collectFast').mockImplementation(async request => { @@ -280,7 +290,7 @@ describe('TxCollection', () => { it('pauses slow collection if fast collection is ongoing', async () => { config = { ...config, txCollectionDisableSlowDuringFastRequests: true }; - 
txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, dateProvider); + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); const innerCollectFastPromise = promiseWithResolvers(); jest.spyOn(txCollection.fastCollection, 'collectFast').mockImplementation(async request => { @@ -301,7 +311,7 @@ describe('TxCollection', () => { it('stops collecting a tx when found via fast collection', async () => { config = { ...config, txCollectionDisableSlowDuringFastRequests: true }; - txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, dateProvider); + txCollection = new TestTxCollection(mockP2PService, nodes, constants, txPool, config, [], dateProvider); setNodeTxs(nodes[0], txs); txCollection.startCollecting(block, txHashes); @@ -420,7 +430,7 @@ describe('TxCollection', () => { }); it('collects via reqresp if no nodes are configured', async () => { - txCollection = new TestTxCollection(mockP2PService, [], constants, txPool, config, dateProvider); + txCollection = new TestTxCollection(mockP2PService, [], constants, txPool, config, [], dateProvider); setReqRespTxs(txs); const collected = await txCollection.collectFastForBlock(block, txHashes, { deadline }); expectReqRespToHaveBeenCalledWith(txHashes); @@ -499,6 +509,69 @@ describe('TxCollection', () => { expect(reqResp.sendBatchRequest).not.toHaveBeenCalled(); }); }); + + describe('file store collection', () => { + let fileStoreSources: MockProxy[]; + + const setFileStoreTxs = (source: MockProxy, txsToReturn: Tx[]) => { + source.getTxsByHash.mockImplementation(hashes => { + return Promise.resolve(hashes.map(h => txsToReturn.find(tx => tx.txHash.equals(h)))); + }); + }; + + beforeEach(() => { + fileStoreSources = [makeFileStoreSource('store1')]; + txCollection = new TestTxCollection( + mockP2PService, + nodes, + constants, + txPool, + config, + fileStoreSources, + dateProvider, + ); + }); + + it('collects txs from 
 file store after slow delay', async () => { + setFileStoreTxs(fileStoreSources[0], txs); + txPool.addTxs.mockImplementation(addedTxs => Promise.resolve(addedTxs.length)); + txPool.hasTx.mockResolvedValue(false); + + await txCollection.start(); + txCollection.startCollecting(block, txHashes); + + // File store should not have been called yet (delay hasn't elapsed) + expect(fileStoreSources[0].getTxsByHash).not.toHaveBeenCalled(); + + // Advance time past the 100ms slow delay configured in this test + dateProvider.setTime(dateProvider.now() + 200); + // Allow the async sleep resolution and worker processing to complete + await sleep(100); + + // File store should now have been called for each tx + expect(fileStoreSources[0].getTxsByHash).toHaveBeenCalled(); + }); + + it('does not download txs from file store if found via P2P before delay expires', async () => { + setFileStoreTxs(fileStoreSources[0], txs); + txPool.addTxs.mockImplementation(addedTxs => Promise.resolve(addedTxs.length)); + txPool.hasTx.mockResolvedValue(false); + + await txCollection.start(); + txCollection.startCollecting(block, txHashes); + + // Simulate all txs found via P2P before delay expires + await txCollection.handleTxsAddedToPool({ txs, source: 'test' }); + + // Now advance time past the delay + dateProvider.setTime(dateProvider.now() + 200); + await sleep(100); + + // File store should not have downloaded any txs because they were all found + const allCalls = fileStoreSources.flatMap(s => s.getTxsByHash.mock.calls); + expect(allCalls.length).toBe(0); + }); + }); }); class TestFastTxCollection extends FastTxCollection { @@ -513,5 +586,6 @@ class TestFastTxCollection extends FastTxCollection { class TestTxCollection extends TxCollection { declare slowCollection: SlowTxCollection; declare fastCollection: TestFastTxCollection; + declare fileStoreCollection: TxCollection['fileStoreCollection']; declare handleTxsAddedToPool: TxPoolEvents['txs-added']; } diff --git 
a/yarn-project/p2p/src/services/tx_collection/tx_collection.ts b/yarn-project/p2p/src/services/tx_collection/tx_collection.ts index 1b2b7fb54cc0..38c3f65cbcbd 100644 --- a/yarn-project/p2p/src/services/tx_collection/tx_collection.ts +++ b/yarn-project/p2p/src/services/tx_collection/tx_collection.ts @@ -2,6 +2,7 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { compactArray } from '@aztec/foundation/collection'; import { type Logger, createLogger } from '@aztec/foundation/log'; import { type PromiseWithResolvers, RunningPromise } from '@aztec/foundation/promise'; +import { sleep } from '@aztec/foundation/sleep'; import { DateProvider } from '@aztec/foundation/timer'; import type { L2Block, L2BlockInfo } from '@aztec/stdlib/block'; import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; @@ -16,11 +17,13 @@ import type { TxPoolEvents } from '../../mem_pools/tx_pool/tx_pool.js'; import type { BatchTxRequesterLibP2PService } from '../reqresp/batch-tx-requester/interface.js'; import type { TxCollectionConfig } from './config.js'; import { FastTxCollection } from './fast_tx_collection.js'; +import { FileStoreTxCollection } from './file_store_tx_collection.js'; +import type { FileStoreTxSource } from './file_store_tx_source.js'; import { SlowTxCollection } from './slow_tx_collection.js'; import { TxCollectionSink } from './tx_collection_sink.js'; import type { TxSource } from './tx_source.js'; -export type CollectionMethod = 'fast-req-resp' | 'fast-node-rpc' | 'slow-req-resp' | 'slow-node-rpc'; +export type CollectionMethod = 'fast-req-resp' | 'fast-node-rpc' | 'slow-req-resp' | 'slow-node-rpc' | 'file-store'; export type MissingTxInfo = { blockNumber: BlockNumber; deadline: Date; readyForReqResp: boolean }; @@ -54,6 +57,9 @@ export class TxCollection { /** Fast collection methods */ protected readonly fastCollection: FastTxCollection; + /** File store collection */ + protected readonly fileStoreCollection: FileStoreTxCollection; + 
/** Loop for periodically reconciling found transactions from the tx pool in case we missed some */ private readonly reconcileFoundTxsLoop: RunningPromise; @@ -66,12 +72,19 @@ export class TxCollection { /** Handler for the txs-added event from the tx collection sink */ protected readonly handleTxsFound: TxPoolEvents['txs-added']; + /** Whether the service has been started. */ + private started = false; + + /** Whether file store sources are configured. */ + private readonly hasFileStoreSources: boolean; + constructor( private readonly p2pService: BatchTxRequesterLibP2PService, private readonly nodes: TxSource[], private readonly constants: L1RollupConstants, private readonly txPool: TxPool, private readonly config: TxCollectionConfig, + fileStoreSources: FileStoreTxSource[] = [], private readonly dateProvider: DateProvider = new DateProvider(), telemetryClient: TelemetryClient = getTelemetryClient(), private readonly log: Logger = createLogger('p2p:tx_collection_service'), @@ -98,6 +111,9 @@ export class TxCollection { this.log, ); + this.hasFileStoreSources = fileStoreSources.length > 0; + this.fileStoreCollection = new FileStoreTxCollection(fileStoreSources, this.txCollectionSink, this.log); + this.reconcileFoundTxsLoop = new RunningPromise( () => this.reconcileFoundTxsWithPool(), this.log, @@ -120,7 +136,9 @@ export class TxCollection { /** Starts all collection loops. */ public start(): Promise { + this.started = true; this.slowCollection.start(); + this.fileStoreCollection.start(); this.reconcileFoundTxsLoop.start(); // TODO(palla/txs): Collect mined unproven tx hashes for txs we dont have in the pool and populate missingTxs on startup @@ -129,7 +147,13 @@ export class TxCollection { /** Stops all activity. 
*/ public async stop() { - await Promise.all([this.slowCollection.stop(), this.fastCollection.stop(), this.reconcileFoundTxsLoop.stop()]); + this.started = false; + await Promise.all([ + this.slowCollection.stop(), + this.fastCollection.stop(), + this.fileStoreCollection.stop(), + this.reconcileFoundTxsLoop.stop(), + ]); this.txPool.removeListener('txs-added', this.handleTxsAddedToPool); this.txCollectionSink.removeListener('txs-added', this.handleTxsFound); @@ -147,7 +171,18 @@ export class TxCollection { /** Starts collecting the given tx hashes for the given L2Block in the slow loop */ public startCollecting(block: L2Block, txHashes: TxHash[]) { - return this.slowCollection.startCollecting(block, txHashes); + this.slowCollection.startCollecting(block, txHashes); + + // Delay file store collection to give P2P methods time to find txs first + if (this.hasFileStoreSources) { + sleep(this.config.txCollectionFileStoreSlowDelayMs) + .then(() => { + if (this.started) { + this.fileStoreCollection.startCollecting(txHashes); + } + }) + .catch(err => this.log.error('Error in file store slow delay', err)); + } } /** Collects the set of txs for the given block proposal as fast as possible */ @@ -175,6 +210,19 @@ export class TxCollection { txHashes: TxHash[] | string[], opts: { deadline: Date; pinnedPeer?: PeerId }, ) { + const hashes = txHashes.map(h => (typeof h === 'string' ? 
TxHash.fromString(h) : h)); + + // Delay file store collection to give P2P methods time to find txs first + if (this.hasFileStoreSources) { + sleep(this.config.txCollectionFileStoreFastDelayMs) + .then(() => { + if (this.started) { + this.fileStoreCollection.startCollecting(hashes); + } + }) + .catch(err => this.log.error('Error in file store fast delay', err)); + } + return this.fastCollection.collectFastFor(input, txHashes, opts); } @@ -182,6 +230,7 @@ export class TxCollection { private foundTxs(txs: Tx[]) { this.slowCollection.foundTxs(txs); this.fastCollection.foundTxs(txs); + this.fileStoreCollection.foundTxs(txs); } /** @@ -191,6 +240,7 @@ export class TxCollection { public stopCollectingForBlocksUpTo(blockNumber: BlockNumber): void { this.slowCollection.stopCollectingForBlocksUpTo(blockNumber); this.fastCollection.stopCollectingForBlocksUpTo(blockNumber); + this.fileStoreCollection.clearPending(); } /** @@ -200,6 +250,7 @@ export class TxCollection { public stopCollectingForBlocksAfter(blockNumber: BlockNumber): void { this.slowCollection.stopCollectingForBlocksAfter(blockNumber); this.fastCollection.stopCollectingForBlocksAfter(blockNumber); + this.fileStoreCollection.clearPending(); } /** Every now and then, check if the pool has received one of the txs we are looking for, just to catch any race conditions */ diff --git a/yarn-project/p2p/src/services/tx_file_store/config.ts b/yarn-project/p2p/src/services/tx_file_store/config.ts index fa78d90331f0..edd1d9c90e46 100644 --- a/yarn-project/p2p/src/services/tx_file_store/config.ts +++ b/yarn-project/p2p/src/services/tx_file_store/config.ts @@ -6,8 +6,6 @@ import { type ConfigMappingsType, booleanConfigHelper, numberConfigHelper } from export type TxFileStoreConfig = { /** URL for uploading txs to file storage (s3://, gs://, file://) */ txFileStoreUrl?: string; - /** URL for downloading txs from file storage */ - txFileStoreDownloadUrl?: string; /** Max concurrent uploads */ txFileStoreUploadConcurrency: 
number; /** Max queue size to prevent unbounded memory growth */ @@ -21,10 +19,6 @@ export const txFileStoreConfigMappings: ConfigMappingsType = env: 'TX_FILE_STORE_URL', description: 'URL for uploading txs to file storage (s3://, gs://, file://)', }, - txFileStoreDownloadUrl: { - env: 'TX_FILE_STORE_DOWNLOAD_URL', - description: 'URL for downloading txs from file storage', - }, txFileStoreUploadConcurrency: { env: 'TX_FILE_STORE_UPLOAD_CONCURRENCY', description: 'Maximum number of concurrent tx uploads', diff --git a/yarn-project/p2p/src/services/tx_file_store/tx_file_store.test.ts b/yarn-project/p2p/src/services/tx_file_store/tx_file_store.test.ts index caa240b6b489..47ad859c1ef6 100644 --- a/yarn-project/p2p/src/services/tx_file_store/tx_file_store.test.ts +++ b/yarn-project/p2p/src/services/tx_file_store/tx_file_store.test.ts @@ -54,7 +54,6 @@ describe('TxFileStore', () => { config = { txFileStoreEnabled: true, txFileStoreUrl: `file://${tmpDir}`, - txFileStoreDownloadUrl: `file://${tmpDir}`, txFileStoreUploadConcurrency: 2, txFileStoreMaxQueueSize: 10, }; diff --git a/yarn-project/p2p/src/test-helpers/testbench-utils.ts b/yarn-project/p2p/src/test-helpers/testbench-utils.ts index 9fbd09495938..3f2ec8ee5906 100644 --- a/yarn-project/p2p/src/test-helpers/testbench-utils.ts +++ b/yarn-project/p2p/src/test-helpers/testbench-utils.ts @@ -155,10 +155,10 @@ export class InMemoryAttestationPool { const id = blockProposal.archive.toString(); const alreadyExists = this.proposals.has(id); if (alreadyExists) { - return Promise.resolve({ added: false, alreadyExists: true, totalForPosition: 1 }); + return Promise.resolve({ added: false, alreadyExists: true, count: 1 }); } this.proposals.set(id, blockProposal); - return Promise.resolve({ added: true, alreadyExists: false, totalForPosition: 1 }); + return Promise.resolve({ added: true, alreadyExists: false, count: 1 }); } getBlockProposal(id: string): Promise { @@ -166,7 +166,7 @@ export class InMemoryAttestationPool { } 
tryAddCheckpointProposal(_proposal: CheckpointProposal): Promise { - return Promise.resolve({ added: true, alreadyExists: false, totalForPosition: 1 }); + return Promise.resolve({ added: true, alreadyExists: false, count: 1 }); } getCheckpointProposal(_id: string): Promise { @@ -188,8 +188,8 @@ export class InMemoryAttestationPool { return Promise.resolve([]); } - tryAddCheckpointAttestation(_attestation: CheckpointAttestation, _committeeSize: number): Promise { - return Promise.resolve({ added: true, alreadyExists: false, totalForPosition: 1 }); + tryAddCheckpointAttestation(_attestation: CheckpointAttestation): Promise { + return Promise.resolve({ added: true, alreadyExists: false, count: 1 }); } isEmpty(): Promise { diff --git a/yarn-project/scripts/latency-explorer/index.html b/yarn-project/scripts/latency-explorer/index.html new file mode 100644 index 000000000000..b23f55a818f7 --- /dev/null +++ b/yarn-project/scripts/latency-explorer/index.html @@ -0,0 +1,949 @@ + + + + + + + Aztec L2 Latency Explorer + + + + +
+

Aztec L2 Latency Explorer

+

Visualize expected user-perceived latency from transaction send to effects visible in the proposed chain

+
+ +
+

Configuration

+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+ + +
+
+ +
+

Derived Values

+
+
+ Max Blocks / Slot + - +
+
+ Init Offset + - +
+
+ Checkpoint Final. + - +
+
+ Min Latency + - +
+
+ Max Latency + - +
+
+ Avg Latency + - +
+
+ Median Latency + - +
+
+ p95 Latency + - +
+
+
+ +
+

Latency by Send Time

+
+ +
+
+ +
+

Cumulative Probability

+
+ +
+
+ +
+

Sequencer Timetable

+
+
+ Max + Blocks / Slot + - +
+
+ Init + Offset + - +
+
+ Checkpoint + Finalization + - +
+
+ Time + Available for Blocks + - +
+
+ First Block + Start + - +
+
+ Last + Block End + - +
+
+ Execution + Time / Block + - +
+
+ Block Windows + - +
+
+ Dead + Zone Start + - +
+
+
+ +
+

Transaction Lifecycle

+

This tool models the user-perceived latency from sending a transaction to seeing its effects in the proposed + chain. The lifecycle proceeds through the following stages:

+

1. TX submission. The user submits a transaction to their local Aztec node.

+

2. P2P propagation to proposer. The node broadcasts the TX to the P2P network. It takes one P2P + propagation delay for the TX to reach the current slot's proposer.

+

3. Waiting for the next block. The proposer builds blocks in fixed-duration sub-slots within the + L2 slot. The TX must wait until the next block building window starts, since the proposer snapshots the TX pool at + the beginning of each block.

+

4. Block execution. The proposer executes the transactions in the block. The actual execution + time depends on block fill (how many and how complex the transactions are). Once done, the block proposal is + broadcast to the network without waiting for the full block window to elapse.

+

5. P2P propagation back. The block proposal propagates back through the P2P network to the + user's node (another one-way propagation delay).

+

6. Node re-execution. The user's node re-executes the block to update its local world state + (merkle trees, nullifiers, public data). Only after this step are the transaction's effects visible in the + proposed chain.

+

7. Slot wrap. If the TX arrives too late in the slot and misses all block building windows, it + must wait for the next slot's proposer to pick it up, adding up to one full slot duration of extra latency.

+

Note that this models "proposed chain" visibility -- the TX effects are visible locally before checkpoint + confirmation on L1 or epoch proving.

+
+ + + + + + + + \ No newline at end of file diff --git a/yarn-project/slasher/src/config.ts b/yarn-project/slasher/src/config.ts index ddda18385ed4..79cef1e58b1a 100644 --- a/yarn-project/slasher/src/config.ts +++ b/yarn-project/slasher/src/config.ts @@ -24,6 +24,7 @@ export const DefaultSlasherConfig: SlasherConfig = { slashInactivityConsecutiveEpochThreshold: slasherDefaultEnv.SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD, slashBroadcastedInvalidBlockPenalty: BigInt(slasherDefaultEnv.SLASH_INVALID_BLOCK_PENALTY), slashDuplicateProposalPenalty: BigInt(slasherDefaultEnv.SLASH_DUPLICATE_PROPOSAL_PENALTY), + slashDuplicateAttestationPenalty: BigInt(slasherDefaultEnv.SLASH_DUPLICATE_ATTESTATION_PENALTY), slashInactivityPenalty: BigInt(slasherDefaultEnv.SLASH_INACTIVITY_PENALTY), slashProposeInvalidAttestationsPenalty: BigInt(slasherDefaultEnv.SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY), slashAttestDescendantOfInvalidPenalty: BigInt(slasherDefaultEnv.SLASH_ATTEST_DESCENDANT_OF_INVALID_PENALTY), @@ -94,6 +95,12 @@ export const slasherConfigMappings: ConfigMappingsType = { description: 'Penalty amount for slashing a validator for sending duplicate proposals.', ...bigintConfigHelper(DefaultSlasherConfig.slashDuplicateProposalPenalty), }, + slashDuplicateAttestationPenalty: { + env: 'SLASH_DUPLICATE_ATTESTATION_PENALTY', + description: + 'Penalty amount for slashing a validator for signing attestations for different proposals at the same slot.', + ...bigintConfigHelper(DefaultSlasherConfig.slashDuplicateAttestationPenalty), + }, slashInactivityTargetPercentage: { env: 'SLASH_INACTIVITY_TARGET_PERCENTAGE', description: diff --git a/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts b/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts index e0c65d40489e..01ad4f83e43a 100644 --- a/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts +++ b/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts @@ -151,6 +151,7 @@ class MockAztecNodeAdmin implements 
AztecNodeAdmin { slashInactivityPenalty: 1000n, slashBroadcastedInvalidBlockPenalty: 1n, slashDuplicateProposalPenalty: 1n, + slashDuplicateAttestationPenalty: 1n, secondsBeforeInvalidatingBlockAsCommitteeMember: 0, secondsBeforeInvalidatingBlockAsNonCommitteeMember: 0, slashProposeInvalidAttestationsPenalty: 1000n, diff --git a/yarn-project/stdlib/src/interfaces/slasher.ts b/yarn-project/stdlib/src/interfaces/slasher.ts index 4e241021a475..44ebbd97f790 100644 --- a/yarn-project/stdlib/src/interfaces/slasher.ts +++ b/yarn-project/stdlib/src/interfaces/slasher.ts @@ -19,6 +19,7 @@ export interface SlasherConfig { slashInactivityPenalty: bigint; slashBroadcastedInvalidBlockPenalty: bigint; slashDuplicateProposalPenalty: bigint; + slashDuplicateAttestationPenalty: bigint; slashProposeInvalidAttestationsPenalty: bigint; slashAttestDescendantOfInvalidPenalty: bigint; slashUnknownPenalty: bigint; @@ -42,6 +43,7 @@ export const SlasherConfigSchema = zodFor()( slashInactivityPenalty: schemas.BigInt, slashProposeInvalidAttestationsPenalty: schemas.BigInt, slashDuplicateProposalPenalty: schemas.BigInt, + slashDuplicateAttestationPenalty: schemas.BigInt, slashAttestDescendantOfInvalidPenalty: schemas.BigInt, slashUnknownPenalty: schemas.BigInt, slashOffenseExpirationRounds: z.number(), diff --git a/yarn-project/stdlib/src/interfaces/validator.ts b/yarn-project/stdlib/src/interfaces/validator.ts index 916db68d0255..48cb4a1b8dd4 100644 --- a/yarn-project/stdlib/src/interfaces/validator.ts +++ b/yarn-project/stdlib/src/interfaces/validator.ts @@ -56,11 +56,17 @@ export type ValidatorClientConfig = ValidatorHASignerConfig & { /** Skip pushing re-executed blocks to archiver (default: false) */ skipPushProposedBlocksToArchiver?: boolean; + + /** Agree to attest to equivocated checkpoint proposals (for testing purposes only) */ + attestToEquivocatedProposals?: boolean; }; export type ValidatorClientFullConfig = ValidatorClientConfig & Pick & - Pick & { + Pick< + SlasherConfig, + 
'slashBroadcastedInvalidBlockPenalty' | 'slashDuplicateProposalPenalty' | 'slashDuplicateAttestationPenalty' + > & { /** * Whether transactions are disabled for this node * @remarks This should match the property in P2PConfig. It's not picked from there to avoid circular dependencies. @@ -79,6 +85,7 @@ export const ValidatorClientConfigSchema = zodFor = { [OffenseType.PROPOSED_INCORRECT_ATTESTATIONS]: 6n, [OffenseType.ATTESTED_DESCENDANT_OF_INVALID]: 7n, [OffenseType.DUPLICATE_PROPOSAL]: 8n, + [OffenseType.DUPLICATE_ATTESTATION]: 9n, }; export function bigIntToOffense(offense: bigint): OffenseType { @@ -83,6 +88,8 @@ export function bigIntToOffense(offense: bigint): OffenseType { return OffenseType.ATTESTED_DESCENDANT_OF_INVALID; case 8n: return OffenseType.DUPLICATE_PROPOSAL; + case 9n: + return OffenseType.DUPLICATE_ATTESTATION; default: throw new Error(`Unknown offense: ${offense}`); } diff --git a/yarn-project/txe/src/state_machine/dummy_p2p_client.ts b/yarn-project/txe/src/state_machine/dummy_p2p_client.ts index b3ff0ecec717..452b5d1d8c5a 100644 --- a/yarn-project/txe/src/state_machine/dummy_p2p_client.ts +++ b/yarn-project/txe/src/state_machine/dummy_p2p_client.ts @@ -6,6 +6,7 @@ import type { P2PBlockReceivedCallback, P2PCheckpointReceivedCallback, P2PConfig, + P2PDuplicateAttestationCallback, P2PDuplicateProposalCallback, P2PSyncState, PeerId, @@ -211,4 +212,8 @@ export class DummyP2P implements P2P { public registerDuplicateProposalCallback(_callback: P2PDuplicateProposalCallback): void { throw new Error('DummyP2P does not implement "registerDuplicateProposalCallback"'); } + + public registerDuplicateAttestationCallback(_callback: P2PDuplicateAttestationCallback): void { + throw new Error('DummyP2P does not implement "registerDuplicateAttestationCallback"'); + } } diff --git a/yarn-project/validator-client/README.md b/yarn-project/validator-client/README.md index 118b313072d5..bb232bc28184 100644 --- a/yarn-project/validator-client/README.md +++ 
b/yarn-project/validator-client/README.md @@ -78,6 +78,7 @@ These rules must always hold: 3. **inHash is constant**: All blocks in a checkpoint share the same L1-to-L2 messages hash 4. **Sequential indexWithinCheckpoint**: Block N must have `indexWithinCheckpoint = parent.indexWithinCheckpoint + 1` 5. **One proposer per slot**: Each slot has exactly one designated proposer. Sending multiple proposals for the same position (slot, indexWithinCheckpoint) with different content is equivocation and slashable +6. **One attestation per slot**: Validators should only attest to one checkpoint per slot. Attesting to different proposals (different archives) for the same slot is equivocation and slashable ## Validation Flow @@ -155,16 +156,17 @@ Time | Proposer | Validator ## Configuration -| Flag | Purpose | -| ------------------------------------- | --------------------------------------------------------------------- | -| `validatorReexecute` | Re-execute transactions to verify proposals | -| `fishermanMode` | Validate proposals but don't broadcast attestations (monitoring only) | -| `alwaysReexecuteBlockProposals` | Force re-execution even when not in committee | -| `slashBroadcastedInvalidBlockPenalty` | Penalty amount for invalid proposals (0 = disabled) | -| `slashDuplicateProposalPenalty` | Penalty amount for duplicate proposals (0 = disabled) | -| `validatorReexecuteDeadlineMs` | Time reserved at end of slot for propagation/publishing | -| `attestationPollingIntervalMs` | How often to poll for attestations when collecting | -| `disabledValidators` | Validator addresses to exclude from duties | +| Flag | Purpose | +| ------------------------------------- | -------------------------------------------------------------------------------------- | +| `validatorReexecute` | Re-execute transactions to verify proposals | +| `fishermanMode` | Validate proposals but don't broadcast attestations (monitoring only) | +| `alwaysReexecuteBlockProposals` | Force re-execution even 
when not in committee | +| `slashBroadcastedInvalidBlockPenalty` | Penalty amount for invalid proposals (0 = disabled) | +| `slashDuplicateProposalPenalty` | Penalty amount for duplicate proposals (0 = disabled) | +| `slashDuplicateAttestationPenalty` | Penalty amount for duplicate attestations (0 = disabled) | +| `validatorReexecuteDeadlineMs` | Time reserved at end of slot for propagation/publishing | +| `attestationPollingIntervalMs` | How often to poll for attestations when collecting | +| `disabledValidators` | Validator addresses to exclude from duties | ### High Availability (HA) Keystore diff --git a/yarn-project/validator-client/src/config.ts b/yarn-project/validator-client/src/config.ts index 9a002c3843c1..6b367c214248 100644 --- a/yarn-project/validator-client/src/config.ts +++ b/yarn-project/validator-client/src/config.ts @@ -73,6 +73,10 @@ export const validatorClientConfigMappings: ConfigMappingsType { // Create 5 HA validator instances for use across all tests const baseConfig: ValidatorClientConfig & - Pick = { + Pick< + SlasherConfig, + 'slashBroadcastedInvalidBlockPenalty' | 'slashDuplicateProposalPenalty' | 'slashDuplicateAttestationPenalty' + > = { validatorPrivateKeys: new SecretValue(validatorPrivateKeys), attestationPollingIntervalMs: 1000, disableValidator: false, @@ -129,6 +132,7 @@ describe('ValidatorClient HA Integration', () => { slashBroadcastedInvalidBlockPenalty: 1n, l1Contracts: { rollupAddress }, slashDuplicateProposalPenalty: 1n, + slashDuplicateAttestationPenalty: 1n, haSigningEnabled: true, nodeId: 'ha-node-1', // temporary pollingIntervalMs: 100, @@ -168,7 +172,10 @@ describe('ValidatorClient HA Integration', () => { async function createHAValidator( pool: Pool, config: ValidatorClientConfig & - Pick, + Pick< + SlasherConfig, + 'slashBroadcastedInvalidBlockPenalty' | 'slashDuplicateProposalPenalty' | 'slashDuplicateAttestationPenalty' + >, ): Promise { // Track pool for cleanup pools.push(pool); diff --git 
a/yarn-project/validator-client/src/validator.integration.test.ts b/yarn-project/validator-client/src/validator.integration.test.ts index ffb248852b25..2e0551135539 100644 --- a/yarn-project/validator-client/src/validator.integration.test.ts +++ b/yarn-project/validator-client/src/validator.integration.test.ts @@ -26,7 +26,7 @@ import { type L1RollupConstants, getTimestampForSlot } from '@aztec/stdlib/epoch import { GasFees } from '@aztec/stdlib/gas'; import { tryStop } from '@aztec/stdlib/interfaces/server'; import { computeInHashFromL1ToL2Messages } from '@aztec/stdlib/messaging'; -import type { BlockProposal } from '@aztec/stdlib/p2p'; +import { type BlockProposal, CheckpointProposal } from '@aztec/stdlib/p2p'; import { mockTx } from '@aztec/stdlib/testing'; import type { PublicDataTreeLeaf } from '@aztec/stdlib/trees'; import { BlockHeader, type CheckpointGlobalVariables, Tx } from '@aztec/stdlib/tx'; @@ -170,6 +170,7 @@ describe('ValidatorClient Integration', () => { validatorReexecute: true, slashBroadcastedInvalidBlockPenalty: 10n, slashDuplicateProposalPenalty: 10n, + slashDuplicateAttestationPenalty: 10n, haSigningEnabled: false, skipCheckpointProposalValidation: false, skipPushProposedBlocksToArchiver: false, @@ -522,12 +523,13 @@ describe('ValidatorClient Integration', () => { () => buildTxs(2), ); - // Create a checkpoint proposal with wrong archive root - const badProposal = await proposer.validator.createCheckpointProposal( + // Create a checkpoint proposal with wrong archive root directly, bypassing the + // validator's anti-equivocation guard (which prevents two proposals for the same slot) + const badProposal = await CheckpointProposal.createProposalFromSigner( checkpoint.header, Fr.random(), // Wrong archive root undefined, - proposerSigner.address, + payload => Promise.resolve(proposerSigner.sign(payload)), ); await attestorValidateBlocks(blocks); diff --git a/yarn-project/validator-client/src/validator.test.ts 
b/yarn-project/validator-client/src/validator.test.ts index 62fa685ed8a8..ac288b1a58a1 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -55,7 +55,10 @@ import { ValidatorClient } from './validator.js'; describe('ValidatorClient', () => { let config: ValidatorClientConfig & - Pick & { + Pick< + SlasherConfig, + 'slashBroadcastedInvalidBlockPenalty' | 'slashDuplicateProposalPenalty' | 'slashDuplicateAttestationPenalty' + > & { disableTransactions: boolean; }; let validatorClient: ValidatorClient; @@ -118,6 +121,7 @@ describe('ValidatorClient', () => { validatorReexecute: false, slashBroadcastedInvalidBlockPenalty: 1n, slashDuplicateProposalPenalty: 1n, + slashDuplicateAttestationPenalty: 1n, disableTransactions: false, haSigningEnabled: false, l1Contracts: { rollupAddress: EthAddress.random() }, diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index 65fd888baba4..682aaf6997ed 100644 --- a/yarn-project/validator-client/src/validator.ts +++ b/yarn-project/validator-client/src/validator.ts @@ -18,7 +18,7 @@ import { RunningPromise } from '@aztec/foundation/running-promise'; import { sleep } from '@aztec/foundation/sleep'; import { DateProvider } from '@aztec/foundation/timer'; import type { KeystoreManager } from '@aztec/node-keystore'; -import type { DuplicateProposalInfo, P2P, PeerId } from '@aztec/p2p'; +import type { DuplicateAttestationInfo, DuplicateProposalInfo, P2P, PeerId } from '@aztec/p2p'; import { AuthRequest, AuthResponse, BlockProposalValidator, ReqRespSubProtocol } from '@aztec/p2p'; import { OffenseType, WANT_TO_SLASH_EVENT, type Watcher, type WatcherEmitter } from '@aztec/slasher'; import type { AztecAddress } from '@aztec/stdlib/aztec-address'; @@ -32,14 +32,14 @@ import type { WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; import { type L1ToL2MessageSource, accumulateCheckpointOutHashes } from 
'@aztec/stdlib/messaging'; -import type { - BlockProposal, - BlockProposalOptions, - CheckpointAttestation, - CheckpointProposalCore, - CheckpointProposalOptions, +import { + type BlockProposal, + type BlockProposalOptions, + type CheckpointAttestation, + CheckpointProposal, + type CheckpointProposalCore, + type CheckpointProposalOptions, } from '@aztec/stdlib/p2p'; -import { CheckpointProposal } from '@aztec/stdlib/p2p'; import type { CheckpointHeader } from '@aztec/stdlib/rollup'; import type { BlockHeader, CheckpointGlobalVariables, Tx } from '@aztec/stdlib/tx'; import { AttestationTimeoutError } from '@aztec/stdlib/validators'; @@ -80,14 +80,20 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) // Whether it has already registered handlers on the p2p client private hasRegisteredHandlers = false; - // Used to check if we are sending the same proposal twice - private previousProposal?: BlockProposal; + /** Tracks the last block proposal we created, to detect duplicate proposal attempts. */ + private lastProposedBlock?: BlockProposal; + + /** Tracks the last checkpoint proposal we created. */ + private lastProposedCheckpoint?: CheckpointProposal; private lastEpochForCommitteeUpdateLoop: EpochNumber | undefined; private epochCacheUpdateLoop: RunningPromise; private proposersOfInvalidBlocks: Set = new Set(); + /** Tracks the last checkpoint proposal we attested to, to prevent equivocation. 
*/ + private lastAttestedProposal?: CheckpointProposalCore; + protected constructor( private keyStore: ExtendedValidatorKeyStore, private epochCache: EpochCache, @@ -314,6 +320,11 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) this.handleDuplicateProposal(info); }); + // Duplicate attestation handler - triggers slashing for attestation equivocation + this.p2pClient.registerDuplicateAttestationCallback((info: DuplicateAttestationInfo) => { + this.handleDuplicateAttestation(info); + }); + const myAddresses = this.getValidatorAddresses(); this.p2pClient.registerThisValidatorAddresses(myAddresses); @@ -341,6 +352,15 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) return false; } + // Ignore proposals from ourselves (may happen in HA setups) + if (this.getValidatorAddresses().some(addr => addr.equals(proposer))) { + this.log.warn(`Ignoring block proposal from self for slot ${slotNumber}`, { + proposer: proposer.toString(), + slotNumber, + }); + return false; + } + // Check if we're in the committee (for metrics purposes) const inCommittee = await this.epochCache.filterInCommittee(slotNumber, this.getValidatorAddresses()); const partOfCommittee = inCommittee.length > 0; @@ -442,6 +462,15 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) return undefined; } + // Ignore proposals from ourselves (may happen in HA setups) + if (this.getValidatorAddresses().some(addr => addr.equals(proposer))) { + this.log.warn(`Ignoring block proposal from self for slot ${slotNumber}`, { + proposer: proposer.toString(), + slotNumber, + }); + return undefined; + } + // Check that I have any address in current committee before attesting const inCommittee = await this.epochCache.filterInCommittee(slotNumber, this.getValidatorAddresses()); const partOfCommittee = inCommittee.length > 0; @@ -515,14 +544,44 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) return 
undefined; } - return this.createCheckpointAttestationsFromProposal(proposal, attestors); + return await this.createCheckpointAttestationsFromProposal(proposal, attestors); + } + + /** + * Checks if we should attest to a slot based on equivocation prevention rules. + * @returns true if we should attest, false if we should skip + */ + private shouldAttestToSlot(slotNumber: SlotNumber): boolean { + // If attestToEquivocatedProposals is true, always allow + if (this.config.attestToEquivocatedProposals) { + return true; + } + + // Check if incoming slot is strictly greater than last attested + if (this.lastAttestedProposal && slotNumber <= this.lastAttestedProposal.slotNumber) { + this.log.warn( + `Refusing to process a proposal for slot ${slotNumber} given we already attested to a proposal for slot ${this.lastAttestedProposal.slotNumber}`, + ); + return false; + } + + return true; } private async createCheckpointAttestationsFromProposal( proposal: CheckpointProposalCore, attestors: EthAddress[] = [], - ): Promise { + ): Promise { + // Equivocation check: must happen right before signing to minimize the race window + if (!this.shouldAttestToSlot(proposal.slotNumber)) { + return undefined; + } + const attestations = await this.validationService.attestToCheckpointProposal(proposal, attestors); + + // Track the proposal we attested to (to prevent equivocation) + this.lastAttestedProposal = proposal; + await this.p2pClient.addOwnCheckpointAttestations(attestations); return attestations; } @@ -750,6 +809,28 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) ]); } + /** + * Handle detection of a duplicate attestation (equivocation). + * Emits a slash event when an attester signs attestations for different proposals at the same slot. 
+ */ + private handleDuplicateAttestation(info: DuplicateAttestationInfo): void { + const { slot, attester } = info; + + this.log.warn(`Triggering slash event for duplicate attestation from ${attester.toString()} at slot ${slot}`, { + attester: attester.toString(), + slot, + }); + + this.emit(WANT_TO_SLASH_EVENT, [ + { + validator: attester, + amount: this.config.slashDuplicateAttestationPenalty, + offenseType: OffenseType.DUPLICATE_ATTESTATION, + epochOrSlot: BigInt(slot), + }, + ]); + } + async createBlockProposal( blockHeader: BlockHeader, indexWithinCheckpoint: IndexWithinCheckpoint, @@ -759,11 +840,19 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) proposerAddress: EthAddress | undefined, options: BlockProposalOptions = {}, ): Promise { - // TODO(palla/mbps): Prevent double proposals properly - // if (this.previousProposal?.slotNumber === blockHeader.globalVariables.slotNumber) { - // this.log.verbose(`Already made a proposal for the same slot, skipping proposal`); - // return Promise.resolve(undefined); - // } + // Validate that we're not creating a proposal for an older or equal position + if (this.lastProposedBlock) { + const lastSlot = this.lastProposedBlock.slotNumber; + const lastIndex = this.lastProposedBlock.indexWithinCheckpoint; + const newSlot = blockHeader.globalVariables.slotNumber; + + if (newSlot < lastSlot || (newSlot === lastSlot && indexWithinCheckpoint <= lastIndex)) { + throw new Error( + `Cannot create block proposal for slot ${newSlot} index ${indexWithinCheckpoint}: ` + + `already proposed block for slot ${lastSlot} index ${lastIndex}`, + ); + } + } this.log.info( `Assembling block proposal for block ${blockHeader.globalVariables.blockNumber} slot ${blockHeader.globalVariables.slotNumber}`, @@ -780,7 +869,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) broadcastInvalidBlockProposal: this.config.broadcastInvalidBlockProposal, }, ); - this.previousProposal = 
newProposal; + this.lastProposedBlock = newProposal; return newProposal; } @@ -791,14 +880,29 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) proposerAddress: EthAddress | undefined, options: CheckpointProposalOptions = {}, ): Promise { + // Validate that we're not creating a proposal for an older or equal slot + if (this.lastProposedCheckpoint) { + const lastSlot = this.lastProposedCheckpoint.slotNumber; + const newSlot = checkpointHeader.slotNumber; + + if (newSlot <= lastSlot) { + throw new Error( + `Cannot create checkpoint proposal for slot ${newSlot}: ` + + `already proposed checkpoint for slot ${lastSlot}`, + ); + } + } + this.log.info(`Assembling checkpoint proposal for slot ${checkpointHeader.slotNumber}`); - return await this.validationService.createCheckpointProposal( + const newProposal = await this.validationService.createCheckpointProposal( checkpointHeader, archive, lastBlockInfo, proposerAddress, options, ); + this.lastProposedCheckpoint = newProposal; + return newProposal; } async broadcastBlockProposal(proposal: BlockProposal): Promise { @@ -820,6 +924,10 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) this.log.debug(`Collecting ${inCommittee.length} self-attestations for slot ${slot}`, { inCommittee }); const attestations = await this.createCheckpointAttestationsFromProposal(proposal, inCommittee); + if (!attestations) { + return []; + } + // We broadcast our own attestations to our peers so, in case our block does not get mined on L1, // other nodes can see that our validators did attest to this block proposal, and do not slash us // due to inactivity for missed attestations.