diff --git a/.github/release-please-v4.json b/.github/release-please-v4.json index e20a5dc85b0e..af042c8872a5 100644 --- a/.github/release-please-v4.json +++ b/.github/release-please-v4.json @@ -9,13 +9,14 @@ "include-component-in-tag": false, "release-search-depth": 50, "commit-search-depth": 150, + "bootstrap-sha": "60d353cded21d7ebeed9bc64b5ae87ebba5f9a38", "sequential-calls": true, "changelog-sections": [ { "type": "feat", "section": "Features", "hidden": false }, { "type": "fix", "section": "Bug Fixes", "hidden": false }, - { "type": "chore", "section": "Miscellaneous", "hidden": false }, - { "type": "test", "section": "Miscellaneous", "hidden": false }, - { "type": "refactor", "section": "Miscellaneous", "hidden": false }, + { "type": "chore", "section": "Miscellaneous", "hidden": true }, + { "type": "test", "section": "Miscellaneous", "hidden": true }, + { "type": "refactor", "section": "Miscellaneous", "hidden": true }, { "type": "docs", "section": "Documentation", "hidden": false } ], "packages": { diff --git a/.github/workflows/deploy-network.yml b/.github/workflows/deploy-network.yml index f788d58a9ed8..fbe634adef15 100644 --- a/.github/workflows/deploy-network.yml +++ b/.github/workflows/deploy-network.yml @@ -30,6 +30,10 @@ on: required: false type: boolean default: false + source_tag: + description: "Source tag that triggered this deploy" + required: false + type: string workflow_dispatch: inputs: network: @@ -59,6 +63,10 @@ on: required: false type: boolean default: false + source_tag: + description: "Source tag that triggered this deploy" + required: false + type: string concurrency: group: deploy-network-${{ inputs.network }}-${{ inputs.namespace || inputs.network }}-${{ inputs.semver }}-${{ github.ref || github.ref_name }} @@ -184,6 +192,22 @@ jobs: echo "cluster=" >> $GITHUB_OUTPUT fi + - name: Step summary + if: always() + run: | + { + echo "## Deploy Network" + echo "" + echo "| Item | Value |" + echo "|------|-------|" + echo "| Network | 
\`${{ inputs.network }}\` |" + echo "| Semver | \`${{ inputs.semver }}\` |" + echo "| Ref | \`${{ steps.checkout-ref.outputs.ref }}\` |" + if [[ -n "${{ inputs.source_tag }}" ]]; then + echo "| Source Tag | [\`${{ inputs.source_tag }}\`](https://github.com/${{ github.repository }}/releases/tag/${{ inputs.source_tag }}) |" + fi + } >> "$GITHUB_STEP_SUMMARY" + - name: Notify Slack on failure if: failure() env: diff --git a/.github/workflows/deploy-staging-public.yml b/.github/workflows/deploy-staging-public.yml new file mode 100644 index 000000000000..3979017bf698 --- /dev/null +++ b/.github/workflows/deploy-staging-public.yml @@ -0,0 +1,94 @@ +name: Deploy to staging-public + +on: + push: + branches: + - v4 + workflow_dispatch: {} + +concurrency: + group: deploy-staging-public + cancel-in-progress: true + +env: + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + +jobs: + determine-tag: + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.poll-tag.outputs.tag }} + semver: ${{ steps.poll-tag.outputs.semver }} + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + fetch-depth: 0 + + - name: Read version from manifest + id: manifest + run: | + VERSION=$(jq -r '."."' .release-please-manifest.json) + echo "version=$VERSION" + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Poll for tag at HEAD + id: poll-tag + run: | + # wait for tag to be pushed (either RC or stable release) + VERSION="${{ steps.manifest.outputs.version }}" + HEAD_SHA=$(git rev-parse HEAD) + MAX_ATTEMPTS=60 + echo "Looking for tag matching v${VERSION} or v${VERSION}-rc.* at HEAD ($HEAD_SHA)" + + for i in $(seq 1 $MAX_ATTEMPTS); do + git fetch --tags --force + + TAG=$(git tag --points-at HEAD | grep -E "^v${VERSION}(-rc\.[0-9]+)?$" | sort -V | tail -n 1 || true) + + if [ -n "$TAG" ]; then + echo "Found tag: $TAG" + SEMVER="${VERSION}" + echo "tag=$TAG" >> $GITHUB_OUTPUT + echo "semver=$SEMVER" >> 
$GITHUB_OUTPUT + exit 0 + fi + + echo "Attempt $i/$MAX_ATTEMPTS: No matching tag yet, waiting 10s..." + sleep 10 + done + + echo "Error: No tag found for v${VERSION} at HEAD after 10 minutes" + exit 1 + + wait-for-ci3: + needs: determine-tag + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + fetch-depth: 1 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Wait for CI3 + run: spartan/scripts/wait_for_ci3.ts "${{ needs.determine-tag.outputs.tag }}" + + deploy: + needs: [determine-tag, wait-for-ci3] + runs-on: ubuntu-latest + steps: + - name: Trigger deploy-network on next branch + run: | + echo "Triggering deploy-network for staging-public with semver=${{ needs.determine-tag.outputs.semver }}" + gh workflow run deploy-network.yml \ + --repo "${{ github.repository }}" \ + --ref next \ + -f network=staging-public \ + -f semver="${{ needs.determine-tag.outputs.semver }}" \ + -f source_tag="${{ needs.determine-tag.outputs.tag }}" diff --git a/.github/workflows/nightly-spartan-bench.yml b/.github/workflows/nightly-spartan-bench.yml index e331be8f8ec5..f30b36ff6ee6 100644 --- a/.github/workflows/nightly-spartan-bench.yml +++ b/.github/workflows/nightly-spartan-bench.yml @@ -6,7 +6,7 @@ on: workflow_dispatch: inputs: nightly_tag: - description: "Nightly tag to use (e.g., 2.3.4-spartan.20251209). Leave empty to auto-detect." + description: "Nightly tag to use (e.g., 2.3.4-nightly.20251209). Leave empty to auto-detect." 
required: false type: string @@ -21,7 +21,7 @@ jobs: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: - ref: merge-train/spartan + ref: next - name: Determine nightly tag id: nightly-tag @@ -30,7 +30,7 @@ jobs: nightly_tag="${{ inputs.nightly_tag }}" else current_version=$(jq -r '."."' .release-please-manifest.json) - nightly_tag="${current_version}-spartan.$(date -u +%Y%m%d)" + nightly_tag="${current_version}-nightly.$(date -u +%Y%m%d)" fi echo "nightly_tag=$nightly_tag" >> $GITHUB_OUTPUT echo "Using nightly tag: $nightly_tag" @@ -126,7 +126,7 @@ jobs: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: - ref: merge-train/spartan + ref: next - name: Determine nightly tag id: nightly-tag @@ -135,7 +135,7 @@ jobs: nightly_tag="${{ inputs.nightly_tag }}" else current_version=$(jq -r '."."' .release-please-manifest.json) - nightly_tag="${current_version}-spartan.$(date -u +%Y%m%d)" + nightly_tag="${current_version}-nightly.$(date -u +%Y%m%d)" fi echo "nightly_tag=$nightly_tag" >> $GITHUB_OUTPUT echo "Using nightly tag: $nightly_tag" diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 8d26264b0834..904364e64244 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -134,6 +134,74 @@ jobs: echo "✅ Created tag: $TAG_NAME" + dedupe-release-notes: + name: Deduplicate release notes + needs: [release-please] + if: ${{ needs.release-please.outputs.release-pr }} + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - name: Check if release notes branch exists + id: check-branch + env: + GH_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + run: | + NOTES_BRANCH="${{ fromJSON(needs.release-please.outputs.release-pr).headBranchName }}--release-notes" + if git ls-remote --exit-code "https://x-access-token:${GH_TOKEN}@github.com/${{ github.repository }}.git" "refs/heads/$NOTES_BRANCH" 
>/dev/null 2>&1; then + echo "exists=true" >> $GITHUB_OUTPUT + echo "branch=$NOTES_BRANCH" >> $GITHUB_OUTPUT + else + echo "exists=false" >> $GITHUB_OUTPUT + echo "Release notes branch $NOTES_BRANCH does not exist, skipping deduplication" + fi + + - name: Checkout release notes branch + if: steps.check-branch.outputs.exists == 'true' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: ${{ steps.check-branch.outputs.branch }} + fetch-depth: 1 + token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + + - name: Fetch dedupe script from target branch + if: steps.check-branch.outputs.exists == 'true' + run: | + git fetch origin ${{ github.ref_name }} --depth=1 + git show origin/${{ github.ref_name }}:scripts/dedupe_release_notes.py > /tmp/dedupe_release_notes.py + + - name: Configure Git + if: steps.check-branch.outputs.exists == 'true' + run: | + git config --global user.name AztecBot + git config --global user.email tech@aztecprotocol.com + + - name: Deduplicate release notes + if: steps.check-branch.outputs.exists == 'true' + run: | + if [ -f release-notes.md ]; then + python3 /tmp/dedupe_release_notes.py release-notes.md + git add release-notes.md + if ! 
git diff --cached --quiet; then + git commit -m "chore: deduplicate release notes" + git push + fi + fi + + - name: Update PR body with deduped notes + if: steps.check-branch.outputs.exists == 'true' + env: + GH_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + run: | + if [ -f release-notes.md ]; then + PR_NUMBER=${{ fromJSON(needs.release-please.outputs.release-pr).number }} + NOTES_SIZE=$(wc -c < release-notes.md) + if [ "$NOTES_SIZE" -lt 60000 ]; then + gh pr edit "$PR_NUMBER" --body "$(cat release-notes.md)" + fi + fi + update-docs: name: Update docs env: diff --git a/.github/workflows/weekly-proving-bench.yml b/.github/workflows/weekly-proving-bench.yml index f9f561f6d88e..bd066b47df88 100644 --- a/.github/workflows/weekly-proving-bench.yml +++ b/.github/workflows/weekly-proving-bench.yml @@ -6,7 +6,7 @@ on: workflow_dispatch: inputs: nightly_tag: - description: "Nightly tag to use (e.g., 2.3.4-spartan.20251209). Leave empty to auto-detect." + description: "Nightly tag to use (e.g., 2.3.4-nightly.20251209). Leave empty to auto-detect." 
required: false type: string @@ -21,7 +21,7 @@ jobs: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: - ref: merge-train/spartan + ref: next - name: Determine nightly tag id: nightly-tag @@ -30,7 +30,7 @@ jobs: nightly_tag="${{ inputs.nightly_tag }}" else current_version=$(jq -r '."."' .release-please-manifest.json) - nightly_tag="${current_version}-spartan.$(date -u +%Y%m%d)" + nightly_tag="${current_version}-nightly.$(date -u +%Y%m%d)" fi echo "nightly_tag=$nightly_tag" >> $GITHUB_OUTPUT echo "Using nightly tag: $nightly_tag" diff --git a/ci3/docker_isolate b/ci3/docker_isolate index 6eccabdba8b1..07fd643546a4 100755 --- a/ci3/docker_isolate +++ b/ci3/docker_isolate @@ -77,6 +77,7 @@ cid=$(docker run -d \ -e TMPFS_SIZE \ -e USE_HOME_TMP \ -e AVM \ + -e ELU_MONITOR_FILE \ "${arg_env_vars[@]}" \ aztecprotocol/build:3.0 \ /bin/bash -c "$cmd") diff --git a/ci3/exec_test b/ci3/exec_test index 78622dcc8c40..ec1ded1a58f7 100755 --- a/ci3/exec_test +++ b/ci3/exec_test @@ -43,6 +43,9 @@ EOF # If the test has a verbose mode, we want it enabled. export VERBOSE=1 +# ELU monitor file path (e2e tests write event loop data here, uploaded after test finishes). +export ELU_MONITOR_FILE=$HOME/.elu_monitor_$$.log + # Run the test with timestamps via process substitution (preserves exit code) set +e if [ "${ISOLATE:-0}" -eq 1 ]; then @@ -54,4 +57,11 @@ fi code=$? set -e +# Upload ELU data if present (written by e2e tests, absent for C++/Rust/Noir). 
+if [ -s "$ELU_MONITOR_FILE" ]; then + echo "ELU file: $ELU_MONITOR_FILE ($(wc -l < "$ELU_MONITOR_FILE") lines, $(wc -c < "$ELU_MONITOR_FILE") bytes)" + cat "$ELU_MONITOR_FILE" | cache_log "elufile:${NAME:-unknown}" + rm -f "$ELU_MONITOR_FILE" +fi + exit $code diff --git a/release-image/Dockerfile b/release-image/Dockerfile index 48c6c06a9572..4a4c329b3ff7 100644 --- a/release-image/Dockerfile +++ b/release-image/Dockerfile @@ -7,6 +7,7 @@ ARG BUILD_METADATA="" ENV BUILD_METADATA=$BUILD_METADATA # Provide paths to bb and acvm for use in yarn-project, also create default working directories for each +ENV UV_THREADPOOL_SIZE=8 ENV BB_WORKING_DIRECTORY=/usr/src/bb ENV BB_BINARY_PATH=/usr/src/barretenberg/cpp/build/bin/bb-avm ENV ACVM_WORKING_DIRECTORY=/usr/src/acvm diff --git a/scripts/dedupe_release_notes.py b/scripts/dedupe_release_notes.py index 13799f5ee841..ed41f0ed1507 100755 --- a/scripts/dedupe_release_notes.py +++ b/scripts/dedupe_release_notes.py @@ -75,7 +75,7 @@ def deduplicate_release_notes(input_file, output_file=None): if output_file is None: output_file = input_file - with open(input_file, 'r', encoding='utf-8') as f: + with open(input_file, 'r', encoding='utf-8', errors='replace') as f: lines = f.readlines() original_count = len(lines) diff --git a/spartan/aztec-node/templates/_pod-template.yaml b/spartan/aztec-node/templates/_pod-template.yaml index 250e4e6d49f9..c8e9345c733c 100644 --- a/spartan/aztec-node/templates/_pod-template.yaml +++ b/spartan/aztec-node/templates/_pod-template.yaml @@ -287,9 +287,9 @@ spec: - name: L1_CONSENSUS_HOST_URLS value: {{ join "," .Values.global.l1ConsensusUrls | quote }} - name: L1_CONSENSUS_HOST_API_KEYS - value: {{ join "," .Values.global.l1ConsensusHostApiKeys | quote }} + value: {{ .Values.global.l1ConsensusHostApiKeys | quote }} - name: L1_CONSENSUS_HOST_API_KEY_HEADERS - value: {{ join "," .Values.global.l1ConsensusHostApiKeyHeaders | quote }} + value: {{ .Values.global.l1ConsensusHostApiKeyHeaders | quote }} 
{{- end }} - name: DATA_DIRECTORY value: "{{ .Values.node.storage.dataDirectory }}/data" diff --git a/spartan/aztec-node/values.yaml b/spartan/aztec-node/values.yaml index d522e4541616..2ed08bf5720d 100644 --- a/spartan/aztec-node/values.yaml +++ b/spartan/aztec-node/values.yaml @@ -29,9 +29,9 @@ global: l1ConsensusUrls: [] ## Only when api key is required via header, otherwise just provide in l1ConsensusHostUrls ## Example: "1234abcd" - l1ConsensusHostApiKeys: [] + l1ConsensusHostApiKeys: "" ## Example: "X-API-KEY" - l1ConsensusHostApiKeyHeaders: [] + l1ConsensusHostApiKeyHeaders: "" aztecImage: repository: aztecprotocol/aztec diff --git a/spartan/aztec-prover-stack/values.yaml b/spartan/aztec-prover-stack/values.yaml index 250591371df9..89380d45e8f2 100644 --- a/spartan/aztec-prover-stack/values.yaml +++ b/spartan/aztec-prover-stack/values.yaml @@ -6,8 +6,8 @@ global: l1ExecutionUrls: [] l1ConsensusUrls: [] - l1ConsensusHostApiKeys: [] - l1ConsensusHostApiKeyHeaders: [] + l1ConsensusHostApiKeys: "" + l1ConsensusHostApiKeyHeaders: "" node: nodeType: "prover-node" diff --git a/spartan/aztec-validator/values.yaml b/spartan/aztec-validator/values.yaml index 05639db89a9d..26f68e65e6df 100644 --- a/spartan/aztec-validator/values.yaml +++ b/spartan/aztec-validator/values.yaml @@ -6,8 +6,8 @@ global: l1ExecutionUrls: [] l1ConsensusUrls: [] - l1ConsensusHostApiKeys: [] - l1ConsensusHostApiKeyHeaders: [] + l1ConsensusHostApiKeys: "" + l1ConsensusHostApiKeyHeaders: "" validator: nodeType: "sequencer-node" diff --git a/spartan/environments/staging-public.env b/spartan/environments/staging-public.env index a8a591d0d35c..35ec3b1db6fb 100644 --- a/spartan/environments/staging-public.env +++ b/spartan/environments/staging-public.env @@ -14,17 +14,16 @@ ROLLUP_DEPLOYMENT_PRIVATE_KEY=REPLACE_WITH_GCP_SECRET OTEL_COLLECTOR_ENDPOINT=REPLACE_WITH_GCP_SECRET VERIFY_CONTRACTS=true ETHERSCAN_API_KEY=REPLACE_WITH_GCP_SECRET -DEPLOY_INTERNAL_BOOTNODE=false +DEPLOY_INTERNAL_BOOTNODE=true 
SNAPSHOT_BUCKET_DIRECTORY=${SNAPSHOT_BUCKET_DIRECTORY:-staging-public} BLOB_BUCKET_DIRECTORY=${BLOB_BUCKET_DIRECTORY:-staging-public/blobs} R2_ACCESS_KEY_ID=REPLACE_WITH_GCP_SECRET R2_SECRET_ACCESS_KEY=REPLACE_WITH_GCP_SECRET -PROVER_FAILED_PROOF_STORE=gs://aztec-develop/staging-public/failed-proofs TEST_ACCOUNTS=false SPONSORED_FPC=true + SEQ_MIN_TX_PER_BLOCK=0 -SEQ_MAX_TX_PER_BLOCK=1 -PROVER_REPLICAS=4 +SEQ_MAX_TX_PER_BLOCK=4 CREATE_ROLLUP_CONTRACTS=${CREATE_ROLLUP_CONTRACTS:-false} P2P_TX_POOL_DELETE_TXS_AFTER_REORG=true @@ -33,7 +32,11 @@ VALIDATOR_REPLICAS=5 VALIDATORS_PER_NODE=16 PUBLISHERS_PER_VALIDATOR_KEY=2 VALIDATOR_PUBLISHER_MNEMONIC_START_INDEX=5000 +VALIDATOR_HA_REPLICAS=1 +VALIDATOR_RESOURCE_PROFILE="prod-spot" +PROVER_FAILED_PROOF_STORE=gs://aztec-develop/staging-public/failed-proofs +PROVER_REPLICAS=4 PUBLISHERS_PER_PROVER=2 PROVER_PUBLISHER_MNEMONIC_START_INDEX=8000 diff --git a/spartan/metrics/grafana/alerts/rules.yaml b/spartan/metrics/grafana/alerts/rules.yaml index bfe88fa23d11..213204f4da5c 100644 --- a/spartan/metrics/grafana/alerts/rules.yaml +++ b/spartan/metrics/grafana/alerts/rules.yaml @@ -1340,3 +1340,88 @@ groups: labels: "": "" isPaused: false + - uid: pcomdgyzwtlaxv + title: Proof failed + condition: C + data: + - refId: A + relativeTimeRange: + from: 600 + to: 0 + datasourceUid: spartan-metrics-prometheus + model: + editorMode: code + expr: sum by (k8s_namespace_name) (increase(aztec_proving_queue_rejected_jobs_count[$__rate_interval])) + instant: true + intervalMs: 60000 + legendFormat: __auto + maxDataPoints: 43200 + range: false + refId: A + - refId: B + relativeTimeRange: + from: 600 + to: 0 + datasourceUid: __expr__ + model: + conditions: + - evaluator: + params: [] + type: gt + operator: + type: and + query: + params: + - B + reducer: + params: [] + type: last + type: query + datasource: + type: __expr__ + uid: __expr__ + expression: A + intervalMs: 1000 + maxDataPoints: 43200 + reducer: last + refId: B + type: reduce + - 
refId: C + relativeTimeRange: + from: 600 + to: 0 + datasourceUid: __expr__ + model: + conditions: + - evaluator: + params: + - 0 + type: gt + operator: + type: and + query: + params: + - C + reducer: + params: [] + type: last + type: query + datasource: + type: __expr__ + uid: __expr__ + expression: B + intervalMs: 1000 + maxDataPoints: 43200 + refId: C + type: threshold + dashboardUid: "" + panelId: 0 + noDataState: NoData + execErrState: Error + for: 5m + annotations: + summary: Proof failed + description: One or more proving jobs were rejected in {{ ` {{ $labels.k8s_namespace_name }} ` }}. + labels: + <<: *common_labels + isPaused: false diff --git a/spartan/metrics/grafana/dashboards/aztec_provers.json b/spartan/metrics/grafana/dashboards/aztec_provers.json index 37d2c8c6bb5f..03b957020b90 100644 --- a/spartan/metrics/grafana/dashboards/aztec_provers.json +++ b/spartan/metrics/grafana/dashboards/aztec_provers.json @@ -358,6 +358,19 @@ "legendFormat": "Failed jobs", "range": true, "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${data_source}" + }, + "editorMode": "code", + "expr": "sum(aztec_proving_queue_aborted_jobs_count{k8s_namespace_name=\"$namespace\"}) or vector(0)", + "hide": false, + "instant": false, + "legendFormat": "Aborted jobs", + "range": true, + "refId": "C" } ], "title": "Job status", diff --git a/spartan/terraform/deploy-aztec-infra/main.tf b/spartan/terraform/deploy-aztec-infra/main.tf index 14194ae4a5cf..b00d4f8447f6 100644 --- a/spartan/terraform/deploy-aztec-infra/main.tf +++ b/spartan/terraform/deploy-aztec-infra/main.tf @@ -113,11 +113,20 @@ locals { "global.testAccounts" = var.TEST_ACCOUNTS } + common_inline_values = yamlencode({ + global = merge( + length(var.L1_CONSENSUS_HOST_API_KEYS) > 0 ? { + l1ConsensusHostApiKeys = join(",", var.L1_CONSENSUS_HOST_API_KEYS) + } : {}, + length(var.L1_CONSENSUS_HOST_API_KEY_HEADERS) > 0 ? 
{ + l1ConsensusHostApiKeyHeaders = join(",", var.L1_CONSENSUS_HOST_API_KEY_HEADERS) + } : {} + ) + }) + common_list_settings = { - "global.l1ExecutionUrls" = var.L1_RPC_URLS - "global.l1ConsensusUrls" = var.L1_CONSENSUS_HOST_URLS - "global.l1ConsensusHostApiKeys" = var.L1_CONSENSUS_HOST_API_KEYS - "global.l1ConsensusHostApiKeyHeaders" = var.L1_CONSENSUS_HOST_API_KEY_HEADERS + "global.l1ExecutionUrls" = var.L1_RPC_URLS + "global.l1ConsensusUrls" = var.L1_CONSENSUS_HOST_URLS } # Generate a set of _external_ host ports to use for P2P @@ -205,7 +214,7 @@ locals { "validator.node.env.SEQ_MAX_TX_PER_BLOCK" = var.SEQ_MAX_TX_PER_BLOCK "validator.node.env.SEQ_BLOCK_DURATION_MS" = var.SEQ_BLOCK_DURATION_MS "validator.node.env.SEQ_BUILD_CHECKPOINT_IF_EMPTY" = var.SEQ_BUILD_CHECKPOINT_IF_EMPTY - "validator.node.env.SEQ_ENFORCE_TIME_TABLE" = var.SEQ_ENFORCE_TIME_TABLE + "validator.node.env.SEQ_ENFORCE_TIME_TABLE" = var.SEQ_ENFORCE_TIME_TABLE "validator.node.env.P2P_TX_POOL_DELETE_TXS_AFTER_REORG" = var.P2P_TX_POOL_DELETE_TXS_AFTER_REORG "validator.node.env.L1_PRIORITY_FEE_BUMP_PERCENTAGE" = var.VALIDATOR_L1_PRIORITY_FEE_BUMP_PERCENTAGE "validator.node.env.L1_PRIORITY_FEE_RETRY_BUMP_PERCENTAGE" = var.VALIDATOR_L1_PRIORITY_FEE_RETRY_BUMP_PERCENTAGE @@ -424,7 +433,7 @@ locals { type = local.is_kind ? "ClusterIP" : "LoadBalancer" } } - })], var.FISHERMAN_MODE ? [yamlencode({ + })], var.FISHERMAN_MODE ? 
[yamlencode({ node = { logLevel = var.FISHERMAN_LOG_LEVEL } @@ -682,6 +691,7 @@ resource "helm_release" "releases" { values = concat( [for v in each.value.values : file("./values/${v}")], + [local.common_inline_values], lookup(each.value, "inline_values", []) ) diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 96b30ee28dfa..99481ba2373e 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -20,7 +20,13 @@ import { MembershipWitness, SiblingPath } from '@aztec/foundation/trees'; import { type KeyStore, KeystoreManager, loadKeystores, mergeKeystores } from '@aztec/node-keystore'; import { trySnapshotSync, uploadSnapshot } from '@aztec/node-lib/actions'; import { createForwarderL1TxUtilsFromSigners, createL1TxUtilsFromSigners } from '@aztec/node-lib/factories'; -import { type P2P, type P2PClientDeps, createP2PClient, getDefaultAllowedSetupFunctions } from '@aztec/p2p'; +import { + type P2P, + type P2PClientDeps, + createP2PClient, + createTxValidatorForAcceptingTxsOverRPC, + getDefaultAllowedSetupFunctions, +} from '@aztec/p2p'; import { ProtocolContractAddress } from '@aztec/protocol-contracts'; import { type ProverNode, type ProverNodeDeps, createProverNode } from '@aztec/prover-node'; import { createKeyStoreForProver } from '@aztec/prover-node/config'; @@ -105,7 +111,6 @@ import { ValidatorClient, createBlockProposalHandler, createValidatorClient, - createValidatorForAcceptingTxs, } from '@aztec/validator-client'; import { createWorldStateSynchronizer } from '@aztec/world-state'; @@ -1292,7 +1297,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { // We accept transactions if they are not expired by the next slot (checked based on the ExpirationTimestamp field) const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot(); const blockNumber = BlockNumber((await 
this.blockSource.getBlockNumber()) + 1); - const validator = createValidatorForAcceptingTxs( + const validator = createTxValidatorForAcceptingTxsOverRPC( db, this.contractDataSource, verifier, diff --git a/yarn-project/bootstrap.sh b/yarn-project/bootstrap.sh index a7992576d9e5..fdf24a891720 100755 --- a/yarn-project/bootstrap.sh +++ b/yarn-project/bootstrap.sh @@ -247,6 +247,7 @@ function bench_cmds { echo "$hash BENCH_OUTPUT=bench-out/tx.bench.json yarn-project/scripts/run_test.sh stdlib/src/tx/tx_bench.test.ts" echo "$hash:ISOLATE=1:CPUS=10:MEM=16g:LOG_LEVEL=silent BENCH_OUTPUT=bench-out/proving_broker.bench.json yarn-project/scripts/run_test.sh prover-client/src/test/proving_broker_testbench.test.ts" echo "$hash:ISOLATE=1:CPUS=16:MEM=16g BENCH_OUTPUT=bench-out/avm_bulk_test.bench.json yarn-project/scripts/run_test.sh bb-prover/src/avm_proving_tests/avm_bulk.test.ts" + echo "$hash BENCH_OUTPUT=bench-out/lightweight_checkpoint_builder.bench.json yarn-project/scripts/run_test.sh prover-client/src/light/lightweight_checkpoint_builder.bench.test.ts" } function release_packages { diff --git a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts index ff561ecfff69..2872ae1ab0c9 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts @@ -30,6 +30,8 @@ const CHECK_ALERTS = process.env.CHECK_ALERTS === 'true'; const NUM_VALIDATORS = 4; const NUM_TXS_PER_NODE = 2; const BOOT_NODE_UDP_PORT = 4500; +const AZTEC_SLOT_DURATION = 36; +const AZTEC_EPOCH_DURATION = 4; const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'gossip-')); @@ -61,8 +63,8 @@ describe('e2e_p2p_network', () => { startProverNode: false, // we'll start our own using p2p initialConfig: { ...SHORTENED_BLOCK_TIME_CONFIG_NO_PRUNES, - aztecSlotDuration: 36, - aztecEpochDuration: 4, + aztecSlotDuration: AZTEC_SLOT_DURATION, + aztecEpochDuration: 
AZTEC_EPOCH_DURATION, slashingRoundSizeInEpochs: 2, slashingQuorum: 5, listenAddress: '127.0.0.1', @@ -211,7 +213,7 @@ describe('e2e_p2p_network', () => { return provenBlock > 0; }, 'proven block', - 120, + SHORTENED_BLOCK_TIME_CONFIG_NO_PRUNES.aztecProofSubmissionWindow * AZTEC_EPOCH_DURATION * AZTEC_SLOT_DURATION, ); }); }); diff --git a/yarn-project/end-to-end/src/fixtures/elu_monitor.ts b/yarn-project/end-to-end/src/fixtures/elu_monitor.ts new file mode 100644 index 000000000000..a25cd3b145be --- /dev/null +++ b/yarn-project/end-to-end/src/fixtures/elu_monitor.ts @@ -0,0 +1,126 @@ +import { appendFileSync } from 'node:fs'; +import { type EventLoopUtilization, type IntervalHistogram, monitorEventLoopDelay, performance } from 'node:perf_hooks'; + +const NANOS_PER_MS = 1_000_000; + +/** Samples event-loop utilization, delay histogram, and heap usage per test, writing columnar text to a file. */ +export class EluMonitor { + private filePath: string; + private intervalMs: number; + private timer: ReturnType | undefined; + private lastELU: EventLoopUtilization | undefined; + private histogram: IntervalHistogram; + private testName: string | undefined; + private testStart: number | undefined; + private eluSamples: number[] = []; + + constructor(filePath: string, intervalMs?: number) { + this.filePath = filePath; + this.intervalMs = intervalMs ?? 2000; + this.histogram = monitorEventLoopDelay({ resolution: 20 }); + } + + /** Begin sampling for a test. Writes a header line and starts the periodic sampler. 
*/ + startTest(testName: string): void { + this.stopTest(); + + this.testName = testName; + this.testStart = performance.now(); + this.eluSamples = []; + + appendFileSync(this.filePath, `\n=== Test: ${testName} ===\n`); + appendFileSync( + this.filePath, + padColumns('TIME', 'ELU', 'EL_DLY_P50', 'EL_DLY_P99', 'EL_DLY_MAX', 'HEAP_MB') + '\n', + ); + + this.lastELU = performance.eventLoopUtilization(); + this.histogram.enable(); + + this.timer = setInterval(() => this.sample(), this.intervalMs); + // Allow the process to exit even if the timer is still running. + this.timer.unref(); + } + + /** Stop sampling and write a summary line. */ + stopTest(): void { + if (!this.timer) { + return; + } + + // Take a final sample before stopping. + this.sample(); + + clearInterval(this.timer); + this.timer = undefined; + this.histogram.disable(); + this.histogram.reset(); + + this.writeSummary(); + + this.lastELU = undefined; + this.testName = undefined; + this.testStart = undefined; + this.eluSamples = []; + } + + /** Alias for stopTest — call on process exit to flush any remaining data. 
*/ + stop(): void { + this.stopTest(); + } + + private sample(): void { + const newELU = performance.eventLoopUtilization(); + const delta = performance.eventLoopUtilization(newELU, this.lastELU); + this.lastELU = newELU; + + const elu = delta.utilization; + this.eluSamples.push(elu); + + const p50 = this.histogram.percentile(50) / NANOS_PER_MS; + const p99 = this.histogram.percentile(99) / NANOS_PER_MS; + const max = this.histogram.max / NANOS_PER_MS; + const heapMb = Math.round(process.memoryUsage().heapUsed / (1024 * 1024)); + + const now = new Date(); + const time = [now.getHours(), now.getMinutes(), now.getSeconds()].map(n => String(n).padStart(2, '0')).join(':'); + + const line = padColumns( + time, + elu.toFixed(2), + `${p50.toFixed(1)}ms`, + `${p99.toFixed(1)}ms`, + `${max.toFixed(1)}ms`, + String(heapMb), + ); + appendFileSync(this.filePath, line + '\n'); + + // Reset histogram so next sample only reflects the new interval. + this.histogram.reset(); + } + + private writeSummary(): void { + if (this.eluSamples.length === 0 || this.testStart === undefined) { + return; + } + + const mean = this.eluSamples.reduce((a, b) => a + b, 0) / this.eluSamples.length; + const maxElu = Math.max(...this.eluSamples); + const sorted = [...this.eluSamples].sort((a, b) => a - b); + const p90Elu = sorted[Math.floor(sorted.length * 0.9)] ?? maxElu; + const durationS = ((performance.now() - this.testStart) / 1000).toFixed(1); + + let summary = `--- Summary: mean_elu=${mean.toFixed(2)} max_elu=${maxElu.toFixed(2)} p90_elu=${p90Elu.toFixed(2)} duration=${durationS}s`; + if (maxElu > 0.85) { + summary += ' WARNING:ELU>0.85'; + } + summary += ' ---\n'; + + appendFileSync(this.filePath, summary); + } +} + +function padColumns(...cols: string[]): string { + const widths = [11, 7, 12, 12, 12, 8]; + return cols.map((col, i) => col.padEnd(widths[i] ?? 
10)).join(''); +} diff --git a/yarn-project/end-to-end/src/shared/jest_setup.ts b/yarn-project/end-to-end/src/shared/jest_setup.ts index 6d06fbdbacfa..698c01b1e8cb 100644 --- a/yarn-project/end-to-end/src/shared/jest_setup.ts +++ b/yarn-project/end-to-end/src/shared/jest_setup.ts @@ -1,8 +1,18 @@ import { createLogger } from '@aztec/aztec.js/log'; -import { beforeEach, expect } from '@jest/globals'; +import { afterEach, beforeEach, expect } from '@jest/globals'; import { basename } from 'path'; +import { EluMonitor } from '../fixtures/elu_monitor.js'; + +const eluMonitor = process.env.ELU_MONITOR_FILE + ? new EluMonitor(process.env.ELU_MONITOR_FILE, Number(process.env.ELU_MONITOR_INTERVAL_MS) || undefined) + : undefined; + +if (eluMonitor) { + process.on('exit', () => eluMonitor.stop()); +} + beforeEach(() => { const { testPath, currentTestName } = expect.getState(); if (!testPath || !currentTestName) { @@ -10,4 +20,9 @@ beforeEach(() => { } const logger = createLogger(`e2e:${basename(testPath).replace('.test.ts', '')}`); logger.info(`Running test: ${currentTestName}`); + eluMonitor?.startTest(currentTestName); +}); + +afterEach(() => { + eluMonitor?.stopTest(); }); diff --git a/yarn-project/end-to-end/src/test-wallet/worker_wallet.ts b/yarn-project/end-to-end/src/test-wallet/worker_wallet.ts index ca4cd97644c1..3857b3d2fe95 100644 --- a/yarn-project/end-to-end/src/test-wallet/worker_wallet.ts +++ b/yarn-project/end-to-end/src/test-wallet/worker_wallet.ts @@ -70,7 +70,7 @@ export class WorkerWallet implements Wallet { env: { ...parentEnv, ...(process.stderr.isTTY || process.env.FORCE_COLOR ? { FORCE_COLOR: '1' } : {}), - LOG_LEVEL: process.env.WORKER_LOG_LEVEL ?? 'warning', + LOG_LEVEL: process.env.WORKER_LOG_LEVEL ?? 
'warn', }, }); diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 37c888fcf253..c7bdfed39b44 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -82,6 +82,7 @@ export type EnvVar = | 'L1_CONSENSUS_HOST_URLS' | 'L1_CONSENSUS_HOST_API_KEYS' | 'L1_CONSENSUS_HOST_API_KEY_HEADERS' + | 'L1_TX_FAILED_STORE' | 'LOG_JSON' | 'LOG_MULTILINE' | 'LOG_NO_COLOR_PER_ACTOR' diff --git a/yarn-project/node-lib/src/actions/snapshot-sync.ts b/yarn-project/node-lib/src/actions/snapshot-sync.ts index 6d7274b3c2b1..d60760bf6aa1 100644 --- a/yarn-project/node-lib/src/actions/snapshot-sync.ts +++ b/yarn-project/node-lib/src/actions/snapshot-sync.ts @@ -82,7 +82,11 @@ export async function trySnapshotSync(config: SnapshotSyncConfig, log: Logger) { } const currentL1BlockNumber = await getPublicClient(config).getBlockNumber(); - if (archiverL1BlockNumber && currentL1BlockNumber - archiverL1BlockNumber < minL1BlocksToTriggerReplace) { + if ( + archiverL1BlockNumber && + currentL1BlockNumber >= archiverL1BlockNumber && + currentL1BlockNumber - archiverL1BlockNumber < minL1BlocksToTriggerReplace + ) { log.verbose( `Skipping snapshot sync as archiver is less than ${ currentL1BlockNumber - archiverL1BlockNumber diff --git a/yarn-project/p2p/src/client/factory.ts b/yarn-project/p2p/src/client/factory.ts index 805500ee22c1..497eb08d6a39 100644 --- a/yarn-project/p2p/src/client/factory.ts +++ b/yarn-project/p2p/src/client/factory.ts @@ -1,15 +1,15 @@ import type { EpochCacheInterface } from '@aztec/epoch-cache'; +import { BlockNumber } from '@aztec/foundation/branded-types'; import { type Logger, createLogger } from '@aztec/foundation/log'; import { DateProvider } from '@aztec/foundation/timer'; import type { AztecAsyncKVStore } from '@aztec/kv-store'; import type { DataStoreConfig } from '@aztec/kv-store/config'; import { AztecLMDBStoreV2, createStore } from 
'@aztec/kv-store/lmdb-v2'; -import type { BlockHash, L2BlockSource } from '@aztec/stdlib/block'; +import type { L2BlockSource } from '@aztec/stdlib/block'; import type { ChainConfig } from '@aztec/stdlib/config'; import type { ContractDataSource } from '@aztec/stdlib/contract'; import type { AztecNode, ClientProtocolCircuitVerifier, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import { P2PClientType } from '@aztec/stdlib/p2p'; -import { MerkleTreeId } from '@aztec/stdlib/trees'; import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client'; import { P2PClient } from '../client/p2p_client.js'; @@ -17,11 +17,8 @@ import type { P2PConfig } from '../config.js'; import { AttestationPool, type AttestationPoolApi } from '../mem_pools/attestation_pool/attestation_pool.js'; import type { MemPools } from '../mem_pools/interface.js'; import type { TxPoolV2 } from '../mem_pools/tx_pool_v2/interfaces.js'; -import type { TxMetaData } from '../mem_pools/tx_pool_v2/tx_metadata.js'; import { AztecKVTxPoolV2 } from '../mem_pools/tx_pool_v2/tx_pool_v2.js'; -import { AggregateTxValidator } from '../msg_validators/tx_validator/aggregate_tx_validator.js'; -import { BlockHeaderTxValidator } from '../msg_validators/tx_validator/block_header_validator.js'; -import { DoubleSpendTxValidator } from '../msg_validators/tx_validator/double_spend_validator.js'; +import { createTxValidatorForTransactionsEnteringPendingTxPool } from '../msg_validators/index.js'; import { DummyP2PService } from '../services/dummy_service.js'; import { LibP2PService } from '../services/index.js'; import { createFileStoreTxSources } from '../services/tx_collection/file_store_tx_source.js'; @@ -80,32 +77,6 @@ export async function createP2PClient( const rollupAddress = inputConfig.l1Contracts.rollupAddress.toString().toLowerCase().replace(/^0x/, ''); const txFileStoreBasePath = `aztec-${inputConfig.l1ChainId}-${inputConfig.rollupVersion}-0x${rollupAddress}`; - /** Validator 
factory for pool re-validation (double-spend + block header only). */ - const createPoolTxValidator = async () => { - await worldStateSynchronizer.syncImmediate(); - return new AggregateTxValidator( - new DoubleSpendTxValidator( - { - nullifiersExist: async (nullifiers: Buffer[]) => { - const merkleTree = worldStateSynchronizer.getCommitted(); - const indices = await merkleTree.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, nullifiers); - return indices.map(index => index !== undefined); - }, - }, - bindings, - ), - new BlockHeaderTxValidator( - { - getArchiveIndices: (archives: BlockHash[]) => { - const merkleTree = worldStateSynchronizer.getCommitted(); - return merkleTree.findLeafIndices(MerkleTreeId.ARCHIVE, archives); - }, - }, - bindings, - ), - ); - }; - const txPool = deps.txPool ?? new AztecKVTxPoolV2( @@ -114,7 +85,16 @@ export async function createP2PClient( { l2BlockSource: archiver, worldStateSynchronizer, - createTxValidator: createPoolTxValidator, + createTxValidator: async () => { + // We accept transactions if they are not expired by the next slot and block number (checked based on the ExpirationTimestamp field) + const currentBlockNumber = await archiver.getBlockNumber(); + const { ts: nextSlotTimestamp } = epochCache.getEpochAndSlotInNextL1Slot(); + return createTxValidatorForTransactionsEnteringPendingTxPool( + worldStateSynchronizer, + nextSlotTimestamp, + BlockNumber(currentBlockNumber + 1), + ); + }, }, telemetry, { diff --git a/yarn-project/p2p/src/client/interface.ts b/yarn-project/p2p/src/client/interface.ts index 6199b7158fe0..220802ee1b85 100644 --- a/yarn-project/p2p/src/client/interface.ts +++ b/yarn-project/p2p/src/client/interface.ts @@ -140,14 +140,6 @@ export type P2P = P2PApiFull & */ hasTxsInPool(txHashes: TxHash[]): Promise; - /** - * Returns transactions in the transaction pool by hash, requesting from the network if not found. - * @param txHashes - Hashes of tx to return. 
- * @param pinnedPeerId - An optional peer id that will be used to request the tx from (in addition to other random peers). - * @returns An array of tx or undefined. - */ - getTxsByHash(txHashes: TxHash[], pinnedPeerId: PeerId | undefined): Promise<(Tx | undefined)[]>; - /** * Returns an archived transaction from the transaction pool by its hash. * @param txHash - Hash of tx to return. @@ -224,8 +216,8 @@ export type P2P = P2PApiFull & updateP2PConfig(config: Partial): Promise; - /** Validates a set of txs. */ - validate(txs: Tx[]): Promise; + /** Validates a set of txs received in a block proposal. */ + validateTxsReceivedInBlockProposal(txs: Tx[]): Promise; /** Clears the db. */ clear(): Promise; diff --git a/yarn-project/p2p/src/client/p2p_client.test.ts b/yarn-project/p2p/src/client/p2p_client.test.ts index 064e5a29d4b5..9dc1c59982f4 100644 --- a/yarn-project/p2p/src/client/p2p_client.test.ts +++ b/yarn-project/p2p/src/client/p2p_client.test.ts @@ -9,7 +9,7 @@ import { L2Block } from '@aztec/stdlib/block'; import { EmptyL1RollupConstants, type L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import { P2PClientType } from '@aztec/stdlib/p2p'; import { mockTx } from '@aztec/stdlib/testing'; -import { TxArray, TxHash, TxHashArray } from '@aztec/stdlib/tx'; +import { TxHash } from '@aztec/stdlib/tx'; import { expect, jest } from '@jest/globals'; import { type MockProxy, mock } from 'jest-mock-extended'; @@ -19,7 +19,6 @@ import type { P2PService } from '../index.js'; import { type AttestationPool, createTestAttestationPool } from '../mem_pools/attestation_pool/attestation_pool.js'; import type { MemPools } from '../mem_pools/interface.js'; import type { TxPoolV2 } from '../mem_pools/tx_pool_v2/interfaces.js'; -import { ReqRespSubProtocol } from '../services/reqresp/interface.js'; import type { TxCollection } from '../services/tx_collection/tx_collection.js'; import { P2PClient } from './p2p_client.js'; @@ -198,82 +197,6 @@ describe('P2P Client', () => { 
await client.stop(); }); - it('request transactions handles missing items', async () => { - const mockTx1 = await mockTx(); - const mockTx2 = await mockTx(); - const mockTx3 = await mockTx(); - - // None of the txs are in the pool - txPool.getTxByHash.mockResolvedValue(undefined); - - // P2P service will not return tx2 - p2pService.sendBatchRequest.mockResolvedValue([new TxArray(...[mockTx1, mockTx3])]); - - // Spy on the tx pool addPendingTxs method, it should not be called for the missing tx - const addTxsSpy = jest.spyOn(txPool, 'addPendingTxs'); - - await client.start(); - - // We query for all 3 txs via getTxsByHash which internally requests from the network - const txHashes = await Promise.all([mockTx1.getTxHash(), mockTx2.getTxHash(), mockTx3.getTxHash()]); - const results = await client.getTxsByHash(txHashes, undefined); - - // We should receive the found transactions (tx2 will be undefined) - expect(results).toEqual([mockTx1, undefined, mockTx3]); - - // P2P should have been called with the 3 tx hashes (all missing from pool) - expect(p2pService.sendBatchRequest).toHaveBeenCalledWith( - ReqRespSubProtocol.TX, - txHashes.map(hash => new TxHashArray(...[hash])), - undefined, - expect.anything(), - expect.anything(), - expect.anything(), - ); - - // Retrieved txs should have been added to the pool - expect(addTxsSpy).toHaveBeenCalledTimes(1); - expect(addTxsSpy).toHaveBeenCalledWith([mockTx1, mockTx3]); - - await client.stop(); - }); - - it('getTxsByHash handles missing items', async () => { - // We expect the node to fetch this item from their local p2p pool - const txInMempool = await mockTx(); - // We expect this transaction to be requested from the network - const txToBeRequested = await mockTx(); - // We expect this transaction to not be found - const txToNotBeFound = await mockTx(); - - txPool.getTxByHash.mockImplementation(txHash => - Promise.resolve(txHash === txInMempool.getTxHash() ? 
txInMempool : undefined), - ); - - const addTxsSpy = jest.spyOn(txPool, 'addPendingTxs'); - - p2pService.sendBatchRequest.mockResolvedValue([new TxArray(...[txToBeRequested])]); - - await client.start(); - - const query = await Promise.all([txInMempool.getTxHash(), txToBeRequested.getTxHash(), txToNotBeFound.getTxHash()]); - const results = await client.getTxsByHash(query, undefined); - - // We should return the resolved transactions (txToNotBeFound is undefined) - expect(results).toEqual([txInMempool, txToBeRequested, undefined]); - // We should add the found requested transactions to the pool - expect(addTxsSpy).toHaveBeenCalledWith([txToBeRequested]); - // The p2p service should have been called to request the missing txs - expect(p2pService.sendBatchRequest).toHaveBeenCalledWith( - ReqRespSubProtocol.TX, - expect.anything(), - undefined, - expect.anything(), - expect.anything(), - expect.anything(), - ); - }); - it('getPendingTxs respects pagination', async () => { const txs = await timesAsync(20, i => mockTx(i)); txPool.getPendingTxHashes.mockResolvedValue(await Promise.all(txs.map(tx => tx.getTxHash()))); diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index 2e164bd80361..0f6941d213e2 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -44,7 +44,6 @@ import { type ReqRespSubProtocolHandler, type ReqRespSubProtocolValidators, } from '../services/reqresp/interface.js'; -import { chunkTxHashesRequest } from '../services/reqresp/protocols/tx.js'; import type { DuplicateAttestationInfo, DuplicateProposalInfo, @@ -433,36 +432,6 @@ export class P2PClient this.p2pService.registerDuplicateAttestationCallback(callback); } - /** - * Uses the batched Request Response protocol to request a set of transactions from the network. 
- */ - private async requestTxsByHash(txHashes: TxHash[], pinnedPeerId: PeerId | undefined): Promise { - const timeoutMs = 8000; // Longer timeout for now - const maxRetryAttempts = 10; // Keep retrying within the timeout - const requests = chunkTxHashesRequest(txHashes); - const maxPeers = Math.min(Math.ceil(requests.length / 3), 10); - - const txBatches = await this.p2pService.sendBatchRequest( - ReqRespSubProtocol.TX, - requests, - pinnedPeerId, - timeoutMs, - maxPeers, - maxRetryAttempts, - ); - - const txs = txBatches.flat(); - if (txs.length > 0) { - await this.txPool.addPendingTxs(txs); - } - - const txHashesStr = txHashes.map(tx => tx.toString()).join(', '); - this.log.debug(`Requested txs ${txHashesStr} (${txs.length} / ${txHashes.length}) from peers`); - - // We return all transactions, even the not found ones to the caller, such they can handle missing items themselves. - return txs; - } - public async getPendingTxs(limit?: number, after?: TxHash): Promise { if (limit !== undefined && limit <= 0) { throw new TypeError('limit must be greater than 0'); @@ -530,49 +499,6 @@ export class P2PClient return this.txPool.hasTxs(txHashes); } - /** - * Returns transactions in the transaction pool by hash. - * If a transaction is not in the pool, it will be requested from the network. - * @param txHashes - Hashes of the transactions to look for. - * @returns The txs found, or undefined if not found in the order requested. 
- */ - async getTxsByHash(txHashes: TxHash[], pinnedPeerId: PeerId | undefined): Promise<(Tx | undefined)[]> { - const txs = await Promise.all(txHashes.map(txHash => this.txPool.getTxByHash(txHash))); - const missingTxHashes = txs - .map((tx, index) => [tx, index] as const) - .filter(([tx, _index]) => !tx) - .map(([_tx, index]) => txHashes[index]); - - if (missingTxHashes.length === 0) { - return txs as Tx[]; - } - - const missingTxs = await this.requestTxsByHash(missingTxHashes, pinnedPeerId); - // TODO: optimize - // Merge the found txs in order - const mergingTxs = txHashes.map(txHash => { - // Is it in the txs list from the mempool? - for (const tx of txs) { - if (tx !== undefined && tx.getTxHash().equals(txHash)) { - return tx; - } - } - - // Is it in the fetched missing txs? - // Note: this is an O(n^2) operation, but we expect the number of missing txs to be small. - for (const tx of missingTxs) { - if (tx.getTxHash().equals(txHash)) { - return tx; - } - } - - // Otherwise return undefined - return undefined; - }); - - return mergingTxs; - } - /** * Returns an archived transaction in the transaction pool by its hash. * @param txHash - Hash of the archived transaction to look for. 
@@ -839,8 +765,8 @@ export class P2PClient this.log.debug(`Moved from state ${P2PClientState[oldState]} to ${P2PClientState[this.currentState]}`); } - public validate(txs: Tx[]): Promise { - return this.p2pService.validate(txs); + public validateTxsReceivedInBlockProposal(txs: Tx[]): Promise { + return this.p2pService.validateTxsReceivedInBlockProposal(txs); } /** diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts deleted file mode 100644 index 5cacb81e4b97..000000000000 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts +++ /dev/null @@ -1,354 +0,0 @@ -import type { EpochCache } from '@aztec/epoch-cache'; -import { BlockNumber } from '@aztec/foundation/branded-types'; -import { times } from '@aztec/foundation/collection'; -import { type Logger, createLogger } from '@aztec/foundation/log'; -import { sleep } from '@aztec/foundation/sleep'; -import { emptyChainConfig } from '@aztec/stdlib/config'; -import type { WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; -import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; -import { Tx, TxHash } from '@aztec/stdlib/tx'; - -import { describe, expect, it, jest } from '@jest/globals'; -import { type MockProxy, mock } from 'jest-mock-extended'; - -import type { P2PClient } from '../../client/p2p_client.js'; -import { type P2PConfig, getP2PDefaultConfig } from '../../config.js'; -import type { AttestationPool } from '../../mem_pools/attestation_pool/attestation_pool.js'; -import type { TxPoolV2 } from '../../mem_pools/tx_pool_v2/interfaces.js'; -import { ReqRespSubProtocol } from '../../services/reqresp/interface.js'; -import { chunkTxHashesRequest } from '../../services/reqresp/protocols/tx.js'; -import { makeAndStartTestP2PClients } from '../../test-helpers/make-test-p2p-clients.js'; -import { createMockTxWithMetadata } from '../../test-helpers/mock-tx-helpers.js'; - -/** Calls the protected 
requestTxsByHash method on a P2PClient for testing. */ -const requestTxsByHash = (client: P2PClient, txHashes: TxHash[], pinnedPeerId?: unknown): Promise => - ( - client as unknown as { requestTxsByHash(txHashes: TxHash[], pinnedPeerId: unknown): Promise } - ).requestTxsByHash(txHashes, pinnedPeerId); - -const TEST_TIMEOUT = 120000; -jest.setTimeout(TEST_TIMEOUT); - -const NUMBER_OF_PEERS = 2; - -describe('p2p client integration', () => { - let txPool: MockProxy; - let attestationPool: MockProxy; - let epochCache: MockProxy; - let worldState: MockProxy; - - let logger: Logger; - let p2pBaseConfig: P2PConfig; - - let clients: P2PClient[] = []; - - beforeEach(() => { - clients = []; - txPool = mock(); - attestationPool = mock(); - epochCache = mock(); - worldState = mock(); - - logger = createLogger('p2p:test:integration'); - p2pBaseConfig = { ...emptyChainConfig, ...getP2PDefaultConfig() }; - - //@ts-expect-error - we want to mock the getEpochAndSlotInNextL1Slot method, mocking ts is enough - epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) }); - epochCache.getRegisteredValidators.mockResolvedValue([]); - epochCache.getL1Constants.mockReturnValue({ - l1StartBlock: 0n, - l1GenesisTime: 0n, - slotDuration: 24, - epochDuration: 16, - ethereumSlotDuration: 12, - proofSubmissionEpochs: 2, - targetCommitteeSize: 48, - }); - - txPool.isEmpty.mockResolvedValue(true); - txPool.hasTxs.mockResolvedValue([]); - txPool.addPendingTxs.mockResolvedValue({ accepted: [], ignored: [], rejected: [] }); - txPool.getTxsByHash.mockImplementation(() => { - return Promise.resolve([] as Tx[]); - }); - - attestationPool.isEmpty.mockResolvedValue(true); - - worldState.status.mockResolvedValue({ - state: mock(), - syncSummary: { - latestBlockNumber: BlockNumber.ZERO, - latestBlockHash: '', - finalizedBlockNumber: BlockNumber.ZERO, - treesAreSynched: false, - oldestHistoricBlockNumber: BlockNumber.ZERO, - }, - }); - logger.info(`Starting test 
${expect.getState().currentTestName}`); - }); - - afterEach(async () => { - logger.info(`Tearing down state for ${expect.getState().currentTestName}`); - await shutdown(clients); - logger.info('Shut down p2p clients'); - - jest.restoreAllMocks(); - jest.resetAllMocks(); - jest.clearAllMocks(); - - clients = []; - }); - - // Shutdown all test clients - const shutdown = async (clients: P2PClient[]) => { - await Promise.all(clients.map(client => client.stop())); - await sleep(1000); - }; - - it('returns an empty array if unable to find a transaction from another peer', async () => { - // We want to create a set of nodes and request transaction from them - // Not using a before each as a the wind down is not working as expected - const config = { ...emptyChainConfig, ...getP2PDefaultConfig() }; - clients = ( - await makeAndStartTestP2PClients(NUMBER_OF_PEERS, { - p2pBaseConfig: config, - mockAttestationPool: attestationPool, - mockTxPool: txPool, - mockEpochCache: epochCache, - mockWorldState: worldState, - logger, - }) - ).map(x => x.client); - const [client1] = clients; - - await sleep(2000); - logger.info(`Finished waiting for clients to connect`); - - // Perform a get tx request from client 1 - const tx = await createMockTxWithMetadata(config); - const txHash = tx.getTxHash(); - - const requestedTxs = await requestTxsByHash(client1, [txHash], undefined); - expect(requestedTxs).toEqual([]); - }); - - it('can request a transaction from another peer', async () => { - // We want to create a set of nodes and request transaction from them - clients = ( - await makeAndStartTestP2PClients(NUMBER_OF_PEERS, { - p2pBaseConfig, - mockAttestationPool: attestationPool, - mockTxPool: txPool, - mockEpochCache: epochCache, - mockWorldState: worldState, - logger, - }) - ).map(x => x.client); - const [client1] = clients; - - // Give the nodes time to discover each other - await sleep(6000); - logger.info(`Finished waiting for clients to connect`); - - // Perform a get tx request from 
client 1 - const tx = await createMockTxWithMetadata(p2pBaseConfig); - const txHash = tx.getTxHash(); - // Mock the tx pool to return the tx we are looking for - txPool.getTxByHash.mockImplementationOnce(() => Promise.resolve(tx)); - - const requestedTxs = await requestTxsByHash(client1, [txHash], undefined); - expect(requestedTxs).toHaveLength(1); - const requestedTx = requestedTxs[0]; - - // Expect the tx to be the returned tx to be the same as the one we mocked - expect(requestedTx?.toBuffer()).toStrictEqual(tx.toBuffer()); - }); - - it('request batches of txs from another peer', async () => { - const txToRequestCount = 8; - const txBatchSize = 1; - - // We want to create a set of nodes and request transaction from them - clients = ( - await makeAndStartTestP2PClients(3, { - p2pBaseConfig, - mockAttestationPool: attestationPool, - mockTxPool: txPool, - mockEpochCache: epochCache, - mockWorldState: worldState, - logger, - }) - ).map(x => x.client); - const [client1] = clients; - - // Give the nodes time to discover each other - await sleep(6000); - logger.info(`Finished waiting for clients to connect`); - - // Perform a get tx request from client 1 - const txs = await Promise.all(times(txToRequestCount, i => createMockTxWithMetadata(p2pBaseConfig, i))); - const txHashes = await Promise.all(txs.map(tx => tx.getTxHash())); - - // Mock the tx pool to return the tx we are looking for - txPool.getTxByHash.mockImplementation((hash: TxHash) => Promise.resolve(txs.find(t => t.txHash?.equals(hash)))); - //@ts-expect-error - we want to spy on the sendBatchRequest method - const sendBatchSpy = jest.spyOn(client1.p2pService, 'sendBatchRequest'); - //@ts-expect-error - we want to spy on the sendRequestToPeer method - const sendRequestToPeerSpy = jest.spyOn(client1.p2pService.reqresp, 'sendRequestToPeer'); - - const resultingTxs = await requestTxsByHash(client1, txHashes, undefined); - expect(resultingTxs).toHaveLength(txs.length); - - // Expect the tx to be the returned tx to 
be the same as the one we mocked - resultingTxs.forEach((requestedTx: Tx, i: number) => { - expect(requestedTx.toBuffer()).toStrictEqual(txs[i].toBuffer()); - }); - - const request = chunkTxHashesRequest(txHashes, txBatchSize); - expect(request).toHaveLength(Math.ceil(txToRequestCount / txBatchSize)); - - expect(sendBatchSpy).toHaveBeenCalledWith( - ReqRespSubProtocol.TX, - request, - undefined, // pinnedPeer - expect.anything(), // timeoutMs - expect.anything(), // maxPeers - expect.anything(), // maxRetryAttempts - ); - - expect(sendRequestToPeerSpy).toHaveBeenCalledTimes(request.length); - }); - - it('request batches of txs from another peers', async () => { - const txToRequestCount = 8; - const txBatchSize = 1; - - // We want to create a set of nodes and request transaction from them - clients = ( - await makeAndStartTestP2PClients(3, { - p2pBaseConfig, - mockAttestationPool: attestationPool, - mockTxPool: txPool, - mockEpochCache: epochCache, - mockWorldState: worldState, - logger, - }) - ).map(x => x.client); - const [client1] = clients; - - // Give the nodes time to discover each other - await sleep(6000); - logger.info(`Finished waiting for clients to connect`); - - // Perform a get tx request from client 1 - const txs = await Promise.all(times(txToRequestCount, i => createMockTxWithMetadata(p2pBaseConfig, i))); - const txHashes = await Promise.all(txs.map(tx => tx.getTxHash())); - - // Mock the tx pool to return every other tx we are looking for - txPool.getTxByHash.mockImplementation((hash: TxHash) => { - const idx = txs.findIndex(t => t.txHash.equals(hash)); - return idx % 2 === 0 ? 
Promise.resolve(txs[idx]) : Promise.resolve(undefined); - }); - //@ts-expect-error - we want to spy on the sendBatchRequest method - const sendBatchSpy = jest.spyOn(client1.p2pService, 'sendBatchRequest'); - //@ts-expect-error - we want to spy on the sendRequestToPeer method - const sendRequestToPeerSpy = jest.spyOn(client1.p2pService.reqresp, 'sendRequestToPeer'); - - const resultingTxs = await requestTxsByHash(client1, txHashes, undefined); - expect(resultingTxs).toHaveLength(txs.length / 2); - - // Expect the tx to be the returned tx to be the same as the one we mocked - // Note we have only returned the half of the txs, so we expect the resulting txs to be every other tx - resultingTxs.forEach((requestedTx: Tx, i: number) => { - expect(requestedTx.toBuffer()).toStrictEqual(txs[2 * i].toBuffer()); - }); - - const request = chunkTxHashesRequest(txHashes, txBatchSize); - expect(request).toHaveLength(Math.ceil(txToRequestCount / txBatchSize)); - expect(request[0]).toHaveLength(txBatchSize); - - expect(sendBatchSpy).toHaveBeenCalledWith( - ReqRespSubProtocol.TX, - request, - undefined, // pinnedPeer - expect.anything(), // timeoutMs - expect.anything(), // maxPeers - expect.anything(), // maxRetryAttempts - ); - - expect(sendRequestToPeerSpy.mock.calls.length).toBeGreaterThanOrEqual(request.length); - }); - - it('will penalize peers that send invalid proofs', async () => { - // We want to create a set of nodes and request transaction from them - clients = ( - await makeAndStartTestP2PClients(NUMBER_OF_PEERS, { - p2pBaseConfig, - mockAttestationPool: attestationPool, - mockTxPool: txPool, - mockEpochCache: epochCache, - mockWorldState: worldState, - alwaysTrueVerifier: false, - logger, - }) - ).map(x => x.client); - const [client1, client2] = clients; - const client2PeerId = await client2.getEnr()!.peerId(); - - // Give the nodes time to discover each other - await sleep(6000); - logger.info(`Finished waiting for clients to connect`); - - const penalizePeerSpy = 
jest.spyOn((client1 as any).p2pService.peerManager, 'penalizePeer'); - - // Perform a get tx request from client 1 - const tx = await createMockTxWithMetadata(p2pBaseConfig); - const txHash = tx.getTxHash(); - - // Return the correct tx with an invalid proof -> active attack - txPool.getTxByHash.mockImplementationOnce(() => Promise.resolve(tx)); - - const requestedTxs = await requestTxsByHash(client1, [txHash], undefined); - // Even though we got a response, the proof was deemed invalid - expect(requestedTxs).toEqual([]); - - expect(penalizePeerSpy).toHaveBeenCalledWith(client2PeerId, PeerErrorSeverity.LowToleranceError); - }); - - it('will penalize peers that send the wrong transaction', async () => { - // We want to create a set of nodes and request transaction from them - clients = ( - await makeAndStartTestP2PClients(NUMBER_OF_PEERS, { - p2pBaseConfig, - mockAttestationPool: attestationPool, - mockTxPool: txPool, - mockEpochCache: epochCache, - mockWorldState: worldState, - alwaysTrueVerifier: true, - logger, - }) - ).map(x => x.client); - const [client1, client2] = clients; - const client2PeerId = (await client2.getEnr()?.peerId())!; - - // Give the nodes time to discover each other - await sleep(6000); - logger.info(`Finished waiting for clients to connect`); - - const penalizePeerSpy = jest.spyOn((client1 as any).p2pService.peerManager, 'penalizePeer'); - - // Perform a get tx request from client 1 - const tx = await createMockTxWithMetadata(p2pBaseConfig); - const txHash = tx.getTxHash(); - const tx2 = await createMockTxWithMetadata(p2pBaseConfig, 420); - - // Return an invalid tx - txPool.getTxByHash.mockImplementationOnce(() => Promise.resolve(tx2)); - - const requestedTxs = await requestTxsByHash(client1, [txHash], undefined); - // Even though we got a response, the proof was deemed invalid - expect(requestedTxs).toEqual([]); - - expect(penalizePeerSpy).toHaveBeenCalledWith(client2PeerId, PeerErrorSeverity.MidToleranceError); - }); -}); diff --git 
a/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts index 4f706cfc720c..a1a1ed0d9d69 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/interfaces.ts @@ -110,12 +110,12 @@ export interface TxPoolV2 extends TypedEventEmitter { addPendingTxs(txs: Tx[], opts?: { source?: string; feeComparisonOnly?: boolean }): Promise; /** - * Checks if a transaction can be added without modifying the pool. - * Performs the same validation as addPendingTxs but doesn't persist changes. + * Checks if the pool would accept a transaction without modifying state. + * Used as a pre-check before expensive proof verification. * @param tx - Transaction to check - * @returns Result: 'accepted', 'ignored' (if already in pool or undesirable), or 'rejected' (if validation fails) + * @returns 'accepted' if the pool would accept, 'ignored' if already in pool or undesirable */ - canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored' | 'rejected'>; + canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored'>; /** * Adds transactions as immediately protected for a given slot. 
diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts index 63417fa8559a..a2e6172c2922 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.test.ts @@ -36,6 +36,35 @@ describe('TxMetaData', () => { expect(nullifier).toMatch(/^0x[0-9a-f]+$/i); } }); + + it('sets forPublic to truthy for public transactions', async () => { + const tx = await mockTx(1, { numberOfNonRevertiblePublicCallRequests: 1 }); + expect(tx.data.forPublic).toBeDefined(); + const meta = await buildTxMetaData(tx); + + expect(meta.data.forPublic).toBeTruthy(); + }); + + it('sets forPublic to falsy for private transactions', async () => { + const tx = await mockTx(1, { + numberOfNonRevertiblePublicCallRequests: 0, + numberOfRevertiblePublicCallRequests: 0, + hasPublicTeardownCallRequest: false, + }); + expect(tx.data.forPublic).not.toBeDefined(); + const meta = await buildTxMetaData(tx); + + expect(meta.data.forPublic).toBeFalsy(); + }); + + it('preserves gas limits in validation data', async () => { + const tx = await mockTx(1); + const meta = await buildTxMetaData(tx); + + expect(meta.data.constants.txContext.gasSettings.gasLimits).toEqual( + tx.data.constants.txContext.gasSettings.gasLimits, + ); + }); }); describe('comparePriority', () => { diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts index 1649075062a1..45a452c6086d 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_metadata.ts @@ -2,6 +2,7 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { Fr } from '@aztec/foundation/curves/bn254'; import { ProtocolContractAddress } from '@aztec/protocol-contracts'; import { BlockHash, type L2BlockId } from '@aztec/stdlib/block'; +import { Gas } from '@aztec/stdlib/gas'; 
import type { Tx } from '@aztec/stdlib/tx'; import { getFeePayerBalanceDelta } from '../../msg_validators/tx_validator/fee_payer_balance.js'; @@ -12,6 +13,8 @@ import { type PreAddResult, TxPoolRejectionCode } from './eviction/interfaces.js export type TxMetaValidationData = { getNonEmptyNullifiers(): Fr[]; expirationTimestamp: bigint; + /** Whether the tx has public calls. Used to select the correct L2 gas minimum. */ + forPublic?: unknown; constants: { anchorBlockHeader: { hash(): Promise; @@ -19,6 +22,9 @@ export type TxMetaValidationData = { blockNumber: BlockNumber; }; }; + txContext: { + gasSettings: { gasLimits: Gas }; + }; }; }; @@ -105,11 +111,15 @@ export async function buildTxMetaData(tx: Tx): Promise { data: { getNonEmptyNullifiers: () => nullifierFrs, expirationTimestamp, + forPublic: !!tx.data.forPublic, constants: { anchorBlockHeader: { hash: () => Promise.resolve(anchorBlockHeaderHashFr), globalVariables: { blockNumber: anchorBlockNumber }, }, + txContext: { + gasSettings: { gasLimits: tx.data.constants.txContext.gasSettings.gasLimits }, + }, }, }, }; @@ -237,6 +247,9 @@ export function stubTxMetaValidationData(overrides: { expirationTimestamp?: bigi hash: () => Promise.resolve(new BlockHash(Fr.ZERO)), globalVariables: { blockNumber: BlockNumber(0) }, }, + txContext: { + gasSettings: { gasLimits: Gas.empty() }, + }, }, }; } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.test.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.test.ts index 2095815d1762..aa0b9af8affa 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.test.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.test.ts @@ -1,3 +1,11 @@ +import { + DEFAULT_DA_GAS_LIMIT, + DEFAULT_TEARDOWN_DA_GAS_LIMIT, + MAX_PROCESSABLE_L2_GAS, + PRIVATE_TX_L2_GAS_OVERHEAD, + PUBLIC_TX_L2_GAS_OVERHEAD, + TX_DA_GAS_OVERHEAD, +} from '@aztec/constants'; import { BlockNumber, CheckpointNumber, IndexWithinCheckpoint, SlotNumber } from 
'@aztec/foundation/branded-types'; import { timesAsync } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; @@ -6,7 +14,7 @@ import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; import { RevertCode } from '@aztec/stdlib/avm'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { Body, L2Block, type L2BlockId, type L2BlockSource } from '@aztec/stdlib/block'; -import { GasFees, GasSettings } from '@aztec/stdlib/gas'; +import { Gas, GasFees, GasSettings } from '@aztec/stdlib/gas'; import type { MerkleTreeReadOperations, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import { mockTx } from '@aztec/stdlib/testing'; import { @@ -19,6 +27,7 @@ import { BlockHeader, GlobalVariables, type Tx, TxEffect, TxHash, type TxValidat import { type MockProxy, mock } from 'jest-mock-extended'; +import { GasLimitsValidator } from '../../msg_validators/tx_validator/gas_validator.js'; import type { TxMetaData } from './tx_metadata.js'; import { AztecKVTxPoolV2 } from './tx_pool_v2.js'; @@ -553,16 +562,6 @@ describe('TxPoolV2', () => { expect(await rejectingPool.getPendingTxCount()).toBe(0); }); - it('canAddPendingTx returns rejected for transaction that fails validation', async () => { - const tx = await mockTx(1); - txsToReject.add(tx.getTxHash().toString()); - - const result = await rejectingPool.canAddPendingTx(tx); - - expect(result).toBe('rejected'); - expect(await rejectingPool.getPendingTxCount()).toBe(0); // State unchanged - }); - it('addPendingTxs handles batch with mixed accepted and rejected', async () => { const tx1 = await mockTx(1); const tx2 = await mockTx(2); @@ -644,6 +643,117 @@ describe('TxPoolV2', () => { }); }); + describe('gas limits validation', () => { + let gasPool: AztecKVTxPoolV2; + let gasStore: Awaited>; + let gasArchiveStore: Awaited>; + + beforeEach(async () => { + gasStore = await openTmpStore('p2p'); + gasArchiveStore = await openTmpStore('archive'); + gasPool = new 
AztecKVTxPoolV2(gasStore, gasArchiveStore, { + l2BlockSource: mockL2BlockSource, + worldStateSynchronizer: mockWorldState, + createTxValidator: () => Promise.resolve(new GasLimitsValidator()), + }); + await gasPool.start(); + }); + + afterEach(async () => { + await gasPool.stop(); + await gasStore.delete(); + await gasArchiveStore.delete(); + }); + + const makePublicTxWithGas = async (seed: number, gasLimits: Gas) => { + const tx = await mockTx(seed, { numberOfNonRevertiblePublicCallRequests: 1 }); + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits, + maxFeesPerGas: DEFAULT_MAX_FEES_PER_GAS, + }); + return tx; + }; + + const makePrivateTxWithGas = async (seed: number, gasLimits: Gas) => { + const tx = await mockTx(seed, { + numberOfNonRevertiblePublicCallRequests: 0, + numberOfRevertiblePublicCallRequests: 0, + hasPublicTeardownCallRequest: false, + }); + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits, + maxFeesPerGas: DEFAULT_MAX_FEES_PER_GAS, + }); + return tx; + }; + + it('accepts public tx at exactly the minimum gas limits', async () => { + const tx = await makePublicTxWithGas(1, new Gas(TX_DA_GAS_OVERHEAD, PUBLIC_TX_L2_GAS_OVERHEAD)); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(1); + expect(result.rejected).toHaveLength(0); + }); + + it('accepts private tx at exactly the minimum gas limits', async () => { + const tx = await makePrivateTxWithGas(1, new Gas(TX_DA_GAS_OVERHEAD, PRIVATE_TX_L2_GAS_OVERHEAD)); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(1); + expect(result.rejected).toHaveLength(0); + }); + + it('rejects public tx below the public L2 gas minimum', async () => { + const tx = await makePublicTxWithGas(1, new Gas(TX_DA_GAS_OVERHEAD, PUBLIC_TX_L2_GAS_OVERHEAD - 1)); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(0); + 
expect(toStrings(result.rejected)).toContain(hashOf(tx)); + }); + + it('rejects private tx below the private L2 gas minimum', async () => { + const tx = await makePrivateTxWithGas(1, new Gas(TX_DA_GAS_OVERHEAD, PRIVATE_TX_L2_GAS_OVERHEAD - 1)); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(0); + expect(toStrings(result.rejected)).toContain(hashOf(tx)); + }); + + it('rejects public tx at private L2 gas minimum (between the two thresholds)', async () => { + const tx = await makePublicTxWithGas(1, new Gas(TX_DA_GAS_OVERHEAD, PRIVATE_TX_L2_GAS_OVERHEAD)); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(0); + expect(toStrings(result.rejected)).toContain(hashOf(tx)); + }); + + it('rejects tx below DA gas minimum', async () => { + const tx = await makePublicTxWithGas(1, new Gas(TX_DA_GAS_OVERHEAD - 1, PUBLIC_TX_L2_GAS_OVERHEAD)); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(0); + expect(toStrings(result.rejected)).toContain(hashOf(tx)); + }); + + it('rejects public tx if L2 gas limit is too high', async () => { + const tx = await makePublicTxWithGas(1, new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1)); + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1), + maxFeesPerGas: DEFAULT_MAX_FEES_PER_GAS, + teardownGasLimits: new Gas(DEFAULT_TEARDOWN_DA_GAS_LIMIT, 1), + }); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(0); + expect(toStrings(result.rejected)).toContain(hashOf(tx)); + }); + + it('rejects private tx if L2 gas limit is too high', async () => { + const tx = await makePrivateTxWithGas(1, new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1)); + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1), + maxFeesPerGas: 
DEFAULT_MAX_FEES_PER_GAS, + teardownGasLimits: new Gas(DEFAULT_TEARDOWN_DA_GAS_LIMIT, 1), + }); + const result = await gasPool.addPendingTxs([tx]); + expect(result.accepted).toHaveLength(0); + expect(toStrings(result.rejected)).toContain(hashOf(tx)); + }); + }); + describe('addProtectedTxs', () => { it('adds new transactions as protected', async () => { const tx = await mockTx(1); diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.ts index c8ec01271baf..4f65a0d6b0ae 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2.ts @@ -74,7 +74,7 @@ export class AztecKVTxPoolV2 extends (EventEmitter as new () => TypedEventEmitte return this.#queue.put(() => this.#impl.addPendingTxs(txs, opts)); } - canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored' | 'rejected'> { + canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored'> { return this.#queue.put(() => this.#impl.canAddPendingTx(tx)); } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts index e2e5d672436c..94e4f224dc94 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts @@ -327,7 +327,7 @@ export class TxPoolV2Impl { return { status: 'accepted' }; } - async canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored' | 'rejected'> { + async canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored'> { const txHashStr = tx.getTxHash().toString(); // Check if already in pool @@ -335,14 +335,8 @@ export class TxPoolV2Impl { return 'ignored'; } - // Build metadata and validate using metadata + // Build metadata and check pre-add rules const meta = await buildTxMetaData(tx); - const validationResult = await this.#validateMeta(meta, undefined, 'can add pending'); - if (validationResult !== true) { - return 'rejected'; 
- } - - // Use pre-add rules const poolAccess = this.#createPreAddPoolAccess(); const preAddResult = await this.#evictionManager.runPreAddRules(meta, poolAccess); diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/README.md b/yarn-project/p2p/src/msg_validators/tx_validator/README.md new file mode 100644 index 000000000000..91087c1559c5 --- /dev/null +++ b/yarn-project/p2p/src/msg_validators/tx_validator/README.md @@ -0,0 +1,115 @@ +# Transaction Validation + +This module defines the transaction validators and the factory functions that assemble them for each entry point into the system. + +## Validation Strategy + +Transactions enter the system through different paths. **Unsolicited** transactions (gossip and RPC) are fully validated before acceptance. **Solicited** transactions (req/resp and block proposals) are only checked for well-formedness because we must store them for block re-execution — they may ultimately be invalid, which is caught during block building and reported as part of block validation/attestation. + +When solicited transactions fail to be mined, they may be migrated to the pending pool. At that point, the pool runs the state-dependent checks that were skipped on initial receipt. + +## Entry Points + +### 1. Gossip (libp2p pubsub) + +**Factory**: `createFirstStageTxValidationsForGossipedTransactions` + `createSecondStageTxValidationsForGossipedTransactions` +**Called from**: `LibP2PService.handleGossipedTx()` in `libp2p_service.ts` + +Unsolicited transactions from any peer. 
Fully validated in two stages with a pool pre-check in between to avoid wasting CPU on proof verification for transactions the pool would reject: + +| Step | What runs | On failure | +|------|-----------|------------| +| **Stage 1** (fast) | TxPermitted, Size, Data, Metadata, Timestamp, DoubleSpend, Gas, Phases, BlockHeader | Penalize peer, reject tx | +| **Pool pre-check** | `canAddPendingTx` — checks for duplicates, pool capacity | Ignore tx (no penalty) | +| **Stage 2** (slow) | Proof verification | Penalize peer, reject tx | +| **Pool add** | `addPendingTxs` | Accept, ignore, or reject | + +Each stage-1 and stage-2 validator is paired with a `PeerErrorSeverity`. If a validator fails, the sending peer is penalized with that severity. The `doubleSpendValidator` has special handling: its severity is determined by how recently the nullifier appeared (recent = high tolerance, old = low tolerance). + +### 2. JSON-RPC + +**Factory**: `createTxValidatorForAcceptingTxsOverRPC` +**Called from**: `AztecNodeService.isValidTx()` in `aztec-node/server.ts` + +Unsolicited transactions from a local wallet/PXE. Runs the full set of checks as a single aggregate validator: + +- TxPermitted, Size, Data, Metadata, Timestamp, DoubleSpend, Phases, BlockHeader +- Gas (optional — skipped when `skipFeeEnforcement` is set) +- Proof verification (optional — skipped for simulations when no verifier is provided) + +### 3. Req/resp and block proposals + +**Factories**: `createTxValidatorForReqResponseReceivedTxs`, `createTxValidatorForBlockProposalReceivedTxs` +**Called from**: `LibP2PService.validateRequestedTx()`, `LibP2PService.validateTxsReceivedInBlockProposal()`, and `BatchRequestTxValidator` in `batch-tx-requester/tx_validator.ts` + +Solicited transactions — we requested these from peers or received them as part of a block proposal we need to validate. We must accept them for re-execution even if they are invalid against the current state.
Only well-formedness is checked: + +- Metadata, Size, Data, Proof + +State-dependent checks are deferred to either the block building validator (for txs included in blocks) or the pending pool migration validator (for unmined txs migrating to pending). + +### 4. Block building + +**Factory**: `createTxValidatorForBlockBuilding` +**Called from**: `CheckpointBuilder.makeBlockBuilderDeps()` in `validator-client/checkpoint_builder.ts` + +Transactions already in the pool, about to be sequenced into a block. Re-validates against the current state of the block being built. **This is where invalid txs that entered via req/resp or block proposals are caught** — their invalidity is reported as part of block validation/attestation. + +Runs: +- Timestamp, DoubleSpend, Phases, Gas, BlockHeader + +Does **not** run: +- Proof, Data — already verified on entry (by gossip, RPC, or req/resp validators) + +### 5. Pending pool migration + +**Factory**: `createTxValidatorForTransactionsEnteringPendingTxPool` +**Called from**: `TxPoolV2Impl` (injected as the `createTxValidator` factory via `TxPoolV2Dependencies`) + +When transactions that arrived via req/resp or block proposals fail to be mined, they may need to be included in our pending pool. These txs only had well-formedness checks on receipt, so the pool runs the state-dependent checks they missed before accepting them. + +This validator is invoked on **every** transaction potentially entering the pending pool: +- `addPendingTxs` — validating each tx before adding +- `prepareForSlot` — unprotecting txs back to pending after a slot ends +- `handlePrunedBlocks` — unmining txs from pruned blocks back to pending +- Startup hydration — revalidating persisted non-mined txs on node restart + +Runs: +- DoubleSpend, BlockHeader, GasLimits, Timestamp + +Operates on `TxMetaData` (pre-built by the pool) rather than full `Tx` objects. 
+ +## Individual Validators + +| Validator | What it checks | Benchmarked verification duration | +|-----------|---------------|---------------| +| `TxPermittedValidator` | Whether the system is accepting transactions (controlled by config flag) | 1.56 us | +| `DataTxValidator` | Transaction data integrity — correct structure, non-empty fields | 4.10–18.18 ms | +| `SizeTxValidator` | Transaction does not exceed maximum size limits | 2.28 us | +| `MetadataTxValidator` | Chain ID, rollup version, protocol contracts hash, VK tree root | 4.18 us | +| `TimestampTxValidator` | Transaction has not expired (expiration timestamp vs next slot) | 1.56 us | +| `DoubleSpendTxValidator` | Nullifiers do not already exist in the nullifier tree | 106.08 us | +| `GasTxValidator` | Gas limits are within bounds (delegates to `GasLimitsValidator`), max fee per gas meets current block fees, and fee payer has sufficient FeeJuice balance | 1.02 ms | +| `GasLimitsValidator` | Gas limits are >= fixed minimums and <= AVM max processable L2 gas. 
Used standalone in pool migration; also called internally by `GasTxValidator` | 3–10 us | +| `PhasesTxValidator` | Public function calls in setup phase are on the allow list | 10.12–13.12 us | +| `BlockHeaderTxValidator` | Transaction's anchor block hash exists in the archive tree | 98.88 us | +| `TxProofValidator` | Client proof verifies correctly | ~250ms | + +## Validator Coverage by Entry Point + +| Validator | Gossip | RPC | Req/resp | Block building | Pool migration | +|-----------|--------|-----|----------|----------------|----------------| +| TxPermitted | Stage 1 | Yes | — | — | — | +| Data | Stage 1 | Yes | Yes | — | — | +| Size | Stage 1 | Yes | Yes | — | — | +| Metadata | Stage 1 | Yes | Yes | — | — | +| Timestamp | Stage 1 | Yes | — | Yes | Yes | +| DoubleSpend | Stage 1 | Yes | — | Yes | Yes | +| Gas (balance + limits) | Stage 1 | Optional* | — | Yes | — | +| GasLimits (standalone) | — | — | — | — | Yes | +| Phases | Stage 1 | Yes | — | Yes | — | +| BlockHeader | Stage 1 | Yes | — | Yes | Yes | +| Proof | Stage 2 | Optional** | Yes | — | — | + +\* Gas balance check is skipped when `skipFeeEnforcement` is set (testing/dev). `GasTxValidator` internally delegates to `GasLimitsValidator` as its first step, so gas limits are checked wherever `GasTxValidator` runs. Pool migration uses `GasLimitsValidator` standalone because it doesn't need the balance or fee-per-gas checks. +\** Proof verification is skipped for simulations (no verifier provided).
diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/aggregate_tx_validator.ts b/yarn-project/p2p/src/msg_validators/tx_validator/aggregate_tx_validator.ts index c7f9ef2b5d24..8b81492ef5d3 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/aggregate_tx_validator.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/aggregate_tx_validator.ts @@ -1,18 +1,18 @@ import type { TxValidationResult, TxValidator } from '@aztec/stdlib/tx'; export class AggregateTxValidator implements TxValidator { - #validators: TxValidator[]; + readonly validators: TxValidator[]; constructor(...validators: TxValidator[]) { if (validators.length === 0) { throw new Error('At least one validator must be provided'); } - this.#validators = validators; + this.validators = validators; } async validateTx(tx: T): Promise { const aggregate: { result: string; reason?: string[] } = { result: 'valid', reason: [] }; - for (const validator of this.#validators) { + for (const validator of this.validators) { const result = await validator.validateTx(tx); if (result.result === 'invalid') { aggregate.result = 'invalid'; diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/factory.test.ts b/yarn-project/p2p/src/msg_validators/tx_validator/factory.test.ts index d03af1db77fe..b52fb4ac740a 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/factory.test.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/factory.test.ts @@ -2,14 +2,43 @@ import { BlockNumber } from '@aztec/foundation/branded-types'; import { Fr } from '@aztec/foundation/curves/bn254'; import type { ContractDataSource } from '@aztec/stdlib/contract'; import { GasFees } from '@aztec/stdlib/gas'; -import type { ClientProtocolCircuitVerifier, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; +import type { + ClientProtocolCircuitVerifier, + MerkleTreeReadOperations, + WorldStateSynchronizer, +} from '@aztec/stdlib/interfaces/server'; +import { PeerErrorSeverity } from 
'@aztec/stdlib/p2p'; +import type { GlobalVariables } from '@aztec/stdlib/tx'; import { type MockProxy, mock } from 'jest-mock-extended'; -import { createTxMessageValidators } from './factory.js'; +import { AggregateTxValidator } from './aggregate_tx_validator.js'; +import { BlockHeaderTxValidator } from './block_header_validator.js'; +import { DataTxValidator } from './data_validator.js'; +import { DoubleSpendTxValidator } from './double_spend_validator.js'; +import { + createFirstStageTxValidationsForGossipedTransactions, + createSecondStageTxValidationsForGossipedTransactions, + createTxValidatorForAcceptingTxsOverRPC, + createTxValidatorForBlockBuilding, + createTxValidatorForBlockProposalReceivedTxs, + createTxValidatorForReqResponseReceivedTxs, + createTxValidatorForTransactionsEnteringPendingTxPool, +} from './factory.js'; +import { GasLimitsValidator, GasTxValidator } from './gas_validator.js'; +import { MetadataTxValidator } from './metadata_validator.js'; +import { PhasesTxValidator } from './phases_validator.js'; +import { SizeTxValidator } from './size_validator.js'; +import { TimestampTxValidator } from './timestamp_validator.js'; +import { TxPermittedValidator } from './tx_permitted_validator.js'; +import { TxProofValidator } from './tx_proof_validator.js'; -describe('GasTxValidator', () => { - // Mocks +/** Extract the constructor names from the validators inside an AggregateTxValidator. 
*/ +function getValidatorNames(aggregate: AggregateTxValidator): string[] { + return aggregate.validators.map(v => v.constructor.name); +} + +describe('Validator factory functions', () => { let synchronizer: MockProxy; let contractSource: MockProxy; let proofVerifier: MockProxy; @@ -20,29 +49,268 @@ describe('GasTxValidator', () => { proofVerifier = mock(); }); - it('inserts tx proof validator last', () => { - const validators = createTxMessageValidators( - 0n, - BlockNumber(2), - synchronizer, - new GasFees(1, 1), - 1, - 2, - Fr.ZERO, - contractSource, - proofVerifier, - true, - ); - expect(Object.keys(validators[0])).toEqual([ - 'txsPermittedValidator', - 'dataValidator', - 'metadataValidator', - 'timestampValidator', - 'doubleSpendValidator', - 'gasValidator', - 'phasesValidator', - 'blockHeaderValidator', - ]); - expect(Object.keys(validators[1])).toEqual(['proofValidator']); + describe('createFirstStageTxValidationsForGossipedTransactions', () => { + it('returns the expected set of first-stage validator keys', () => { + const validators = createFirstStageTxValidationsForGossipedTransactions( + 0n, + BlockNumber(2), + synchronizer, + new GasFees(1, 1), + 1, + 2, + Fr.ZERO, + contractSource, + true, + ); + + expect(Object.keys(validators)).toEqual([ + 'timestampValidator', + 'txsPermittedValidator', + 'txSizeValidator', + 'metadataValidator', + 'phasesValidator', + 'blockHeaderValidator', + 'doubleSpendValidator', + 'gasValidator', + 'dataValidator', + ]); + }); + + it('does not include a proof validator', () => { + const validators = createFirstStageTxValidationsForGossipedTransactions( + 0n, + BlockNumber(2), + synchronizer, + new GasFees(1, 1), + 1, + 2, + Fr.ZERO, + contractSource, + true, + ); + + expect(Object.keys(validators)).not.toContain('proofValidator'); + }); + + it('assigns expected severities to each validator', () => { + const validators = createFirstStageTxValidationsForGossipedTransactions( + 0n, + BlockNumber(2), + synchronizer, + new 
GasFees(1, 1), + 1, + 2, + Fr.ZERO, + contractSource, + true, + ); + + // Timestamp and block header are high tolerance (more likely to be stale rather than malicious) + expect(validators.timestampValidator.severity).toBe(PeerErrorSeverity.HighToleranceError); + expect(validators.blockHeaderValidator.severity).toBe(PeerErrorSeverity.HighToleranceError); + + // Others are mid tolerance + expect(validators.txsPermittedValidator.severity).toBe(PeerErrorSeverity.MidToleranceError); + expect(validators.dataValidator.severity).toBe(PeerErrorSeverity.MidToleranceError); + expect(validators.metadataValidator.severity).toBe(PeerErrorSeverity.MidToleranceError); + expect(validators.doubleSpendValidator.severity).toBe(PeerErrorSeverity.MidToleranceError); + expect(validators.gasValidator.severity).toBe(PeerErrorSeverity.MidToleranceError); + expect(validators.phasesValidator.severity).toBe(PeerErrorSeverity.MidToleranceError); + }); + + it('each entry has a validator with a validateTx method', () => { + const validators = createFirstStageTxValidationsForGossipedTransactions( + 0n, + BlockNumber(2), + synchronizer, + new GasFees(1, 1), + 1, + 2, + Fr.ZERO, + contractSource, + true, + ); + + for (const [, entry] of Object.entries(validators)) { + expect(entry.validator).toBeDefined(); + expect(typeof entry.validator.validateTx).toBe('function'); + } + }); + }); + + describe('createSecondStageTxValidationsForGossipedTransactions', () => { + it('returns only the proof validator', () => { + const validators = createSecondStageTxValidationsForGossipedTransactions(proofVerifier); + + expect(Object.keys(validators)).toEqual(['proofValidator']); + }); + + it('assigns low tolerance severity to proof validator', () => { + const validators = createSecondStageTxValidationsForGossipedTransactions(proofVerifier); + + expect(validators.proofValidator.severity).toBe(PeerErrorSeverity.LowToleranceError); + }); + + it('proof validator has a validateTx method', () => { + const validators = 
createSecondStageTxValidationsForGossipedTransactions(proofVerifier); + + expect(typeof validators.proofValidator.validator.validateTx).toBe('function'); + }); + }); + + describe('createTxValidatorForReqResponseReceivedTxs', () => { + it('contains well-formedness validators only', () => { + const validator = createTxValidatorForReqResponseReceivedTxs(proofVerifier, { + l1ChainId: 1, + rollupVersion: 2, + }); + + const aggregate = validator as AggregateTxValidator; + expect(getValidatorNames(aggregate)).toEqual([ + MetadataTxValidator.name, + SizeTxValidator.name, + DataTxValidator.name, + TxProofValidator.name, + ]); + }); + }); + + describe('createTxValidatorForBlockProposalReceivedTxs', () => { + it('contains the same well-formedness validators as req/resp', () => { + const validator = createTxValidatorForBlockProposalReceivedTxs(proofVerifier, { + l1ChainId: 1, + rollupVersion: 2, + }); + + const aggregate = validator as AggregateTxValidator; + expect(getValidatorNames(aggregate)).toEqual([ + MetadataTxValidator.name, + SizeTxValidator.name, + DataTxValidator.name, + TxProofValidator.name, + ]); + }); + }); + + describe('createTxValidatorForAcceptingTxsOverRPC', () => { + let db: MockProxy; + + beforeEach(() => { + db = mock(); + }); + + it('contains the full set of validators with fee enforcement and proof verification', () => { + const validator = createTxValidatorForAcceptingTxsOverRPC(db, contractSource, proofVerifier, { + l1ChainId: 1, + rollupVersion: 2, + setupAllowList: [], + gasFees: new GasFees(1, 1), + timestamp: 100n, + blockNumber: BlockNumber(5), + txsPermitted: true, + }); + + const aggregate = validator as AggregateTxValidator; + expect(getValidatorNames(aggregate)).toEqual([ + TxPermittedValidator.name, + TimestampTxValidator.name, + SizeTxValidator.name, + MetadataTxValidator.name, + PhasesTxValidator.name, + BlockHeaderTxValidator.name, + DoubleSpendTxValidator.name, + DataTxValidator.name, + GasTxValidator.name, + TxProofValidator.name, + ]); 
+ }); + + it('excludes gas validator when fee enforcement is skipped', () => { + const validator = createTxValidatorForAcceptingTxsOverRPC(db, contractSource, proofVerifier, { + l1ChainId: 1, + rollupVersion: 2, + setupAllowList: [], + gasFees: new GasFees(1, 1), + skipFeeEnforcement: true, + timestamp: 100n, + blockNumber: BlockNumber(5), + txsPermitted: true, + }); + + const aggregate = validator as AggregateTxValidator; + const names = getValidatorNames(aggregate); + expect(names).not.toContain(GasTxValidator.name); + expect(names).toContain(TxProofValidator.name); + }); + + it('excludes proof validator when no verifier is provided', () => { + const validator = createTxValidatorForAcceptingTxsOverRPC(db, contractSource, undefined, { + l1ChainId: 1, + rollupVersion: 2, + setupAllowList: [], + gasFees: new GasFees(1, 1), + timestamp: 100n, + blockNumber: BlockNumber(5), + txsPermitted: true, + }); + + const aggregate = validator as AggregateTxValidator; + const names = getValidatorNames(aggregate); + expect(names).not.toContain(TxProofValidator.name); + expect(names).toContain(GasTxValidator.name); + }); + }); + + describe('createTxValidatorForBlockBuilding', () => { + let db: MockProxy; + let globalVariables: MockProxy; + + beforeEach(() => { + db = mock(); + globalVariables = mock(); + globalVariables.timestamp = 100n; + globalVariables.blockNumber = BlockNumber(5); + globalVariables.gasFees = new GasFees(1, 1); + }); + + it('contains state-dependent validators only (no proof, no data)', () => { + const result = createTxValidatorForBlockBuilding(db, contractSource, globalVariables, []); + + const aggregate = result.preprocessValidator as AggregateTxValidator; + expect(getValidatorNames(aggregate)).toEqual([ + TimestampTxValidator.name, + PhasesTxValidator.name, + BlockHeaderTxValidator.name, + DoubleSpendTxValidator.name, + GasTxValidator.name, + ]); + }); + + it('returns a nullifierCache alongside the preprocessValidator', () => { + const result = 
createTxValidatorForBlockBuilding(db, contractSource, globalVariables, []); + + expect(result.nullifierCache).toBeDefined(); + expect(typeof result.nullifierCache!.addNullifiers).toBe('function'); + }); + }); + + describe('createTxValidatorForTransactionsEnteringPendingTxPool', () => { + it('contains the state-dependent checks missed by well-formedness validators', async () => { + const validator = await createTxValidatorForTransactionsEnteringPendingTxPool(synchronizer, 100n, BlockNumber(5)); + + const aggregate = validator as AggregateTxValidator; + expect(getValidatorNames(aggregate)).toEqual([ + GasLimitsValidator.name, + TimestampTxValidator.name, + DoubleSpendTxValidator.name, + BlockHeaderTxValidator.name, + ]); + }); + + it('syncs world state before creating the validator', async () => { + await createTxValidatorForTransactionsEnteringPendingTxPool(synchronizer, 100n, BlockNumber(5)); + + expect(synchronizer.syncImmediate).toHaveBeenCalled(); + }); }); }); diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/factory.ts b/yarn-project/p2p/src/msg_validators/tx_validator/factory.ts index 3b92c19f1a49..cf915d26421c 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/factory.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/factory.ts @@ -1,41 +1,91 @@ +/** + * Transaction validator factories for each tx entry point. + * + * Unsolicited transactions (gossip and RPC) are fully validated before acceptance. + * Transactions received via req/resp or block proposals are only checked for + * well-formedness because we must include them for block re-execution — they may + * ultimately be invalid, which is caught during block building and reported as + * part of block validation/attestation. See the README in this directory for the + * full validation strategy. + * + * 1. **Gossip** — full validation in two stages with a pool pre-check in between. + * Stage 1 (fast): metadata, data, timestamps, double-spend, gas, phases, block header. 
+ * Pool pre-check: `canAddPendingTx` — skips proof verification if pool would reject. + * Stage 2 (slow): proof verification. + * Orchestrated by `handleGossipedTx` in `libp2p_service.ts`. + * + * 2. **JSON-RPC** — full validation including all state-dependent checks. + * Proof verification and fee enforcement are configurable for testing purposes. + * + * 3. **Req/resp & block proposals** — well-formedness checks only (metadata, size, + * data, proof). Stored for re-execution; validity against state is not checked here. + * + * 4. **Block building** — re-validates against current state immediately before + * sequencing. Catches invalid txs that entered via req/resp or block proposals. + * Proof and data checks are skipped since they were verified on entry. + * + * 5. **Pending pool migration** — when unmined txs (e.g. from req/resp or block + * proposals) are migrated to the pending pool, the pool runs the state-dependent + * checks they missed: double-spend, block header, gas limits, and timestamps. + * This runs on every tx potentially entering the pending pool. 
+ */ import { BlockNumber } from '@aztec/foundation/branded-types'; import { Fr } from '@aztec/foundation/curves/bn254'; import type { LoggerBindings } from '@aztec/foundation/log'; import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree'; import { ProtocolContractAddress, protocolContractsHash } from '@aztec/protocol-contracts'; +import type { BlockHash } from '@aztec/stdlib/block'; import type { ContractDataSource } from '@aztec/stdlib/contract'; import type { GasFees } from '@aztec/stdlib/gas'; import type { AllowedElement, ClientProtocolCircuitVerifier, + MerkleTreeReadOperations, + PublicProcessorValidator, WorldStateSynchronizer, } from '@aztec/stdlib/interfaces/server'; import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; -import { DatabasePublicStateSource, MerkleTreeId } from '@aztec/stdlib/trees'; -import type { Tx, TxValidationResult, TxValidator } from '@aztec/stdlib/tx'; +import { DatabasePublicStateSource, MerkleTreeId, type PublicStateSource } from '@aztec/stdlib/trees'; +import type { GlobalVariables, Tx, TxValidationResult, TxValidator } from '@aztec/stdlib/tx'; import type { UInt64 } from '@aztec/stdlib/types'; +import type { TxMetaData } from '../../mem_pools/tx_pool_v2/tx_metadata.js'; import { AggregateTxValidator } from './aggregate_tx_validator.js'; import { ArchiveCache } from './archive_cache.js'; -import { BlockHeaderTxValidator } from './block_header_validator.js'; +import { type ArchiveSource, BlockHeaderTxValidator } from './block_header_validator.js'; import { DataTxValidator } from './data_validator.js'; -import { DoubleSpendTxValidator } from './double_spend_validator.js'; -import { GasTxValidator } from './gas_validator.js'; +import { DoubleSpendTxValidator, type NullifierSource } from './double_spend_validator.js'; +import { GasLimitsValidator, GasTxValidator } from './gas_validator.js'; import { MetadataTxValidator } from './metadata_validator.js'; +import { NullifierCache } from './nullifier_cache.js'; import 
{ PhasesTxValidator } from './phases_validator.js'; import { SizeTxValidator } from './size_validator.js'; import { TimestampTxValidator } from './timestamp_validator.js'; import { TxPermittedValidator } from './tx_permitted_validator.js'; import { TxProofValidator } from './tx_proof_validator.js'; -export interface MessageValidator { +/** + * A validator paired with a peer penalty severity. + * Used for gossip validation where each validator's failure triggers a peer penalization + * with the associated severity level. + */ +export interface TransactionValidator { validator: { validateTx(tx: Tx): Promise; }; severity: PeerErrorSeverity; } -export function createTxMessageValidators( +/** + * First stage of gossip validation — fast checks run before the pool pre-check. + * + * If any validator fails, the peer is penalized and the tx is rejected immediately, + * without consulting the pool or running proof verification. + * + * The `doubleSpendValidator` failure is special-cased by the caller (`handleGossipedTx`) + * to determine severity based on how recently the nullifier appeared. 
+ */ +export function createFirstStageTxValidationsForGossipedTransactions( timestamp: UInt64, blockNumber: BlockNumber, worldStateSynchronizer: WorldStateSynchronizer, @@ -44,86 +94,106 @@ export function createTxMessageValidators( rollupVersion: number, protocolContractsHash: Fr, contractDataSource: ContractDataSource, - proofVerifier: ClientProtocolCircuitVerifier, txsPermitted: boolean, allowedInSetup: AllowedElement[] = [], bindings?: LoggerBindings, -): Record[] { +): Record { const merkleTree = worldStateSynchronizer.getCommitted(); - return [ - { - txsPermittedValidator: { - validator: new TxPermittedValidator(txsPermitted, bindings), - severity: PeerErrorSeverity.MidToleranceError, - }, - dataValidator: { - validator: new DataTxValidator(bindings), - severity: PeerErrorSeverity.HighToleranceError, - }, - metadataValidator: { - validator: new MetadataTxValidator( - { - l1ChainId: new Fr(l1ChainId), - rollupVersion: new Fr(rollupVersion), - protocolContractsHash, - vkTreeRoot: getVKTreeRoot(), - }, - bindings, - ), - severity: PeerErrorSeverity.HighToleranceError, - }, - timestampValidator: { - validator: new TimestampTxValidator( - { - timestamp, - blockNumber, - }, - bindings, - ), - severity: PeerErrorSeverity.MidToleranceError, - }, - doubleSpendValidator: { - validator: new DoubleSpendTxValidator( - { - nullifiersExist: async (nullifiers: Buffer[]) => { - const merkleTree = worldStateSynchronizer.getCommitted(); - const indices = await merkleTree.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, nullifiers); - return indices.map(index => index !== undefined); - }, + return { + timestampValidator: { + validator: new TimestampTxValidator( + { + timestamp, + blockNumber, + }, + bindings, + ), + severity: PeerErrorSeverity.HighToleranceError, + }, + txsPermittedValidator: { + validator: new TxPermittedValidator(txsPermitted, bindings), + severity: PeerErrorSeverity.MidToleranceError, + }, + txSizeValidator: { + validator: new SizeTxValidator(bindings), + 
severity: PeerErrorSeverity.MidToleranceError, + }, + metadataValidator: { + validator: new MetadataTxValidator( + { + l1ChainId: new Fr(l1ChainId), + rollupVersion: new Fr(rollupVersion), + protocolContractsHash, + vkTreeRoot: getVKTreeRoot(), + }, + bindings, + ), + severity: PeerErrorSeverity.MidToleranceError, + }, + phasesValidator: { + validator: new PhasesTxValidator(contractDataSource, allowedInSetup, timestamp, bindings), + severity: PeerErrorSeverity.MidToleranceError, + }, + blockHeaderValidator: { + validator: new BlockHeaderTxValidator(new ArchiveCache(merkleTree), bindings), + severity: PeerErrorSeverity.HighToleranceError, + }, + doubleSpendValidator: { + validator: new DoubleSpendTxValidator( + { + nullifiersExist: async (nullifiers: Buffer[]) => { + const merkleTree = worldStateSynchronizer.getCommitted(); + const indices = await merkleTree.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, nullifiers); + return indices.map(index => index !== undefined); }, - bindings, - ), - severity: PeerErrorSeverity.HighToleranceError, - }, - gasValidator: { - validator: new GasTxValidator( - new DatabasePublicStateSource(merkleTree), - ProtocolContractAddress.FeeJuice, - gasFees, - bindings, - ), - severity: PeerErrorSeverity.HighToleranceError, - }, - phasesValidator: { - validator: new PhasesTxValidator(contractDataSource, allowedInSetup, timestamp, bindings), - severity: PeerErrorSeverity.MidToleranceError, - }, - blockHeaderValidator: { - validator: new BlockHeaderTxValidator(new ArchiveCache(merkleTree), bindings), - severity: PeerErrorSeverity.HighToleranceError, - }, + }, + bindings, + ), + severity: PeerErrorSeverity.MidToleranceError, // This is handled specifically at the point of rejection by considering a recent window where it may have been valid }, - { - proofValidator: { - validator: new TxProofValidator(proofVerifier, bindings), - severity: PeerErrorSeverity.MidToleranceError, - }, + gasValidator: { + validator: new GasTxValidator( + new 
DatabasePublicStateSource(merkleTree), + ProtocolContractAddress.FeeJuice, + gasFees, + bindings, + ), + severity: PeerErrorSeverity.MidToleranceError, }, - ]; + dataValidator: { + validator: new DataTxValidator(bindings), + severity: PeerErrorSeverity.MidToleranceError, + }, + }; } -export function createTxReqRespValidator( +/** + * Second stage of gossip validation — expensive proof verification. + * + * Only runs after the first stage passes AND `canAddPendingTx` confirms the pool would + * accept the tx. This avoids wasting CPU on proof verification for txs the pool would reject + * (e.g., duplicates, insufficient balance, pool full). + */ +export function createSecondStageTxValidationsForGossipedTransactions( + proofVerifier: ClientProtocolCircuitVerifier, + bindings?: LoggerBindings, +): Record { + return { + proofValidator: { + validator: new TxProofValidator(proofVerifier, bindings), + severity: PeerErrorSeverity.LowToleranceError, + }, + }; +} + +/** + * Well-formedness checks only: metadata, size, data, and proof. + * Used for req/resp and block proposal txs. These txs must be accepted for block + * re-execution even though they may be invalid against current state — that is + * caught later by the block building validator. + */ +function createTxValidatorForMinimumTxIntegrityChecks( verifier: ClientProtocolCircuitVerifier, { l1ChainId, @@ -149,3 +219,209 @@ export function createTxReqRespValidator( new TxProofValidator(verifier, bindings), ); } + +/** + * Validators for txs received via req/resp or filestores. + * Checks well-formedness only — we must accept these for re-execution even if they + * are invalid against current state. State-dependent checks happen when the tx + * enters the pending pool or during block building. 
+ */ +export function createTxValidatorForReqResponseReceivedTxs( + verifier: ClientProtocolCircuitVerifier, + { + l1ChainId, + rollupVersion, + }: { + l1ChainId: number; + rollupVersion: number; + }, + bindings?: LoggerBindings, +): TxValidator { + return createTxValidatorForMinimumTxIntegrityChecks(verifier, { l1ChainId, rollupVersion }, bindings); +} + +/** + * Validators for txs received in block proposals. + * Same as req/resp — well-formedness only. We must store these for block + * re-execution; their validity against state is checked during block building. + */ +export function createTxValidatorForBlockProposalReceivedTxs( + verifier: ClientProtocolCircuitVerifier, + { + l1ChainId, + rollupVersion, + }: { + l1ChainId: number; + rollupVersion: number; + }, + bindings?: LoggerBindings, +): TxValidator { + return createTxValidatorForMinimumTxIntegrityChecks(verifier, { l1ChainId, rollupVersion }, bindings); +} + +/** + * Validators for unsolicited txs received over JSON-RPC (from a local wallet/PXE). + * Full validation — all state-dependent checks are run. Proof verification is optional + * (can be skipped for testing purposes). Fee enforcement is also optional (skipped for testing/dev). + * Called from `AztecNodeService.isValidTx()`. 
+ */ +export function createTxValidatorForAcceptingTxsOverRPC( + db: MerkleTreeReadOperations, + contractDataSource: ContractDataSource, + verifier: ClientProtocolCircuitVerifier | undefined, + { + l1ChainId, + rollupVersion, + setupAllowList, + gasFees, + skipFeeEnforcement, + timestamp, + blockNumber, + txsPermitted, + }: { + l1ChainId: number; + rollupVersion: number; + setupAllowList: AllowedElement[]; + gasFees: GasFees; + skipFeeEnforcement?: boolean; + timestamp: UInt64; + blockNumber: BlockNumber; + txsPermitted: boolean; + }, + bindings?: LoggerBindings, +): TxValidator { + const validators: TxValidator[] = [ + new TxPermittedValidator(txsPermitted, bindings), + new TimestampTxValidator( + { + timestamp, + blockNumber, + }, + bindings, + ), + new SizeTxValidator(bindings), + new MetadataTxValidator( + { + l1ChainId: new Fr(l1ChainId), + rollupVersion: new Fr(rollupVersion), + protocolContractsHash, + vkTreeRoot: getVKTreeRoot(), + }, + bindings, + ), + new PhasesTxValidator(contractDataSource, setupAllowList, timestamp, bindings), + new BlockHeaderTxValidator(new ArchiveCache(db), bindings), + new DoubleSpendTxValidator(new NullifierCache(db), bindings), + new DataTxValidator(bindings), + ]; + + if (!skipFeeEnforcement) { + validators.push( + new GasTxValidator(new DatabasePublicStateSource(db), ProtocolContractAddress.FeeJuice, gasFees, bindings), + ); + } + + if (verifier) { + validators.push(new TxProofValidator(verifier, bindings)); + } + + return new AggregateTxValidator(...validators); +} + +/** + * Validators for txs about to be included in a block by the sequencer. + * Re-validates against current state. This is where invalid txs that entered via + * req/resp or block proposals are caught — their invalidity is reported as part + * of block validation/attestation. Proof and data checks are omitted since they + * were already verified on entry. + * Called from `CheckpointBuilder.makeBlockBuilderDeps()`. 
+ */ +export function createTxValidatorForBlockBuilding( + db: MerkleTreeReadOperations, + contractDataSource: ContractDataSource, + globalVariables: GlobalVariables, + setupAllowList: AllowedElement[], + bindings?: LoggerBindings, +): PublicProcessorValidator { + const nullifierCache = new NullifierCache(db); + const archiveCache = new ArchiveCache(db); + const publicStateSource = new DatabasePublicStateSource(db); + + return { + preprocessValidator: createTxValidatorForValidatingAgainstCurrentState( + nullifierCache, + archiveCache, + publicStateSource, + contractDataSource, + globalVariables, + setupAllowList, + bindings, + ), + nullifierCache, + }; +} + +function createTxValidatorForValidatingAgainstCurrentState( + nullifierSource: NullifierSource, + archiveSource: ArchiveSource, + publicStateSource: PublicStateSource, + contractDataSource: ContractDataSource, + globalVariables: GlobalVariables, + setupAllowList: AllowedElement[], + bindings?: LoggerBindings, +): TxValidator { + // We don't include the TxProofValidator nor the DataTxValidator here because they are already checked by the time we get to block building. + return new AggregateTxValidator( + new TimestampTxValidator( + { + timestamp: globalVariables.timestamp, + blockNumber: globalVariables.blockNumber, + }, + bindings, + ), + new PhasesTxValidator(contractDataSource, setupAllowList, globalVariables.timestamp, bindings), + new BlockHeaderTxValidator(archiveSource, bindings), + new DoubleSpendTxValidator(nullifierSource, bindings), + new GasTxValidator(publicStateSource, ProtocolContractAddress.FeeJuice, globalVariables.gasFees, bindings), + ); +} + +/** + * Validators for txs migrating to the pending pool. + * + * Txs that arrived via req/resp or block proposals only had well-formedness checks + * on receipt. When they fail to be mined and are migrated to the pending pool, we + * run the state-dependent checks they missed: double-spend, block header, gas limits, + * and timestamp expiry. 
This is run on EVERY tx potentially entering the pending pool + * — called inside `TxPoolV2Impl` during `addPendingTxs`, `prepareForSlot` (unprotect), + * `handlePrunedBlocks` (unmine), and startup hydration. + * + * Operates on `TxMetaData` rather than full `Tx` since metadata is pre-built by the pool. + * Injected into `TxPoolV2` as the `createTxValidator` factory in `TxPoolV2Dependencies`. + */ +export async function createTxValidatorForTransactionsEnteringPendingTxPool( + worldStateSynchronizer: WorldStateSynchronizer, + timestamp: bigint, + blockNumber: BlockNumber, + bindings?: LoggerBindings, +): Promise> { + await worldStateSynchronizer.syncImmediate(); + const merkleTree = worldStateSynchronizer.getCommitted(); + const nullifierSource: NullifierSource = { + nullifiersExist: async (nullifiers: Buffer[]) => { + const indices = await merkleTree.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, nullifiers); + return indices.map(index => index !== undefined); + }, + }; + const archiveSource: ArchiveSource = { + getArchiveIndices: (archives: BlockHash[]) => { + return merkleTree.findLeafIndices(MerkleTreeId.ARCHIVE, archives); + }, + }; + return new AggregateTxValidator( + new GasLimitsValidator(bindings), + new TimestampTxValidator({ timestamp, blockNumber }, bindings), + new DoubleSpendTxValidator(nullifierSource, bindings), + new BlockHeaderTxValidator(archiveSource, bindings), + ); +} diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.test.ts b/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.test.ts index e014c085f6bf..d616b5547802 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.test.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.test.ts @@ -2,6 +2,7 @@ import { DEFAULT_DA_GAS_LIMIT, DEFAULT_TEARDOWN_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS, + PRIVATE_TX_L2_GAS_OVERHEAD, PUBLIC_TX_L2_GAS_OVERHEAD, TX_DA_GAS_OVERHEAD, } from '@aztec/constants'; @@ -112,44 +113,101 @@ 
describe('GasTxValidator', () => { await expectInvalid(tx, TX_ERROR_INSUFFICIENT_FEE_PAYER_BALANCE); }); - it('rejects txs if the DA gas limit is not above the minimum amount', async () => { - assert(!!tx.data.forPublic); - tx.data.constants.txContext.gasSettings = GasSettings.default({ - gasLimits: new Gas(1, PUBLIC_TX_L2_GAS_OVERHEAD), - maxFeesPerGas: gasFees.clone(), + const makePrivateTx = async () => { + const privateTx = await mockTx(1, { + numberOfNonRevertiblePublicCallRequests: 0, + numberOfRevertiblePublicCallRequests: 0, + hasPublicTeardownCallRequest: false, }); - await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); - }); + assert(!privateTx.data.forPublic); + privateTx.data.feePayer = payer; + privateTx.data.constants.txContext.gasSettings = GasSettings.default({ maxFeesPerGas: gasFees.clone() }); + return privateTx; + }; - it('rejects txs if the L2 gas limit is not above the minimum amount in a public tx', async () => { - assert(!!tx.data.forPublic); - tx.data.constants.txContext.gasSettings = GasSettings.default({ - gasLimits: new Gas(TX_DA_GAS_OVERHEAD, 1), - maxFeesPerGas: gasFees.clone(), + describe('gas limits', () => { + it('accepts public tx at exactly the minimum gas limits', async () => { + assert(!!tx.data.forPublic); + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(TX_DA_GAS_OVERHEAD, PUBLIC_TX_L2_GAS_OVERHEAD), + maxFeesPerGas: gasFees.clone(), + }); + mockBalance(tx.data.constants.txContext.gasSettings.getFeeLimit().toBigInt()); + await expectValid(tx); }); - await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); - }); - it('rejects txs if the L2 gas limit is not above the minimum amount in a private tx', async () => { - tx = await mockTx(1, { - numberOfNonRevertiblePublicCallRequests: 0, - numberOfRevertiblePublicCallRequests: 0, - hasPublicTeardownCallRequest: false, + it('accepts private tx at exactly the minimum gas limits', async () => { + const privateTx = await makePrivateTx(); + 
privateTx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(TX_DA_GAS_OVERHEAD, PRIVATE_TX_L2_GAS_OVERHEAD), + maxFeesPerGas: gasFees.clone(), + }); + mockBalance(privateTx.data.constants.txContext.gasSettings.getFeeLimit().toBigInt()); + await expectValid(privateTx); }); - assert(!tx.data.forPublic); - tx.data.constants.txContext.gasSettings = GasSettings.default({ - gasLimits: new Gas(TX_DA_GAS_OVERHEAD, 1), - maxFeesPerGas: gasFees.clone(), + + it('rejects public tx below the public L2 gas minimum', async () => { + assert(!!tx.data.forPublic); + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(TX_DA_GAS_OVERHEAD, PUBLIC_TX_L2_GAS_OVERHEAD - 1), + maxFeesPerGas: gasFees.clone(), + }); + await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); + }); + + it('rejects private tx below the private L2 gas minimum', async () => { + const privateTx = await makePrivateTx(); + privateTx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(TX_DA_GAS_OVERHEAD, PRIVATE_TX_L2_GAS_OVERHEAD - 1), + maxFeesPerGas: gasFees.clone(), + }); + await expectInvalid(privateTx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); + }); + + it('rejects public tx at private L2 gas minimum (between the two thresholds)', async () => { + assert(!!tx.data.forPublic); + // PRIVATE_TX_L2_GAS_OVERHEAD is enough for a private tx but not for a public tx. 
+ tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(TX_DA_GAS_OVERHEAD, PRIVATE_TX_L2_GAS_OVERHEAD), + maxFeesPerGas: gasFees.clone(), + }); + await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); + }); + + it('rejects tx below DA gas minimum', async () => { + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(TX_DA_GAS_OVERHEAD - 1, PUBLIC_TX_L2_GAS_OVERHEAD), + maxFeesPerGas: gasFees.clone(), + }); + await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); + }); + + it('rejects tx below both DA and L2 gas minimums', async () => { + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(1, 1), + maxFeesPerGas: gasFees.clone(), + }); + await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); + }); + + it('rejects public tx if L2 gas limit is too high', async () => { + tx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1), + maxFeesPerGas: gasFees.clone(), + teardownGasLimits: new Gas(DEFAULT_TEARDOWN_DA_GAS_LIMIT, 1), + }); + await expectInvalid(tx, TX_ERROR_GAS_LIMIT_TOO_HIGH); }); - await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); - }); - it('rejects txs if the DA and L2 gas limits are not above the minimum amount', async () => { - tx.data.constants.txContext.gasSettings = GasSettings.default({ - gasLimits: new Gas(1, 1), - maxFeesPerGas: gasFees.clone(), + it('rejects private tx if L2 gas limit is too high', async () => { + const privateTx = await makePrivateTx(); + privateTx.data.constants.txContext.gasSettings = GasSettings.default({ + gasLimits: new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1), + maxFeesPerGas: gasFees.clone(), + teardownGasLimits: new Gas(DEFAULT_TEARDOWN_DA_GAS_LIMIT, 1), + }); + await expectInvalid(privateTx, TX_ERROR_GAS_LIMIT_TOO_HIGH); }); - await expectInvalid(tx, TX_ERROR_INSUFFICIENT_GAS_LIMIT); }); it('skips txs with not enough fee 
per da gas', async () => { @@ -161,13 +219,4 @@ describe('GasTxValidator', () => { gasFees.feePerL2Gas = gasFees.feePerL2Gas + 1n; await expectSkipped(tx, TX_ERROR_INSUFFICIENT_FEE_PER_GAS); }); - - it('rejects txs if the l2 gas limit is too high', async () => { - tx.data.constants.txContext.gasSettings = GasSettings.default({ - gasLimits: new Gas(DEFAULT_DA_GAS_LIMIT, MAX_PROCESSABLE_L2_GAS + 1), - maxFeesPerGas: gasFees.clone(), - teardownGasLimits: new Gas(DEFAULT_TEARDOWN_DA_GAS_LIMIT, 1), - }); - await expectInvalid(tx, TX_ERROR_GAS_LIMIT_TOO_HIGH); - }); }); diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.ts b/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.ts index a404cb0287bd..8ace5ceabbc6 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/gas_validator.ts @@ -21,6 +21,86 @@ import { import { getFeePayerClaimAmount, getTxFeeLimit } from './fee_payer_balance.js'; +/** Structural interface for types that carry gas limit data, used by {@link GasLimitsValidator}. */ +export interface HasGasLimitData { + txHash: { toString(): string }; + data: { + // We just need to know whether there is something here or not + forPublic?: unknown; + constants: { + txContext: { + gasSettings: { gasLimits: Gas }; + }; + }; + }; +} + +/** + * Validates that a transaction's gas limits are within acceptable bounds. + * + * Rejects transactions whose gas limits fall below the fixed minimums (FIXED_DA_GAS, + * FIXED_L2_GAS) or exceed the AVM's maximum processable L2 gas. This is a cheap, + * stateless check that operates on gas settings alone. + * + * Generic over T so it can validate both full {@link Tx} objects and {@link TxMetaData} + * (used during pending pool migration). + * + * Used by: pending pool migration (via factory), and indirectly by {@link GasTxValidator}. 
+ */ +export class GasLimitsValidator implements TxValidator { + #log: Logger; + + constructor(bindings?: LoggerBindings) { + this.#log = createLogger('sequencer:tx_validator:tx_gas', bindings); + } + + validateTx(tx: T): Promise { + return Promise.resolve(this.validateGasLimit(tx)); + } + + /** Checks gas limits are >= fixed minimums and <= AVM max processable L2 gas. */ + validateGasLimit(tx: T): TxValidationResult { + const gasLimits = tx.data.constants.txContext.gasSettings.gasLimits; + const minGasLimits = new Gas( + TX_DA_GAS_OVERHEAD, + tx.data.forPublic ? PUBLIC_TX_L2_GAS_OVERHEAD : PRIVATE_TX_L2_GAS_OVERHEAD, + ); + + if (minGasLimits.gtAny(gasLimits)) { + this.#log.verbose(`Rejecting transaction due to the gas limit(s) not being above the minimum gas limit`, { + gasLimits, + minGasLimits, + }); + return { result: 'invalid', reason: [TX_ERROR_INSUFFICIENT_GAS_LIMIT] }; + } + + if (gasLimits.l2Gas > MAX_PROCESSABLE_L2_GAS) { + this.#log.verbose(`Rejecting transaction due to the gas limit(s) being higher than the maximum processable gas`, { + gasLimits, + minGasLimits, + }); + return { result: 'invalid', reason: [TX_ERROR_GAS_LIMIT_TOO_HIGH] }; + } + + return { result: 'valid' }; + } +} + +/** + * Validates that a transaction can pay its gas fees. + * + * Runs three checks in order: + * 1. **Gas limits** (delegates to {@link GasLimitsValidator}) — rejects if limits are + * out of bounds. + * 2. **Max fee per gas** — skips (not rejects) the tx if its maxFeesPerGas is below + * the current block's gas fees. We skip rather than reject because the tx may + * become eligible in a later block with lower fees. + * 3. **Fee payer balance** — reads the fee payer's FeeJuice balance from public state, + * adds any pending claim from a setup-phase `_increase_public_balance` call, and + * rejects if the total is less than the tx's fee limit (gasLimits * maxFeePerGas). + * + * Used by: gossip (stage 1), RPC, and block building validators. 
+ */ export class GasTxValidator implements TxValidator { #log: Logger; #publicDataSource: PublicStateSource; @@ -31,7 +111,7 @@ export class GasTxValidator implements TxValidator { publicDataSource: PublicStateSource, feeJuiceAddress: AztecAddress, gasFees: GasFees, - bindings?: LoggerBindings, + private bindings?: LoggerBindings, ) { this.#log = createLogger('sequencer:tx_validator:tx_gas', bindings); this.#publicDataSource = publicDataSource; @@ -40,7 +120,7 @@ export class GasTxValidator implements TxValidator { } async validateTx(tx: Tx): Promise { - const gasLimitValidation = this.#validateGasLimit(tx); + const gasLimitValidation = new GasLimitsValidator(this.bindings).validateGasLimit(tx); if (gasLimitValidation.result === 'invalid') { return Promise.resolve(gasLimitValidation); } @@ -74,34 +154,9 @@ export class GasTxValidator implements TxValidator { } /** - * Check whether the tx's gas limit is above the minimum amount. + * Checks the fee payer has enough FeeJuice balance to cover the tx's fee limit. + * Accounts for any pending claim from a setup-phase `_increase_public_balance` call. */ - #validateGasLimit(tx: Tx): TxValidationResult { - const gasLimits = tx.data.constants.txContext.gasSettings.gasLimits; - const minGasLimits = new Gas( - TX_DA_GAS_OVERHEAD, - tx.data.forPublic ? 
PUBLIC_TX_L2_GAS_OVERHEAD : PRIVATE_TX_L2_GAS_OVERHEAD, - ); - - if (minGasLimits.gtAny(gasLimits)) { - this.#log.verbose(`Rejecting transaction due to the gas limit(s) not being above the minimum gas limit`, { - gasLimits, - minGasLimits, - }); - return { result: 'invalid', reason: [TX_ERROR_INSUFFICIENT_GAS_LIMIT] }; - } - - if (gasLimits.l2Gas > MAX_PROCESSABLE_L2_GAS) { - this.#log.verbose(`Rejecting transaction due to the gas limit(s) being higher than the maximum processable gas`, { - gasLimits, - minGasLimits, - }); - return { result: 'invalid', reason: [TX_ERROR_GAS_LIMIT_TOO_HIGH] }; - } - - return { result: 'valid' }; - } - public async validateTxFee(tx: Tx): Promise { const feePayer = tx.data.feePayer; diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/index.ts b/yarn-project/p2p/src/msg_validators/tx_validator/index.ts index 12322d2fe779..dcdd4fb050e9 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/index.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/index.ts @@ -12,3 +12,4 @@ export * from './archive_cache.js'; export * from './tx_permitted_validator.js'; export * from './timestamp_validator.js'; export * from './size_validator.js'; +export * from './factory.js'; diff --git a/yarn-project/validator-client/src/tx_validator/nullifier_cache.test.ts b/yarn-project/p2p/src/msg_validators/tx_validator/nullifier_cache.test.ts similarity index 100% rename from yarn-project/validator-client/src/tx_validator/nullifier_cache.test.ts rename to yarn-project/p2p/src/msg_validators/tx_validator/nullifier_cache.test.ts diff --git a/yarn-project/validator-client/src/tx_validator/nullifier_cache.ts b/yarn-project/p2p/src/msg_validators/tx_validator/nullifier_cache.ts similarity index 100% rename from yarn-project/validator-client/src/tx_validator/nullifier_cache.ts rename to yarn-project/p2p/src/msg_validators/tx_validator/nullifier_cache.ts diff --git a/yarn-project/p2p/src/services/dummy_service.ts 
b/yarn-project/p2p/src/services/dummy_service.ts index 3b0c57eb2651..73f4c4d21abd 100644 --- a/yarn-project/p2p/src/services/dummy_service.ts +++ b/yarn-project/p2p/src/services/dummy_service.ts @@ -141,14 +141,10 @@ export class DummyP2PService implements P2PService { return undefined; } - validate(_txs: Tx[]): Promise { + validateTxsReceivedInBlockProposal(_txs: Tx[]): Promise { return Promise.resolve(); } - validatePropagatedTx(_tx: Tx, _peerId: PeerId): Promise { - return Promise.resolve(true); - } - addReqRespSubProtocol( _subProtocol: ReqRespSubProtocol, _handler: ReqRespSubProtocolHandler, diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts index c11b78877d81..d9bf686d8f70 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts @@ -34,6 +34,7 @@ import { } from '../../mem_pools/attestation_pool/attestation_pool.js'; import type { MemPools } from '../../mem_pools/interface.js'; import type { TxPoolV2 } from '../../mem_pools/tx_pool_v2/interfaces.js'; +import type { TransactionValidator } from '../../msg_validators/tx_validator/factory.js'; import type { PubSubLibp2p } from '../../util.js'; import type { PeerManagerInterface } from '../peer-manager/interface.js'; import type { ReqRespInterface } from '../reqresp/interface.js'; @@ -149,13 +150,25 @@ describe('LibP2PService', () => { }, } as any; + const txArchiver = mock(); + txArchiver.getBlockNumber.mockResolvedValue(BlockNumber(1)); + + const txEpochCache = mock(); + txEpochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ + epoch: 0n, + slot: 1n, + ts: 100n, + } as any); + txService = createTestLibP2PService({ peerManager: txPeerManager, node: txNode, txPool, + archiver: txArchiver, + epochCache: txEpochCache, }); - // Make validatePropagatedTx pass by default - txService.validatePropagatedTxMock.mockResolvedValue(true); + // By default, 
canAddPendingTx returns 'accepted' so the flow proceeds to pool add + txPool.canAddPendingTx.mockResolvedValue('accepted'); }); it('should propagate (Accept) when pool accepts the transaction', async () => { @@ -209,13 +222,94 @@ describe('LibP2PService', () => { it('should NOT propagate (Reject) when gossip validation fails', async () => { const tx = await mockTx(); - txService.validatePropagatedTxMock.mockResolvedValue(false); + txService.firstStageValidationPasses = false; + + await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); + + expect(txReportSpy).toHaveBeenCalledWith('test-msg-id', MOCK_PEER_ID, TopicValidatorResult.Reject); + expect(txPool.addPendingTxs).not.toHaveBeenCalled(); + }); + + it('should Ignore and skip proof verification when canAddPendingTx returns ignored', async () => { + const tx = await mockTx(); + + txPool.canAddPendingTx.mockResolvedValue('ignored'); + + await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); + + expect(txReportSpy).toHaveBeenCalledWith('test-msg-id', MOCK_PEER_ID, TopicValidatorResult.Ignore); + // Pool pre-check was called + expect(txPool.canAddPendingTx).toHaveBeenCalled(); + // addPendingTxs should NOT be called — we short-circuited before it + expect(txPool.addPendingTxs).not.toHaveBeenCalled(); + }); + + it('should not call canAddPendingTx when first-stage validation fails', async () => { + const tx = await mockTx(); + + txService.firstStageValidationPasses = false; + + await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); + + expect(txPool.canAddPendingTx).not.toHaveBeenCalled(); + expect(txPool.addPendingTxs).not.toHaveBeenCalled(); + }); + + it('should Reject and penalize peer when second-stage (proof) validation fails', async () => { + const tx = await mockTx(); + + txService.secondStageValidationPasses = false; await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); expect(txReportSpy).toHaveBeenCalledWith('test-msg-id', 
MOCK_PEER_ID, TopicValidatorResult.Reject); + expect(txPeerManager.penalizePeer).toHaveBeenCalledWith(txPeerId, PeerErrorSeverity.LowToleranceError); + // canAddPendingTx was called (first stage passed), but addPendingTxs was NOT (second stage failed) + expect(txPool.canAddPendingTx).toHaveBeenCalled(); expect(txPool.addPendingTxs).not.toHaveBeenCalled(); }); + + it('should penalize peer with the severity from the failing first-stage validator', async () => { + const tx = await mockTx(); + + txService.firstStageValidationPasses = false; + txService.firstStageSeverity = PeerErrorSeverity.MidToleranceError; + + await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); + + expect(txPeerManager.penalizePeer).toHaveBeenCalledWith(txPeerId, PeerErrorSeverity.MidToleranceError); + }); + + it('should not penalize peer when canAddPendingTx returns ignored', async () => { + const tx = await mockTx(); + + txPool.canAddPendingTx.mockResolvedValue('ignored'); + + await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); + + expect(txPeerManager.penalizePeer).not.toHaveBeenCalled(); + }); + + it('should call addPendingTxs only after both validation stages pass and pool pre-check accepts', async () => { + const tx = await mockTx(); + const txHash = tx.getTxHash(); + + txPool.canAddPendingTx.mockResolvedValue('accepted'); + txPool.addPendingTxs.mockResolvedValue({ + accepted: [txHash], + ignored: [], + rejected: [], + }); + + await txService.handleGossipedTx(tx.toBuffer(), 'test-msg-id', txPeerId); + + // Verify the full happy path: canAddPendingTx → addPendingTxs → Accept + expect(txPool.canAddPendingTx).toHaveBeenCalled(); + expect(txPool.addPendingTxs).toHaveBeenCalledWith(expect.arrayContaining([expect.objectContaining({})]), { + source: 'gossip', + }); + expect(txReportSpy).toHaveBeenCalledWith('test-msg-id', MOCK_PEER_ID, TopicValidatorResult.Accept); + }); }); describe('validateRequestedBlock', () => { @@ -976,8 +1070,17 @@ class 
TestLibP2PService extends LibP2PService { /** Mocked validateRequestedTx for testing. */ public validateRequestedTxMock: jest.Mock; - /** Mocked validatePropagatedTx for testing gossip tx handling. */ - public validatePropagatedTxMock: jest.Mock<(tx: Tx, peerId: PeerId) => Promise>; + /** Controls whether first-stage gossip validation passes. Set to false to simulate first-stage failure. */ + public firstStageValidationPasses = true; + + /** Controls whether second-stage gossip validation passes. Set to false to simulate proof verification failure. */ + public secondStageValidationPasses = true; + + /** Controls the name of the failing first-stage validator (e.g., 'doubleSpendValidator' to trigger special handling). */ + public firstStageFailingValidatorName = 'failingValidator'; + + /** Controls the severity returned by the failing first-stage validator. */ + public firstStageSeverity: PeerErrorSeverity = PeerErrorSeverity.LowToleranceError; /** Stub validator returned by createRequestedTxValidator. */ private stubValidator: TxValidator; @@ -1032,7 +1135,6 @@ class TestLibP2PService extends LibP2PService { this.testEpochCache = epochCache; this.validateRequestedTxMock = jest.fn(() => Promise.resolve()); - this.validatePropagatedTxMock = jest.fn(() => Promise.resolve(true)); this.stubValidator = { validateTx: () => Promise.resolve({ result: 'valid' as const }), }; @@ -1048,9 +1150,30 @@ class TestLibP2PService extends LibP2PService { return super.handleGossipedTx(payloadData, msgId, source); } - /** Override to use the mock for validatePropagatedTx. */ - protected override validatePropagatedTx(tx: Tx, peerId: PeerId): Promise { - return this.validatePropagatedTxMock(tx, peerId); + /** Override to use test flag for first-stage validators. Returns a failing validator when firstStageValidationPasses is false. 
*/ + protected override createFirstStageMessageValidators(): Promise> { + if (this.firstStageValidationPasses) { + return Promise.resolve({}); + } + return Promise.resolve({ + [this.firstStageFailingValidatorName]: { + validator: { validateTx: () => Promise.resolve({ result: 'invalid' as const, reason: ['Test failure'] }) }, + severity: this.firstStageSeverity, + }, + }); + } + + /** Override to use test flag for second-stage validators. Returns a failing validator when secondStageValidationPasses is false. */ + protected override createSecondStageMessageValidators(): Record { + if (this.secondStageValidationPasses) { + return {}; + } + return { + proofValidator: { + validator: { validateTx: () => Promise.resolve({ result: 'invalid' as const, reason: ['Proof failure'] }) }, + severity: PeerErrorSeverity.LowToleranceError, + }, + }; } /** Exposes the protected validateRequestedBlock for testing. */ diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 3274cf749197..7215f2bc449e 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -69,9 +69,11 @@ import { } from '../../msg_validators/index.js'; import { MessageSeenValidator } from '../../msg_validators/msg_seen_validator/msg_seen_validator.js'; import { - type MessageValidator, - createTxMessageValidators, - createTxReqRespValidator, + type TransactionValidator, + createFirstStageTxValidationsForGossipedTransactions, + createSecondStageTxValidationsForGossipedTransactions, + createTxValidatorForBlockProposalReceivedTxs, + createTxValidatorForReqResponseReceivedTxs, } from '../../msg_validators/tx_validator/factory.js'; import { GossipSubEvent } from '../../types/index.js'; import { type PubSubLibp2p, convertToMultiaddr } from '../../util.js'; @@ -87,6 +89,9 @@ import { PeerScoring } from '../peer-manager/peer_scoring.js'; import type { 
BatchTxRequesterLibP2PService } from '../reqresp/batch-tx-requester/interface.js'; import type { P2PReqRespConfig } from '../reqresp/config.js'; import { + AuthRequest, + BlockTxsRequest, + BlockTxsResponse, DEFAULT_SUB_PROTOCOL_VALIDATORS, type ReqRespInterface, type ReqRespResponse, @@ -94,14 +99,9 @@ import { type ReqRespSubProtocolHandler, type ReqRespSubProtocolHandlers, type ReqRespSubProtocolValidators, + StatusMessage, type SubProtocolMap, ValidationError, -} from '../reqresp/index.js'; -import { - AuthRequest, - BlockTxsRequest, - BlockTxsResponse, - StatusMessage, pingHandler, reqGoodbyeHandler, reqRespBlockHandler, @@ -906,15 +906,45 @@ export class LibP2PService extends protected async handleGossipedTx(payloadData: Buffer, msgId: string, source: PeerId) { const validationFunc: () => Promise> = async () => { const tx = Tx.fromBuffer(payloadData); - const isValid = await this.validatePropagatedTx(tx, source); - if (!isValid) { - this.logger.trace(`Rejecting invalid propagated tx`, { - [Attributes.P2P_ID]: source.toString(), - }); + + const currentBlockNumber = await this.archiver.getBlockNumber(); + const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot(); + + // Stage 1: fast validators (metadata, data, timestamps, double-spend, gas, phases, block header) + const firstStageValidators = await this.createFirstStageMessageValidators(currentBlockNumber, nextSlotTimestamp); + const firstStageOutcome = await this.runValidations(tx, firstStageValidators); + if (!firstStageOutcome.allPassed) { + const { name } = firstStageOutcome.failure; + let { severity } = firstStageOutcome.failure; + + // Double spend validator has a special case handler. We perform more detailed examination + // as to how recently the nullifier was entered into the tree and if the transaction should + // have 'known' the nullifier existed. This determines the severity of the penalty applied to the peer. 
+ if (name === 'doubleSpendValidator') { + const txBlockNumber = BlockNumber(currentBlockNumber + 1); + severity = await this.handleDoubleSpendFailure(tx, txBlockNumber); + } + + this.peerManager.penalizePeer(source, severity); + return { result: TopicValidatorResult.Reject }; + } + + // Pool pre-check: see if the pool would accept this tx before doing expensive proof verification + const canAdd = await this.mempools.txPool.canAddPendingTx(tx); + if (canAdd === 'ignored') { + return { result: TopicValidatorResult.Ignore, obj: tx }; + } + + // Stage 2: expensive proof verification + const secondStageValidators = this.createSecondStageMessageValidators(); + const secondStageOutcome = await this.runValidations(tx, secondStageValidators); + if (!secondStageOutcome.allPassed) { + const { severity } = secondStageOutcome.failure; + this.peerManager.penalizePeer(source, severity); return { result: TopicValidatorResult.Reject }; } - // Propagate only on pool acceptance + // Pool add: persist the tx const txHash = tx.getTxHash(); const addResult = await this.mempools.txPool.addPendingTxs([tx], { source: 'gossip' }); @@ -922,7 +952,6 @@ export class LibP2PService extends const wasIgnored = addResult.ignored.some(h => h.equals(txHash)); this.logger.trace(`Validate propagated tx`, { - isValid, wasAccepted, wasIgnored, [Attributes.P2P_ID]: source.toString(), @@ -1537,43 +1566,12 @@ export class LibP2PService extends } protected createRequestedTxValidator(): TxValidator { - return createTxReqRespValidator(this.proofVerifier, { + return createTxValidatorForReqResponseReceivedTxs(this.proofVerifier, { l1ChainId: this.config.l1ChainId, rollupVersion: this.config.rollupVersion, }); } - @trackSpan('Libp2pService.validatePropagatedTx', tx => ({ - [Attributes.TX_HASH]: tx.getTxHash().toString(), - })) - protected async validatePropagatedTx(tx: Tx, peerId: PeerId): Promise { - const currentBlockNumber = await this.archiver.getBlockNumber(); - - // We accept transactions if they are not 
expired by the next slot (checked based on the ExpirationTimestamp field) - const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot(); - const messageValidators = await this.createMessageValidators(currentBlockNumber, nextSlotTimestamp); - - for (const validator of messageValidators) { - const outcome = await this.runValidations(tx, validator); - - if (outcome.allPassed) { - continue; - } - const { name } = outcome.failure; - let { severity } = outcome.failure; - - // Double spend validator has a special case handler - if (name === 'doubleSpendValidator') { - const txBlockNumber = BlockNumber(currentBlockNumber + 1); // tx is expected to be in the next block - severity = await this.handleDoubleSpendFailure(tx, txBlockNumber); - } - - this.peerManager.penalizePeer(peerId, severity); - return false; - } - return true; - } - private async getGasFees(blockNumber: BlockNumber): Promise { if (blockNumber === this.feesCache?.blockNumber) { return this.feesCache.gasFees; @@ -1601,60 +1599,53 @@ export class LibP2PService extends }; } - public async validate(txs: Tx[]): Promise { - const currentBlockNumber = await this.archiver.getBlockNumber(); - - // We accept transactions if they are not expired by the next slot (checked based on the ExpirationTimestamp field) - const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot(); - const messageValidators = await this.createMessageValidators(currentBlockNumber, nextSlotTimestamp); + public async validateTxsReceivedInBlockProposal(txs: Tx[]): Promise { + const validator = createTxValidatorForBlockProposalReceivedTxs( + this.proofVerifier, + { l1ChainId: this.config.l1ChainId, rollupVersion: this.config.rollupVersion }, + this.logger.getBindings(), + ); - await Promise.all( + const results = await Promise.all( txs.map(async tx => { - for (const validator of messageValidators) { - const outcome = await this.runValidations(tx, validator); - if (!outcome.allPassed) { - throw new 
Error('Invalid tx detected', { cause: { outcome } }); - } - } + const result = await validator.validateTx(tx); + return result.result !== 'invalid'; }), ); + if (results.some(value => value === false)) { + throw new Error('Invalid tx detected'); + } } - /** - * Create message validators for the given block number and timestamp. - * - * Each validator is a pair of a validator and a severity. - * If a validator fails, the peer is penalized with the severity of the validator. - * - * @param currentBlockNumber - The current synced block number. - * @param nextSlotTimestamp - The timestamp of the next slot (used to validate txs are not expired). - * @returns The message validators. - */ - private async createMessageValidators( + /** Creates the first stage (fast) validators for gossiped transactions. */ + protected async createFirstStageMessageValidators( currentBlockNumber: BlockNumber, nextSlotTimestamp: UInt64, - ): Promise[]> { + ): Promise> { const gasFees = await this.getGasFees(currentBlockNumber); const allowedInSetup = this.config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions()); + const blockNumber = BlockNumber(currentBlockNumber + 1); - const blockNumberInWhichTheTxIsConsideredToBeIncluded = BlockNumber(currentBlockNumber + 1); - - return createTxMessageValidators( + return createFirstStageTxValidationsForGossipedTransactions( nextSlotTimestamp, - blockNumberInWhichTheTxIsConsideredToBeIncluded, + blockNumber, this.worldStateSynchronizer, gasFees, this.config.l1ChainId, this.config.rollupVersion, protocolContractsHash, this.archiver, - this.proofVerifier, !this.config.disableTransactions, allowedInSetup, this.logger.getBindings(), ); } + /** Creates the second stage (expensive proof verification) validators for gossiped transactions. */ + protected createSecondStageMessageValidators(): Record { + return createSecondStageTxValidationsForGossipedTransactions(this.proofVerifier, this.logger.getBindings()); + } + /** * Run validations on a tx. 
* @param tx - The tx to validate. @@ -1663,7 +1654,7 @@ export class LibP2PService extends */ private async runValidations( tx: Tx, - messageValidators: Record, + messageValidators: Record, ): Promise { const validationPromises = Object.entries(messageValidators).map(async ([name, { validator, severity }]) => { const { result } = await validator.validateTx(tx); diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts index 03f37cad2482..9cd2997d3a2a 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.test.ts @@ -8,12 +8,12 @@ import { type ISemaphore, Semaphore } from '@aztec/foundation/queue'; import { retryUntil } from '@aztec/foundation/retry'; import { sleep } from '@aztec/foundation/sleep'; import { DateProvider } from '@aztec/foundation/timer'; -import type { BlockProposal } from '@aztec/stdlib/p2p'; -import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; +import { type BlockProposal, PeerErrorSeverity } from '@aztec/stdlib/p2p'; import { makeBlockHeader, makeBlockProposal } from '@aztec/stdlib/testing'; import { Tx, TxArray, TxHash, type TxValidationResult } from '@aztec/stdlib/tx'; import { describe, expect, it, jest } from '@jest/globals'; +import type { PeerId } from '@libp2p/interface'; import { type MockProxy, mock } from 'jest-mock-extended'; import { createSecp256k1PeerId } from '../../../index.js'; @@ -73,6 +73,21 @@ describe('BatchTxRequester', () => { }); }); + function sampleAllPeers(sampler: () => PeerId | undefined): string[] | undefined { + const seen = new Set(); + const ordered: string[] = []; + let currentPeer = sampler()?.toString(); + if (currentPeer === undefined) { + return undefined; + } + while (!seen.has(currentPeer)) { + seen.add(currentPeer); + ordered.push(currentPeer); + currentPeer 
= sampler()!.toString(); + } + return ordered; + } + describe('Dumb peers', () => { it('should create correct TX_BATCH_SIZE chunks with single dumb worker', async () => { const txCount = 16; @@ -228,7 +243,9 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new TestPeerCollection(new PeerCollection(peers, undefined, new DateProvider())); + const peerCollection = new TestPeerCollection( + new PeerCollection(connectionSampler, undefined, new DateProvider()), + ); // Define which transactions each peer has (same as happy path) const peerTransactions = new Map([ @@ -373,7 +390,7 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new PeerCollection(peers, pinnedPeer, dateProvider); + const peerCollection = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); const peerTransactions = new Map([ [peers[0].toString(), Array.from({ length: 16 }, (_, i) => i)], // peer1 has all transactions, peer2 none ]); @@ -404,9 +421,11 @@ describe('BatchTxRequester', () => { expect(result!.length).toBe(txCount); // Verify peer promotion behavior - expect(peerCollection.getSmartPeers().size).toBe(1); - expect(peerCollection.getSmartPeers()).toContain(peers[0].toString()); - expect(peerCollection.getSmartPeers()).not.toContain(peers[1].toString()); + const smartPeers = sampleAllPeers(peerCollection.nextSmartPeerToQuery.bind(peerCollection)); + expect(smartPeers).toBeDefined(); + expect(smartPeers!.length).toBe(1); + expect(smartPeers).toContain(peers[0].toString()); + expect(smartPeers).not.toContain(peers[1].toString()); // The exact release count depends on timing of concurrent async operations. 
// We verify a minimum of 7 releases which accounts for: @@ -420,7 +439,10 @@ describe('BatchTxRequester', () => { }); it('Should track smart peer collection behavior with multiple promotions', async () => { - const txCount = 20; + // With batch_size=8, 3 workers handle batches 0-7, 8-15, 16-23 in the first round. + // Using 30 txs ensures txs 24-29 remain unfetched after the dumb round, + // so every peer still has unique missing txs when decideIfPeerIsSmart runs. + const txCount = 30; const deadline = 3_000; const dateProvider = new DateProvider(); const pinnedPeer = undefined; @@ -436,13 +458,14 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new PeerCollection(peers, pinnedPeer, dateProvider); + const peerCollection = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); - // Define which transactions each peer has + // Each peer has txs spanning beyond its assigned batch, so after its batch is + // processed it still has txs that are missing → gets promoted to smart. 
const peerTransactions = new Map([ - [peers[0].toString(), Array.from({ length: 10 }, (_, i) => i)], // peer1: txs 0-9 - [peers[1].toString(), Array.from({ length: 10 }, (_, i) => i + 5)], // peer2: txs 5-14 - [peers[2].toString(), Array.from({ length: 10 }, (_, i) => i + 10)], // peer3: txs 10-19 + [peers[0].toString(), Array.from({ length: 15 }, (_, i) => i)], // peer0: txs 0-14 + [peers[1].toString(), Array.from({ length: 15 }, (_, i) => i + 10)], // peer1: txs 10-24 + [peers[2].toString(), Array.from({ length: 10 }, (_, i) => i + 20)], // peer2: txs 20-29 ]); const { mockImplementation } = createRequestLogger(blockProposal, new Set(), peerTransactions); @@ -471,7 +494,7 @@ describe('BatchTxRequester', () => { expect(result!.length).toBe(txCount); // Verify all peers were promoted to smart - expect(peerCollection.getSmartPeers().size).toBe(peers.length); + expect(sampleAllPeers(peerCollection.nextSmartPeerToQuery.bind(peerCollection))!.length).toBe(peers.length); expect(semaphore.acquiredCount).toBe(3); }); @@ -492,7 +515,7 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new PeerCollection(peers, pinnedPeer, dateProvider); + const peerCollection = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); // Define which transactions each peer has const peerTransactions = new Map([ @@ -552,7 +575,7 @@ describe('BatchTxRequester', () => { connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new PeerCollection(peers, pinnedPeer, dateProvider); + const peerCollection = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); // Mock implementation that makes peer0 fail consistently, peer1 succeed const { mockImplementation } = createRequestLogger( @@ -585,7 +608,7 @@ describe('BatchTxRequester', 
() => { expect(peerCollection.getBadPeers()).not.toContain(peers[1].toString()); // Verify bad peer is excluded from queries - peer0 should be in bad peers - expect(peerCollection.getDumbPeersToQuery()).not.toContain(peers[0].toString()); + expect(peerCollection.nextDumbPeerToQuery()).toBeUndefined(); }); it('should recover bad peer after successful response', async () => { @@ -605,7 +628,7 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new PeerCollection(peers, pinnedPeer, dateProvider); + const peerCollection = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); let requestCount = 0; // Mock implementation: first 4 requests fail (exceed threshold), then succeed @@ -656,7 +679,7 @@ describe('BatchTxRequester', () => { // Verify peer was initially marked bad but then recovered // Since peer succeeded in the end, it should not be in bad peers list expect(peerCollection.getBadPeers()).not.toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).toContain(peers[0].toString()); + expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).toContain(peers[0].toString()); }); it('should handle multiple peers with different bad peer states', async () => { @@ -687,7 +710,7 @@ describe('BatchTxRequester', () => { ]); const semaphore = new TestSemaphore(new Semaphore(0)); - const peerCollection = new PeerCollection(peers, pinnedPeer, dateProvider); + const peerCollection = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); const peerRequestCounts = new Map(); // eslint-disable-next-line require-await @@ -776,7 +799,7 @@ describe('BatchTxRequester', () => { expect(peerCollection.getBadPeers()).not.toContain(peers[2].toString()); // peer2: recovered // Verify query availability - const dumbPeersToQuery = 
peerCollection.getDumbPeersToQuery(); + const dumbPeersToQuery = sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection)); expect(dumbPeersToQuery).not.toContain(peers[0].toString()); // bad peer excluded expect(dumbPeersToQuery).toContain(peers[1].toString()); // good peer included expect(dumbPeersToQuery).toContain(peers[2].toString()); // recovered peer included @@ -796,31 +819,35 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new TestPeerCollection(new PeerCollection(peers, pinnedPeer, clock)); + const peerCollection = new TestPeerCollection(new PeerCollection(connectionSampler, pinnedPeer, clock)); // Manually mark peer as rate limited peerCollection.markPeerRateLimitExceeded(peers[0]); // Verify peer is initially rate limited and excluded expect(peerCollection.getRateLimitExceededPeers()).toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).not.toContain(peers[0].toString()); + expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).not.toContain( + peers[0].toString(), + ); // Test TTL behavior at different time points // Just before TTL expiration: still rate limited clock.advanceTo(RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL - 1); expect(peerCollection.getRateLimitExceededPeers()).toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).not.toContain(peers[0].toString()); + expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).not.toContain( + peers[0].toString(), + ); // Right at TTL expiration: not rate limited anymore clock.advanceTo(RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL); expect(peerCollection.getRateLimitExceededPeers()).not.toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).toContain(peers[0].toString()); + 
expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).toContain(peers[0].toString()); // After TTL expiration: not rate limited anymore clock.advanceTo(RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL + 1); expect(peerCollection.getRateLimitExceededPeers()).not.toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).toContain(peers[0].toString()); + expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).toContain(peers[0].toString()); // Test multiple rate limit cycles peerCollection.markPeerRateLimitExceeded(peers[0]); // Rate limit again @@ -828,7 +855,7 @@ describe('BatchTxRequester', () => { clock.advanceTo(clock.now() + RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL + 1); expect(peerCollection.getRateLimitExceededPeers()).not.toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).toContain(peers[0].toString()); + expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).toContain(peers[0].toString()); }); it('should exclude rate limited peer from queries and recover after TTL expiration', async () => { @@ -848,7 +875,7 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const innerPeerCollection = new PeerCollection(peers, pinnedPeer, clock); + const innerPeerCollection = new PeerCollection(connectionSampler, pinnedPeer, clock); const peerCollection = new TestPeerCollection(innerPeerCollection); const peerTransactions = new Map([ @@ -919,8 +946,8 @@ describe('BatchTxRequester', () => { // Verify peer0 is no longer rate limited after TTL expiration expect(peerCollection.getRateLimitExceededPeers().size).toBe(0); - expect(peerCollection.getDumbPeersToQuery()).toContain(peers[0].toString()); - expect(peerCollection.getDumbPeersToQuery()).toContain(peers[1].toString()); + 
expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).toContain(peers[0].toString()); + expect(sampleAllPeers(peerCollection.nextDumbPeerToQuery.bind(peerCollection))).toContain(peers[1].toString()); }); }); @@ -1133,7 +1160,7 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new TestPeerCollection(new PeerCollection(peers, undefined, clock)); + const peerCollection = new TestPeerCollection(new PeerCollection(connectionSampler, undefined, clock)); const peerTransactions = new Map([[peers[0].toString(), Array.from({ length: txCount / 2 }, (_, i) => i)]]); const { mockImplementation } = createRequestLogger(blockProposal, new Set(), peerTransactions, 100); @@ -1195,7 +1222,9 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); - const peerCollection = new TestPeerCollection(new PeerCollection(peers, undefined, new DateProvider())); + const peerCollection = new TestPeerCollection( + new PeerCollection(connectionSampler, undefined, new DateProvider()), + ); // Define which transactions each peer has const peerTransactions = new Map([ @@ -1525,7 +1554,9 @@ describe('BatchTxRequester', () => { connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); const [pinnedPeer, regularPeer] = peers; - const peerCollection = new TestPeerCollection(new PeerCollection(peers, pinnedPeer, new DateProvider())); + const peerCollection = new TestPeerCollection( + new PeerCollection(connectionSampler, pinnedPeer, new DateProvider()), + ); // Both peers have all transactions const peerTransactions = new Map([ @@ -1555,8 +1586,9 @@ describe('BatchTxRequester', () => { await 
BatchTxRequester.collectAllTxs(requester.run()); // Verify pinned peer was never marked as smart - expect(peerCollection.getSmartPeers()).not.toContain(pinnedPeer.toString()); - expect(peerCollection.getSmartPeersToQuery()).not.toContain(pinnedPeer.toString()); + expect(sampleAllPeers(peerCollection.nextSmartPeerToQuery.bind(peerCollection))).not.toContain( + pinnedPeer.toString(), + ); }); it('should handle pinned peer being rate limited and recover', async () => { @@ -1574,7 +1606,7 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); const [pinnedPeer, regularPeer] = peers; - const peerCollection = new TestPeerCollection(new PeerCollection(peers, pinnedPeer, clock)); + const peerCollection = new TestPeerCollection(new PeerCollection(connectionSampler, pinnedPeer, clock)); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); @@ -1660,7 +1692,9 @@ describe('BatchTxRequester', () => { const peers = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); const [pinnedPeer, regularPeer] = peers; - const peerCollection = new TestPeerCollection(new PeerCollection(peers, pinnedPeer, new DateProvider())); + const peerCollection = new TestPeerCollection( + new PeerCollection(connectionSampler, pinnedPeer, new DateProvider()), + ); connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue(peers); @@ -1755,6 +1789,261 @@ describe('BatchTxRequester', () => { }); }); +describe('PeerCollection - Dynamic peer list', () => { + it('should reflect new peers joining', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1, peer2, peer3] = await Promise.all([ + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + ]); + + // Start with 2 peers + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2]); + const pc = new 
PeerCollection(connectionSampler, undefined, dateProvider); + + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer1, peer2]); + + // A third peer joins + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3]); + + // peer3 is the first unqueried peer (peer1 and peer2 were already sampled), + // then the round resets and continues from peer1. + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer3, peer1, peer2]); + }); + + it('should reflect peers leaving', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1, peer2, peer3] = await Promise.all([ + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + ]); + + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3]); + const pc = new PeerCollection(connectionSampler, undefined, dateProvider); + + // peer2 disconnects + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer3]); + + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer1, peer3]); + }); + + it('should reflect new smart peers joining', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1, peer2, peer3] = await Promise.all([ + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + ]); + + // Start with 2 peers, both marked smart + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2]); + const pc = new PeerCollection(connectionSampler, undefined, dateProvider); + pc.markPeerSmart(peer1); + pc.markPeerSmart(peer2); + + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer1, peer2]); + + // A third smart peer joins + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3]); + pc.markPeerSmart(peer3); + + // peer3 is the first unqueried smart peer (peer1 and peer2 were already sampled), + // then 
the round resets and continues from peer1. + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer3, peer1, peer2]); + }); + + it('should reflect smart peers leaving', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1, peer2, peer3] = await Promise.all([ + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + ]); + + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3]); + const pc = new PeerCollection(connectionSampler, undefined, dateProvider); + pc.markPeerSmart(peer1); + pc.markPeerSmart(peer2); + pc.markPeerSmart(peer3); + + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer1, peer2, peer3]); + + // peer2 disconnects + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer3]); + + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer1, peer3]); + }); + + it('should retain smart status after disconnect and reconnect', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1, peer2] = await Promise.all([createSecp256k1PeerId(), createSecp256k1PeerId()]); + + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2]); + const pc = new PeerCollection(connectionSampler, undefined, dateProvider); + pc.markPeerSmart(peer1); + + // peer1 disconnects — no longer available as smart + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer2]); + + expect(pc.nextSmartPeerToQuery()).toBeUndefined(); + + // peer1 reconnects — should still be smart + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2]); + + expect(pc.nextSmartPeerToQuery()?.toString()).toBe(peer1.toString()); + // peer2 should still be dumb + expect(pc.nextDumbPeerToQuery()?.toString()).toBe(peer2.toString()); + }); + + it('should return undefined when all peers leave', async () => { + const 
connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1] = await Promise.all([createSecp256k1PeerId()]); + + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1]); + const pc = new PeerCollection(connectionSampler, undefined, dateProvider); + + expect(pc.nextDumbPeerToQuery()).not.toBeUndefined(); + + // All peers disconnect + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([]); + + expect(pc.nextDumbPeerToQuery()).toBeUndefined(); + expect(pc.nextSmartPeerToQuery()).toBeUndefined(); + }); + + it('should recover when all peers disconnect and more peers reconnect', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const peers = await Promise.all(Array.from({ length: 6 }, () => createSecp256k1PeerId())); + const [peer1, peer2, peer3, peer4, peer5, peer6] = peers; + + // Start with 3 peers + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3]); + const pc = new PeerCollection(connectionSampler, undefined, dateProvider); + + // Sample one peer before disconnection + const firstSampled = pc.nextDumbPeerToQuery(); + expect(firstSampled).not.toBeUndefined(); + + // All peers disconnect + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([]); + + expect(pc.nextDumbPeerToQuery()).toBeUndefined(); + expect(pc.nextDumbPeerToQuery()).toBeUndefined(); + + // Original 3 plus 3 new peers connect + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3, peer4, peer5, peer6]); + + // peer1 was already sampled before disconnect, so it appears last in the first cycle + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer2, peer3, peer4, peer5, peer6, peer1]); + }); + + it('should exclude pinned peer dumb sampling, and smart sampling', async () => { + const connectionSampler = mock(); + const dateProvider = new DateProvider(); + + const [peer1, peer2, 
pinnedPeer] = await Promise.all([ + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + ]); + + // Connection sampler returns all 3 peers including the pinned one + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([pinnedPeer, peer1, peer2]); + const pc = new PeerCollection(connectionSampler, pinnedPeer, dateProvider); + + // Pinned peer is excluded from dumb sampling + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer1, peer2]); + + // Mark all peers as smart (including pinned) + pc.markPeerSmart(peer1); + pc.markPeerSmart(peer2); + pc.markPeerSmart(pinnedPeer); + + // Pinned peer is excluded from smart sampling + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer1, peer2]); + }); + + it('should exclude bad, in-flight, and rate-limited peers from available counts', async () => { + const connectionSampler = mock(); + const clock = new TestClock(); + + const [peer1, peer2, peer3, peer4] = await Promise.all([ + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + createSecp256k1PeerId(), + ]); + + connectionSampler.getPeerListSortedByConnectionCountAsc.mockReturnValue([peer1, peer2, peer3, peer4]); + const pc = new PeerCollection(connectionSampler, undefined, clock, /* badPeerThreshold */ 0); + + // All 4 are dumb initially + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer1, peer2, peer3, peer4]); + + // Mark peer1 as bad (threshold=0 means first penalty marks as bad) + pc.penalisePeer(peer1, PeerErrorSeverity.HighToleranceError); + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer2, peer3, peer4]); + + // Mark peer2 as in-flight + pc.markPeerInFlight(peer2); + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer3, peer4]); + + // Mark peer3 as rate-limited + pc.markPeerRateLimitExceeded(peer3); + assertPeerSequence(pc.nextDumbPeerToQuery.bind(pc), [peer4]); + + // Now test smart counts: promote peer1-peer4 to smart + pc.markPeerSmart(peer1); + 
pc.markPeerSmart(peer2); + pc.markPeerSmart(peer3); + pc.markPeerSmart(peer4); + + // peer1 is bad, peer2 is in-flight, peer3 is rate-limited → only peer4 available + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer4]); + + // Undo exclusions and verify counts recover + pc.unMarkPeerAsBad(peer1); + pc.unMarkPeerInFlight(peer2); + clock.advanceTo(RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL); + + expect(pc.nextDumbPeerToQuery()).toBeUndefined(); + expect(pc.nextSmartPeerToQuery()?.toString()).toBe(peer1.toString()); + expect(pc.nextSmartPeerToQuery()?.toString()).toBe(peer2.toString()); + expect(pc.nextSmartPeerToQuery()?.toString()).toBe(peer3.toString()); + + assertPeerSequence(pc.nextSmartPeerToQuery.bind(pc), [peer1, peer2, peer3, peer4]); // all are smart now + }); + + function assertPeerSequence(sampler: () => PeerId | undefined, expectedPeers: PeerId[] | string[]) { + for (let i: number = 0; i < expectedPeers.length; i++) { + const currentPeer = sampler()?.toString(); + expect(currentPeer).toBe(expectedPeers[i].toString()); + } + + // We need to loop twice to be sure that we don't have any extra peers. 
+ for (let i: number = 0; i < expectedPeers.length; i++) { + const currentPeer = sampler()?.toString(); + expect(currentPeer).toBe(expectedPeers[i].toString()); + } + } +}); + const makeTx = (txHash?: string | TxHash) => Tx.random({ txHash }) as Tx; const createRequestLogger = ( @@ -1845,25 +2134,17 @@ export class TestPeerCollection implements IPeerCollection { constructor(private readonly inner: PeerCollection) {} - getAllPeers(): Set { - return this.inner.getAllPeers(); - } - - getSmartPeers(): Set { - return this.inner.getSmartPeers(); - } - markPeerSmart(peerId: any): void { this.smartPeersMarked.push(peerId.toString()); return this.inner.markPeerSmart(peerId); } - getSmartPeersToQuery(): Array { - return this.inner.getSmartPeersToQuery(); + nextSmartPeerToQuery(): PeerId | undefined { + return this.inner.nextSmartPeerToQuery(); } - getDumbPeersToQuery(): Array { - return this.inner.getDumbPeersToQuery(); + nextDumbPeerToQuery(): PeerId | undefined { + return this.inner.nextDumbPeerToQuery(); } thereAreSomeDumbRatelimitExceededPeers(): boolean { diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts index 309b41c3f4bd..d468b06ef7c3 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts @@ -8,7 +8,6 @@ import { PeerErrorSeverity } from '@aztec/stdlib/p2p'; import { Tx, TxArray, TxHash } from '@aztec/stdlib/tx'; import type { PeerId } from '@libp2p/interface'; -import { peerIdFromString } from '@libp2p/peer-id'; import type { IMissingTxsTracker } from '../../tx_collection/missing_txs_tracker.js'; import { ReqRespSubProtocol } from '.././interface.js'; @@ -90,10 +89,9 @@ export class BatchTxRequester { if (this.opts.peerCollection) { this.peers = this.opts.peerCollection; } else { - const initialPeers = 
this.p2pService.connectionSampler.getPeerListSortedByConnectionCountAsc(); const badPeerThreshold = this.opts.badPeerThreshold ?? DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD; this.peers = new PeerCollection( - initialPeers, + this.p2pService.connectionSampler, this.pinnedPeer, this.dateProvider, badPeerThreshold, @@ -227,7 +225,6 @@ export class BatchTxRequester { * Starts dumb worker loops * */ private async dumbRequester() { - const nextPeerIndex = this.makeRoundRobinIndexer(); const nextBatchIndex = this.makeRoundRobinIndexer(); // Chunk missing tx hashes into batches of txBatchSize, wrapping around to ensure no peer gets less than txBatchSize @@ -263,15 +260,9 @@ export class BatchTxRequester { return { blockRequest, txs }; }; - const nextPeer = () => { - const peers = this.peers.getDumbPeersToQuery(); - const idx = nextPeerIndex(() => peers.length); - return idx === undefined ? undefined : peerIdFromString(peers[idx]); - }; - - const workerCount = Math.min(this.dumbParallelWorkerCount, this.peers.getAllPeers().size); + const workerCount = this.dumbParallelWorkerCount; const workers = Array.from({ length: workerCount }, (_, index) => - this.dumbWorkerLoop(nextPeer, makeRequest, index + 1), + this.dumbWorkerLoop(this.peers.nextDumbPeerToQuery.bind(this.peers), makeRequest, index + 1), ); await Promise.allSettled(workers); @@ -332,14 +323,6 @@ export class BatchTxRequester { * Starts smart worker loops * */ private async smartRequester() { - const nextPeerIndex = this.makeRoundRobinIndexer(); - - const nextPeer = () => { - const peers = this.peers.getSmartPeersToQuery(); - const idx = nextPeerIndex(() => peers.length); - return idx === undefined ? 
undefined : peerIdFromString(peers[idx]); - }; - const makeRequest = (pid: PeerId) => { const txs = this.txsMetadata.getTxsToRequestFromThePeer(pid); const blockRequest = BlockTxsRequest.fromTxsSourceAndMissingTxs(this.blockTxsSource, txs); @@ -350,9 +333,8 @@ export class BatchTxRequester { return { blockRequest, txs }; }; - const workers = Array.from( - { length: Math.min(this.smartParallelWorkerCount, this.peers.getAllPeers().size) }, - (_, index) => this.smartWorkerLoop(nextPeer, makeRequest, index + 1), + const workers = Array.from({ length: this.smartParallelWorkerCount }, (_, index) => + this.smartWorkerLoop(this.peers.nextSmartPeerToQuery.bind(this.peers), makeRequest, index + 1), ); await Promise.allSettled(workers); @@ -387,26 +369,18 @@ export class BatchTxRequester { if (weRanOutOfPeersToQuery) { this.logger.debug(`Worker loop smart: No more peers to query`); - // If there are no more dumb peers to query then none of our peers can become smart, - // thus we can simply exit this worker - const noMoreDumbPeersToQuery = this.peers.getDumbPeersToQuery().length === 0; - if (noMoreDumbPeersToQuery) { - // These might be either smart peers that will get unblocked after _some time_ - const nextSmartPeerDelay = this.peers.getNextSmartPeerAvailabilityDelayMs(); - const thereAreSomeRateLimitedSmartPeers = nextSmartPeerDelay !== undefined; - if (thereAreSomeRateLimitedSmartPeers) { - await this.sleepClampedToDeadline(nextSmartPeerDelay); - continue; - } - - this.logger.debug(`Worker loop smart: No more smart peers to query killing ${workerIndex}`); - break; + // If we have rate limited peers wait for them. + const nextSmartPeerDelay = this.peers.getNextSmartPeerAvailabilityDelayMs(); + const thereAreSomeRateLimitedSmartPeers = nextSmartPeerDelay !== undefined; + if (thereAreSomeRateLimitedSmartPeers) { + await this.sleepClampedToDeadline(nextSmartPeerDelay); + continue; } - // Otherwise there are still some dumb peers that could become smart. 
// We end up here when all known smart peers became temporarily unavailable via combination of // (bad, in-flight, or rate-limited) or in some weird scenario all current smart peers turn bad which is permanent - // but dumb peers still exist that could become smart. + // but there are dumb peers that could be promoted + // or new peer can join as dumb and be promoted later // // When a dumb peer responds with valid txIndices, it gets // promoted to smart and releases the semaphore, waking this worker. @@ -599,9 +573,7 @@ export class BatchTxRequester { this.markTxsPeerHas(peerId, response); // Unblock smart workers - if (this.peers.getSmartPeersToQuery().length <= this.smartParallelWorkerCount) { - this.smartRequesterSemaphore.release(); - } + this.smartRequesterSemaphore.release(); } private isBlockResponseValid(response: BlockTxsResponse): boolean { diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/peer_collection.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/peer_collection.ts index ca8e1c4b61c0..4c7baec5316d 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/peer_collection.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/peer_collection.ts @@ -2,18 +2,22 @@ import type { DateProvider } from '@aztec/foundation/timer'; import type { PeerErrorSeverity } from '@aztec/stdlib/p2p'; import type { PeerId } from '@libp2p/interface'; +import { peerIdFromString } from '@libp2p/peer-id'; +import type { ConnectionSampler } from '../connection-sampler/connection_sampler.js'; import { DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD } from './config.js'; import type { IPeerPenalizer } from './interface.js'; export const RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL = 1000; // 1s export interface IPeerCollection { - getAllPeers(): Set; - getSmartPeers(): Set; markPeerSmart(peerId: PeerId): void; - getSmartPeersToQuery(): Array; - getDumbPeersToQuery(): Array; + + /** Sample next peer in round-robin fashion. 
No smart peers if returns undefined */ + nextSmartPeerToQuery(): PeerId | undefined; + /** Sample next peer in round-robin fashion. No dumb peers if returns undefined */ + nextDumbPeerToQuery(): PeerId | undefined; + thereAreSomeDumbRatelimitExceededPeers(): boolean; penalisePeer(peerId: PeerId, severity: PeerErrorSeverity): void; unMarkPeerAsBad(peerId: PeerId): void; @@ -28,8 +32,6 @@ export interface IPeerCollection { } export class PeerCollection implements IPeerCollection { - private readonly peers; - private readonly smartPeers = new Set(); private readonly inFlightPeers = new Set(); private readonly rateLimitExceededPeers = new Map(); @@ -37,46 +39,60 @@ export class PeerCollection implements IPeerCollection { private readonly badPeers = new Set(); constructor( - initialPeers: PeerId[], + private readonly connectionSampler: Pick, private readonly pinnedPeerId: PeerId | undefined, private readonly dateProvider: DateProvider, private readonly badPeerThreshold: number = DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD, private readonly peerPenalizer?: IPeerPenalizer, ) { - this.peers = new Set(initialPeers.map(peer => peer.toString())); - - // Pinned peer is treaded specially, always mark it as in-flight + // Pinned peer is treated specially, always mark it as in-flight // and never return it as part of smart/dumb peers if (this.pinnedPeerId) { const peerIdStr = this.pinnedPeerId.toString(); this.inFlightPeers.add(peerIdStr); - this.peers.delete(peerIdStr); } } - public getAllPeers(): Set { - return this.peers; + public markPeerSmart(peerId: PeerId): void { + this.smartPeers.add(peerId.toString()); } - public getSmartPeers(): Set { - return this.smartPeers; + // We keep track of all peers that are queried for peer sampling algorithm + private queriedSmartPeers: Set = new Set(); + private queriedDumbPeers: Set = new Set(); + + private static nextPeer(allPeers: Set, queried: Set): PeerId | undefined { + if (allPeers.size === 0) { + return undefined; + } + const 
availablePeers = allPeers.difference(queried); + let [first] = availablePeers; + if (first === undefined) { + // We queried all peers. Start over + [first] = allPeers; + queried.clear(); + } + queried.add(first); + return peerIdFromString(first); } - public markPeerSmart(peerId: PeerId): void { - this.smartPeers.add(peerId.toString()); + public nextSmartPeerToQuery(): PeerId | undefined { + return PeerCollection.nextPeer(this.availableSmartPeers, this.queriedSmartPeers); + } + + public nextDumbPeerToQuery(): PeerId | undefined { + return PeerCollection.nextPeer(this.availableDumbPeers, this.queriedDumbPeers); } - public getSmartPeersToQuery(): Array { - return Array.from( + private get availableSmartPeers(): Set { + return this.peers.intersection( this.smartPeers.difference(this.getBadPeers().union(this.inFlightPeers).union(this.getRateLimitExceededPeers())), ); } - public getDumbPeersToQuery(): Array { - return Array.from( - this.peers.difference( - this.smartPeers.union(this.getBadPeers()).union(this.inFlightPeers).union(this.getRateLimitExceededPeers()), - ), + private get availableDumbPeers(): Set { + return this.peers.difference( + this.smartPeers.union(this.getBadPeers()).union(this.inFlightPeers).union(this.getRateLimitExceededPeers()), ); } @@ -202,4 +218,27 @@ export class PeerCollection implements IPeerCollection { return minExpiry! - now; } + + private orderedPeers: Set = new Set(); + + private get peers(): Set { + const pinnedStr = this.pinnedPeerId?.toString(); + const currentlyConnected = new Set( + this.connectionSampler + .getPeerListSortedByConnectionCountAsc() + .map(p => p.toString()) + .filter(p => p !== pinnedStr), + ); + + // Remove disconnected peers, preserving order of the rest. + this.orderedPeers = this.orderedPeers.intersection(currentlyConnected); + + // Append newly connected peers at the end (lowest priority). 
+ for (const peer of currentlyConnected) { + if (!this.orderedPeers.has(peer)) { + this.orderedPeers.add(peer); + } + } + return this.orderedPeers; + } } diff --git a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/tx_validator.ts b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/tx_validator.ts index cbe33da83d04..3f79866a31c9 100644 --- a/yarn-project/p2p/src/services/reqresp/batch-tx-requester/tx_validator.ts +++ b/yarn-project/p2p/src/services/reqresp/batch-tx-requester/tx_validator.ts @@ -1,7 +1,7 @@ import type { ClientProtocolCircuitVerifier } from '@aztec/stdlib/interfaces/server'; import { Tx, type TxValidationResult, type TxValidator } from '@aztec/stdlib/tx'; -import { createTxReqRespValidator } from '../../../msg_validators/tx_validator/factory.js'; +import { createTxValidatorForReqResponseReceivedTxs } from '../../../msg_validators/index.js'; export interface BatchRequestTxValidatorConfig { l1ChainId: number; @@ -29,7 +29,7 @@ export class BatchRequestTxValidator implements IBatchRequestTxValidator { } static createRequestedTxValidator(config: BatchRequestTxValidatorConfig): TxValidator { - return createTxReqRespValidator(config.proofVerifier, { + return createTxValidatorForReqResponseReceivedTxs(config.proofVerifier, { l1ChainId: config.l1ChainId, rollupVersion: config.rollupVersion, }); diff --git a/yarn-project/p2p/src/services/service.ts b/yarn-project/p2p/src/services/service.ts index 62dcaa1c2c51..59594e169788 100644 --- a/yarn-project/p2p/src/services/service.ts +++ b/yarn-project/p2p/src/services/service.ts @@ -139,7 +139,7 @@ export interface P2PService { /** Returns the number of peers in the GossipSub mesh for a given topic type. 
*/ getGossipMeshPeerCount(topicType: TopicType): number; - validate(txs: Tx[]): Promise; + validateTxsReceivedInBlockProposal(txs: Tx[]): Promise; addReqRespSubProtocol( subProtocol: ReqRespSubProtocol, diff --git a/yarn-project/p2p/src/services/tx_provider.test.ts b/yarn-project/p2p/src/services/tx_provider.test.ts index f21980fffccd..f2effc763541 100644 --- a/yarn-project/p2p/src/services/tx_provider.test.ts +++ b/yarn-project/p2p/src/services/tx_provider.test.ts @@ -17,7 +17,7 @@ describe('TxProvider', () => { // Dependencies let txCollection: MockProxy; let txPool: MockProxy; - let txValidator: MockProxy>; + let txValidator: MockProxy>; // Subject under test let txProvider: TestTxProvider; @@ -81,7 +81,7 @@ describe('TxProvider', () => { txCollection = mock(); txPool = mock(); - txValidator = mock>(); + txValidator = mock>(); txPool.getTxsByHash.mockImplementation(txHashes => Promise.resolve(txHashes.map(txHash => txPools.get(txHash.toString()))), diff --git a/yarn-project/p2p/src/services/tx_provider.ts b/yarn-project/p2p/src/services/tx_provider.ts index 7279609db8cd..311e31162351 100644 --- a/yarn-project/p2p/src/services/tx_provider.ts +++ b/yarn-project/p2p/src/services/tx_provider.ts @@ -25,7 +25,7 @@ export class TxProvider implements ITxProvider { constructor( private txCollection: TxCollection, private txPool: TxPoolV2, - private txValidator: Pick, + private blockProposalTransactionValidator: Pick, private log: Logger = createLogger('p2p:tx-collector'), client: TelemetryClient = getTelemetryClient(), ) { @@ -227,7 +227,7 @@ export class TxProvider implements ITxProvider { if (txs.length === 0) { return; } - await this.txValidator.validate(txs); + await this.blockProposalTransactionValidator.validateTxsReceivedInBlockProposal(txs); await this.txPool.addProtectedTxs(txs, blockHeader); } } diff --git a/yarn-project/p2p/src/test-helpers/testbench-utils.ts b/yarn-project/p2p/src/test-helpers/testbench-utils.ts index ed9e8f3567f4..a4f249ecbe69 100644 --- 
a/yarn-project/p2p/src/test-helpers/testbench-utils.ts +++ b/yarn-project/p2p/src/test-helpers/testbench-utils.ts @@ -76,7 +76,7 @@ export class InMemoryTxPool extends EventEmitter implements TxPoolV2 { return Promise.resolve({ accepted, ignored: [], rejected: [] }); } - canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored' | 'rejected'> { + canAddPendingTx(tx: Tx): Promise<'accepted' | 'ignored'> { const key = tx.getTxHash().toString(); if (this.txsByHash.has(key)) { return Promise.resolve('ignored'); diff --git a/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.bench.test.ts b/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.bench.test.ts new file mode 100644 index 000000000000..3c4694db53cf --- /dev/null +++ b/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.bench.test.ts @@ -0,0 +1,168 @@ +import { CONTRACT_CLASS_LOG_SIZE_IN_FIELDS } from '@aztec/constants'; +import { BlockNumber, CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types'; +import { timesAsync } from '@aztec/foundation/collection'; +import { Fr } from '@aztec/foundation/curves/bn254'; +import { createLogger } from '@aztec/foundation/log'; +import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree'; +import { ProtocolContractsList } from '@aztec/protocol-contracts'; +import { computeFeePayerBalanceLeafSlot } from '@aztec/protocol-contracts/fee-juice'; +import { PublicDataWrite } from '@aztec/stdlib/avm'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; +import { EthAddress } from '@aztec/stdlib/block'; +import { GasFees } from '@aztec/stdlib/gas'; +import { ContractClassLog, ContractClassLogFields } from '@aztec/stdlib/logs'; +import { mockProcessedTx } from '@aztec/stdlib/testing'; +import { PublicDataTreeLeaf } from '@aztec/stdlib/trees'; +import type { CheckpointGlobalVariables, ProcessedTx } from '@aztec/stdlib/tx'; +import { GlobalVariables } from '@aztec/stdlib/tx'; +import { 
NativeWorldStateService } from '@aztec/world-state/native'; + +import { afterAll, afterEach, beforeEach, describe, it, jest } from '@jest/globals'; +import { mkdir, writeFile } from 'node:fs/promises'; +import path from 'node:path'; + +import { LightweightCheckpointBuilder } from './lightweight_checkpoint_builder.js'; + +jest.setTimeout(300_000); + +const logger = createLogger('bench:lightweight-checkpoint-builder'); + +describe('LightweightCheckpointBuilder benchmarks', () => { + let worldState: NativeWorldStateService; + let feePayer: AztecAddress; + let feePayerBalance: Fr; + + const results: { name: string; value: number; unit: string }[] = []; + + const toPrettyString = () => + results.map(({ name, value, unit }) => `${name}: ${value.toFixed(2)} ${unit}`).join('\n'); + + const toGithubActionBenchmarkJSON = (indent = 2) => JSON.stringify(results, null, indent); + + beforeEach(async () => { + feePayer = AztecAddress.fromNumber(42222); + feePayerBalance = new Fr(10n ** 20n); + const feePayerSlot = await computeFeePayerBalanceLeafSlot(feePayer); + const prefilledPublicData = [new PublicDataTreeLeaf(feePayerSlot, feePayerBalance)]; + worldState = await NativeWorldStateService.tmp(undefined, true, prefilledPublicData); + }); + + afterEach(async () => { + await worldState.close(); + }); + + afterAll(async () => { + if (process.env.BENCH_OUTPUT) { + await mkdir(path.dirname(process.env.BENCH_OUTPUT), { recursive: true }); + await writeFile(process.env.BENCH_OUTPUT, toGithubActionBenchmarkJSON()); + } else { + logger.info(`\n${toPrettyString()}\n`); + } + }); + + const makeCheckpointConstants = (slotNumber: SlotNumber): CheckpointGlobalVariables => ({ + chainId: Fr.ZERO, + version: Fr.ZERO, + slotNumber, + timestamp: BigInt(slotNumber) * 123n, + coinbase: EthAddress.ZERO, + feeRecipient: AztecAddress.ZERO, + gasFees: GasFees.empty(), + }); + + const makeGlobalVariables = (blockNumber: BlockNumber, slotNumber: SlotNumber): GlobalVariables => + GlobalVariables.from({ + 
chainId: Fr.ZERO, + version: Fr.ZERO, + blockNumber, + slotNumber, + timestamp: BigInt(blockNumber) * 123n, + coinbase: EthAddress.ZERO, + feeRecipient: AztecAddress.ZERO, + gasFees: GasFees.empty(), + }); + + const makeProcessedTx = async (globalVariables: GlobalVariables, seed: number): Promise => { + const tx = await mockProcessedTx({ + seed, + globalVariables, + vkTreeRoot: getVKTreeRoot(), + protocolContracts: ProtocolContractsList, + feePayer, + }); + + feePayerBalance = new Fr(feePayerBalance.toBigInt() - tx.txEffect.transactionFee.toBigInt()); + const feePayerSlot = await computeFeePayerBalanceLeafSlot(feePayer); + const feePaymentPublicDataWrite = new PublicDataWrite(feePayerSlot, feePayerBalance); + tx.txEffect.publicDataWrites[0] = feePaymentPublicDataWrite; + if (tx.avmProvingRequest) { + tx.avmProvingRequest.inputs.publicInputs.accumulatedData.publicDataWrites[0] = feePaymentPublicDataWrite; + } + + return tx; + }; + + /** Creates a tx with no side effects but a full contract class log. */ + const makeLogsHeavyProcessedTx = async (globalVariables: GlobalVariables, seed: number): Promise => { + const tx = await makeProcessedTx(globalVariables, seed); + + // Strip side effects: keep only the tx hash nullifier and fee payment public data write. + tx.txEffect.noteHashes = []; + tx.txEffect.nullifiers = [tx.txEffect.nullifiers[0]]; + tx.txEffect.l2ToL1Msgs = []; + tx.txEffect.privateLogs = []; + tx.txEffect.publicDataWrites = [tx.txEffect.publicDataWrites[0]]; + + // Add a full contract class log (CONTRACT_CLASS_LOG_SIZE_IN_FIELDS = 3,023 blob fields). 
+ tx.txEffect.contractClassLogs = [ + new ContractClassLog( + AztecAddress.fromNumber(seed), + ContractClassLogFields.random(CONTRACT_CLASS_LOG_SIZE_IN_FIELDS), + CONTRACT_CLASS_LOG_SIZE_IN_FIELDS, + ), + ]; + + return tx; + }; + + type TxFactory = (globalVariables: GlobalVariables, seed: number) => Promise; + + const testCases: { label: string; numTxs: number; makeTx: TxFactory }[] = [ + { label: 'worst-case', numTxs: 4, makeTx: makeProcessedTx }, + { label: 'worst-case', numTxs: 8, makeTx: makeProcessedTx }, + { label: 'worst-case', numTxs: 16, makeTx: makeProcessedTx }, + { label: 'class-log-heavy', numTxs: 4, makeTx: makeLogsHeavyProcessedTx }, + { label: 'class-log-heavy', numTxs: 8, makeTx: makeLogsHeavyProcessedTx }, + ]; + + describe('addBlock breakdown', () => { + it.each(testCases)('$label $numTxs txs', async ({ label, numTxs, makeTx }) => { + const slotNumber = SlotNumber(15); + const blockNumber = BlockNumber(1); + const constants = makeCheckpointConstants(slotNumber); + const fork = await worldState.fork(); + + const builder = await LightweightCheckpointBuilder.startNewCheckpoint( + CheckpointNumber(1), + constants, + [], + [], + fork, + ); + + const globalVariables = makeGlobalVariables(blockNumber, slotNumber); + const txs = await timesAsync(numTxs, i => makeTx(globalVariables, 5000 + i)); + + const { timings } = await builder.addBlock(globalVariables, txs, { insertTxsEffects: true }); + + const prefix = `addBlock/${label}/${numTxs} txs`; + for (const [step, ms] of Object.entries(timings)) { + results.push({ name: `${prefix}/${step}`, value: ms, unit: 'ms' }); + } + const total = Object.values(timings).reduce((a, b) => a + b, 0); + results.push({ name: `${prefix}/total`, value: total, unit: 'ms' }); + + await fork.close(); + }); + }); +}); diff --git a/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.test.ts b/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.test.ts index 8c791e955acc..0777f473ab69 100644 --- 
a/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.test.ts +++ b/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.test.ts @@ -109,7 +109,7 @@ describe('LightweightCheckpointBuilder', () => { // Build empty block const globalVariables = makeGlobalVariables(blockNumber, slotNumber); - const block = await checkpointBuilder.addBlock(globalVariables, [], { insertTxsEffects: true }); + const { block } = await checkpointBuilder.addBlock(globalVariables, [], { insertTxsEffects: true }); expect(block.header.globalVariables.blockNumber).toEqual(blockNumber); @@ -155,7 +155,7 @@ describe('LightweightCheckpointBuilder', () => { tx.txEffect.l2ToL1Msgs.push(...msgs); // Build block with tx - insertTxsEffects will handle inserting side effects - const block = await checkpointBuilder.addBlock(globalVariables, [tx], { + const { block } = await checkpointBuilder.addBlock(globalVariables, [tx], { insertTxsEffects: true, }); @@ -202,7 +202,7 @@ describe('LightweightCheckpointBuilder', () => { const txs = await timesAsync(3, i => makeProcessedTx(globalVariables, 1000 + i)); // Build block with txs - insertTxsEffects will handle inserting side effects - const block = await checkpointBuilder.addBlock(globalVariables, txs, { + const { block } = await checkpointBuilder.addBlock(globalVariables, txs, { insertTxsEffects: true, }); @@ -248,7 +248,7 @@ describe('LightweightCheckpointBuilder', () => { const txs = await timesAsync(txsPerBlock, j => makeProcessedTx(globalVariables, 2000 + i * 10 + j)); // Build block - insertTxsEffects will handle inserting side effects - const block = await checkpointBuilder.addBlock(globalVariables, txs, { + const { block } = await checkpointBuilder.addBlock(globalVariables, txs, { insertTxsEffects: true, }); diff --git a/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.ts b/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.ts index cb789075c3f8..ab32be72936d 100644 --- 
a/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.ts +++ b/yarn-project/prover-client/src/light/lightweight_checkpoint_builder.ts @@ -4,6 +4,7 @@ import { type CheckpointNumber, IndexWithinCheckpoint } from '@aztec/foundation/ import { padArrayEnd } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; import { type Logger, type LoggerBindings, createLogger } from '@aztec/foundation/log'; +import { elapsed } from '@aztec/foundation/timer'; import { L2Block } from '@aztec/stdlib/block'; import { Checkpoint } from '@aztec/stdlib/checkpoint'; import type { MerkleTreeWriteOperations } from '@aztec/stdlib/interfaces/server'; @@ -161,7 +162,8 @@ export class LightweightCheckpointBuilder { globalVariables: GlobalVariables, txs: ProcessedTx[], opts: { insertTxsEffects?: boolean; expectedEndState?: StateReference } = {}, - ): Promise { + ): Promise<{ block: L2Block; timings: Record }> { + const timings: Record = {}; const isFirstBlock = this.blocks.length === 0; // Empty blocks are only allowed as the first block in a checkpoint @@ -170,7 +172,9 @@ export class LightweightCheckpointBuilder { } if (isFirstBlock) { - this.lastArchives.push(await getTreeSnapshot(MerkleTreeId.ARCHIVE, this.db)); + const [msGetInitialArchive, initialArchive] = await elapsed(() => getTreeSnapshot(MerkleTreeId.ARCHIVE, this.db)); + this.lastArchives.push(initialArchive); + timings.getInitialArchive = msGetInitialArchive; } const lastArchive = this.lastArchives.at(-1)!; @@ -180,12 +184,17 @@ export class LightweightCheckpointBuilder { `Inserting side effects for ${txs.length} txs for block ${globalVariables.blockNumber} into db`, { txs: txs.map(tx => tx.hash.toString()) }, ); + let msInsertSideEffects = 0; for (const tx of txs) { - await insertSideEffects(tx, this.db); + const [ms] = await elapsed(() => insertSideEffects(tx, this.db)); + msInsertSideEffects += ms; } + timings.insertSideEffects = msInsertSideEffects; } - const endState = await 
this.db.getStateReference(); + const [msGetEndState, endState] = await elapsed(() => this.db.getStateReference()); + timings.getEndState = msGetEndState; + if (opts.expectedEndState && !endState.equals(opts.expectedEndState)) { this.logger.error('End state after processing txs does not match expected end state', { globalVariables: globalVariables.toInspect(), @@ -195,26 +204,24 @@ export class LightweightCheckpointBuilder { throw new Error(`End state does not match expected end state when building block ${globalVariables.blockNumber}`); } - const { header, body, blockBlobFields } = await buildHeaderAndBodyFromTxs( - txs, - lastArchive, - endState, - globalVariables, - this.spongeBlob, - isFirstBlock, + const [msBuildHeaderAndBody, { header, body, blockBlobFields }] = await elapsed(() => + buildHeaderAndBodyFromTxs(txs, lastArchive, endState, globalVariables, this.spongeBlob, isFirstBlock), ); + timings.buildHeaderAndBody = msBuildHeaderAndBody; header.state.validate(); await this.db.updateArchive(header); - const newArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, this.db); + const [msUpdateArchive, newArchive] = await elapsed(() => getTreeSnapshot(MerkleTreeId.ARCHIVE, this.db)); + timings.updateArchive = msUpdateArchive; this.lastArchives.push(newArchive); const indexWithinCheckpoint = IndexWithinCheckpoint(this.blocks.length); const block = new L2Block(newArchive, header, body, this.checkpointNumber, indexWithinCheckpoint); this.blocks.push(block); - await this.spongeBlob.absorb(blockBlobFields); + const [msSpongeAbsorb] = await elapsed(() => this.spongeBlob.absorb(blockBlobFields)); + timings.spongeAbsorb = msSpongeAbsorb; this.blobFields.push(...blockBlobFields); this.logger.debug(`Built block ${header.getBlockNumber()}`, { @@ -225,7 +232,7 @@ export class LightweightCheckpointBuilder { txs: block.body.txEffects.map(tx => tx.txHash.toString()), }); - return block; + return { block, timings }; } async completeCheckpoint(): Promise { diff --git 
a/yarn-project/prover-client/src/mocks/test_context.ts b/yarn-project/prover-client/src/mocks/test_context.ts index b27adcec0d97..8efb5d8b7e1c 100644 --- a/yarn-project/prover-client/src/mocks/test_context.ts +++ b/yarn-project/prover-client/src/mocks/test_context.ts @@ -262,7 +262,7 @@ export class TestContext { const txs = blockTxs[i]; const state = blockEndStates[i]; - const block = await builder.addBlock(blockGlobalVariables[i], txs, { + const { block } = await builder.addBlock(blockGlobalVariables[i], txs, { expectedEndState: state, insertTxsEffects: true, }); diff --git a/yarn-project/prover-client/src/proving_broker/proving_broker.ts b/yarn-project/prover-client/src/proving_broker/proving_broker.ts index e9d7c191e977..8d9db0e1bf6e 100644 --- a/yarn-project/prover-client/src/proving_broker/proving_broker.ts +++ b/yarn-project/prover-client/src/proving_broker/proving_broker.ts @@ -314,7 +314,7 @@ export class ProvingBroker implements ProvingJobProducer, ProvingJobConsumer, Pr // notify listeners of the cancellation if (!this.resultsCache.has(id)) { this.logger.info(`Cancelling job id=${id}`, { provingJobId: id }); - await this.#reportProvingJobError(id, 'Aborted', false); + await this.#reportProvingJobError(id, 'Aborted', false, undefined, true); } } @@ -395,6 +395,7 @@ export class ProvingBroker implements ProvingJobProducer, ProvingJobConsumer, Pr err: string, retry = false, filter?: ProvingJobFilter, + aborted = false, ): Promise { const info = this.inProgress.get(id); const item = this.jobsCache.get(id); @@ -455,7 +456,11 @@ export class ProvingBroker implements ProvingJobProducer, ProvingJobConsumer, Pr this.promises.get(id)!.resolve(result); this.completedJobNotifications.push(id); - this.instrumentation.incRejectedJobs(item.type); + if (aborted) { + this.instrumentation.incAbortedJobs(item.type); + } else { + this.instrumentation.incRejectedJobs(item.type); + } if (info) { const duration = this.msTimeSource() - info.startedAt; 
this.instrumentation.recordJobDuration(item.type, duration); diff --git a/yarn-project/prover-client/src/proving_broker/proving_broker_instrumentation.ts b/yarn-project/prover-client/src/proving_broker/proving_broker_instrumentation.ts index 7bcd5d50c780..f559c247cd29 100644 --- a/yarn-project/prover-client/src/proving_broker/proving_broker_instrumentation.ts +++ b/yarn-project/prover-client/src/proving_broker/proving_broker_instrumentation.ts @@ -18,6 +18,7 @@ export class ProvingBrokerInstrumentation { private activeJobs: ObservableGauge; private resolvedJobs: UpDownCounter; private rejectedJobs: UpDownCounter; + private abortedJobs: UpDownCounter; private timedOutJobs: UpDownCounter; private cachedJobs: UpDownCounter; private totalJobs: UpDownCounter; @@ -39,6 +40,8 @@ export class ProvingBrokerInstrumentation { this.rejectedJobs = createUpDownCounterWithDefault(meter, Metrics.PROVING_QUEUE_REJECTED_JOBS, provingJobAttrs); + this.abortedJobs = createUpDownCounterWithDefault(meter, Metrics.PROVING_QUEUE_ABORTED_JOBS, provingJobAttrs); + this.retriedJobs = createUpDownCounterWithDefault(meter, Metrics.PROVING_QUEUE_RETRIED_JOBS, provingJobAttrs); this.timedOutJobs = createUpDownCounterWithDefault(meter, Metrics.PROVING_QUEUE_TIMED_OUT_JOBS, provingJobAttrs); @@ -72,6 +75,12 @@ export class ProvingBrokerInstrumentation { }); } + incAbortedJobs(proofType: ProvingRequestType) { + this.abortedJobs.add(1, { + [Attributes.PROVING_JOB_TYPE]: ProvingRequestType[proofType], + }); + } + incRetriedJobs(proofType: ProvingRequestType) { this.retriedJobs.add(1, { [Attributes.PROVING_JOB_TYPE]: ProvingRequestType[proofType], diff --git a/yarn-project/scripts/replay_failed_l1_tx.mjs b/yarn-project/scripts/replay_failed_l1_tx.mjs new file mode 100644 index 000000000000..54c21032cfb8 --- /dev/null +++ b/yarn-project/scripts/replay_failed_l1_tx.mjs @@ -0,0 +1,145 @@ +#!/usr/bin/env node +/** + * This script fetches a failed L1 transaction from the L1TxFailedStore and re-simulates 
it. + * Supports GCS, S3, R2, and local file paths. + * Usage: node scripts/replay_failed_l1_tx.mjs gs://bucket/path/simulation/0xabc123.json [--rpc-url URL] + */ +import { createLogger } from '@aztec/foundation/log'; +import { ErrorsAbi, RollupAbi } from '@aztec/l1-artifacts'; +import { createReadOnlyFileStore } from '@aztec/stdlib/file-store'; + +import { createPublicClient, decodeErrorResult, http } from 'viem'; + +const logger = createLogger('script:replay_failed_l1_tx'); + +// Parse arguments +const args = process.argv.slice(2); +let urlStr; +let rpcUrl; + +for (let i = 0; i < args.length; i++) { + if (args[i] === '--rpc-url' && args[i + 1]) { + rpcUrl = args[++i]; + } else if (!urlStr) { + urlStr = args[i]; + } +} + +if (!urlStr) { + logger.error('Usage: node scripts/replay_failed_l1_tx.mjs [--rpc-url URL]'); + logger.error('Supported URIs: gs://bucket/path, s3://bucket/path, file:///path, https://host/path'); + logger.error('Example: node scripts/replay_failed_l1_tx.mjs gs://my-bucket/failed-l1-txs/simulation/0xabc.json'); + process.exit(1); +} + +// Fetch the failed tx data from the store +const store = await createReadOnlyFileStore(urlStr, logger); +if (!store) { + logger.error(`Failed to create file store from: ${urlStr}`); + process.exit(1); +} + +logger.info(`Fetching failed L1 tx from: ${urlStr}`); + +let failedTx; +try { + const data = await store.read(urlStr); + failedTx = JSON.parse(data.toString()); +} catch (err) { + logger.error(`Failed to fetch tx from store: ${err.message}`); + process.exit(1); +} + +logger.info(`Loaded failed L1 tx:`, { + id: failedTx.id, + failureType: failedTx.failureType, + actions: failedTx.context.actions, + timestamp: new Date(failedTx.timestamp).toISOString(), + error: failedTx.error.message, +}); + +// Create a client to re-simulate +const defaultRpcUrl = process.env.ETHEREUM_HOST || 'http://localhost:8545'; +const effectiveRpcUrl = rpcUrl || defaultRpcUrl; +logger.info(`Using RPC URL: ${effectiveRpcUrl}`); + +// Try 
to detect chain from RPC +const client = createPublicClient({ + transport: http(effectiveRpcUrl), +}); + +const chainId = await client.getChainId(); +logger.info(`Connected to chain ID: ${chainId}`); + +// Re-simulate the transaction +logger.info(`Re-simulating transaction...`); +logger.info(`To: ${failedTx.request.to}`); +logger.info(`Data: ${failedTx.request.data.slice(0, 66)}... (${failedTx.request.data.length} chars)`); +if (failedTx.l1BlockNumber) { + logger.info(`L1 block number: ${failedTx.l1BlockNumber}`); +} + +const blockOverrides = failedTx.l1BlockNumber ? { blockOverrides: { number: BigInt(failedTx.l1BlockNumber) } } : {}; + +try { + // Try using eth_simulateV1 via simulateBlocks + const result = await client.simulateBlocks({ + validation: false, + blocks: [ + { + ...blockOverrides, + calls: [ + { + to: failedTx.request.to, + data: failedTx.request.data, + }, + ], + }, + ], + }); + + if (result[0].calls[0].status === 'failure') { + logger.error(`Simulation failed as expected:`); + const errorData = result[0].calls[0].data; + logger.error(`Raw error data: ${errorData}`); + + // Try to decode the error + const abis = [...ErrorsAbi, ...RollupAbi]; + try { + const decoded = decodeErrorResult({ abi: abis, data: errorData }); + logger.error(`Decoded error: ${decoded.errorName}(${decoded.args?.join(', ')})`); + } catch (decodeErr) { + logger.warn(`Could not decode error: ${decodeErr.message}`); + } + } else { + logger.info(`Simulation succeeded! 
The issue may have been resolved.`); + logger.info(`Gas used: ${result[0].gasUsed}`); + } +} catch (err) { + logger.error(`Simulation threw an error: ${err.message}`); + + // Try to extract more details + if (err.cause) { + logger.error(`Cause: ${err.cause.message || err.cause}`); + } +} + +// Print summary +logger.info(`\n--- Summary ---`); +logger.info(`Failed tx ID: ${failedTx.id}`); +logger.info(`Failure type: ${failedTx.failureType}`); +logger.info(`Actions: ${failedTx.context.actions.join(', ')}`); +if (failedTx.context.checkpointNumber) { + logger.info(`Checkpoint: ${failedTx.context.checkpointNumber}`); +} +if (failedTx.context.slot) { + logger.info(`Slot: ${failedTx.context.slot}`); +} +logger.info(`Original error: ${failedTx.error.message}`); +if (failedTx.error.name) { + logger.info(`Error name: ${failedTx.error.name}`); +} +if (failedTx.receipt) { + logger.info(`Receipt tx hash: ${failedTx.receipt.transactionHash}`); + logger.info(`Receipt block: ${failedTx.receipt.blockNumber}`); +} diff --git a/yarn-project/scripts/run_test.sh b/yarn-project/scripts/run_test.sh index 437395d26c10..e71c3a2d1bfc 100755 --- a/yarn-project/scripts/run_test.sh +++ b/yarn-project/scripts/run_test.sh @@ -14,6 +14,7 @@ cd ../$dir export RAYON_NUM_THREADS=1 export TOKIO_WORKER_THREADS=1 +export UV_THREADPOOL_SIZE=${UV_THREADPOOL_SIZE:-8} export HARDWARE_CONCURRENCY=${CPUS:-16} export LOG_LEVEL=${LOG_LEVEL:-info} exec node --no-warnings --experimental-vm-modules --loader @swc-node/register \ diff --git a/yarn-project/sequencer-client/src/publisher/config.ts b/yarn-project/sequencer-client/src/publisher/config.ts index 50d4cd8ff16b..ba250c31f0d1 100644 --- a/yarn-project/sequencer-client/src/publisher/config.ts +++ b/yarn-project/sequencer-client/src/publisher/config.ts @@ -48,6 +48,8 @@ export type PublisherConfig = L1TxUtilsConfig & fishermanMode?: boolean; /** Address of the forwarder contract to wrap all L1 transactions through (for testing purposes only) */ 
publisherForwarderAddress?: EthAddress; + /** Store for failed L1 transaction inputs (test networks only). Format: gs://bucket/path */ + l1TxFailedStore?: string; }; export type ProverPublisherConfig = L1TxUtilsConfig & @@ -62,6 +64,8 @@ export type SequencerPublisherConfig = L1TxUtilsConfig & fishermanMode?: boolean; sequencerPublisherAllowInvalidStates?: boolean; sequencerPublisherForwarderAddress?: EthAddress; + /** Store for failed L1 transaction inputs (test networks only). Format: gs://bucket/path */ + l1TxFailedStore?: string; }; export function getPublisherConfigFromProverConfig(config: ProverPublisherConfig): PublisherConfig { @@ -77,6 +81,7 @@ export function getPublisherConfigFromSequencerConfig(config: SequencerPublisher ...config, publisherAllowInvalidStates: config.sequencerPublisherAllowInvalidStates, publisherForwarderAddress: config.sequencerPublisherForwarderAddress, + l1TxFailedStore: config.l1TxFailedStore, }; } @@ -133,6 +138,10 @@ export const sequencerPublisherConfigMappings: ConfigMappingsType (val ? EthAddress.fromString(val) : undefined), }, + l1TxFailedStore: { + env: 'L1_TX_FAILED_STORE', + description: 'Store for failed L1 transaction inputs (test networks only). 
Format: gs://bucket/path', + }, }; export const proverPublisherConfigMappings: ConfigMappingsType = { diff --git a/yarn-project/sequencer-client/src/publisher/index.ts b/yarn-project/sequencer-client/src/publisher/index.ts index 08f9a6e5ca08..42e67e527a0e 100644 --- a/yarn-project/sequencer-client/src/publisher/index.ts +++ b/yarn-project/sequencer-client/src/publisher/index.ts @@ -3,3 +3,6 @@ export { SequencerPublisherFactory } from './sequencer-publisher-factory.js'; // Used for tests export { SequencerPublisherMetrics } from './sequencer-publisher-metrics.js'; + +// Failed L1 tx store (optional, for test networks) +export { type FailedL1Tx, type FailedL1TxUri, type L1TxFailedStore } from './l1_tx_failed_store/index.js'; diff --git a/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/factory.ts b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/factory.ts new file mode 100644 index 000000000000..83c179c32a6c --- /dev/null +++ b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/factory.ts @@ -0,0 +1,32 @@ +import { type Logger, createLogger } from '@aztec/foundation/log'; +import { createFileStore } from '@aztec/stdlib/file-store'; + +import type { L1TxFailedStore } from './failed_tx_store.js'; +import { FileStoreL1TxFailedStore } from './file_store_failed_tx_store.js'; + +/** + * Creates an L1TxFailedStore from a config string. + * Supports any backend that FileStore supports (GCS, S3, R2, local filesystem). + * @param config - Config string (e.g., 'gs://bucket/path', 's3://bucket/path', 'file:///path'). If undefined, returns undefined. + * @param logger - Optional logger. + * @returns The store instance, or undefined if config is not provided. 
+ */ +export async function createL1TxFailedStore( + config: string | undefined, + logger: Logger = createLogger('sequencer:l1-tx-failed-store'), +): Promise { + if (!config) { + return undefined; + } + + const fileStore = await createFileStore(config, logger); + if (!fileStore) { + throw new Error( + `Failed to create file store from config: '${config}'. ` + + `Supported formats: 'gs://bucket/path', 's3://bucket/path', 'file:///path'.`, + ); + } + + logger.info(`Created L1 tx failed store`, { config }); + return new FileStoreL1TxFailedStore(fileStore, logger); +} diff --git a/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/failed_tx_store.ts b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/failed_tx_store.ts new file mode 100644 index 000000000000..d6f7d5f49f2c --- /dev/null +++ b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/failed_tx_store.ts @@ -0,0 +1,55 @@ +import type { Hex } from 'viem'; + +/** URI pointing to a stored failed L1 transaction. */ +export type FailedL1TxUri = string & { __brand: 'FailedL1TxUri' }; + +/** A failed L1 transaction captured for debugging and replay. */ +export type FailedL1Tx = { + /** Tx hash (for reverts) or keccak256(request.data) (for simulation/send failures). */ + id: Hex; + /** Unix timestamp (ms) when failure occurred. */ + timestamp: number; + /** Whether the failure was during simulation or after sending. */ + failureType: 'simulation' | 'revert' | 'send-error'; + /** The actual L1 transaction for replay (multicall-encoded for bundled txs). */ + request: { + to: Hex; + data: Hex; + value?: string; // bigint as string + }; + /** Raw blob data as hex for replay. */ + blobData?: Hex[]; + /** L1 block number at time of failure (simulation target or receipt block). */ + l1BlockNumber: string; // bigint as string + /** Receipt info (present only for on-chain reverts). 
*/ + receipt?: { + transactionHash: Hex; + blockNumber: string; // bigint as string + gasUsed: string; // bigint as string + status: 'reverted'; + }; + /** Error information. */ + error: { + message: string; + /** Decoded error name (e.g., 'Rollup__InvalidProposer'). */ + name?: string; + }; + /** Context metadata. */ + context: { + /** Actions involved (e.g., ['propose', 'governance-signal']). */ + actions: string[]; + /** Individual request data for each action (metadata, not used for replay). */ + requests?: Array<{ action: string; to: Hex; data: Hex }>; + checkpointNumber?: number; + slot?: number; + sender: Hex; + }; +}; + +/** Store for failed L1 transactions for debugging purposes. */ +export interface L1TxFailedStore { + /** Saves a failed transaction and returns its URI. */ + saveFailedTx(tx: FailedL1Tx): Promise; + /** Retrieves a failed transaction by its URI. */ + getFailedTx(uri: FailedL1TxUri): Promise; +} diff --git a/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/file_store_failed_tx_store.ts b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/file_store_failed_tx_store.ts new file mode 100644 index 000000000000..3c74cf23a8a7 --- /dev/null +++ b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/file_store_failed_tx_store.ts @@ -0,0 +1,46 @@ +import { type Logger, createLogger } from '@aztec/foundation/log'; +import type { FileStore } from '@aztec/stdlib/file-store'; + +import type { FailedL1Tx, FailedL1TxUri, L1TxFailedStore } from './failed_tx_store.js'; + +/** + * L1TxFailedStore implementation using the FileStore abstraction. + * Supports any backend that FileStore supports (GCS, S3, R2, local filesystem). + */ +export class FileStoreL1TxFailedStore implements L1TxFailedStore { + private readonly log: Logger; + + constructor( + private readonly fileStore: FileStore, + logger?: Logger, + ) { + this.log = logger ?? 
createLogger('sequencer:l1-tx-failed-store'); + } + + public async saveFailedTx(tx: FailedL1Tx): Promise { + const prefix = tx.receipt ? 'tx' : 'data'; + const path = `${tx.failureType}/${prefix}-${tx.id}.json`; + const json = JSON.stringify(tx, null, 2); + + const uri = await this.fileStore.save(path, Buffer.from(json), { + metadata: { + 'content-type': 'application/json', + actions: tx.context.actions.join(','), + 'failure-type': tx.failureType, + }, + }); + + this.log.info(`Saved failed L1 tx to ${uri}`, { + id: tx.id, + failureType: tx.failureType, + actions: tx.context.actions.join(','), + }); + + return uri as FailedL1TxUri; + } + + public async getFailedTx(uri: FailedL1TxUri): Promise { + const data = await this.fileStore.read(uri); + return JSON.parse(data.toString()) as FailedL1Tx; + } +} diff --git a/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/index.ts b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/index.ts new file mode 100644 index 000000000000..930e34977b8e --- /dev/null +++ b/yarn-project/sequencer-client/src/publisher/l1_tx_failed_store/index.ts @@ -0,0 +1,3 @@ +export { type FailedL1Tx, type FailedL1TxUri, type L1TxFailedStore } from './failed_tx_store.js'; +export { createL1TxFailedStore } from './factory.js'; +export { FileStoreL1TxFailedStore } from './file_store_failed_tx_store.js'; diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts index bbc0335d29c8..c147e81facda 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts @@ -45,9 +45,19 @@ import type { CheckpointHeader } from '@aztec/stdlib/rollup'; import type { L1PublishCheckpointStats } from '@aztec/stdlib/stats'; import { type TelemetryClient, type Tracer, getTelemetryClient, trackSpan } from '@aztec/telemetry-client'; -import { type StateOverride, type 
TransactionReceipt, type TypedDataDefinition, encodeFunctionData, toHex } from 'viem'; +import { + type Hex, + type StateOverride, + type TransactionReceipt, + type TypedDataDefinition, + encodeFunctionData, + keccak256, + multicall3Abi, + toHex, +} from 'viem'; import type { SequencerPublisherConfig } from './config.js'; +import { type FailedL1Tx, type L1TxFailedStore, createL1TxFailedStore } from './l1_tx_failed_store/index.js'; import { SequencerPublisherMetrics } from './sequencer-publisher-metrics.js'; /** Arguments to the process method of the rollup contract */ @@ -109,6 +119,7 @@ export class SequencerPublisher { private interrupted = false; private metrics: SequencerPublisherMetrics; public epochCache: EpochCache; + private failedTxStore?: Promise; protected governanceLog = createLogger('sequencer:publisher:governance'); protected slashingLog = createLogger('sequencer:publisher:slashing'); @@ -149,7 +160,7 @@ export class SequencerPublisher { protected requests: RequestWithExpiry[] = []; constructor( - private config: Pick & + private config: Pick & Pick & { l1ChainId: number }, deps: { telemetry?: TelemetryClient; @@ -205,6 +216,31 @@ export class SequencerPublisher { this.rollupContract, createLogger('sequencer:publisher:price-oracle'), ); + + // Initialize failed L1 tx store (optional, for test networks) + this.failedTxStore = createL1TxFailedStore(config.l1TxFailedStore, this.log); + } + + /** + * Backs up a failed L1 transaction to the configured store for debugging. + * Does nothing if no store is configured. 
+ */ + private backupFailedTx(failedTx: Omit): void { + if (!this.failedTxStore) { + return; + } + + const tx: FailedL1Tx = { + ...failedTx, + timestamp: Date.now(), + }; + + // Fire and forget - don't block on backup + void this.failedTxStore + .then(store => store?.saveFailedTx(tx)) + .catch(err => { + this.log.warn(`Failed to backup failed L1 tx to store`, err); + }); } public getRollupContract(): RollupContract { @@ -386,6 +422,21 @@ export class SequencerPublisher { validRequests.sort((a, b) => compareActions(a.action, b.action)); try { + // Capture context for failed tx backup before sending + const l1BlockNumber = await this.l1TxUtils.getBlockNumber(); + const multicallData = encodeFunctionData({ + abi: multicall3Abi, + functionName: 'aggregate3', + args: [ + validRequests.map(r => ({ + target: r.request.to!, + callData: r.request.data!, + allowFailure: true, + })), + ], + }); + const blobDataHex = blobConfig?.blobs?.map(b => toHex(b)) as Hex[] | undefined; + this.log.debug('Forwarding transactions', { validRequests: validRequests.map(request => request.action), txConfig, @@ -398,7 +449,12 @@ export class SequencerPublisher { this.rollupContract.address, this.log, ); - const { successfulActions = [], failedActions = [] } = this.callbackBundledTransactions(validRequests, result); + const txContext = { multicallData, blobData: blobDataHex, l1BlockNumber }; + const { successfulActions = [], failedActions = [] } = this.callbackBundledTransactions( + validRequests, + result, + txContext, + ); return { result, expiredActions, sentActions: validActions, successfulActions, failedActions }; } catch (err) { const viemError = formatViemError(err); @@ -418,11 +474,25 @@ export class SequencerPublisher { private callbackBundledTransactions( requests: RequestWithExpiry[], - result?: { receipt: TransactionReceipt } | FormattedViemError, + result: { receipt: TransactionReceipt; errorMsg?: string } | FormattedViemError | undefined, + txContext: { multicallData: Hex; 
blobData?: Hex[]; l1BlockNumber: bigint }, ) { const actionsListStr = requests.map(r => r.action).join(', '); if (result instanceof FormattedViemError) { this.log.error(`Failed to publish bundled transactions (${actionsListStr})`, result); + this.backupFailedTx({ + id: keccak256(txContext.multicallData), + failureType: 'send-error', + request: { to: MULTI_CALL_3_ADDRESS, data: txContext.multicallData }, + blobData: txContext.blobData, + l1BlockNumber: txContext.l1BlockNumber.toString(), + error: { message: result.message, name: result.name }, + context: { + actions: requests.map(r => r.action), + requests: requests.map(r => ({ action: r.action, to: r.request.to! as Hex, data: r.request.data! })), + sender: this.getSenderAddress().toString(), + }, + }); return { failedActions: requests.map(r => r.action) }; } else { this.log.verbose(`Published bundled transactions (${actionsListStr})`, { result, requests }); @@ -435,6 +505,30 @@ export class SequencerPublisher { failedActions.push(request.action); } } + // Single backup for the whole reverted tx + if (failedActions.length > 0 && result?.receipt?.status === 'reverted') { + this.backupFailedTx({ + id: result.receipt.transactionHash, + failureType: 'revert', + request: { to: MULTI_CALL_3_ADDRESS, data: txContext.multicallData }, + blobData: txContext.blobData, + l1BlockNumber: result.receipt.blockNumber.toString(), + receipt: { + transactionHash: result.receipt.transactionHash, + blockNumber: result.receipt.blockNumber.toString(), + gasUsed: (result.receipt.gasUsed ?? 0n).toString(), + status: 'reverted', + }, + error: { message: result.errorMsg ?? 'Transaction reverted' }, + context: { + actions: failedActions, + requests: requests + .filter(r => failedActions.includes(r.action)) + .map(r => ({ action: r.action, to: r.request.to! as Hex, data: r.request.data! 
})), + sender: this.getSenderAddress().toString(), + }, + }); + } return { successfulActions, failedActions }; } } @@ -546,6 +640,8 @@ export class SequencerPublisher { const request = this.buildInvalidateCheckpointRequest(validationResult); this.log.debug(`Simulating invalidate checkpoint ${checkpointNumber}`, { ...logData, request }); + const l1BlockNumber = await this.l1TxUtils.getBlockNumber(); + try { const { gasUsed } = await this.l1TxUtils.simulate( request, @@ -597,6 +693,18 @@ export class SequencerPublisher { // Otherwise, throw. We cannot build the next checkpoint if we cannot invalidate the previous one. this.log.error(`Simulation for invalidate checkpoint ${checkpointNumber} failed`, viemError, logData); + this.backupFailedTx({ + id: keccak256(request.data!), + failureType: 'simulation', + request: { to: request.to!, data: request.data!, value: request.value?.toString() }, + l1BlockNumber: l1BlockNumber.toString(), + error: { message: viemError.message, name: viemError.name }, + context: { + actions: [`invalidate-${reason}`], + checkpointNumber, + sender: this.getSenderAddress().toString(), + }, + }); throw new Error(`Failed to simulate invalidate checkpoint ${checkpointNumber}`, { cause: viemError }); } } @@ -744,11 +852,26 @@ export class SequencerPublisher { lastValidL2Slot: slotNumber, }); + const l1BlockNumber = await this.l1TxUtils.getBlockNumber(); + try { await this.l1TxUtils.simulate(request, { time: timestamp }, [], mergeAbis([request.abi ?? 
[], ErrorsAbi])); this.log.debug(`Simulation for ${action} at slot ${slotNumber} succeeded`, { request }); } catch (err) { - this.log.error(`Failed simulation for ${action} at slot ${slotNumber} (enqueuing the action anyway)`, err); + const viemError = formatViemError(err); + this.log.error(`Failed simulation for ${action} at slot ${slotNumber} (enqueuing the action anyway)`, viemError); + this.backupFailedTx({ + id: keccak256(request.data!), + failureType: 'simulation', + request: { to: request.to!, data: request.data!, value: request.value?.toString() }, + l1BlockNumber: l1BlockNumber.toString(), + error: { message: viemError.message, name: viemError.name }, + context: { + actions: [action], + slot: slotNumber, + sender: this.getSenderAddress().toString(), + }, + }); // Yes, we enqueue the request anyway, in case there was a bug with the simulation itself } @@ -1044,6 +1167,8 @@ export class SequencerPublisher { this.log.debug(`Simulating ${action} for slot ${slotNumber}`, logData); + const l1BlockNumber = await this.l1TxUtils.getBlockNumber(); + let gasUsed: bigint; const simulateAbi = mergeAbis([request.abi ?? 
[], ErrorsAbi]); try { @@ -1053,6 +1178,19 @@ export class SequencerPublisher { const viemError = formatViemError(err, simulateAbi); this.log.error(`Simulation for ${action} at ${slotNumber} failed`, viemError, logData); + this.backupFailedTx({ + id: keccak256(request.data!), + failureType: 'simulation', + request: { to: request.to!, data: request.data!, value: request.value?.toString() }, + l1BlockNumber: l1BlockNumber.toString(), + error: { message: viemError.message, name: viemError.name }, + context: { + actions: [action], + slot: slotNumber, + sender: this.getSenderAddress().toString(), + }, + }); + return false; } @@ -1136,9 +1274,27 @@ export class SequencerPublisher { kzg, }, ) - .catch(err => { - const { message, metaMessages } = formatViemError(err); - this.log.error(`Failed to validate blobs`, message, { metaMessages }); + .catch(async err => { + const viemError = formatViemError(err); + this.log.error(`Failed to validate blobs`, viemError.message, { metaMessages: viemError.metaMessages }); + const validateBlobsData = encodeFunctionData({ + abi: RollupAbi, + functionName: 'validateBlobs', + args: [blobInput], + }); + const l1BlockNumber = await this.l1TxUtils.getBlockNumber(); + this.backupFailedTx({ + id: keccak256(validateBlobsData), + failureType: 'simulation', + request: { to: this.rollupContract.address as Hex, data: validateBlobsData }, + blobData: encodedData.blobs.map(b => toHex(b.data)) as Hex[], + l1BlockNumber: l1BlockNumber.toString(), + error: { message: viemError.message, name: viemError.name }, + context: { + actions: ['validate-blobs'], + sender: this.getSenderAddress().toString(), + }, + }); throw new Error('Failed to validate blobs'); }); } @@ -1217,6 +1373,8 @@ export class SequencerPublisher { }); } + const l1BlockNumber = await this.l1TxUtils.getBlockNumber(); + const simulationResult = await this.l1TxUtils .simulate( { @@ -1250,6 +1408,18 @@ export class SequencerPublisher { }; } this.log.error(`Failed to simulate propose tx`, 
viemError); + this.backupFailedTx({ + id: keccak256(rollupData), + failureType: 'simulation', + request: { to: this.rollupContract.address, data: rollupData }, + l1BlockNumber: l1BlockNumber.toString(), + error: { message: viemError.message, name: viemError.name }, + context: { + actions: ['propose'], + slot: Number(args[0].header.slotNumber), + sender: this.getSenderAddress().toString(), + }, + }); throw err; }); diff --git a/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator.ts b/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator.ts index 41b7ca70dd14..2bb70df15f0e 100644 --- a/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator.ts +++ b/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator.ts @@ -1,4 +1,4 @@ -import { type Logger, type LoggerBindings, createLogger, logLevel } from '@aztec/foundation/log'; +import { type Logger, type LoggerBindings, createLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; import { type CancellationToken, avmSimulate, cancelSimulation, createCancellationToken } from '@aztec/native'; import { ProtocolContractsList } from '@aztec/protocol-contracts'; @@ -100,8 +100,7 @@ export class CppPublicTxSimulator extends PublicTxSimulator implements PublicTxS inputBuffer, contractProvider, wsCppHandle, - logLevel, - // TODO: re-enable logging + this.log.level, undefined, this.cancellationToken, ); diff --git a/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator_with_hinted_dbs.ts b/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator_with_hinted_dbs.ts index f08f753edd47..7b979ffde784 100644 --- a/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator_with_hinted_dbs.ts +++ b/yarn-project/simulator/src/public/public_tx_simulator/cpp_public_tx_simulator_with_hinted_dbs.ts @@ -1,4 +1,4 @@ -import { type Logger, type LoggerBindings, 
createLogger, logLevel } from '@aztec/foundation/log'; +import { type Logger, type LoggerBindings, createLogger } from '@aztec/foundation/log'; import { avmSimulateWithHintedDbs } from '@aztec/native'; import { AvmCircuitInputs, @@ -75,7 +75,7 @@ export class CppPublicTxSimulatorHintedDbs extends PublicTxSimulator implements let resultBuffer: Buffer; try { - resultBuffer = await avmSimulateWithHintedDbs(inputBuffer, logLevel); + resultBuffer = await avmSimulateWithHintedDbs(inputBuffer, this.log.level); } catch (error: any) { throw new SimulationError(`C++ hinted simulation failed: ${error.message}`, []); } diff --git a/yarn-project/telemetry-client/src/metrics.ts b/yarn-project/telemetry-client/src/metrics.ts index c195ce888502..d30a4185e15b 100644 --- a/yarn-project/telemetry-client/src/metrics.ts +++ b/yarn-project/telemetry-client/src/metrics.ts @@ -1048,6 +1048,11 @@ export const PROVING_QUEUE_REJECTED_JOBS: MetricDefinition = { description: 'Number of rejected proving jobs', valueType: ValueType.INT, }; +export const PROVING_QUEUE_ABORTED_JOBS: MetricDefinition = { + name: 'aztec.proving_queue.aborted_jobs_count', + description: 'Number of aborted proving jobs', + valueType: ValueType.INT, +}; export const PROVING_QUEUE_RETRIED_JOBS: MetricDefinition = { name: 'aztec.proving_queue.retried_jobs_count', description: 'Number of retried proving jobs', diff --git a/yarn-project/txe/src/state_machine/dummy_p2p_client.ts b/yarn-project/txe/src/state_machine/dummy_p2p_client.ts index 1cb90ade171f..4958f8773169 100644 --- a/yarn-project/txe/src/state_machine/dummy_p2p_client.ts +++ b/yarn-project/txe/src/state_machine/dummy_p2p_client.ts @@ -21,7 +21,7 @@ import type { BlockProposal, CheckpointAttestation, CheckpointProposal, TopicTyp import type { BlockHeader, Tx, TxHash } from '@aztec/stdlib/tx'; export class DummyP2P implements P2P { - public validate(_txs: Tx[]): Promise { + public validateTxsReceivedInBlockProposal(_txs: Tx[]): Promise { return Promise.resolve(); 
} diff --git a/yarn-project/validator-client/src/checkpoint_builder.test.ts b/yarn-project/validator-client/src/checkpoint_builder.test.ts index 38945d92aa4e..abf782d6b8ea 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.test.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.test.ts @@ -87,7 +87,7 @@ describe('CheckpointBuilder', () => { lightweightCheckpointBuilder.getBlockCount.mockReturnValue(0); const expectedBlock = await L2Block.random(blockNumber); - lightweightCheckpointBuilder.addBlock.mockResolvedValue(expectedBlock); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); processor.process.mockResolvedValue([ [{ hash: Fr.random(), gasUsed: { publicGas: Gas.empty() } } as unknown as ProcessedTx], @@ -110,7 +110,7 @@ describe('CheckpointBuilder', () => { lightweightCheckpointBuilder.getBlockCount.mockReturnValue(0); const expectedBlock = await L2Block.random(blockNumber, { txsPerBlock: 0 }); - lightweightCheckpointBuilder.addBlock.mockResolvedValue(expectedBlock); + lightweightCheckpointBuilder.addBlock.mockResolvedValue({ block: expectedBlock, timings: {} }); // No transactions processed processor.process.mockResolvedValue([ diff --git a/yarn-project/validator-client/src/checkpoint_builder.ts b/yarn-project/validator-client/src/checkpoint_builder.ts index 04ffeaf33fd8..c8ec5c5671fe 100644 --- a/yarn-project/validator-client/src/checkpoint_builder.ts +++ b/yarn-project/validator-client/src/checkpoint_builder.ts @@ -4,7 +4,7 @@ import { Fr } from '@aztec/foundation/curves/bn254'; import { type Logger, type LoggerBindings, createLogger } from '@aztec/foundation/log'; import { bufferToHex } from '@aztec/foundation/string'; import { DateProvider, elapsed } from '@aztec/foundation/timer'; -import { getDefaultAllowedSetupFunctions } from '@aztec/p2p/msg_validators'; +import { createTxValidatorForBlockBuilding, getDefaultAllowedSetupFunctions } from '@aztec/p2p/msg_validators'; import { 
LightweightCheckpointBuilder } from '@aztec/prover-client/light'; import { GuardedMerkleTreeOperations, @@ -33,8 +33,6 @@ import { MerkleTreeId } from '@aztec/stdlib/trees'; import { type CheckpointGlobalVariables, GlobalVariables, StateReference, Tx } from '@aztec/stdlib/tx'; import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client'; -import { createValidatorForBlockBuilding } from './tx_validator/tx_validator_factory.js'; - // Re-export for backward compatibility export type { BuildBlockInCheckpointResult } from '@aztec/stdlib/interfaces/server'; @@ -107,7 +105,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { } // Add block to checkpoint - const block = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { + const { block } = await this.checkpointBuilder.addBlock(globalVariables, processedTxs, { expectedEndState: opts.expectedEndState, }); @@ -178,7 +176,7 @@ export class CheckpointBuilder implements ICheckpointBlockBuilder { this.debugLogStore, ); - const validator = createValidatorForBlockBuilding( + const validator = createTxValidatorForBlockBuilding( fork, this.contractDataSource, globalVariables, diff --git a/yarn-project/validator-client/src/index.ts b/yarn-project/validator-client/src/index.ts index 32621c149525..e1bb317f9f81 100644 --- a/yarn-project/validator-client/src/index.ts +++ b/yarn-project/validator-client/src/index.ts @@ -4,4 +4,3 @@ export * from './config.js'; export * from './factory.js'; export * from './validator.js'; export * from './key_store/index.js'; -export * from './tx_validator/index.js'; diff --git a/yarn-project/validator-client/src/tx_validator/index.ts b/yarn-project/validator-client/src/tx_validator/index.ts deleted file mode 100644 index 5d460503cdbc..000000000000 --- a/yarn-project/validator-client/src/tx_validator/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export * from './nullifier_cache.js'; -export * from './tx_validator_factory.js'; diff --git 
a/yarn-project/validator-client/src/tx_validator/tx_validator_factory.ts b/yarn-project/validator-client/src/tx_validator/tx_validator_factory.ts deleted file mode 100644 index 48d0cf3d2a38..000000000000 --- a/yarn-project/validator-client/src/tx_validator/tx_validator_factory.ts +++ /dev/null @@ -1,154 +0,0 @@ -import { BlockNumber } from '@aztec/foundation/branded-types'; -import { Fr } from '@aztec/foundation/curves/bn254'; -import type { LoggerBindings } from '@aztec/foundation/log'; -import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree'; -import { - AggregateTxValidator, - ArchiveCache, - BlockHeaderTxValidator, - DataTxValidator, - DoubleSpendTxValidator, - GasTxValidator, - MetadataTxValidator, - PhasesTxValidator, - SizeTxValidator, - TimestampTxValidator, - TxPermittedValidator, - TxProofValidator, -} from '@aztec/p2p'; -import { ProtocolContractAddress, protocolContractsHash } from '@aztec/protocol-contracts'; -import type { ContractDataSource } from '@aztec/stdlib/contract'; -import type { GasFees } from '@aztec/stdlib/gas'; -import type { - AllowedElement, - ClientProtocolCircuitVerifier, - MerkleTreeReadOperations, - PublicProcessorValidator, -} from '@aztec/stdlib/interfaces/server'; -import { DatabasePublicStateSource, type PublicStateSource } from '@aztec/stdlib/trees'; -import { GlobalVariables, type Tx, type TxValidator } from '@aztec/stdlib/tx'; -import type { UInt64 } from '@aztec/stdlib/types'; - -import { NullifierCache } from './nullifier_cache.js'; - -export function createValidatorForAcceptingTxs( - db: MerkleTreeReadOperations, - contractDataSource: ContractDataSource, - verifier: ClientProtocolCircuitVerifier | undefined, - { - l1ChainId, - rollupVersion, - setupAllowList, - gasFees, - skipFeeEnforcement, - timestamp, - blockNumber, - txsPermitted, - }: { - l1ChainId: number; - rollupVersion: number; - setupAllowList: AllowedElement[]; - gasFees: GasFees; - skipFeeEnforcement?: boolean; - timestamp: UInt64; - 
blockNumber: BlockNumber; - txsPermitted: boolean; - }, - bindings?: LoggerBindings, -): TxValidator { - const validators: TxValidator[] = [ - new TxPermittedValidator(txsPermitted, bindings), - new SizeTxValidator(bindings), - new DataTxValidator(bindings), - new MetadataTxValidator( - { - l1ChainId: new Fr(l1ChainId), - rollupVersion: new Fr(rollupVersion), - protocolContractsHash, - vkTreeRoot: getVKTreeRoot(), - }, - bindings, - ), - new TimestampTxValidator( - { - timestamp, - blockNumber, - }, - bindings, - ), - new DoubleSpendTxValidator(new NullifierCache(db), bindings), - new PhasesTxValidator(contractDataSource, setupAllowList, timestamp, bindings), - new BlockHeaderTxValidator(new ArchiveCache(db), bindings), - ]; - - if (!skipFeeEnforcement) { - validators.push( - new GasTxValidator(new DatabasePublicStateSource(db), ProtocolContractAddress.FeeJuice, gasFees, bindings), - ); - } - - if (verifier) { - validators.push(new TxProofValidator(verifier, bindings)); - } - - return new AggregateTxValidator(...validators); -} - -export function createValidatorForBlockBuilding( - db: MerkleTreeReadOperations, - contractDataSource: ContractDataSource, - globalVariables: GlobalVariables, - setupAllowList: AllowedElement[], - bindings?: LoggerBindings, -): PublicProcessorValidator { - const nullifierCache = new NullifierCache(db); - const archiveCache = new ArchiveCache(db); - const publicStateSource = new DatabasePublicStateSource(db); - - return { - preprocessValidator: preprocessValidator( - nullifierCache, - archiveCache, - publicStateSource, - contractDataSource, - globalVariables, - setupAllowList, - bindings, - ), - nullifierCache, - }; -} - -function preprocessValidator( - nullifierCache: NullifierCache, - archiveCache: ArchiveCache, - publicStateSource: PublicStateSource, - contractDataSource: ContractDataSource, - globalVariables: GlobalVariables, - setupAllowList: AllowedElement[], - bindings?: LoggerBindings, -): TxValidator { - // We don't include the 
TxProofValidator nor the DataTxValidator here because they are already checked by the time we get to block building. - return new AggregateTxValidator( - new MetadataTxValidator( - { - l1ChainId: globalVariables.chainId, - rollupVersion: globalVariables.version, - protocolContractsHash, - vkTreeRoot: getVKTreeRoot(), - }, - bindings, - ), - new TimestampTxValidator( - { - timestamp: globalVariables.timestamp, - blockNumber: globalVariables.blockNumber, - }, - bindings, - ), - new DoubleSpendTxValidator(nullifierCache, bindings), - new PhasesTxValidator(contractDataSource, setupAllowList, globalVariables.timestamp, bindings), - new GasTxValidator(publicStateSource, ProtocolContractAddress.FeeJuice, globalVariables.gasFees, bindings), - new BlockHeaderTxValidator(archiveCache, bindings), - ); -} diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index 7485939da4fc..7d9c4b975288 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -327,7 +327,6 @@ describe('ValidatorClient', () => { p2pClient.getTxStatus.mockResolvedValue('pending'); p2pClient.hasTxsInPool.mockImplementation(txHashes => Promise.resolve(times(txHashes.length, () => true))); - p2pClient.getTxsByHash.mockImplementation((txHashes: TxHash[]) => Promise.resolve(txHashes.map(makeTxFromHash))); txProvider.getTxsForBlockProposal.mockImplementation((proposal: BlockProposal) => Promise.resolve({