diff --git a/.envrc b/.envrc index c3cc25a7c4..14fa9e90a1 100644 --- a/.envrc +++ b/.envrc @@ -1,16 +1,6 @@ NBS_ONLY_LOAD_ENV_VARS=1 source env.sh -if command -v nix > /dev/null -then - export NIMBUS_NIX_ENV=1 - - cd installer/nix - # watch_file tells direnv that changes to any of the watched files - # should trigger a re-evalution of the environment - watch_file flake.nix - watch_file flake.lock - watch_file shell.nix - - mkdir -p .flake-profiles - eval "$(nix print-dev-env --profile ".flake-profiles/profile")" +if ! has nix_direnv_version || ! nix_direnv_version 3.0.6; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM=" fi +use flake diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5ad7a43709..db1abdc7c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -37,17 +37,17 @@ jobs: cpu: arm64 - os: windows cpu: amd64 - branch: [~, upstream/version-2-0] + branch: [~, upstream/version-2-2] exclude: - target: os: macos - branch: upstream/version-2-0 + branch: upstream/version-2-2 - target: os: windows - branch: upstream/version-2-0 + branch: upstream/version-2-2 include: - - branch: upstream/version-2-0 - branch-short: version-2-0 + - branch: upstream/version-2-2 + branch-short: version-2-2 nimflags-extra: --mm:refc - target: os: linux @@ -205,9 +205,14 @@ jobs: # The upload creates a combined report that gets posted as a comment on the PR # https://github.com/EnricoMi/publish-unit-test-result-action - name: Upload combined results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }} + # upload-artifact requires avoiding "/", because "To maintain file + # system agnostic behavior, these characters are intentionally not + # allowed to prevent potential problems with downloads on different + # file systems". However, GitHub Actions workflows do not support a + # usual assortment of string functions. 
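+ # For example (illustrative values only), an os "linux" / cpu "amd64" job
+ # building the upstream/version-2-2 branch would get the artifact name
+ # "Unit Test Results linux-amd64-version-2-2" from the expression below,
+ # keeping "/" out of the artifact name.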
+ name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch == 'upstream/version-2-2' && 'version-2-2' || matrix.branch }} path: build/*.xml devbuild: @@ -253,8 +258,8 @@ jobs: - name: Check copyright year if: ${{ !cancelled() }} && github.event_name == 'pull_request' run: | - excluded_files="config.yaml" - excluded_extensions="ans|bin|cfg|json|json\\.template|md|png|service|ssz|txt|lock|nix" + excluded_files="config.yaml|config.nims|beacon_chain.nimble" + excluded_extensions="ans|bin|cfg|yml|json|json\\.template|md|png|service|ssz|tpl|txt|lock|nix|gitignore|envrc" current_year=$(date +"%Y") problematic_files=() @@ -327,7 +332,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Event File path: ${{ github.event_path }} diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml deleted file mode 100644 index 397eb01a6f..0000000000 --- a/.github/workflows/cron.yml +++ /dev/null @@ -1,202 +0,0 @@ -# beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -name: Daily -on: - schedule: - - cron: "10 20 * * *" - workflow_dispatch: - #pull_request: - -jobs: - build: - strategy: - fail-fast: false - matrix: - target: - - os: linux - cpu: amd64 - - os: linux - cpu: i386 - - os: macos - cpu: amd64 - - os: windows - cpu: amd64 - branch: [upstream/version-2-0, upstream/devel] - include: - - branch: upstream/version-2-0 - branch-short: version-2-0 - - branch: upstream/devel - branch-short: devel - nimflags-extra: --mm:refc - - target: - os: linux - builder: ubuntu-20.04 - shell: bash - - target: - os: macos - builder: macos-13 - shell: bash - - target: - os: windows - builder: windows-2019 - shell: msys2 {0} - - defaults: - run: - shell: ${{ matrix.shell }} - - name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch-short }})' - runs-on: ${{ matrix.builder }} - continue-on-error: ${{ matrix.branch-short == 'devel' }} - steps: - - name: Checkout - if: ${{ github.event_name != 'pull_request' }} - uses: actions/checkout@v4 - with: - ref: unstable - - - name: Checkout (pull request) - if: ${{ github.event_name == 'pull_request' }} - uses: actions/checkout@v4 - - - name: Install build dependencies (Linux i386) - if: runner.os == 'Linux' && matrix.target.cpu == 'i386' - run: | - sudo dpkg --add-architecture i386 - sudo apt-get update -qq - sudo DEBIAN_FRONTEND='noninteractive' apt-get install \ - --no-install-recommends -yq gcc-multilib g++-multilib - mkdir -p external/bin - cat << EOF > external/bin/gcc - #!/bin/bash - exec $(which gcc) -m32 -mno-adx "\$@" - EOF - cat << EOF > external/bin/g++ - #!/bin/bash - exec $(which g++) -m32 -mno-adx "\$@" - EOF - chmod 755 external/bin/gcc external/bin/g++ - echo "${{ github.workspace }}/external/bin" >> $GITHUB_PATH - - - name: MSYS2 (Windows amd64) - if: runner.os == 'Windows' && matrix.target.cpu == 'amd64' - uses: msys2/setup-msys2@v2 - with: - path-type: inherit - install: >- - base-devel - git - mingw-w64-x86_64-toolchain - mingw-w64-x86_64-cmake - - - name: Restore Nim DLLs dependencies (Windows) from cache - if: runner.os == 'Windows' 
- id: windows-dlls-cache - uses: actions/cache@v4 - with: - path: external/dlls - key: 'dlls-${{ matrix.target.cpu }}' - - - name: Install DLLs dependencies (Windows) - if: > - steps.windows-dlls-cache.outputs.cache-hit != 'true' && - runner.os == 'Windows' - run: | - mkdir -p external - curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip - 7z x -y external/windeps.zip -oexternal/dlls - - - name: Path to cached dependencies (Windows) - if: > - runner.os == 'Windows' - run: | - echo "${{ github.workspace }}/external/dlls" >> $GITHUB_PATH - # for miniupnp that runs "wingenminiupnpcstrings.exe" from the current dir - echo "." >> $GITHUB_PATH - - - name: Install build dependencies (macOS) - if: runner.os == 'macOS' - run: | - brew install gnu-getopt - brew link --force gnu-getopt - - - name: Derive environment variables - run: | - if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then - PLATFORM=x64 - else - PLATFORM=x86 - fi - echo "PLATFORM=${PLATFORM}" >> $GITHUB_ENV - - # Stack usage test on recent enough gcc: - if [[ '${{ runner.os }}' == 'Linux' && '${{ matrix.target.cpu }}' == 'amd64' ]]; then - export NIMFLAGS="${NIMFLAGS} -d:limitStackUsage" - fi - - # libminiupnp / natpmp - if [[ '${{ runner.os }}' == 'Linux' && '${{ matrix.target.cpu }}' == 'i386' ]]; then - export CFLAGS="${CFLAGS} -m32 -mno-adx" - echo "CFLAGS=${CFLAGS}" >> $GITHUB_ENV - fi - - export NIMFLAGS="${NIMFLAGS} ${{ matrix.nimflags-extra }}" - echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV - - ncpu="" - make_cmd="make" - case '${{ runner.os }}' in - 'Linux') - ncpu=$(nproc) - ;; - 'macOS') - ncpu=$(sysctl -n hw.ncpu) - ;; - 'Windows') - ncpu=${NUMBER_OF_PROCESSORS} - make_cmd="mingw32-make" - ;; - esac - [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1 - echo "ncpu=${ncpu}" >> $GITHUB_ENV - echo "make_cmd=${make_cmd}" >> $GITHUB_ENV - - - name: Build Nim and Nimbus dependencies - run: | - ${make_cmd} -j ${ncpu} NIM_COMMIT=${{ matrix.branch }} ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1 update - ./env.sh nim --version - - - name: Get latest fixtures commit hash - id: fixtures_version - run: | - getHash() { - git ls-remote "https://github.com/$1" "${2:-HEAD}" | cut -f 1 - } - fixturesHash=$(getHash status-im/nim-eth2-scenarios) - echo "::set-output name=fixtures::${fixturesHash}" - - - name: Restore Ethereum Foundation fixtures from cache - id: fixtures-cache - uses: actions/cache@v4 - with: - path: fixturesCache - key: 'eth2-scenarios-${{ steps.fixtures_version.outputs.fixtures }}' - - - name: Get the Ethereum Foundation fixtures - run: | - scripts/setup_scenarios.sh fixturesCache - - - name: Build all tools - run: | - ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} - # The Windows image runs out of disk space, so make some room - rm -rf nimcache - - - name: Run tests - run: | - ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} DISABLE_TEST_FIXTURES_SCRIPT=1 test diff --git a/.github/workflows/nightly_build.yml b/.github/workflows/nightly_build.yml index fc43ad37df..b6d2a4c852 100644 --- a/.github/workflows/nightly_build.yml +++ b/.github/workflows/nightly_build.yml @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -41,7 +41,7 @@ jobs: echo "::set-output name=archive_dir::"${NEW_ARCHIVE_DIR} - name: Upload archive artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_amd64_archive path: | @@ -50,14 +50,14 @@ jobs: retention-days: 2 - name: Upload BN checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_amd64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_amd64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum @@ -95,7 +95,7 @@ jobs: echo "::set-output name=archive_dir::"${NEW_ARCHIVE_DIR} - name: Upload archive artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_arm64_archive path: | @@ -104,14 +104,14 @@ jobs: retention-days: 2 - name: Upload BN checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_arm64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_arm64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum @@ -149,7 +149,7 @@ jobs: echo "::set-output name=archive_dir::"${NEW_ARCHIVE_DIR} - name: Upload archive artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_arm_archive path: | @@ -158,14 +158,14 @@ jobs: retention-days: 2 - name: Upload BN checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_arm_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Linux_arm_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum @@ -195,7 +195,7 @@ jobs: echo "::set-output name=archive_dir::"${NEW_ARCHIVE_DIR} - name: Upload archive artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Windows_amd64_archive path: | @@ -204,14 +204,14 @@ jobs: retention-days: 2 - name: Upload BN checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Windows_amd64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Windows_amd64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum @@ -241,7 +241,7 @@ jobs: echo "::set-output name=archive_dir::"${NEW_ARCHIVE_DIR} - name: Upload archive artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: macOS_amd64_archive path: | @@ -250,14 +250,14 @@ jobs: retention-days: 2 - name: Upload BN checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: macOS_amd64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC 
checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: macOS_amd64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum @@ -287,7 +287,7 @@ jobs: echo "::set-output name=archive_dir::"${NEW_ARCHIVE_DIR} - name: Upload archive artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: macOS_arm64_archive path: | @@ -296,14 +296,14 @@ jobs: retention-days: 2 - name: Upload BN checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: macOS_arm64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_beacon_node.sha512sum retention-days: 2 - name: Upload VC checksum artefact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: macOS_arm64_checksum path: ./dist/${{ steps.make_dist.outputs.archive_dir }}/build/nimbus_validator_client.sha512sum diff --git a/.gitmodules b/.gitmodules index 13b69f4e43..45f88f8e32 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). @@ -37,7 +37,7 @@ branch = master [submodule "vendor/NimYAML"] path = vendor/NimYAML - url = https://github.com/status-im/NimYAML.git + url = https://github.com/flyx/NimYAML.git ignore = untracked branch = devel [submodule "vendor/nim-web3"] @@ -234,3 +234,8 @@ path = vendor/nim-minilru url = https://github.com/status-im/nim-minilru.git branch = master +[submodule "vendor/hoodi"] + path = vendor/hoodi + url = https://github.com/eth-clients/hoodi + ignore = untracked + branch = main diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 43d70fad64..e283cefe9c 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -4,15 +4,16 @@ AllTests-mainnet ```diff + ancestorSlot OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Attestation pool electra processing [Preset: mainnet] ```diff + Aggregated attestations with disjoint comittee bits into a single on-chain aggregate [Pres OK ++ Aggregating across committees [Preset: mainnet] OK + Attestations with disjoint comittee bits and equal data into single on-chain aggregate [Pr OK ++ Cache coherence on chain aggregates [Preset: mainnet] OK + Can add and retrieve simple electra attestations [Preset: mainnet] OK ++ Simple add and get with electra nonzero committee [Preset: mainnet] OK + Working with electra aggregates [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## Attestation pool processing [Preset: mainnet] ```diff + Attestation from different branch [Preset: mainnet] OK @@ -28,7 +29,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Trying to add a duplicate block from an old pruned epoch is tagged as an error OK + Working with aggregates [Preset: mainnet] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## Backfill ```diff + Backfill to genesis OK @@ -36,7 +36,6 @@ OK: 12/12 Fail: 0/12 Skip: 0/12 + Reload backfill position OK + Restart after each block OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## Beacon chain DB [Preset: mainnet] ```diff + empty database [Preset: mainnet] OK @@ -66,6 +65,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + sanity check Fulu states [Preset: mainnet] OK + sanity check Fulu states, reusing buffers 
[Preset: mainnet] OK + sanity check blobs [Preset: mainnet] OK ++ sanity check data columns [Preset: mainnet] OK + sanity check genesis roundtrip [Preset: mainnet] OK + sanity check phase 0 blocks [Preset: mainnet] OK + sanity check phase 0 getState rollback [Preset: mainnet] OK @@ -73,7 +73,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + sanity check phase 0 states, reusing buffers [Preset: mainnet] OK + sanity check state diff roundtrip [Preset: mainnet] OK ``` -OK: 33/33 Fail: 0/33 Skip: 0/33 ## Beacon chain file test suite ```diff + Auto check/repair test (missing data) OK @@ -82,7 +81,6 @@ OK: 33/33 Fail: 0/33 Skip: 0/33 + Auto check/repair test (only header) OK + Fixture file validation OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## Beacon state [Preset: mainnet] ```diff + Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK @@ -93,18 +91,15 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + merklizer state roundtrip OK + process_slots OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## Beacon time ```diff + Dependent slots OK + basics OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Beacon validators test suite ```diff + builderBetterBid(builderBoostFactor) test OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Blinded block conversions ```diff + Bellatrix toSignedBlindedBeaconBlock OK @@ -113,12 +108,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Electra toSignedBlindedBeaconBlock OK + Fulu toSignedBlindedBeaconBlock OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## Block pool altair processing [Preset: mainnet] ```diff + Invalid signatures [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Block pool processing [Preset: mainnet] ```diff + Adding the same block twice returns a Duplicate error [Preset: mainnet] OK @@ -127,36 +120,39 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + updateHead updates head and headState [Preset: mainnet] OK + updateState sanity [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## Block processor [Preset: mainnet] ```diff + Reverse order block add & get [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Block quarantine ```diff ++ Don't re-download unviable blocks OK ++ Keep downloading parent chain even if we hit missing limit OK + Recursive missing parent OK + Unviable smoke test OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## BlockId and helpers ```diff + atSlot sanity OK + parent sanity OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## BlockRef and helpers ```diff + get_ancestor sanity OK + isAncestorOf sanity OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## BlockSlot and helpers ```diff + atSlot sanity OK + parent sanity OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 +## Combined scenarios [Beacon Node] [Preset: mainnet] +```diff ++ ImportKeystores should not be blocked by fee recipient setting [Beacon Node] [Preset: main OK ++ ImportKeystores should not be blocked by gas limit setting [Beacon Node] [Preset: mainnet] OK ++ ImportRemoteKeys should not be blocked by fee recipient setting [Beacon Node] [Preset: mai OK ++ ImportRemoteKeys should not be blocked by gas limit setting [Beacon Node] [Preset: mainnet OK +``` ## DeleteKeys requests [Beacon Node] [Preset: mainnet] ```diff + Deleting not existing key [Beacon Node] [Preset: mainnet] OK @@ -164,7 +160,6 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## DeleteRemoteKeys requests [Beacon Node] [Preset: mainnet] ```diff + Deleting existing local key and remote key [Beacon Node] [Preset: mainnet] OK @@ -173,7 +168,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Invalid Authorization 
Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## DepositContractSnapshot ```diff + Migration OK @@ -181,31 +175,15 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + depositCount OK + isValid OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## Discovery fork ID ```diff + Expected fork IDs OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Diverging hardforks ```diff + Non-tail block in common OK + Tail block only in common OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 -## EF - EIP7594 - Networking [Preset: mainnet] -```diff -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -+ Networking - Get Custody Columns - mainnet/eip7594/networking/get_custody_columns/pyspec_t OK -``` -OK: 9/9 Fail: 0/9 Skip: 0/9 ## EF - KZG ```diff + KZG - Blob to KZG commitment - blob_to_kzg_commitment_case_invalid_blob_59d64ff6b4648fad OK @@ -462,9 +440,19 @@ OK: 9/9 Fail: 0/9 Skip: 0/9 + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_invalid_proof_d070689 OK + KZG - Verify blob KZG proof batch - verify_blob_kzg_proof_batch_case_proof_length_differen OK ``` -OK: 253/253 Fail: 0/253 Skip: 0/253 -## EF - KZG - EIP7594 -```diff +## EF - KZG - PeerDAS +```diff ++ KZG - Compute Cells - compute_cells_case_valid_419245fbfe69f145 OK ++ KZG - Compute Cells - compute_cells_case_valid_4aedd1a2a3933c3e OK ++ KZG - Compute Cells - compute_cells_case_valid_6e773f256383918c OK ++ KZG - Compute Cells - compute_cells_case_valid_b0731ef77b166ca8 OK ++ KZG - Compute Cells - compute_cells_case_valid_b81d309b22788820 OK ++ KZG - Compute Cells - compute_cells_case_valid_ed8b5001151417d5 OK ++ KZG - Compute Cells - compute_cells_case_valid_edeb8500a6507818 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_26555bdcbf18a267 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_79fb3cb1ef585a86 OK ++ KZG - Compute Cells - compute_cells_invalid_blob_7e99dea8893c104a OK ++ KZG - Compute Cells - compute_cells_invalid_blob_9d88c33852eb782d OK + KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_26555bdcbf OK + KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_79fb3cb1ef OK + KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_7e99dea889 OK @@ -522,7 +510,23 @@ OK: 253/253 Fail: 0/253 Skip: 0/253 + KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_same_cell_multi OK + KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_zero_cells_fbbd OK ``` -OK: 56/56 Fail: 0/56 Skip: 0/56 +## EF - PeerDAS - Networking [Preset: mainnet] +```diff ++ Networking - Compute Columns for Custody Group - mainnet/fulu/networking/compute_columns_f OK ++ Networking - Compute Columns for Custody Group - mainnet/fulu/networking/compute_columns_f OK ++ Networking 
- Compute Columns for Custody Group - mainnet/fulu/networking/compute_columns_f OK ++ Networking - Compute Columns for Custody Group - mainnet/fulu/networking/compute_columns_f OK ++ Networking - Compute Columns for Custody Group - mainnet/fulu/networking/compute_columns_f OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK ++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_groups/pyspec_tests/ OK +``` ## EF - SSZ generic types ```diff Testing basic_vector inputs - invalid Skip @@ -538,7 +542,6 @@ OK: 56/56 Fail: 0/56 Skip: 0/56 + Testing uints inputs - invalid OK + Testing uints inputs - valid OK ``` -OK: 10/12 Fail: 0/12 Skip: 2/12 ## EIP-4881 ```diff + deposit_cases OK @@ -547,18 +550,11 @@ OK: 10/12 Fail: 0/12 Skip: 2/12 + invalid_snapshot OK + snapshot_cases OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 -## EIP-7594 Sampling Tests -```diff -+ EIP7594: Extended Sample Count OK -``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EIP-7594 Unit Tests ```diff + EIP-7594: Compute Matrix OK + EIP:7594: Recover Matrix OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EL Configuration ```diff + Empty config file OK @@ -567,50 +563,45 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Old style config files OK + URL parsing OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## Engine API conversions ```diff + Roundtrip engine RPC V1 and bellatrix ExecutionPayload representations OK + Roundtrip engine RPC V2 and capella ExecutionPayload representations OK + Roundtrip engine RPC V3 and deneb ExecutionPayload representations OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Eth1 monitor ```diff + Deposits chain OK + Rewrite URLs OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Eth2 specific discovery tests ```diff + Invalid attnets field OK + Subnet query OK + Subnet query after ENR update OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Fee recipient management [Beacon Node] [Preset: mainnet] ```diff + Configuring the fee recipient [Beacon Node] [Preset: mainnet] OK ++ Configuring the fee recipient for dynamic validator [Beacon Node] [Preset: mainnet] OK + Invalid Authorization Header [Beacon Node] [Preset: mainnet] OK + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ++ Obtaining the fee recipient for dynamic validator returns suggested default [Beacon Node] OK + Obtaining the fee recipient of a missing validator returns 404 [Beacon Node] [Preset: main OK + Obtaining the fee recipient of an unconfigured validator returns the suggested default [Be OK + Setting the fee recipient on a missing validator creates a record for it [Beacon Node] [Pr OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## FinalizedBlocks [Preset: mainnet] ```diff + Basic ops [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Fork id compatibility test ```diff + Digest check OK + Fork check OK + Next fork epoch check 
OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Forked SSZ readers ```diff + load altair block OK @@ -625,35 +616,32 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + load phase0 state OK + should raise on unknown data OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## Gas limit management [Beacon Node] [Preset: mainnet] ```diff + Configuring the gas limit [Beacon Node] [Preset: mainnet] OK ++ Configuring the gas limit for dynamic validator [Beacon Node] [Preset: mainnet] OK + Invalid Authorization Header [Beacon Node] [Preset: mainnet] OK + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ++ Obtaining the gas limit for dynamic validator returns suggested default [Beacon Node] [Pre OK + Obtaining the gas limit of a missing validator returns 404 [Beacon Node] [Preset: mainnet] OK + Obtaining the gas limit of an unconfigured validator returns the suggested default [Beacon OK + Setting the gas limit on a missing validator creates a record for it [Beacon Node] [Preset OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## Gossip fork transition ```diff + Gossip fork transition OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Gossip validation [Preset: mainnet] ```diff + Empty committee when no committee for slot OK + validateAttestation OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Gossip validation - Altair ```diff + Period boundary OK + validateSyncCommitteeMessage - Duplicate pubkey OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Graffiti management [Beacon Node] [Preset: mainnet] ```diff + Configuring the graffiti [Beacon Node] [Preset: mainnet] OK @@ -664,7 +652,6 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Obtaining the graffiti of an unconfigured validator returns the suggested default [Beacon OK + Setting the graffiti on a missing validator creates a record for it [Beacon Node] [Preset: OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## Honest validator ```diff + General pubsub topics OK @@ -675,7 +662,6 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + isNearSyncCommitteePeriod OK + is_aggregator OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## ImportKeystores requests [Beacon Node] [Preset: mainnet] ```diff + ImportKeystores/ListKeystores/DeleteKeystores [Beacon Node] [Preset: mainnet] OK @@ -683,7 +669,6 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## ImportRemoteKeys/ListRemoteKeys/DeleteRemoteKeys [Beacon Node] [Preset: mainnet] ```diff + Importing list of remote keys [Beacon Node] [Preset: mainnet] OK @@ -691,7 +676,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## Key spliting ```diff + k < n OK @@ -699,7 +683,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + k == n == 100 OK + single share OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## KeyStorage testing suite ```diff + Load Prysm keystore OK @@ -715,19 +698,16 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + [SCRYPT] Network Keystore decryption OK + [SCRYPT] Network Keystore encryption OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## Latest valid hash [Preset: mainnet] ```diff + LVH searching OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Light client [Preset: mainnet] ```diff + Init from checkpoint OK + Light client sync OK + Pre-Altair OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Light client processor [Preset: mainnet] ```diff + Duplicate bootstrap (Optimistic) [Preset: mainnet] OK @@ -743,7 +723,6 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + 
Sync (Optimistic) [Preset: mainnet] OK + Sync (Strict) [Preset: mainnet] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## ListKeys requests [Beacon Node] [Preset: mainnet] ```diff + Correct token provided [Beacon Node] [Preset: mainnet] OK @@ -751,7 +730,6 @@ OK: 12/12 Fail: 0/12 Skip: 0/12 + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## ListRemoteKeys requests [Beacon Node] [Preset: mainnet] ```diff + Correct token provided [Beacon Node] [Preset: mainnet] OK @@ -759,7 +737,18 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK + Missing Authorization header [Beacon Node] [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 +## MEV calls serialization/deserialization and behavior test suite +```diff ++ /eth/v1/builder/blinded_blocks [json/json] test OK ++ /eth/v1/builder/blinded_blocks [json/ssz] test OK ++ /eth/v1/builder/blinded_blocks [ssz/json] test OK ++ /eth/v1/builder/blinded_blocks [ssz/ssz] test OK ++ /eth/v1/builder/header [json] test OK ++ /eth/v1/builder/header [ssz] test OK ++ /eth/v1/builder/status test OK ++ /eth/v1/builder/validators [json] test OK ++ /eth/v1/builder/validators [ssz] test OK +``` ## Message signatures ```diff + Aggregate and proof signatures OK @@ -771,27 +760,24 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Sync committee signed contribution and proof signatures OK + Voluntary exit signatures OK ``` -OK: 8/8 Fail: 0/8 Skip: 0/8 ## Network metadata ```diff + mainnet OK + sepolia OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Nimbus remote signer/signing test (verifying-web3signer) ```diff -+ Signing BeaconBlock (getBlockSignature(capella)) OK + Signing BeaconBlock (getBlockSignature(deneb)) OK ++ Signing BeaconBlock (getBlockSignature(electra)) OK + Waiting for signing node (/upcheck) test OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Nimbus remote signer/signing test (web3signer) ```diff + Connection timeout test OK + Public keys enumeration (/api/v1/eth2/publicKeys) test OK + Public keys reload (/reload) test OK -+ Signing BeaconBlock (getBlockSignature(capella)) OK + Signing BeaconBlock (getBlockSignature(deneb)) OK ++ Signing BeaconBlock (getBlockSignature(electra)) OK + Signing SC contribution and proof (getContributionAndProofSignature()) OK + Signing SC message (getSyncCommitteeMessage()) OK + Signing SC selection proof (getSyncCommitteeSelectionProof()) OK @@ -805,17 +791,20 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + Signing voluntary exit (getValidatorExitSignature()) OK + Waiting for signing node (/upcheck) test OK ``` -OK: 17/17 Fail: 0/17 Skip: 0/17 ## Old database versions [Preset: mainnet] ```diff + pre-1.1.0 OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 +## PeerDAS Sampling Tests +```diff ++ PeerDAS: Extended Sample Count OK +``` ## PeerPool testing suite ```diff + Access peers by key test OK + Acquire from empty pool OK + Acquire/Sorting and consistency test OK ++ Custom filters test OK + Delete peer on release text OK + Iterators test OK + Peer lifetime test OK @@ -826,12 +815,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + addPeerNoWait() test OK + deletePeer() test OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## Pruning ```diff + prune states OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## REST JSON encoding and decoding ```diff + Blob OK @@ -840,7 +827,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + KzgProof OK + Validator pubkey hack OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## Remove keystore testing suite ```diff + Many remotes OK @@ -849,31 +835,30 @@ OK: 5/5 Fail: 0/5 
Skip: 0/5 + Verifying Signer / Single remote OK + vesion 1 OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## Serialization/deserialization [Beacon Node] [Preset: mainnet] ```diff + Deserialization test vectors OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Serialization/deserialization test suite ```diff + RestErrorMessage parser tests OK + RestErrorMessage writer tests OK + strictParse(Stuint) tests OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Shufflings ```diff + Accelerated shuffling computation OK + Accelerated shuffling computation (with epochRefState jump) OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Shufflings (merged) ```diff + Accelerated shuffling computation OK + Accelerated shuffling computation (with epochRefState jump) OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 +## Size bounds +```diff ++ SignedBeaconBlockDeneb OK +``` ## Slashing Interchange tests [Preset: mainnet] ```diff + Slashing test: duplicate_pubkey_not_slashable.json OK @@ -915,7 +900,6 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Slashing test: single_validator_two_blocks_no_signing_root.json OK + Slashing test: wrong_genesis_validators_root.json OK ``` -OK: 35/38 Fail: 0/38 Skip: 3/38 ## Slashing Protection DB [Preset: mainnet] ```diff + Attestation ordering #1698 OK @@ -931,35 +915,29 @@ OK: 35/38 Fail: 0/38 Skip: 3/38 + SP for surrounding attestations OK + Test valid attestation #1699 OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## Spec datatypes ```diff + Graffiti bytes OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Spec helpers ```diff + build_proof - BeaconState OK + hypergeom_cdf OK + integer_squareroot OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## Specific field types ```diff + root update OK + roundtrip OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Starting states ```diff + Starting state without block OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## State history ```diff + getBlockIdAtSlot OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Sync committee pool ```diff + Aggregating votes OK @@ -970,40 +948,33 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Missed slots across sync committee period boundary OK + isSeen OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## SyncManager test suite ```diff -+ Process all unviable blocks OK + [SyncManager] groupBlobs() test OK -+ [SyncQueue#Backward] Async unordered push test OK -+ [SyncQueue#Backward] Async unordered push with rewind test OK -+ [SyncQueue#Backward] Good response with missing values towards end OK -+ [SyncQueue#Backward] Handle out-of-band sync progress advancement OK -+ [SyncQueue#Backward] Pass through established limits test OK -+ [SyncQueue#Backward] Smoke test OK -+ [SyncQueue#Backward] Start and finish slots equal OK -+ [SyncQueue#Backward] Two full requests success/fail OK ++ [SyncQueue# & Backward] Combination of missing parent and good blocks [3 peers] test OK ++ [SyncQueue# & Backward] Failure request push test OK ++ [SyncQueue# & Backward] Invalid block [3 peers] test OK ++ [SyncQueue# & Backward] Smoke [3 peers] test OK ++ [SyncQueue# & Backward] Smoke [single peer] test OK ++ [SyncQueue# & Backward] Unviable block [3 peers] test OK ++ [SyncQueue# & Forward] Combination of missing parent and good blocks [3 peers] test OK ++ [SyncQueue# & Forward] Failure request push test OK ++ [SyncQueue# & Forward] Invalid block [3 peers] test OK ++ [SyncQueue# & Forward] Smoke [3 peers] test OK ++ [SyncQueue# & Forward] Smoke [single peer] test OK ++ [SyncQueue# & Forward] Unviable block [3 peers] test OK ++ [SyncQueue#Backward] Missing parent and exponential rewind [3 peers] test OK + [SyncQueue#Backward] getRewindPoint() test OK -+ [SyncQueue#Forward] Async unordered push 
test OK -+ [SyncQueue#Forward] Async unordered push with rewind test OK -+ [SyncQueue#Forward] Good response with missing values towards end OK -+ [SyncQueue#Forward] Handle out-of-band sync progress advancement OK -+ [SyncQueue#Forward] Pass through established limits test OK -+ [SyncQueue#Forward] Smoke test OK -+ [SyncQueue#Forward] Start and finish slots equal OK -+ [SyncQueue#Forward] Two full requests success/fail OK ++ [SyncQueue#Forward] Missing parent and exponential rewind [3 peers] test OK + [SyncQueue#Forward] getRewindPoint() test OK ++ [SyncQueue] checkBlobsResponse() test OK + [SyncQueue] checkResponse() test OK -+ [SyncQueue] contains() test OK -+ [SyncQueue] getLastNonEmptySlot() test OK + [SyncQueue] hasEndGap() test OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## Type helpers ```diff + BeaconBlock OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## Validator Client test suite ```diff + /eth/v1/validator/beacon_committee_selections serialization/deserialization test OK @@ -1021,7 +992,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + getUniqueVotes() test vectors OK + normalizeUri() test vectors OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## Validator change pool testing suite ```diff + addValidatorChangeMessage/getAttesterSlashingMessage (Electra) OK @@ -1032,7 +1002,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + addValidatorChangeMessage/getVoluntaryExitMessage OK + pre-pre-fork voluntary exit OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## Validator pool ```diff + Doppelganger for genesis validator OK @@ -1040,20 +1009,17 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Dynamic validator set: queryValidatorsSource() test OK + Dynamic validator set: updateDynamicValidators() test OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## ValidatorPubKey bucket sort ```diff + incremental construction OK + one-shot construction OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## Zero signature sanity checks ```diff + SSZ serialization roundtrip of SignedBeaconBlockHeader OK + Zero signatures cannot be loaded into a BLS signature object OK + default initialization of signatures OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## chain DAG finalization tests [Preset: mainnet] ```diff + init with gaps [Preset: mainnet] OK @@ -1061,7 +1027,6 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + prune heads on finalization [Preset: mainnet] OK + shutdown during finalization [Preset: mainnet] OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## createValidatorFiles() ```diff + Add keystore files [LOCAL] OK @@ -1073,27 +1038,23 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + `createLocalValidatorFiles` with `validatorsDir` without permissions OK + `createValidatorFiles` with already existing dirs and any error OK ``` -OK: 8/8 Fail: 0/8 Skip: 0/8 ## engine API authentication ```diff + HS256 JWS iat token signing OK + HS256 JWS signing OK + getIatToken OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## eth2.0-deposits-cli compatibility ```diff + restoring mnemonic with password OK + restoring mnemonic without password OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## removeValidatorFiles() ```diff + Remove nonexistent validator OK + Remove validator files OK + Remove validator files twice OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 ## removeValidatorFiles() multiple keystore types ```diff + Remove [LOCAL] when [LOCAL] is missing OK @@ -1103,7 +1064,6 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + Remove [REMOTE] when [REMOTE] is missing OK + Remove [REMOTE] when [REMOTE] is present OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## saveKeystore() ```diff + Save [LOCAL] keystore after [LOCAL] keystore with different id OK @@ -1115,18 +1075,15 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + Save [REMOTE] keystore 
after [REMOTE] keystore with different id OK + Save [REMOTE] keystore after [REMOTE] keystore with same id OK ``` -OK: 8/8 Fail: 0/8 Skip: 0/8 ## state diff tests [Preset: mainnet] ```diff + random slot differences [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## subnet tracker ```diff + should register stability subnets on attester duties OK + should register sync committee duties OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## weak-subjectivity-checkpoint ```diff + Correct values OK @@ -1139,7 +1096,3 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + non-number epoch OK + shorter root OK ``` -OK: 9/9 Fail: 0/9 Skip: 0/9 - ----TOTAL--- -OK: 775/780 Fail: 0/780 Skip: 5/780 diff --git a/CHANGELOG.md b/CHANGELOG.md index fcab6d63f1..7d6e5c35c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,147 @@ +2025-03-21 v25.3.1 +================== + +Nimbus `v25.3.1` is a `low-urgency` release except for the Hoodi testnet, for which it's a `high-urgency` release. + +### Improvements + +- Add Hoodi testnet support: + https://github.com/status-im/nimbus-eth2/pull/7021 + https://github.com/status-im/nimbus-eth2/pull/7022 + https://github.com/status-im/nimbus-eth2/pull/7025 + +- Improve block quarantine performance on forked chains: + https://github.com/status-im/nimbus-eth2/pull/7006 + +- Implement getPendingDeposits and getPendingPartialWithdrawals beacon API endpoints: + https://github.com/status-im/nimbus-eth2/pull/7010 + +- Add SSZ encoding support to builder API client calls: + https://github.com/status-im/nimbus-eth2/pull/6970 + +- Make the `--dump` command-line option dump correct outgoing attestations: + https://github.com/status-im/nimbus-eth2/pull/7012 + +### Fixes + +- Improve partial blob sidecar response checking: + https://github.com/status-im/nimbus-eth2/pull/6985 + +- Respond to un-synced block requests with `ResourceUnavailable`: + https://github.com/status-im/nimbus-eth2/pull/6977 + +2025-03-02 v25.3.0 +================== + +Nimbus `v25.3.0` is a `low-urgency` release except for the Gnosis Chiado testnet, for which it's a `high-urgency` release. + +### Improvements + +- Add Gnosis Chiado testnet Electra fork support: + https://github.com/status-im/nimbus-eth2/pull/6968 + +- Add SSZ support for the registerValidator beacon REST API endpoint: + https://github.com/status-im/nimbus-eth2/pull/6943 + +- Increase trusted node sync state download timeout to 3 minutes: + https://github.com/status-im/nimbus-eth2/pull/6969 + +- Add link to trusted node sync documentation regarding state download timeout: + https://github.com/status-im/nimbus-eth2/pull/6927 + +### Fixes + +- Fix validator client graffiti with web3signer validators: + https://github.com/status-im/nimbus-eth2/pull/6927 + +- Fix sync completion percentages exceeding 100%: + https://github.com/status-im/nimbus-eth2/pull/6922 + +2025-02-13 v25.2.0 +================== + +Nimbus `v25.2.0` is a `low-urgency` release for mainnet, but a `high-urgency` release for Sepolia and Holesky due to Pectra-readiness for their upcoming forks.
+ +### Improvements + +- Add Holesky and Sepolia Electra fork epochs: + https://github.com/status-im/nimbus-eth2/pull/6908 + +- Improve syncing smoothness and steadiness: + https://github.com/status-im/nimbus-eth2/pull/6722 + +- Initiate metrics server later in beacon node startup sequence, to mitigate transient metrics during validator loading: + https://github.com/status-im/nimbus-eth2/pull/6902 + +### Fixes + +- Fix keymanager API listFeeRecipient and getGasLimit endpoints in presence of web3signer validators: + https://github.com/status-im/nimbus-eth2/pull/6916 + +- Update builder API registered fee recipient and gas limit from validator client without restart: + https://github.com/status-im/nimbus-eth2/pull/6907 + +- Fix capital case fork version name being returned in certain beacon API JSON response `version` fields: + https://github.com/status-im/nimbus-eth2/pull/6905 + +2025-01-28 v25.1.0 +================== + +Nimbus `v25.1.0` is a `medium-urgency` release with a gas limit increase, along with beacon API and security fixes. + +### Improvements + +* Increase builder API default gas limit to 36M: + https://github.com/status-im/nimbus-eth2/pull/6763 + +### Fixes + +* With multiple execution clients, wait for valid block response before concluding block is not valid: + https://github.com/status-im/nimbus-eth2/pull/6812 + +* Fix Docker image regression from v24.11.0 which could prevent starting beacon node: + https://github.com/status-im/nimbus-eth2/pull/6803 + +* Fix validator voluntary exiting given potential discrepancies about future fork scheduling: + https://github.com/status-im/nimbus-eth2/pull/6811 + +* Fix `sync_aggregate` value in `getBlockRewards` beacon API endpoint: + https://github.com/status-im/nimbus-eth2/pull/6829 + +* Fix `last_seen_p2p_address` value in `getPeers` beacon API endpoint: + https://github.com/status-im/nimbus-eth2/pull/6595 + +2024-12-12 v24.12.0 +=================== + +Nimbus `v24.12.0` is a `low-urgency` release. + +### Improvements + +* Support `bootstrap_nodes.yaml` bootstrap node specification: + https://github.com/status-im/nimbus-eth2/pull/6751 + +2024-11-29 v24.11.0 +=================== + +Nimbus `v24.11.0` is a `low-urgency` release with performance and compatibility improvements. + +### Improvements + +* Update Holesky bootnodes: + https://github.com/status-im/nimbus-eth2/pull/6703 + +* Improve forward syncing performance: + https://github.com/status-im/nimbus-eth2/pull/6682 + +* Experimental light client-based sync method, for testing only: + https://github.com/status-im/nimbus-eth2/pull/6515 + +### Fixes + +* Avoid validator client network configuration mismatch errors pertaining to unscheduled forks: + https://github.com/status-im/nimbus-eth2/pull/6698 + 2024-10-29 v24.10.0 =================== @@ -2376,7 +2520,7 @@ It also brings further performance optimizations. * A new `slashingdb` sub-command with `import` and `export` options. This allows for safely migrating to Nimbus from another client (as per the [EIP-3076](https://eips.ethereum.org/EIPS/eip-3076) slashing protection interchange format). - Please see the the newly prepared [migration guides](https://nimbus.guide/migration.html) for the details. + Please see the newly prepared [migration guides](https://nimbus.guide/migration.html) for the details. * A new `ncli_db validatorPerf` command. 
This can be used to perform a textual report for the attestation performance of a particular validator diff --git a/ConsensusSpecPreset-mainnet.md b/ConsensusSpecPreset-mainnet.md index c08408b0d9..7e5edfc6ce 100644 --- a/ConsensusSpecPreset-mainnet.md +++ b/ConsensusSpecPreset-mainnet.md @@ -4,18 +4,15 @@ ConsensusSpecPreset-mainnet ```diff + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing - Eth1 data reset [Preset: mainnet] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK + Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Altair - Epoch Processing - Historical roots update [Preset: mainnet] ```diff + Historical roots update - historical_root_accumulator [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing - Inactivity [Preset: mainnet] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK @@ -40,7 +37,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Altair - Epoch Processing - Justification & Finalization [Preset: mainnet] ```diff + Justification & Finalization - 123_ok_support [Preset: mainnet] OK @@ -54,7 +50,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: mainnet] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Altair - Epoch Processing - Participation flag updates [Preset: mainnet] ```diff + Participation flag updates - all_zeroed [Preset: mainnet] OK @@ -68,12 +63,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_2 [Preset: mainnet] OK + Participation flag updates - random_genesis [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Altair - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing - Registry updates [Preset: mainnet] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK @@ -88,7 +81,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## EF - Altair - Epoch Processing - Rewards and penalties [Preset: mainnet] ```diff + Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK @@ -100,14 +92,13 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK + Rewards and 
penalties - random_fill_attestations [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Altair - Epoch Processing - Slashings [Preset: mainnet] ```diff + Slashings - low_penalty [Preset: mainnet] OK @@ -116,12 +107,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: mainnet] OK + Slashings - slashings_with_random_state [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Altair - Epoch Processing - Slashings reset [Preset: mainnet] ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Finality [Preset: mainnet] ```diff + [Valid] EF - Altair - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK @@ -130,7 +119,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Altair - Finality - finality_rule_3 [Preset: mainnet] OK + [Valid] EF - Altair - Finality - finality_rule_4 [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Altair - Fork [Preset: mainnet] ```diff + EF - Altair - Fork - altair_fork_random_0 [Preset: mainnet] OK @@ -148,7 +136,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Altair - Fork - fork_random_low_balances [Preset: mainnet] OK + EF - Altair - Fork - fork_random_misc_balances [Preset: mainnet] OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Altair - Operations - Attestation [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -193,7 +180,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Valid] EF - Altair - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Altair - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Altair - Operations - Attester Slashing [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -227,7 +213,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Altair - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Altair - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Altair - Operations - Block Header [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -237,7 +222,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Invalid] EF - Altair - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Altair - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Altair - Operations - Deposit [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Deposit - invalid_bad_merkle_proof OK @@ -260,7 +244,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Altair - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Altair - Operations - Deposit - top_up__zero_balance OK ``` -OK: 19/19 Fail: 0/19 Skip: 0/19 ## EF - Altair - Operations - Proposer Slashing [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Proposer Slashing - invalid_different_proposer_indice OK @@ -279,7 +262,6 @@ OK: 19/19 Fail: 0/19 Skip: 0/19 + [Valid] EF - Altair - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Altair - Operations - Proposer Slashing - slashed_and_proposer_index_the_sa OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Altair - Operations - Sync Aggregate [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Sync Aggregate - 
invalid_signature_bad_domain OK @@ -309,7 +291,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Altair - Operations - Sync Aggregate - sync_committee_with_participating_ex OK + [Valid] EF - Altair - Operations - Sync Aggregate - sync_committee_with_participating_wi OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Altair - Operations - Voluntary Exit [Preset: mainnet] ```diff + [Invalid] EF - Altair - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -322,7 +303,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Altair - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK + [Valid] EF - Altair - Operations - Voluntary Exit - success_exit_queue__min_churn OK ``` -OK: 9/9 Fail: 0/9 Skip: 0/9 ## EF - Altair - Random [Preset: mainnet] ```diff + [Valid] EF - Altair - Random - randomized_0 [Preset: mainnet] OK @@ -342,7 +322,6 @@ OK: 9/9 Fail: 0/9 Skip: 0/9 + [Valid] EF - Altair - Random - randomized_8 [Preset: mainnet] OK + [Valid] EF - Altair - Random - randomized_9 [Preset: mainnet] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Altair - Rewards [Preset: mainnet] ```diff + EF - Altair - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -380,7 +359,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Altair - Rewards - with_slashed_validators [Preset: mainnet] OK + EF - Altair - Rewards - with_slashed_validators_leak [Preset: mainnet] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Altair - SSZ consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK @@ -423,7 +401,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing Validator OK + Testing VoluntaryExit OK ``` -OK: 39/39 Fail: 0/39 Skip: 0/39 ## EF - Altair - Sanity - Blocks [Preset: mainnet] ```diff + [Invalid] EF - Altair - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK @@ -474,9 +451,9 @@ OK: 39/39 Fail: 0/39 Skip: 0/39 + [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK + [Valid] EF - Altair - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ``` -OK: 47/47 Fail: 0/47 Skip: 0/47 ## EF - Altair - Sanity - Slots [Preset: mainnet] ```diff ++ EF - Altair - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Altair - Slots - double_empty_epoch [Preset: mainnet] OK + EF - Altair - Slots - empty_epoch [Preset: mainnet] OK + EF - Altair - Slots - historical_accumulator [Preset: mainnet] OK @@ -484,7 +461,6 @@ OK: 47/47 Fail: 0/47 Skip: 0/47 + EF - Altair - Slots - slots_1 [Preset: mainnet] OK + EF - Altair - Slots - slots_2 [Preset: mainnet] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Altair - Transition [Preset: mainnet] ```diff + EF - Altair - Transition - non_empty_historical_roots [Preset: mainnet] OK @@ -510,7 +486,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Altair - Transition - transition_with_random_half_participation [Preset: mainnet] OK + EF - Altair - Transition - transition_with_random_three_quarters_participation [Preset: ma OK ``` -OK: 22/22 Fail: 0/22 Skip: 0/22 ## EF - Altair - Unittests - Light client - Sync protocol [Preset: mainnet] ```diff + process_light_client_update_finality_updated OK @@ -518,23 +493,19 @@ OK: 22/22 Fail: 0/22 Skip: 0/22 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## EF - Bellatrix - Epoch Processing - Effective balance updates [Preset: mainnet] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch 
Processing - Eth1 data reset [Preset: mainnet] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK + Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Bellatrix - Epoch Processing - Historical roots update [Preset: mainnet] ```diff + Historical roots update - historical_root_accumulator [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch Processing - Inactivity [Preset: mainnet] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK @@ -559,7 +530,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Bellatrix - Epoch Processing - Justification & Finalization [Preset: mainnet] ```diff + Justification & Finalization - 123_ok_support [Preset: mainnet] OK @@ -573,7 +543,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: mainnet] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Bellatrix - Epoch Processing - Participation flag updates [Preset: mainnet] ```diff + Participation flag updates - all_zeroed [Preset: mainnet] OK @@ -587,12 +556,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_2 [Preset: mainnet] OK + Participation flag updates - random_genesis [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Bellatrix - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch Processing - Registry updates [Preset: mainnet] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK @@ -607,7 +574,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## EF - Bellatrix - Epoch Processing - Rewards and penalties [Preset: mainnet] ```diff + Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK @@ -619,14 +585,13 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Bellatrix - Epoch Processing - Slashings [Preset: mainnet] ```diff + Slashings - low_penalty [Preset: mainnet] OK @@ -635,12 +600,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: mainnet] OK + Slashings - slashings_with_random_state [Preset: 
mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Bellatrix - Epoch Processing - Slashings reset [Preset: mainnet] ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Finality [Preset: mainnet] ```diff + [Valid] EF - Bellatrix - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK @@ -649,7 +612,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Bellatrix - Finality - finality_rule_3 [Preset: mainnet] OK + [Valid] EF - Bellatrix - Finality - finality_rule_4 [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Bellatrix - Fork [Preset: mainnet] ```diff + EF - Bellatrix - Fork - bellatrix_fork_random_0 [Preset: mainnet] OK @@ -665,7 +627,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Bellatrix - Fork - fork_random_low_balances [Preset: mainnet] OK + EF - Bellatrix - Fork - fork_random_misc_balances [Preset: mainnet] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Bellatrix - Operations - Attestation [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -710,7 +671,6 @@ OK: 12/12 Fail: 0/12 Skip: 0/12 + [Valid] EF - Bellatrix - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Bellatrix - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Bellatrix - Operations - Attester Slashing [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -744,7 +704,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Bellatrix - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Bellatrix - Operations - Attester Slashing - with_effective_balance_dispari OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Bellatrix - Operations - Block Header [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -754,7 +713,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Invalid] EF - Bellatrix - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Bellatrix - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Bellatrix - Operations - Deposit [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Deposit - invalid_bad_merkle_proof OK @@ -778,7 +736,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Bellatrix - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Bellatrix - Operations - Deposit - top_up__zero_balance OK ``` -OK: 20/20 Fail: 0/20 Skip: 0/20 ## EF - Bellatrix - Operations - Execution Payload [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Execution Payload - invalid_bad_everything_first_p OK @@ -808,7 +765,6 @@ OK: 20/20 Fail: 0/20 Skip: 0/20 + [Valid] EF - Bellatrix - Operations - Execution Payload - zero_length_transaction_first_ OK + [Valid] EF - Bellatrix - Operations - Execution Payload - zero_length_transaction_regula OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Bellatrix - Operations - Proposer Slashing [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Proposer Slashing - invalid_different_proposer_ind OK @@ -827,7 +783,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Bellatrix - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Bellatrix - Operations - Proposer Slashing - slashed_and_proposer_index_the OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Bellatrix - Operations - Sync Aggregate [Preset: mainnet] 
```diff + [Invalid] EF - Bellatrix - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -857,7 +812,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Bellatrix - Operations - Sync Aggregate - sync_committee_with_participating OK + [Valid] EF - Bellatrix - Operations - Sync Aggregate - sync_committee_with_participating OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Bellatrix - Operations - Voluntary Exit [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -876,7 +830,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_current_fork_ OK + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_previous_fork OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Bellatrix - Random [Preset: mainnet] ```diff + [Valid] EF - Bellatrix - Random - randomized_0 [Preset: mainnet] OK @@ -896,7 +849,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Bellatrix - Random - randomized_8 [Preset: mainnet] OK + [Valid] EF - Bellatrix - Random - randomized_9 [Preset: mainnet] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Bellatrix - Rewards [Preset: mainnet] ```diff + EF - Bellatrix - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -934,7 +886,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Bellatrix - Rewards - with_slashed_validators [Preset: mainnet] OK + EF - Bellatrix - Rewards - with_slashed_validators_leak [Preset: mainnet] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Bellatrix - SSZ consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK @@ -980,7 +931,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing Validator OK + Testing VoluntaryExit OK ``` -OK: 42/42 Fail: 0/42 Skip: 0/42 ## EF - Bellatrix - Sanity - Blocks [Preset: mainnet] ```diff + [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK @@ -1034,9 +984,9 @@ OK: 42/42 Fail: 0/42 Skip: 0/42 + [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__half [Prese OK + [Valid] EF - Bellatrix - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ``` -OK: 50/50 Fail: 0/50 Skip: 0/50 ## EF - Bellatrix - Sanity - Slots [Preset: mainnet] ```diff ++ EF - Bellatrix - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Bellatrix - Slots - double_empty_epoch [Preset: mainnet] OK + EF - Bellatrix - Slots - empty_epoch [Preset: mainnet] OK + EF - Bellatrix - Slots - historical_accumulator [Preset: mainnet] OK @@ -1044,7 +994,6 @@ OK: 50/50 Fail: 0/50 Skip: 0/50 + EF - Bellatrix - Slots - slots_1 [Preset: mainnet] OK + EF - Bellatrix - Slots - slots_2 [Preset: mainnet] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Bellatrix - Transition [Preset: mainnet] ```diff + EF - Bellatrix - Transition - non_empty_historical_roots [Preset: mainnet] OK @@ -1070,23 +1019,19 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Bellatrix - Transition - transition_with_random_half_participation [Preset: mainnet] OK + EF - Bellatrix - Transition - transition_with_random_three_quarters_participation [Preset: OK ``` -OK: 22/22 Fail: 0/22 Skip: 0/22 ## EF - Capella - Epoch Processing - Effective balance updates [Preset: mainnet] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Eth1 data reset [Preset: mainnet] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK + Eth1 data reset - eth1_vote_reset [Preset: mainnet] 
OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Capella - Epoch Processing - Historical summaries update [Preset: mainnet] ```diff + Historical summaries update - historical_summaries_accumulator [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Inactivity [Preset: mainnet] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK @@ -1111,7 +1056,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Capella - Epoch Processing - Justification & Finalization [Preset: mainnet] ```diff + Justification & Finalization - 123_ok_support [Preset: mainnet] OK @@ -1125,7 +1069,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: mainnet] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Capella - Epoch Processing - Participation flag updates [Preset: mainnet] ```diff + Participation flag updates - all_zeroed [Preset: mainnet] OK @@ -1139,12 +1082,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_2 [Preset: mainnet] OK + Participation flag updates - random_genesis [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Capella - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Registry updates [Preset: mainnet] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK @@ -1159,7 +1100,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## EF - Capella - Epoch Processing - Rewards and penalties [Preset: mainnet] ```diff + Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK @@ -1171,14 +1111,13 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Epoch Processing - Slashings [Preset: mainnet] ```diff + Slashings - low_penalty [Preset: mainnet] OK @@ -1187,12 +1126,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: mainnet] OK + Slashings - slashings_with_random_state [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Capella - Epoch Processing - Slashings reset [Preset: mainnet] ```diff + Slashings reset - flush_slashings 
[Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Finality [Preset: mainnet] ```diff + [Valid] EF - Capella - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK @@ -1201,7 +1138,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Capella - Finality - finality_rule_3 [Preset: mainnet] OK + [Valid] EF - Capella - Finality - finality_rule_4 [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Capella - Fork [Preset: mainnet] ```diff + EF - Capella - Fork - capella_fork_random_0 [Preset: mainnet] OK @@ -1217,7 +1153,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Capella - Fork - fork_random_low_balances [Preset: mainnet] OK + EF - Capella - Fork - fork_random_misc_balances [Preset: mainnet] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Capella - Operations - Attestation [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -1262,7 +1197,6 @@ OK: 12/12 Fail: 0/12 Skip: 0/12 + [Valid] EF - Capella - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Capella - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Capella - Operations - Attester Slashing [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -1296,7 +1230,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Capella - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Capella - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Capella - Operations - BLS to execution change [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - BLS to execution change - invalid_already_0x01 OK @@ -1315,7 +1248,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Capella - Operations - BLS to execution change - success_withdrawable OK + [Valid] EF - Capella - Operations - BLS to execution change - valid_signature_from_staki OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Operations - Block Header [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -1325,7 +1257,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Invalid] EF - Capella - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Capella - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Capella - Operations - Deposit [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Deposit - invalid_bad_merkle_proof OK @@ -1350,7 +1281,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Capella - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Capella - Operations - Deposit - top_up__zero_balance OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Capella - Operations - Execution Payload [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Execution Payload - invalid_bad_everything_first_pay OK @@ -1380,7 +1310,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Capella - Operations - Execution Payload - zero_length_transaction_first_pa OK + [Valid] EF - Capella - Operations - Execution Payload - zero_length_transaction_regular_ OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Capella - Operations - Proposer Slashing [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Proposer Slashing - invalid_different_proposer_indic OK @@ -1399,7 +1328,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Capella - Operations - 
Proposer Slashing - block_header_from_future OK + [Valid] EF - Capella - Operations - Proposer Slashing - slashed_and_proposer_index_the_s OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Operations - Sync Aggregate [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -1429,7 +1357,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Capella - Operations - Sync Aggregate - sync_committee_with_participating_e OK + [Valid] EF - Capella - Operations - Sync Aggregate - sync_committee_with_participating_w OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Capella - Operations - Voluntary Exit [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -1448,7 +1375,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Capella - Operations - Voluntary Exit - voluntary_exit_with_current_fork_ve OK + [Valid] EF - Capella - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_v OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Operations - Withdrawals [Preset: mainnet] ```diff + [Invalid] EF - Capella - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK @@ -1472,6 +1398,9 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Invalid] EF - Capella - Operations - Withdrawals - invalid_two_expected_partial_withdrawa OK + [Valid] EF - Capella - Operations - Withdrawals - all_withdrawal OK + [Valid] EF - Capella - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK ++ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK + [Valid] EF - Capella - Operations - Withdrawals - random_0 OK + [Valid] EF - Capella - Operations - Withdrawals - random_full_withdrawals_0 OK + [Valid] EF - Capella - Operations - Withdrawals - random_full_withdrawals_1 OK @@ -1502,7 +1431,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -OK: 50/50 Fail: 0/50 Skip: 0/50 ## EF - Capella - Random [Preset: mainnet] ```diff + [Valid] EF - Capella - Random - randomized_0 [Preset: mainnet] OK @@ -1522,7 +1450,6 @@ OK: 50/50 Fail: 0/50 Skip: 0/50 + [Valid] EF - Capella - Random - randomized_8 [Preset: mainnet] OK + [Valid] EF - Capella - Random - randomized_9 [Preset: mainnet] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Capella - Rewards [Preset: mainnet] ```diff + EF - Capella - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -1560,7 +1487,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Capella - Rewards - with_slashed_validators [Preset: mainnet] OK + EF - Capella - Rewards - with_slashed_validators_leak [Preset: mainnet] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Capella - SSZ consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK @@ -1610,7 +1536,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -OK: 46/46 Fail: 0/46 Skip: 0/46 ## EF - Capella - Sanity - Blocks [Preset: mainnet] ```diff + [Invalid] EF - Capella - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK @@ -1676,9 +1601,9 @@ OK: 46/46 Fail: 0/46 Skip: 0/46 + [Valid] EF - Capella - Sanity - Blocks - voluntary_exit [Preset: 
mainnet] OK + [Valid] EF - Capella - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK ``` -OK: 62/62 Fail: 0/62 Skip: 0/62 ## EF - Capella - Sanity - Slots [Preset: mainnet] ```diff ++ EF - Capella - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Capella - Slots - double_empty_epoch [Preset: mainnet] OK + EF - Capella - Slots - empty_epoch [Preset: mainnet] OK + EF - Capella - Slots - historical_accumulator [Preset: mainnet] OK @@ -1686,7 +1611,6 @@ OK: 62/62 Fail: 0/62 Skip: 0/62 + EF - Capella - Slots - slots_1 [Preset: mainnet] OK + EF - Capella - Slots - slots_2 [Preset: mainnet] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Capella - Transition [Preset: mainnet] ```diff + EF - Capella - Transition - non_empty_historical_roots [Preset: mainnet] OK @@ -1712,7 +1636,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Capella - Transition - transition_with_random_half_participation [Preset: mainnet] OK + EF - Capella - Transition - transition_with_random_three_quarters_participation [Preset: m OK ``` -OK: 22/22 Fail: 0/22 Skip: 0/22 ## EF - Capella - Unittests - Light client - Sync protocol [Preset: mainnet] ```diff + process_light_client_update_finality_updated OK @@ -1720,23 +1643,19 @@ OK: 22/22 Fail: 0/22 Skip: 0/22 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## EF - Deneb - Epoch Processing - Effective balance updates [Preset: mainnet] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Eth1 data reset [Preset: mainnet] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK + Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Deneb - Epoch Processing - Historical summaries update [Preset: mainnet] ```diff + Historical summaries update - historical_summaries_accumulator [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Inactivity [Preset: mainnet] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK @@ -1761,7 +1680,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Deneb - Epoch Processing - Justification & Finalization [Preset: mainnet] ```diff + Justification & Finalization - 123_ok_support [Preset: mainnet] OK @@ -1775,7 +1693,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: mainnet] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Deneb - Epoch Processing - Participation flag updates [Preset: mainnet] ```diff + Participation flag updates - all_zeroed [Preset: mainnet] OK @@ -1789,12 +1706,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_2 [Preset: mainnet] OK + Participation flag updates - random_genesis [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Deneb - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Registry updates [Preset: mainnet] ```diff + Registry updates - 
activation_queue_activation_and_ejection__1 [Preset: mainnet] OK @@ -1809,7 +1724,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## EF - Deneb - Epoch Processing - Rewards and penalties [Preset: mainnet] ```diff + Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK @@ -1821,14 +1735,13 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Deneb - Epoch Processing - Slashings [Preset: mainnet] ```diff + Slashings - low_penalty [Preset: mainnet] OK @@ -1837,12 +1750,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: mainnet] OK + Slashings - slashings_with_random_state [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Deneb - Epoch Processing - Slashings reset [Preset: mainnet] ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Finality [Preset: mainnet] ```diff + [Valid] EF - Deneb - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK @@ -1851,7 +1762,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Deneb - Finality - finality_rule_3 [Preset: mainnet] OK + [Valid] EF - Deneb - Finality - finality_rule_4 [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Deneb - Fork [Preset: mainnet] ```diff + EF - Deneb - Fork - deneb_fork_random_0 [Preset: mainnet] OK @@ -1867,7 +1777,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Deneb - Fork - fork_random_low_balances [Preset: mainnet] OK + EF - Deneb - Fork - fork_random_misc_balances [Preset: mainnet] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Deneb - Operations - Attestation [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -1912,7 +1821,6 @@ OK: 12/12 Fail: 0/12 Skip: 0/12 + [Valid] EF - Deneb - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Deneb - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Deneb - Operations - Attester Slashing [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -1946,7 +1854,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Deneb - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Deneb - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Deneb - Operations - BLS to execution change [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - BLS to execution change - invalid_already_0x01 OK @@ 
-1964,7 +1871,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Deneb - Operations - BLS to execution change - success_not_activated OK + [Valid] EF - Deneb - Operations - BLS to execution change - success_withdrawable OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Deneb - Operations - Block Header [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -1974,7 +1880,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Deneb - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Deneb - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Deneb - Operations - Deposit [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Deposit - invalid_bad_merkle_proof OK @@ -1999,7 +1904,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Deneb - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Deneb - Operations - Deposit - top_up__zero_balance OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Deneb - Operations - Execution Payload [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Execution Payload - invalid_bad_everything_first_paylo OK @@ -2026,6 +1930,8 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_1_ext OK + [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_32_ex OK + [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_empty OK ++ [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_no_blobs_but OK ++ [Valid] EF - Deneb - Operations - Execution Payload - no_commitments_for_transactions OK + [Valid] EF - Deneb - Operations - Execution Payload - no_transactions_with_commitments OK + [Valid] EF - Deneb - Operations - Execution Payload - non_empty_extra_data_first_payload OK + [Valid] EF - Deneb - Operations - Execution Payload - non_empty_extra_data_regular_paylo OK @@ -2041,7 +1947,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Deneb - Operations - Execution Payload - zero_length_transaction_regular_pa OK + [Valid] EF - Deneb - Operations - Execution Payload - zeroed_commitment OK ``` -OK: 38/38 Fail: 0/38 Skip: 0/38 ## EF - Deneb - Operations - Proposer Slashing [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Proposer Slashing - invalid_different_proposer_indices OK @@ -2060,7 +1965,6 @@ OK: 38/38 Fail: 0/38 Skip: 0/38 + [Valid] EF - Deneb - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Deneb - Operations - Proposer Slashing - slashed_and_proposer_index_the_sam OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Deneb - Operations - Sync Aggregate [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -2090,7 +1994,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Deneb - Operations - Sync Aggregate - sync_committee_with_participating_exi OK + [Valid] EF - Deneb - Operations - Sync Aggregate - sync_committee_with_participating_wit OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Deneb - Operations - Voluntary Exit [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -2109,7 +2012,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK + [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK ``` -OK: 
15/15 Fail: 0/15 Skip: 0/15 ## EF - Deneb - Operations - Withdrawals [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_few OK @@ -2133,6 +2035,9 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Invalid] EF - Deneb - Operations - Withdrawals - invalid_two_expected_partial_withdrawal_ OK + [Valid] EF - Deneb - Operations - Withdrawals - all_withdrawal OK + [Valid] EF - Deneb - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK ++ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK + [Valid] EF - Deneb - Operations - Withdrawals - random_0 OK + [Valid] EF - Deneb - Operations - Withdrawals - random_full_withdrawals_0 OK + [Valid] EF - Deneb - Operations - Withdrawals - random_full_withdrawals_1 OK @@ -2163,7 +2068,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK ``` -OK: 50/50 Fail: 0/50 Skip: 0/50 ## EF - Deneb - Random [Preset: mainnet] ```diff + [Valid] EF - Deneb - Random - randomized_0 [Preset: mainnet] OK @@ -2183,7 +2087,6 @@ OK: 50/50 Fail: 0/50 Skip: 0/50 + [Valid] EF - Deneb - Random - randomized_8 [Preset: mainnet] OK + [Valid] EF - Deneb - Random - randomized_9 [Preset: mainnet] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Deneb - Rewards [Preset: mainnet] ```diff + EF - Deneb - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -2221,7 +2124,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Deneb - Rewards - with_slashed_validators [Preset: mainnet] OK + EF - Deneb - Rewards - with_slashed_validators_leak [Preset: mainnet] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Deneb - SSZ consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK @@ -2273,7 +2175,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -OK: 48/48 Fail: 0/48 Skip: 0/48 ## EF - Deneb - Sanity - Blocks [Preset: mainnet] ```diff + [Invalid] EF - Deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK @@ -2348,9 +2249,9 @@ OK: 48/48 Fail: 0/48 Skip: 0/48 + [Valid] EF - Deneb - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK + [Valid] EF - Deneb - Sanity - Blocks - zero_blob [Preset: mainnet] OK ``` -OK: 71/71 Fail: 0/71 Skip: 0/71 ## EF - Deneb - Sanity - Slots [Preset: mainnet] ```diff ++ EF - Deneb - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Deneb - Slots - double_empty_epoch [Preset: mainnet] OK + EF - Deneb - Slots - empty_epoch [Preset: mainnet] OK + EF - Deneb - Slots - historical_accumulator [Preset: mainnet] OK @@ -2358,7 +2259,6 @@ OK: 71/71 Fail: 0/71 Skip: 0/71 + EF - Deneb - Slots - slots_1 [Preset: mainnet] OK + EF - Deneb - Slots - slots_2 [Preset: mainnet] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Deneb - Transition [Preset: mainnet] ```diff + EF - Deneb - Transition - non_empty_historical_roots [Preset: mainnet] OK @@ -2387,7 +2287,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Deneb - Transition - transition_with_random_half_participation [Preset: mainnet] OK + EF - Deneb - Transition - transition_with_random_three_quarters_participation [Preset: mai OK ``` -OK: 25/25 Fail: 0/25 Skip: 0/25 ## EF - Deneb - 
Unittests - Light client - Sync protocol [Preset: mainnet] ```diff + process_light_client_update_finality_updated OK @@ -2395,79 +2294,20 @@ OK: 25/25 Fail: 0/25 Skip: 0/25 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 -## EF - EIP7594 - SSZ consensus objects [Preset: mainnet] -```diff -+ Testing AggregateAndProof OK -+ Testing Attestation OK -+ Testing AttestationData OK -+ Testing AttesterSlashing OK -+ Testing BLSToExecutionChange OK -+ Testing BeaconBlock OK -+ Testing BeaconBlockBody OK -+ Testing BeaconBlockHeader OK -+ Testing BeaconState OK -+ Testing BlobIdentifier OK -+ Testing BlobSidecar OK -+ Testing Checkpoint OK -+ Testing ContributionAndProof OK -+ Testing DataColumnIdentifier OK -+ Testing DataColumnSidecar OK -+ Testing Deposit OK -+ Testing DepositData OK -+ Testing DepositMessage OK -+ Testing Eth1Block OK -+ Testing Eth1Data OK -+ Testing ExecutionPayload OK -+ Testing ExecutionPayloadHeader OK -+ Testing Fork OK -+ Testing ForkData OK -+ Testing HistoricalBatch OK -+ Testing HistoricalSummary OK -+ Testing IndexedAttestation OK -+ Testing LightClientBootstrap OK -+ Testing LightClientFinalityUpdate OK -+ Testing LightClientHeader OK -+ Testing LightClientOptimisticUpdate OK -+ Testing LightClientUpdate OK -+ Testing MatrixEntry OK -+ Testing PendingAttestation OK -+ Testing PowBlock OK -+ Testing ProposerSlashing OK -+ Testing SignedAggregateAndProof OK -+ Testing SignedBLSToExecutionChange OK -+ Testing SignedBeaconBlock OK -+ Testing SignedBeaconBlockHeader OK -+ Testing SignedContributionAndProof OK -+ Testing SignedVoluntaryExit OK -+ Testing SigningData OK -+ Testing SyncAggregate OK -+ Testing SyncAggregatorSelectionData OK -+ Testing SyncCommittee OK -+ Testing SyncCommitteeContribution OK -+ Testing SyncCommitteeMessage OK -+ Testing Validator OK -+ Testing VoluntaryExit OK -+ Testing Withdrawal OK -``` -OK: 51/51 Fail: 0/51 Skip: 0/51 ## EF - Electra - Epoch Processing - Effective balance updates [Preset: mainnet] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK + Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Electra - Epoch Processing - Eth1 data reset [Preset: mainnet] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK + Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Electra - Epoch Processing - Historical summaries update [Preset: mainnet] ```diff + Historical summaries update - historical_summaries_accumulator [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Electra - Epoch Processing - Inactivity [Preset: mainnet] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK @@ -2492,7 +2332,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Electra - Epoch Processing - Justification & Finalization [Preset: mainnet] ```diff + Justification & Finalization - 123_ok_support [Preset: mainnet] OK @@ -2506,7 +2345,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: mainnet] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 
0/10 ## EF - Electra - Epoch Processing - Participation flag updates [Preset: mainnet] ```diff + Participation flag updates - all_zeroed [Preset: mainnet] OK @@ -2520,22 +2358,27 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_2 [Preset: mainnet] OK + Participation flag updates - random_genesis [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Electra - Epoch Processing - Pending consolidations [Preset: mainnet] ```diff + Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK + Pending consolidations - basic_pending_consolidation [Preset: mainnet] OK + Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: ma OK ++ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: mainnet] OK + Pending consolidations - pending_consolidation_compounding_creds [Preset: mainnet] OK + Pending consolidations - pending_consolidation_future_epoch [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK + Pending consolidations - pending_consolidation_with_pending_deposit [Preset: mainnet] OK + Pending consolidations - skip_consolidation_when_source_slashed [Preset: mainnet] OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## EF - Electra - Epoch Processing - Pending deposits [Preset: mainnet] ```diff + Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK + Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK + Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK + Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK @@ -2550,6 +2393,7 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: mai OK + Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK + Pending deposits - apply_pending_deposit_over_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: mainn OK + Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: ma OK + Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: mainnet] OK + Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK @@ -2566,6 +2410,7 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: mainne OK + Pending deposits - process_pending_deposits_limit_is_reached [Preset: mainnet] OK + Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: ma OK ++ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: mainnet] OK + Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn 
[Preset: OK + Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK + Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: mainnet] OK @@ -2576,18 +2421,21 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Pending deposits - process_pending_deposits_withdrawable_validator [Preset: mainnet] OK + Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: ma OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Electra - Epoch Processing - Registry updates [Preset: mainnet] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK + Registry updates - activation_queue_activation_and_ejection__churn_limit [Preset: mainnet] OK + Registry updates - activation_queue_activation_and_ejection__exceed_churn_limit [Preset: m OK + Registry updates - activation_queue_efficiency_min [Preset: mainnet] OK ++ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK ++ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK ++ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: mainnet] OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK + Registry updates - activation_queue_no_activation_no_finality [Preset: mainnet] OK + Registry updates - activation_queue_sorting [Preset: mainnet] OK + Registry updates - activation_queue_to_activated_if_finalized [Preset: mainnet] OK @@ -2596,7 +2444,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## EF - Electra - Epoch Processing - Rewards and penalties [Preset: mainnet] ```diff + Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK @@ -2608,14 +2455,13 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK + Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Electra - Epoch Processing - Slashings [Preset: mainnet] ```diff + Slashings - low_penalty [Preset: mainnet] OK @@ -2624,12 +2470,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: mainnet] OK + Slashings - slashings_with_random_state [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Electra - Epoch Processing - Slashings reset [Preset: mainnet] ```diff + Slashings reset - 
flush_slashings [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Electra - Finality [Preset: mainnet] ```diff + [Valid] EF - Electra - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK @@ -2638,7 +2482,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Electra - Finality - finality_rule_3 [Preset: mainnet] OK + [Valid] EF - Electra - Finality - finality_rule_4 [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Electra - Fork [Preset: mainnet] ```diff + EF - Electra - Fork - electra_fork_random_0 [Preset: mainnet] OK @@ -2648,15 +2491,19 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Electra - Fork - electra_fork_random_low_balances [Preset: mainnet] OK + EF - Electra - Fork - electra_fork_random_misc_balances [Preset: mainnet] OK + EF - Electra - Fork - fork_base_state [Preset: mainnet] OK ++ EF - Electra - Fork - fork_earliest_exit_epoch_is_max_validator_exit_epoch [Preset: mainne OK ++ EF - Electra - Fork - fork_earliest_exit_epoch_less_than_current_epoch [Preset: mainnet] OK ++ EF - Electra - Fork - fork_earliest_exit_epoch_no_validator_exits [Preset: mainnet] OK + EF - Electra - Fork - fork_has_compounding_withdrawal_credential [Preset: mainnet] OK ++ EF - Electra - Fork - fork_inactive_compounding_validator_with_excess_balance [Preset: mai OK + EF - Electra - Fork - fork_many_next_epoch [Preset: mainnet] OK + EF - Electra - Fork - fork_next_epoch [Preset: mainnet] OK + EF - Electra - Fork - fork_next_epoch_with_block [Preset: mainnet] OK ++ EF - Electra - Fork - fork_pending_deposits_are_sorted [Preset: mainnet] OK + EF - Electra - Fork - fork_pre_activation [Preset: mainnet] OK + EF - Electra - Fork - fork_random_low_balances [Preset: mainnet] OK + EF - Electra - Fork - fork_random_misc_balances [Preset: mainnet] OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Electra - Operations - Attestation [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -2664,7 +2511,7 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Electra - Operations - Attestation - invalid_attestation_signature OK + [Invalid] EF - Electra - Operations - Attestation - invalid_bad_source_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_before_inclusion_delay OK -+ [Invalid] EF - Electra - Operations - Attestation - invalid_committe_index OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_committee_index OK + [Invalid] EF - Electra - Operations - Attestation - invalid_correct_attestation_included_a OK + [Invalid] EF - Electra - Operations - Attestation - invalid_current_source_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_empty_participants_seemingly_v OK @@ -2676,14 +2523,14 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Electra - Operations - Attestation - invalid_index OK + [Invalid] EF - Electra - Operations - Attestation - invalid_mismatched_target_and_slot OK + [Invalid] EF - Electra - Operations - Attestation - invalid_new_source_epoch OK -+ [Invalid] EF - Electra - Operations - Attestation - invalid_nonset_committe_bits OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_nonset_committee_bits OK + [Invalid] EF - Electra - Operations - Attestation - invalid_old_source_epoch OK + [Invalid] EF - Electra - Operations - Attestation - invalid_old_target_epoch OK + [Invalid] EF - Electra - Operations - Attestation - invalid_previous_source_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_source_root_is_target_root OK + [Invalid] 
EF - Electra - Operations - Attestation - invalid_too_few_aggregation_bits OK + [Invalid] EF - Electra - Operations - Attestation - invalid_too_many_aggregation_bits OK -+ [Invalid] EF - Electra - Operations - Attestation - invalid_too_many_committe_bits OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_too_many_committee_bits OK + [Invalid] EF - Electra - Operations - Attestation - invalid_wrong_index_for_committee_sign OK + [Invalid] EF - Electra - Operations - Attestation - invalid_wrong_index_for_slot_0 OK + [Invalid] EF - Electra - Operations - Attestation - invalid_wrong_index_for_slot_1 OK @@ -2705,7 +2552,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Valid] EF - Electra - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Electra - Operations - Attestation - previous_epoch OK ``` -OK: 45/45 Fail: 0/45 Skip: 0/45 ## EF - Electra - Operations - Attester Slashing [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -2739,7 +2585,6 @@ OK: 45/45 Fail: 0/45 Skip: 0/45 + [Valid] EF - Electra - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Electra - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Electra - Operations - BLS to execution change [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - BLS to execution change - invalid_already_0x01 OK @@ -2757,7 +2602,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Electra - Operations - BLS to execution change - success_not_activated OK + [Valid] EF - Electra - Operations - BLS to execution change - success_withdrawable OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Electra - Operations - Block Header [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -2767,7 +2611,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Electra - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Electra - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Electra - Operations - Consolidation Request [Preset: mainnet] ```diff + [Valid] EF - Electra - Operations - Consolidation Request - basic_switch_to_compounding OK @@ -2781,7 +2624,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Electra - Operations - Consolidation Request - switch_to_compounding_with_e OK + [Valid] EF - Electra - Operations - Consolidation Request - switch_to_compounding_with_p OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Electra - Operations - Deposit [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK @@ -2806,7 +2648,6 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + [Valid] EF - Electra - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Electra - Operations - Deposit Request [Preset: mainnet] ```diff + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_invalid_si OK @@ -2818,7 +2659,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_max OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_min OK ``` -OK: 8/8 Fail: 0/8 Skip: 0/8 ## EF - Electra - Operations - Execution Payload [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Execution Payload - 
invalid_bad_everything_first_pay OK @@ -2845,6 +2685,8 @@ OK: 8/8 Fail: 0/8 Skip: 0/8 + [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_1_e OK + [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_32_ OK + [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_emp OK ++ [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_no_blobs_b OK ++ [Valid] EF - Electra - Operations - Execution Payload - no_commitments_for_transactions OK + [Valid] EF - Electra - Operations - Execution Payload - no_transactions_with_commitments OK + [Valid] EF - Electra - Operations - Execution Payload - non_empty_extra_data_first_paylo OK + [Valid] EF - Electra - Operations - Execution Payload - non_empty_extra_data_regular_pay OK @@ -2860,7 +2702,6 @@ OK: 8/8 Fail: 0/8 Skip: 0/8 + [Valid] EF - Electra - Operations - Execution Payload - zero_length_transaction_regular_ OK + [Valid] EF - Electra - Operations - Execution Payload - zeroed_commitment OK ``` -OK: 38/38 Fail: 0/38 Skip: 0/38 ## EF - Electra - Operations - Proposer Slashing [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Proposer Slashing - invalid_different_proposer_indic OK @@ -2879,7 +2720,6 @@ OK: 38/38 Fail: 0/38 Skip: 0/38 + [Valid] EF - Electra - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Electra - Operations - Proposer Slashing - slashed_and_proposer_index_the_s OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Electra - Operations - Sync Aggregate [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -2909,7 +2749,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Electra - Operations - Sync Aggregate - sync_committee_with_participating_e OK + [Valid] EF - Electra - Operations - Sync Aggregate - sync_committee_with_participating_w OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Electra - Operations - Voluntary Exit [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -2937,7 +2776,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Electra - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__min_churn OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## EF - Electra - Operations - Withdrawal Request [Preset: mainnet] ```diff + [Valid] EF - Electra - Operations - Withdrawal Request - activation_epoch_less_than_shar OK @@ -2958,8 +2796,8 @@ OK: 24/24 Fail: 0/24 Skip: 0/24 + [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_wi OK + [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_on_exit_init OK + [Valid] EF - Electra - Operations - Withdrawal Request - pending_withdrawals_consume_all OK ++ [Valid] EF - Electra - Operations - Withdrawal Request - unknown_pubkey OK ``` -OK: 18/18 Fail: 0/18 Skip: 0/18 ## EF - Electra - Operations - Withdrawals [Preset: mainnet] ```diff + [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK @@ -2982,8 +2820,31 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + [Invalid] EF - Electra - Operations - Withdrawals - invalid_one_of_many_incorrectly_partia OK + [Invalid] EF - Electra - Operations - Withdrawals - invalid_two_expected_partial_withdrawa OK + [Valid] EF - Electra - Operations - Withdrawals - all_withdrawal OK ++ [Valid] EF - Electra - Operations - 
Withdrawals - full_pending_withdrawals_but_first_ski OK ++ [Valid] EF - Electra - Operations - Withdrawals - full_pending_withdrawals_but_first_ski OK ++ [Valid] EF - Electra - Operations - Withdrawals - full_pending_withdrawals_but_first_ski OK + [Valid] EF - Electra - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_at_max OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_ OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_exiting_validator OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_low_effective_bala OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_a OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_next_epoch OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_one_skipped_one_ef OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_effective_swe OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_sweep_differe OK + [Valid] EF - Electra - Operations - Withdrawals - random_0 OK + [Valid] EF - Electra - Operations - Withdrawals - random_full_withdrawals_0 OK + [Valid] EF - Electra - Operations - Withdrawals - random_full_withdrawals_1 OK @@ -3018,7 +2879,6 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -OK: 55/55 Fail: 0/55 Skip: 0/55 ## EF - Electra - Random [Preset: mainnet] ```diff + [Valid] EF - Electra - Random - randomized_0 [Preset: mainnet] OK @@ -3038,7 +2898,6 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Random - randomized_8 [Preset: mainnet] OK + [Valid] EF - Electra - Random - randomized_9 [Preset: mainnet] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Electra - Rewards [Preset: mainnet] ```diff + EF - Electra - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -3076,7 +2935,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Electra - Rewards - with_slashed_validators [Preset: mainnet] OK + EF - Electra - Rewards - with_slashed_validators_leak [Preset: mainnet] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Electra - SSZ 
consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK @@ -3125,6 +2983,7 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing SignedContributionAndProof OK + Testing SignedVoluntaryExit OK + Testing SigningData OK ++ Testing SingleAttestation OK + Testing SyncAggregate OK + Testing SyncAggregatorSelectionData OK + Testing SyncCommittee OK @@ -3135,7 +2994,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing Withdrawal OK + Testing WithdrawalRequest OK ``` -OK: 55/55 Fail: 0/55 Skip: 0/55 ## EF - Electra - Sanity - Blocks [Preset: mainnet] ```diff + [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK @@ -3173,8 +3031,10 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK + [Valid] EF - Electra - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - Electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK + [Valid] EF - Electra - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK ++ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits [Pres OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK @@ -3198,6 +3058,8 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK + [Valid] EF - Electra - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK ++ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_differe OK ++ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK + [Valid] EF - Electra - Sanity - Blocks - one_blob [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK @@ -3207,6 +3069,7 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - skipped_slots [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - Electra - Sanity - Blocks - switch_to_compounding_requests_when_too_little_ OK + [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__empty [Preset: mainne OK + [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet OK + [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet OK @@ -3216,20 +3079,27 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK + [Valid] EF - Electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: ma OK + [Valid] EF - Electra - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK ++ [Valid] EF - Electra - Sanity - Blocks - 
withdrawal_and_withdrawal_request_same_validato OK + [Valid] EF - Electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK + [Valid] EF - Electra - Sanity - Blocks - zero_blob [Preset: mainnet] OK ``` -OK: 80/80 Fail: 0/80 Skip: 0/80 ## EF - Electra - Sanity - Slots [Preset: mainnet] ```diff ++ EF - Electra - Slots - balance_change_affects_proposer [Preset: mainnet] OK + EF - Electra - Slots - double_empty_epoch [Preset: mainnet] OK + EF - Electra - Slots - empty_epoch [Preset: mainnet] OK + EF - Electra - Slots - historical_accumulator [Preset: mainnet] OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey [Preset: mainnet] OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Prese OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Prese OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: mainnet] OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: OK + EF - Electra - Slots - over_epoch_boundary [Preset: mainnet] OK ++ EF - Electra - Slots - pending_consolidation [Preset: mainnet] OK + EF - Electra - Slots - slots_1 [Preset: mainnet] OK + EF - Electra - Slots - slots_2 [Preset: mainnet] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Electra - Transition [Preset: mainnet] ```diff + EF - Electra - Transition - non_empty_historical_roots [Preset: mainnet] OK @@ -3245,6 +3115,8 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Electra - Transition - transition_with_attester_slashing_right_before_fork [Preset: m OK + EF - Electra - Transition - transition_with_btec_right_after_fork [Preset: mainnet] OK + EF - Electra - Transition - transition_with_btec_right_before_fork [Preset: mainnet] OK ++ EF - Electra - Transition - transition_with_consolidation_request_right_after_fork [Preset OK ++ EF - Electra - Transition - transition_with_deposit_request_right_after_fork [Preset: main OK + EF - Electra - Transition - transition_with_deposit_right_after_fork [Preset: mainnet] OK + EF - Electra - Transition - transition_with_deposit_right_before_fork [Preset: mainnet] OK + EF - Electra - Transition - transition_with_finality [Preset: mainnet] OK @@ -3258,7 +3130,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Electra - Transition - transition_with_random_half_participation [Preset: mainnet] OK + EF - Electra - Transition - transition_with_random_three_quarters_participation [Preset: m OK ``` -OK: 25/25 Fail: 0/25 Skip: 0/25 ## EF - Electra - Unittests - Light client - Sync protocol [Preset: mainnet] ```diff + process_light_client_update_finality_updated OK @@ -3266,7 +3137,725 @@ OK: 25/25 Fail: 0/25 Skip: 0/25 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 +## EF - Fulu - Epoch Processing - Effective balance updates [Preset: mainnet] +```diff ++ Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ++ Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK +``` +## EF - Fulu - Epoch Processing - Eth1 data reset [Preset: mainnet] +```diff ++ Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK ++ Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Historical summaries update [Preset: mainnet] +```diff ++ Historical summaries update - historical_summaries_accumulator [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Inactivity 
[Preset: mainnet] +```diff ++ Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_empty_participation_leaking [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_full_participation [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_full_participation_leaking [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_random_participation [Preset: mainnet] OK ++ Inactivity - all_zero_inactivity_scores_random_participation_leaking [Preset: mainnet] OK ++ Inactivity - genesis [Preset: mainnet] OK ++ Inactivity - genesis_random_scores [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_empty_participation [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_empty_participation_leaking [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_full_participation [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_full_participation_leaking [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_random_participation [Preset: mainnet] OK ++ Inactivity - random_inactivity_scores_random_participation_leaking [Preset: mainnet] OK ++ Inactivity - randomized_state [Preset: mainnet] OK ++ Inactivity - randomized_state_leaking [Preset: mainnet] OK ++ Inactivity - some_exited_full_random_leaking [Preset: mainnet] OK ++ Inactivity - some_slashed_full_random [Preset: mainnet] OK ++ Inactivity - some_slashed_full_random_leaking [Preset: mainnet] OK ++ Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK ++ Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Justification & Finalization [Preset: mainnet] +```diff ++ Justification & Finalization - 123_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 123_poor_support [Preset: mainnet] OK ++ Justification & Finalization - 12_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 12_ok_support_messed_target [Preset: mainnet] OK ++ Justification & Finalization - 12_poor_support [Preset: mainnet] OK ++ Justification & Finalization - 234_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 234_poor_support [Preset: mainnet] OK ++ Justification & Finalization - 23_ok_support [Preset: mainnet] OK ++ Justification & Finalization - 23_poor_support [Preset: mainnet] OK ++ Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Participation flag updates [Preset: mainnet] +```diff ++ Participation flag updates - all_zeroed [Preset: mainnet] OK ++ Participation flag updates - current_epoch_zeroed [Preset: mainnet] OK ++ Participation flag updates - current_filled [Preset: mainnet] OK ++ Participation flag updates - filled [Preset: mainnet] OK ++ Participation flag updates - previous_epoch_zeroed [Preset: mainnet] OK ++ Participation flag updates - previous_filled [Preset: mainnet] OK ++ Participation flag updates - random_0 [Preset: mainnet] OK ++ Participation flag updates - random_1 [Preset: mainnet] OK ++ Participation flag updates - random_2 [Preset: mainnet] OK ++ Participation flag updates - random_genesis [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Pending consolidations [Preset: mainnet] +```diff ++ Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK ++ Pending consolidations - basic_pending_consolidation [Preset: mainnet] OK ++ Pending consolidations - 
consolidation_not_yet_withdrawable_validator [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: ma OK ++ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_compounding_creds [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_future_epoch [Preset: mainnet] OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK ++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: mainnet] OK ++ Pending consolidations - skip_consolidation_when_source_slashed [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Pending deposits [Preset: mainnet] +```diff ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK ++ Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK ++ Pending deposits - apply_pending_deposit_eth1_withdrawal_credentials [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_new_deposit [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_top_up [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_incorrect_withdrawal_credentials_top_up [Preset: OK ++ Pending deposits - apply_pending_deposit_ineffective_deposit_with_bad_fork_version [Preset OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_decompression [Preset: mainn OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_subgroup [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: mai OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK ++ Pending deposits - apply_pending_deposit_over_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: mainn OK ++ Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: ma OK ++ Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK ++ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_under_min_activation [Preset: mainnet] OK ++ Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: mainnet] OK ++ Pending deposits - 
ineffective_deposit_with_current_fork_version [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_balance_above_churn [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_balance_equal_churn [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_complete [Preset: mainn OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_not_applied [Preset: ma OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: mainne OK ++ Pending deposits - process_pending_deposits_limit_is_reached [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: ma OK ++ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_multiple_skipped_deposits_exiting_validators [ OK ++ Pending deposits - process_pending_deposits_not_finalized [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_preexisting_churn [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_skipped_deposit_exiting_validator [Preset: mai OK ++ Pending deposits - process_pending_deposits_withdrawable_validator [Preset: mainnet] OK ++ Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: ma OK +``` +## EF - Fulu - Epoch Processing - RANDAO mixes reset [Preset: mainnet] +```diff ++ RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Registry updates [Preset: mainnet] +```diff ++ Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK ++ Registry updates - activation_queue_activation_and_ejection__churn_limit [Preset: mainnet] OK ++ Registry updates - activation_queue_activation_and_ejection__exceed_churn_limit [Preset: m OK ++ Registry updates - activation_queue_efficiency_min [Preset: mainnet] OK ++ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK ++ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK ++ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: mainnet] OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK ++ Registry updates - activation_queue_no_activation_no_finality [Preset: mainnet] OK ++ Registry updates - activation_queue_sorting [Preset: mainnet] OK ++ Registry updates - activation_queue_to_activated_if_finalized [Preset: mainnet] OK ++ Registry updates - add_to_activation_queue [Preset: mainnet] OK ++ Registry updates - ejection [Preset: mainnet] OK ++ Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK ++ Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Rewards and penalties [Preset: mainnet] +```diff ++ Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK ++ Rewards and penalties - almost_empty_attestations_with_leak [Preset: mainnet] OK ++ Rewards and penalties - almost_full_attestations [Preset: mainnet] OK 
++ Rewards and penalties - almost_full_attestations_with_leak [Preset: mainnet] OK ++ Rewards and penalties - attestations_some_slashed [Preset: mainnet] OK ++ Rewards and penalties - duplicate_attestation [Preset: mainnet] OK ++ Rewards and penalties - full_attestation_participation [Preset: mainnet] OK ++ Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK ++ Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK ++ Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK ++ Rewards and penalties - random_fill_attestations [Preset: mainnet] OK ++ Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Slashings [Preset: mainnet] +```diff ++ Slashings - low_penalty [Preset: mainnet] OK ++ Slashings - max_penalties [Preset: mainnet] OK ++ Slashings - minimal_penalty [Preset: mainnet] OK ++ Slashings - scaled_penalties [Preset: mainnet] OK ++ Slashings - slashings_with_random_state [Preset: mainnet] OK +``` +## EF - Fulu - Epoch Processing - Slashings reset [Preset: mainnet] +```diff ++ Slashings reset - flush_slashings [Preset: mainnet] OK +``` +## EF - Fulu - Finality [Preset: mainnet] +```diff ++ [Valid] EF - Fulu - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK ++ [Valid] EF - Fulu - Finality - finality_rule_1 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Finality - finality_rule_2 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Finality - finality_rule_3 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Finality - finality_rule_4 [Preset: mainnet] OK +``` +## EF - Fulu - Fork [Preset: mainnet] +```diff ++ EF - Fulu - Fork - fork_base_state [Preset: mainnet] OK ++ EF - Fulu - Fork - fork_many_next_epoch [Preset: mainnet] OK ++ EF - Fulu - Fork - fork_next_epoch [Preset: mainnet] OK ++ EF - Fulu - Fork - fork_next_epoch_with_block [Preset: mainnet] OK ++ EF - Fulu - Fork - fork_random_low_balances [Preset: mainnet] OK ++ EF - Fulu - Fork - fork_random_misc_balances [Preset: mainnet] OK ++ EF - Fulu - Fork - fulu_fork_random_0 [Preset: mainnet] OK ++ EF - Fulu - Fork - fulu_fork_random_1 [Preset: mainnet] OK ++ EF - Fulu - Fork - fulu_fork_random_2 [Preset: mainnet] OK ++ EF - Fulu - Fork - fulu_fork_random_3 [Preset: mainnet] OK ++ EF - Fulu - Fork - fulu_fork_random_low_balances [Preset: mainnet] OK ++ EF - Fulu - Fork - fulu_fork_random_misc_balances [Preset: mainnet] OK +``` +## EF - Fulu - Operations - Attestation [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_after_max_inclusion_slot OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_attestation_data_index_not_zero OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_attestation_signature OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_bad_source_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_before_inclusion_delay OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_committee_index OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_correct_attestation_included_afte OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_current_source_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - 
invalid_empty_participants_seemingly_vali OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_empty_participants_zeroes_sig OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_future_target_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_head_and_target_include OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_head_included_after_max OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_target_included_after_m OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_index OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_mismatched_target_and_slot OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_new_source_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_nonset_committee_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_old_source_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_old_target_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_previous_source_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_source_root_is_target_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_few_aggregation_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_many_aggregation_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_many_committee_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_committee_signatu OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_slot_0 OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_slot_1 OK ++ [Valid] EF - Fulu - Operations - Attestation - at_max_inclusion_slot OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_max_inclu OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_min_inclu OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_one_epoch OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_sqrt_epoc OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_included_at_epo OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_included_at_sqr OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_min_inclusion_d OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_max_inclusion_ OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_min_inclusion_ OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_del OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_epoch_delay OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_min_inclusio OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_sqrt_epoch_d OK ++ [Valid] EF - Fulu - Operations - Attestation - multi_proposer_index_iterations OK ++ [Valid] EF - Fulu - Operations - Attestation - one_basic_attestation OK ++ [Valid] EF - Fulu - Operations - Attestation - previous_epoch OK +``` +## EF - Fulu - Operations - Attester Slashing [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_all_empty_indices OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_bad_extra_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - 
invalid_att1_bad_replaced_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_duplicate_index_double OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_duplicate_index_normal OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_empty_indices OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_high_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_bad_extra_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_bad_replaced_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_duplicate_index_double OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_duplicate_index_normal OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_empty_indices OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_high_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_no_double_or_surround OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_participants_already_slashe OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_same_data OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_unsorted_att_1 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_unsorted_att_2 OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - already_exited_long_ago OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - already_exited_recent OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - attestation_from_future OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - basic_double OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - basic_surround OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - low_balances OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - misc_balances OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - proposer_index_slashed OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - with_effective_balance_disparity OK +``` +## EF - Fulu - Operations - BLS to execution change [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_already_0x01 OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_bad_signature OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_current_fork_version OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_genesis_validators_ro OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_incorrect_from_bls_pu OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_previous_fork_version OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_val_index_out_of_rang OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - genesis_fork_version OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_exited OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_in_activation_queue OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_in_exit_queue OK ++ [Valid] EF - Fulu - 
Operations - BLS to execution change - success_not_activated OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_withdrawable OK +``` +## EF - Fulu - Operations - Block Header [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_multiple_blocks_single_slot OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_parent_root OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_proposer_index OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_proposer_slashed OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_slot_block_header OK ++ [Valid] EF - Fulu - Operations - Block Header - basic_block_header OK +``` +## EF - Fulu - Operations - Consolidation Request [Preset: mainnet] +```diff ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_switch_to_compounding OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_not_enough_consolidat OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_exited_so OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_inactive_ OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_not_autho OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_source_bl OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_source_co OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_unknown_s OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_with_exce OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_with_pend OK +``` +## EF - Fulu - Operations - Deposit [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Deposit - invalid_bad_merkle_proof OK ++ [Invalid] EF - Fulu - Operations - Deposit - invalid_wrong_deposit_for_deposit_count OK ++ [Valid] EF - Fulu - Operations - Deposit - correct_sig_but_forked_state OK ++ [Valid] EF - Fulu - Operations - Deposit - effective_deposit_with_genesis_fork_version OK ++ [Valid] EF - Fulu - Operations - Deposit - incorrect_sig_new_deposit OK ++ [Valid] EF - Fulu - Operations - Deposit - incorrect_sig_top_up OK ++ [Valid] EF - Fulu - Operations - Deposit - incorrect_withdrawal_credentials_top_up OK ++ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_bad_fork_version OK ++ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_current_fork_version OK ++ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_previous_fork_versio OK ++ [Valid] EF - Fulu - Operations - Deposit - key_validate_invalid_decompression OK ++ [Valid] EF - Fulu - Operations - Deposit - key_validate_invalid_subgroup OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_eth1_withdrawal_credentials OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_max OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_non_versioned_withdrawal_credenti OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_over_max OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_under_max OK ++ [Valid] EF - Fulu - Operations - Deposit - top_up__less_effective_balance OK ++ [Valid] EF - Fulu - Operations - Deposit - top_up__max_effective_balance OK ++ [Valid] EF - Fulu - Operations - Deposit - top_up__zero_balance OK +``` +## EF - Fulu - Operations - Deposit Request [Preset: mainnet] +```diff ++ [Valid] EF - Fulu - Operations - 
Deposit Request - process_deposit_request_invalid_sig OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_max_effective OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_min_activatio OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_set_start_ind OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_set_start_ind OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_invali OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_max_ef OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_min_ac OK +``` +## EF - Fulu - Operations - Execution Payload [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_first_payloa OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_first_paylo OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_regular_pay OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_pre_randao_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_prev_randao_first_paylo OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_correct_input__execution_in OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_exceed_max_blobs_per_block OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_first_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_regular_pa OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_first_payloa OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_regular_payl OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_blob_tx_type OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_block_hash OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitment OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitments_order OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_1_byte OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_1_extr OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_32_ext OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_empty OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_no_blobs_but_ OK ++ [Valid] EF - Fulu - Operations - Execution Payload - no_commitments_for_transactions OK ++ [Valid] EF - Fulu - Operations - Execution Payload - no_transactions_with_commitments OK ++ [Valid] EF - Fulu - Operations - Execution Payload - zeroed_commitment OK +``` +## EF - Fulu - Operations - Proposer Slashing [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_different_proposer_indices OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_d OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_s OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_proposer_index OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - 
invalid_incorrect_sig_1 OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2_swap OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_not_activated OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_slashed OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_withdrawn OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_slots_of_different_epochs OK ++ [Valid] EF - Fulu - Operations - Proposer Slashing - basic OK ++ [Valid] EF - Fulu - Operations - Proposer Slashing - block_header_from_future OK ++ [Valid] EF - Fulu - Operations - Proposer Slashing - slashed_and_proposer_index_the_same OK +``` +## EF - Fulu - Operations - Sync Aggregate [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_bad_domain OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_extra_participant OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_infinite_signature_w OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_infinite_signature_w OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_missing_participant OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_no_participants OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_past_block OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_all_but_one_participating_with_ OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_high_participation_with_duplica OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_low_participation_with_duplicat OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_misc_balances_and_half_particip OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_only_one_participant_with_dupli OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_with_exits_with_duplicates OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_empty_participa OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_not_full_partic OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_nonparticipating_e OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_nonparticipating_w OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_participating_exit OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_participating_with OK +``` +## EF - Fulu - Operations - Voluntary Exit [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Operations - 
Voluntary Exit - invalid_incorrect_signature OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_already_exited OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_exit_in_future OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_has_pending_withdraw OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_incorrect_validator_ OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_not_active OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_not_active_long_enou OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_vers OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_vers OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - basic OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_existing_churn_and_balance_multip OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_existing_churn_and_churn_limit_ba OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_with_balance_equal_to_churn_limit OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_with_balance_multiple_of_churn_li OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - max_balance_exit OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exit OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_above_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__min_churn OK +``` +## EF - Fulu - Operations - Withdrawal Request [Preset: mainnet] +```diff ++ [Valid] EF - Fulu - Operations - Withdrawal Request - activation_epoch_less_than_shard_c OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_comp OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_firs OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - full_exit_request_has_partial_with OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_inactive_validator OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_source_address OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_withdrawal_credential_pr OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - insufficient_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - insufficient_effective_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - no_compounding_credentials OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - no_excess_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - on_withdrawal_request_initiated_ex OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_activation_epoc OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - 
partial_withdrawal_incorrect_sourc OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_incorrect_withd OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_on_exit_initiat OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - pending_withdrawals_consume_all_ex OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - unknown_pubkey OK +``` +## EF - Fulu - Operations - Withdrawals [Preset: mainnet] +```diff ++ [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_e OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_m OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_m OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_at_max OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_swe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_exiting_validator OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_low_effective_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_and_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_next_epoch OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effec OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_effective_sweep_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_sweep_different_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_0 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_1 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_2 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_3 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_4 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_5 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_max_partial_withdrawable OK ++ [Valid] EF - Fulu - Operations - Withdrawals - 
success_mixed_fully_and_partial_withdrawa OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance_compounding OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_active_a OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited_a OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_in_exit_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_not_yet_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_two_partial_withdrawable OK +``` +## EF - Fulu - Random [Preset: mainnet] +```diff ++ [Valid] EF - Fulu - Random - randomized_0 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_1 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_10 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_11 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_12 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_13 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_14 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_15 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_2 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_3 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_4 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_5 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_6 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_7 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_8 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Random - randomized_9 [Preset: mainnet] OK +``` +## EF - Fulu - Rewards [Preset: mainnet] +```diff ++ EF - Fulu - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK ++ EF - Fulu - Rewards - empty [Preset: mainnet] OK ++ EF - Fulu - Rewards - empty_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_all_correct [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_but_partial_participation [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_but_partial_participation_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_0 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_1 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_2 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_3 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_4 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_low_balances_0 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_low_balances_1 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_misc_balances [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_seven_epoch_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_ten_epoch_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_without_leak_0 [Preset: mainnet] OK ++ EF - Fulu - Rewards - full_random_without_leak_and_current_exit_0 [Preset: mainnet] OK ++ EF - Fulu - Rewards - half_full [Preset: mainnet] OK ++ EF - Fulu 
- Rewards - half_full_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - quarter_full [Preset: mainnet] OK ++ EF - Fulu - Rewards - quarter_full_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_attested [Preset: mainnet] OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_attested_leak [Preset: mainnet OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_did_not_attest [Preset: mainne OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_did_not_attest_leak [Preset: m OK ++ EF - Fulu - Rewards - with_exited_validators [Preset: mainnet] OK ++ EF - Fulu - Rewards - with_exited_validators_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - with_not_yet_activated_validators [Preset: mainnet] OK ++ EF - Fulu - Rewards - with_not_yet_activated_validators_leak [Preset: mainnet] OK ++ EF - Fulu - Rewards - with_slashed_validators [Preset: mainnet] OK ++ EF - Fulu - Rewards - with_slashed_validators_leak [Preset: mainnet] OK +``` +## EF - Fulu - SSZ consensus objects [Preset: mainnet] +```diff ++ Testing AggregateAndProof OK ++ Testing Attestation OK ++ Testing AttestationData OK ++ Testing AttesterSlashing OK ++ Testing BLSToExecutionChange OK ++ Testing BeaconBlock OK ++ Testing BeaconBlockBody OK ++ Testing BeaconBlockHeader OK ++ Testing BeaconState OK ++ Testing BlobIdentifier OK ++ Testing BlobSidecar OK ++ Testing Checkpoint OK ++ Testing ConsolidationRequest OK ++ Testing ContributionAndProof OK ++ Testing DataColumnIdentifier OK ++ Testing DataColumnSidecar OK ++ Testing Deposit OK ++ Testing DepositData OK ++ Testing DepositMessage OK ++ Testing DepositRequest OK ++ Testing Eth1Block OK ++ Testing Eth1Data OK ++ Testing ExecutionPayload OK ++ Testing ExecutionPayloadHeader OK ++ Testing ExecutionRequests OK ++ Testing Fork OK ++ Testing ForkData OK ++ Testing HistoricalBatch OK ++ Testing HistoricalSummary OK ++ Testing IndexedAttestation OK ++ Testing LightClientBootstrap OK ++ Testing LightClientFinalityUpdate OK ++ Testing LightClientHeader OK ++ Testing LightClientOptimisticUpdate OK ++ Testing LightClientUpdate OK ++ Testing MatrixEntry OK ++ Testing PendingAttestation OK ++ Testing PendingConsolidation OK ++ Testing PendingDeposit OK ++ Testing PendingPartialWithdrawal OK ++ Testing PowBlock OK ++ Testing ProposerSlashing OK ++ Testing SignedAggregateAndProof OK ++ Testing SignedBLSToExecutionChange OK ++ Testing SignedBeaconBlock OK ++ Testing SignedBeaconBlockHeader OK ++ Testing SignedContributionAndProof OK ++ Testing SignedVoluntaryExit OK ++ Testing SigningData OK ++ Testing SingleAttestation OK ++ Testing SyncAggregate OK ++ Testing SyncAggregatorSelectionData OK ++ Testing SyncCommittee OK ++ Testing SyncCommitteeContribution OK ++ Testing SyncCommitteeMessage OK ++ Testing Validator OK ++ Testing VoluntaryExit OK ++ Testing Withdrawal OK ++ Testing WithdrawalRequest OK +``` +## EF - Fulu - Sanity - Blocks [Preset: mainnet] +```diff ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: main OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK ++ 
[Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mainn OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainne OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainne OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK ++ [Invalid] EF - Fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - attestation [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - attester_slashing [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: mainnet OK ++ [Valid] EF - Fulu - Sanity - Blocks - bls_change [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - historical_batch [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK ++ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pres OK ++ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK ++ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK ++ [Valid] EF - Fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: ma OK ++ [Valid] EF - Fulu - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - skipped_slots [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK ++ 
[Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: ma OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: ma OK ++ [Valid] EF - Fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK ++ [Valid] EF - Fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mainn OK ++ [Valid] EF - Fulu - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ++ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK +``` +## EF - Fulu - Sanity - Slots [Preset: mainnet] +```diff ++ EF - Fulu - Slots - balance_change_affects_proposer [Preset: mainnet] OK ++ EF - Fulu - Slots - double_empty_epoch [Preset: mainnet] OK ++ EF - Fulu - Slots - empty_epoch [Preset: mainnet] OK ++ EF - Fulu - Slots - historical_accumulator [Preset: mainnet] OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey [Preset: mainnet] OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Preset: OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Preset: OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: mainnet] OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: mai OK ++ EF - Fulu - Slots - over_epoch_boundary [Preset: mainnet] OK ++ EF - Fulu - Slots - pending_consolidation [Preset: mainnet] OK ++ EF - Fulu - Slots - slots_1 [Preset: mainnet] OK ++ EF - Fulu - Slots - slots_2 [Preset: mainnet] OK +``` ## EF - Light client - Single merkle proof [Preset: mainnet] ```diff + Light client - Single merkle proof - mainnet/altair/light_client/single_merkle_proof/Beaco OK @@ -3288,10 +3877,8 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK + Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK ``` -OK: 18/18 Fail: 0/18 Skip: 0/18 ## EF - Merkle proof [Preset: mainnet] ```diff - Merkle proof - Single merkle proof - eip7594 Skip + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK @@ -3300,24 +3887,28 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK + Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK + Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof 
- mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - mainnet/fulu/merkle_proof/single_merkle_proof/BeaconB OK ``` -OK: 8/9 Fail: 0/9 Skip: 1/9 ## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: mainnet] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - Eth1 data reset [Preset: mainnet] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK + Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Phase 0 - Epoch Processing - Historical roots update [Preset: mainnet] ```diff + Historical roots update - historical_root_accumulator [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - Justification & Finalization [Preset: mainnet] ```diff + Justification & Finalization - 123_ok_support [Preset: mainnet] OK @@ -3331,17 +3922,14 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Justification & Finalization - 23_poor_support [Preset: mainnet] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Phase 0 - Epoch Processing - Participation record updates [Preset: mainnet] ```diff + Participation record updates - updated_participation_record [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - RANDAO mixes reset [Preset: mainnet] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - Registry updates [Preset: mainnet] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK @@ -3356,7 +3944,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK ``` -OK: 11/11 Fail: 0/11 Skip: 0/11 ## EF - Phase 0 - Epoch Processing - Rewards and penalties [Preset: mainnet] ```diff + Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK @@ -3371,7 +3958,7 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - full_attestation_participation [Preset: mainnet] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK + Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: mainnet] OK + Rewards and penalties - full_attestations_random_incorrect_fields [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK @@ -3379,7 +3966,6 @@ OK: 11/11 Fail: 0/11 Skip: 0/11 + Rewards and penalties - random_fill_attestations [Preset: mainnet] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK ``` -OK: 19/19 Fail: 0/19 Skip: 0/19 ## EF - Phase 0 - Epoch Processing - Slashings [Preset: mainnet] ```diff + Slashings - low_penalty [Preset: mainnet] OK @@ -3388,12 +3974,10 @@ OK: 19/19 Fail: 0/19 Skip: 0/19 + Slashings - scaled_penalties [Preset: mainnet] OK + Slashings - slashings_with_random_state [Preset: mainnet] 
OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Phase 0 - Epoch Processing - Slashings reset [Preset: mainnet] ```diff + Slashings reset - flush_slashings [Preset: mainnet] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Operations - Attestation [Preset: mainnet] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attestation [Preset: ma OK @@ -3438,7 +4022,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attestation [Preset: ma OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attestation [Preset: ma OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Phase 0 - Operations - Attester Slashing [Preset: mainnet] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attester Slashing [Pres OK @@ -3472,7 +4055,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attester Slashing [Pres OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attester Slashing [Pres OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Phase 0 - Operations - Block Header [Preset: mainnet] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Block Header [Preset: m OK @@ -3482,7 +4064,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Block Header [Preset: m OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Block Header [Preset: m OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Phase 0 - Operations - Deposit [Preset: mainnet] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Deposit [Preset: mainne OK @@ -3503,7 +4084,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Deposit [Preset: mainne OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Deposit [Preset: mainne OK ``` -OK: 17/17 Fail: 0/17 Skip: 0/17 ## EF - Phase 0 - Operations - Proposer Slashing [Preset: mainnet] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Proposer Slashing [Pres OK @@ -3522,7 +4102,6 @@ OK: 17/17 Fail: 0/17 Skip: 0/17 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Proposer Slashing [Pres OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Proposer Slashing [Pres OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Phase 0 - Operations - Voluntary Exit [Preset: mainnet] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Voluntary Exit [Preset: OK @@ -3535,7 +4114,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Voluntary Exit [Preset: OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Voluntary Exit [Preset: OK ``` -OK: 9/9 Fail: 0/9 Skip: 0/9 ## EF - Phase 0 - Rewards [Preset: mainnet] ```diff + EF - Phase 0 - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK @@ -3588,7 +4166,6 @@ OK: 9/9 Fail: 0/9 Skip: 0/9 + EF - Phase 0 - Rewards - with_slashed_validators [Preset: mainnet] OK + EF - Phase 0 - Rewards - with_slashed_validators_leak [Preset: mainnet] OK ``` -OK: 49/49 Fail: 0/49 Skip: 0/49 ## EF - Phase 0 - SSZ consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK @@ -3619,9 +4196,9 @@ OK: 49/49 Fail: 0/49 Skip: 0/49 + Testing Validator OK + Testing VoluntaryExit OK ``` -OK: 27/27 Fail: 0/27 Skip: 0/27 ## EF - Phase 0 - Sanity - Slots [Preset: mainnet] ```diff ++ EF - Phase 0 - Slots - 
balance_change_affects_proposer [Preset: mainnet] OK + EF - Phase 0 - Slots - double_empty_epoch [Preset: mainnet] OK + EF - Phase 0 - Slots - empty_epoch [Preset: mainnet] OK + EF - Phase 0 - Slots - historical_accumulator [Preset: mainnet] OK @@ -3629,7 +4206,6 @@ OK: 27/27 Fail: 0/27 Skip: 0/27 + EF - Phase 0 - Slots - slots_1 [Preset: mainnet] OK + EF - Phase 0 - Slots - slots_2 [Preset: mainnet] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Phase0 - Finality [Preset: mainnet] ```diff + [Valid] EF - Phase0 - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK @@ -3638,7 +4214,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Phase0 - Finality - finality_rule_3 [Preset: mainnet] OK + [Valid] EF - Phase0 - Finality - finality_rule_4 [Preset: mainnet] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Phase0 - Random [Preset: mainnet] ```diff + [Valid] EF - Phase0 - Random - randomized_0 [Preset: mainnet] OK @@ -3658,7 +4233,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Phase0 - Random - randomized_8 [Preset: mainnet] OK + [Valid] EF - Phase0 - Random - randomized_9 [Preset: mainnet] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Phase0 - Sanity - Blocks [Preset: mainnet] ```diff + [Invalid] EF - Phase0 - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK @@ -3702,7 +4276,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Phase0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK + [Valid] EF - Phase0 - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK ``` -OK: 40/40 Fail: 0/40 Skip: 0/40 ## ForkChoice ```diff + ForkChoice - mainnet/altair/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_great OK @@ -3793,15 +4366,52 @@ OK: 40/40 Fail: 0/40 Skip: 0/40 + ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot OK + ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/simple_blob_data OK ForkChoice - mainnet/deneb/fork_choice/should_override_forkchoice_update/pyspec_tests/shou Skip ++ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_grea OK ++ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_boost_ OK ++ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest OK ++ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_att OK ++ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK ++ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/chain_no_attestations OK ++ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/discard_equivocations_on_at OK ++ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/genesis OK ++ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK ++ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_w OK ++ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attest OK + ForkChoice - mainnet/electra/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip + ForkChoice - mainnet/electra/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ro Skip ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/basic OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_data_unavailable OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_incorrect_proof OK ++ ForkChoice - 
mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_wrong_blobs_length OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_wrong_proofs_length OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK + ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/on_block_future_block Skip ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/proposer_boost OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_blo OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_sl OK ++ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/simple_blob_data OK + ForkChoice - mainnet/electra/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip ++ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_greater OK ++ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_boost_not OK ++ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_at OK ++ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_attest OK ++ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK ++ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/chain_no_attestations OK ++ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/discard_equivocations_on_attes OK ++ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/genesis OK ++ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_weig OK ++ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestati OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/basic OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK + ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_future_block Skip ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_block OK ++ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot_ OK ``` -OK: 69/88 Fail: 0/88 Skip: 19/88 ## Sync ```diff + Sync - mainnet/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - mainnet/capella/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - mainnet/deneb/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ++ Sync - mainnet/electra/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 - ----TOTAL--- -OK: 3078/3098 Fail: 0/3098 Skip: 20/3098 diff --git a/ConsensusSpecPreset-minimal.md b/ConsensusSpecPreset-minimal.md index 3990cb4a0d..b8d8ffbe75 100644 --- a/ConsensusSpecPreset-minimal.md +++ b/ConsensusSpecPreset-minimal.md @@ -4,18 +4,15 @@ ConsensusSpecPreset-minimal ```diff + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing - Eth1 data reset [Preset: minimal] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK + Eth1 data reset - eth1_vote_reset [Preset: minimal] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Altair - Epoch Processing - Historical roots update [Preset: minimal] ```diff + Historical roots update - historical_root_accumulator [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing 
- Inactivity [Preset: minimal] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK @@ -40,7 +37,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Altair - Epoch Processing - Justification & Finalization [Preset: minimal] ```diff + Justification & Finalization - 123_ok_support [Preset: minimal] OK @@ -54,7 +50,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: minimal] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Altair - Epoch Processing - Participation flag updates [Preset: minimal] ```diff + Participation flag updates - all_zeroed [Preset: minimal] OK @@ -70,12 +65,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_genesis [Preset: minimal] OK + Participation flag updates - slightly_larger_random [Preset: minimal] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Altair - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing - Registry updates [Preset: minimal] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: minimal] OK @@ -94,7 +87,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Altair - Epoch Processing - Rewards and penalties [Preset: minimal] ```diff + Rewards and penalties - almost_empty_attestations [Preset: minimal] OK @@ -106,14 +98,13 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: minimal] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK + Rewards and penalties - random_fill_attestations [Preset: minimal] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Altair - Epoch Processing - Slashings [Preset: minimal] ```diff + Slashings - low_penalty [Preset: minimal] OK @@ -122,12 +113,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: minimal] OK + Slashings - slashings_with_random_state [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Altair - Epoch Processing - Slashings reset [Preset: minimal] ```diff + Slashings reset - flush_slashings [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Altair - Epoch Processing - Sync committee updates [Preset: minimal] ```diff + Sync committee updates - sync_committees_no_progress_not_at_period_boundary [Preset: minim OK @@ -136,7 +125,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Sync 
committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Altair - Finality [Preset: minimal] ```diff + [Valid] EF - Altair - Finality - finality_no_updates_at_genesis [Preset: minimal] OK @@ -145,7 +133,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Altair - Finality - finality_rule_3 [Preset: minimal] OK + [Valid] EF - Altair - Finality - finality_rule_4 [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Altair - Fork [Preset: minimal] ```diff + EF - Altair - Fork - altair_fork_random_0 [Preset: minimal] OK @@ -165,7 +152,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Altair - Fork - fork_random_low_balances [Preset: minimal] OK + EF - Altair - Fork - fork_random_misc_balances [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Altair - Operations - Attestation [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -210,7 +196,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Altair - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Altair - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Altair - Operations - Attester Slashing [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -244,7 +229,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Altair - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Altair - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Altair - Operations - Block Header [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -254,7 +238,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Invalid] EF - Altair - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Altair - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Altair - Operations - Deposit [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Deposit - invalid_bad_merkle_proof OK @@ -277,7 +260,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Altair - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Altair - Operations - Deposit - top_up__zero_balance OK ``` -OK: 19/19 Fail: 0/19 Skip: 0/19 ## EF - Altair - Operations - Proposer Slashing [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Proposer Slashing - invalid_different_proposer_indice OK @@ -296,7 +278,6 @@ OK: 19/19 Fail: 0/19 Skip: 0/19 + [Valid] EF - Altair - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Altair - Operations - Proposer Slashing - slashed_and_proposer_index_the_sa OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Altair - Operations - Sync Aggregate [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -324,7 +305,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Altair - Operations - Sync Aggregate - sync_committee_with_participating_wi OK + [Valid] EF - Altair - Operations - Sync Aggregate - valid_signature_future_committee OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## EF - Altair - Operations - Voluntary Exit [Preset: minimal] ```diff + [Invalid] EF - Altair - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -338,7 +318,6 @@ 
OK: 24/24 Fail: 0/24 Skip: 0/24 + [Valid] EF - Altair - Operations - Voluntary Exit - success_exit_queue__min_churn OK + [Valid] EF - Altair - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Altair - Random [Preset: minimal] ```diff + [Valid] EF - Altair - Random - randomized_0 [Preset: minimal] OK @@ -358,7 +337,6 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + [Valid] EF - Altair - Random - randomized_8 [Preset: minimal] OK + [Valid] EF - Altair - Random - randomized_9 [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Altair - Rewards [Preset: minimal] ```diff + EF - Altair - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -396,7 +374,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Altair - Rewards - with_slashed_validators [Preset: minimal] OK + EF - Altair - Rewards - with_slashed_validators_leak [Preset: minimal] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Altair - SSZ consensus objects [Preset: minimal] ```diff + Testing AggregateAndProof OK @@ -439,7 +416,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing Validator OK + Testing VoluntaryExit OK ``` -OK: 39/39 Fail: 0/39 Skip: 0/39 ## EF - Altair - Sanity - Blocks [Preset: minimal] ```diff + [Invalid] EF - Altair - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK @@ -495,9 +471,9 @@ OK: 39/39 Fail: 0/39 Skip: 0/39 + [Valid] EF - Altair - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: OK + [Valid] EF - Altair - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ``` -OK: 52/52 Fail: 0/52 Skip: 0/52 ## EF - Altair - Sanity - Slots [Preset: minimal] ```diff ++ EF - Altair - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Altair - Slots - double_empty_epoch [Preset: minimal] OK + EF - Altair - Slots - empty_epoch [Preset: minimal] OK + EF - Altair - Slots - historical_accumulator [Preset: minimal] OK @@ -505,7 +481,6 @@ OK: 52/52 Fail: 0/52 Skip: 0/52 + EF - Altair - Slots - slots_1 [Preset: minimal] OK + EF - Altair - Slots - slots_2 [Preset: minimal] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Altair - Transition [Preset: minimal] ```diff + EF - Altair - Transition - non_empty_historical_roots [Preset: minimal] OK @@ -535,7 +510,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Altair - Transition - transition_with_voluntary_exit_right_after_fork [Preset: minima OK + EF - Altair - Transition - transition_with_voluntary_exit_right_before_fork [Preset: minim OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Altair - Unittests - Light client - Sync protocol [Preset: minimal] ```diff + process_light_client_update_finality_updated OK @@ -543,23 +517,19 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## EF - Bellatrix - Epoch Processing - Effective balance updates [Preset: minimal] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch Processing - Eth1 data reset [Preset: minimal] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK + Eth1 data reset - eth1_vote_reset [Preset: minimal] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Bellatrix - Epoch Processing - Historical roots update [Preset: minimal] ```diff + Historical roots update - historical_root_accumulator [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch Processing - Inactivity [Preset: minimal] 
```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK @@ -584,7 +554,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Bellatrix - Epoch Processing - Justification & Finalization [Preset: minimal] ```diff + Justification & Finalization - 123_ok_support [Preset: minimal] OK @@ -598,7 +567,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: minimal] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Bellatrix - Epoch Processing - Participation flag updates [Preset: minimal] ```diff + Participation flag updates - all_zeroed [Preset: minimal] OK @@ -614,12 +582,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_genesis [Preset: minimal] OK + Participation flag updates - slightly_larger_random [Preset: minimal] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Bellatrix - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch Processing - Registry updates [Preset: minimal] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: minimal] OK @@ -638,7 +604,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Bellatrix - Epoch Processing - Rewards and penalties [Preset: minimal] ```diff + Rewards and penalties - almost_empty_attestations [Preset: minimal] OK @@ -650,14 +615,13 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: minimal] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK + Rewards and penalties - random_fill_attestations [Preset: minimal] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Bellatrix - Epoch Processing - Slashings [Preset: minimal] ```diff + Slashings - low_penalty [Preset: minimal] OK @@ -666,12 +630,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: minimal] OK + Slashings - slashings_with_random_state [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Bellatrix - Epoch Processing - Slashings reset [Preset: minimal] ```diff + Slashings reset - flush_slashings [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Bellatrix - Epoch Processing - Sync committee updates [Preset: minimal] ```diff + Sync committee updates - sync_committees_no_progress_not_at_period_boundary [Preset: minim OK @@ -680,7 +642,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Sync 
committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Bellatrix - Finality [Preset: minimal] ```diff + [Valid] EF - Bellatrix - Finality - finality_no_updates_at_genesis [Preset: minimal] OK @@ -689,7 +650,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Bellatrix - Finality - finality_rule_3 [Preset: minimal] OK + [Valid] EF - Bellatrix - Finality - finality_rule_4 [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Bellatrix - Fork [Preset: minimal] ```diff + EF - Bellatrix - Fork - bellatrix_fork_random_0 [Preset: minimal] OK @@ -707,7 +667,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Bellatrix - Fork - fork_random_low_balances [Preset: minimal] OK + EF - Bellatrix - Fork - fork_random_misc_balances [Preset: minimal] OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Bellatrix - Operations - Attestation [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -752,7 +711,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Valid] EF - Bellatrix - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Bellatrix - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Bellatrix - Operations - Attester Slashing [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -786,7 +744,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Bellatrix - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Bellatrix - Operations - Attester Slashing - with_effective_balance_dispari OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Bellatrix - Operations - Block Header [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -796,7 +753,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Invalid] EF - Bellatrix - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Bellatrix - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Bellatrix - Operations - Deposit [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Deposit - invalid_bad_merkle_proof OK @@ -820,7 +776,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Bellatrix - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Bellatrix - Operations - Deposit - top_up__zero_balance OK ``` -OK: 20/20 Fail: 0/20 Skip: 0/20 ## EF - Bellatrix - Operations - Execution Payload [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Execution Payload - invalid_bad_everything_first_p OK @@ -850,7 +805,6 @@ OK: 20/20 Fail: 0/20 Skip: 0/20 + [Valid] EF - Bellatrix - Operations - Execution Payload - zero_length_transaction_first_ OK + [Valid] EF - Bellatrix - Operations - Execution Payload - zero_length_transaction_regula OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Bellatrix - Operations - Proposer Slashing [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Proposer Slashing - invalid_different_proposer_ind OK @@ -869,7 +823,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Bellatrix - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Bellatrix - Operations - Proposer Slashing - slashed_and_proposer_index_the OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Bellatrix - Operations - Sync Aggregate [Preset: minimal] ```diff + 
[Invalid] EF - Bellatrix - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -897,7 +850,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Bellatrix - Operations - Sync Aggregate - sync_committee_with_participating OK + [Valid] EF - Bellatrix - Operations - Sync Aggregate - valid_signature_future_committee OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## EF - Bellatrix - Operations - Voluntary Exit [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -917,7 +869,6 @@ OK: 24/24 Fail: 0/24 Skip: 0/24 + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_current_fork_ OK + [Valid] EF - Bellatrix - Operations - Voluntary Exit - voluntary_exit_with_previous_fork OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Bellatrix - Random [Preset: minimal] ```diff + [Valid] EF - Bellatrix - Random - randomized_0 [Preset: minimal] OK @@ -937,7 +888,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Bellatrix - Random - randomized_8 [Preset: minimal] OK + [Valid] EF - Bellatrix - Random - randomized_9 [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Bellatrix - Rewards [Preset: minimal] ```diff + EF - Bellatrix - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -975,7 +925,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Bellatrix - Rewards - with_slashed_validators [Preset: minimal] OK + EF - Bellatrix - Rewards - with_slashed_validators_leak [Preset: minimal] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Bellatrix - SSZ consensus objects [Preset: minimal] ```diff + Testing AggregateAndProof OK @@ -1021,7 +970,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing Validator OK + Testing VoluntaryExit OK ``` -OK: 42/42 Fail: 0/42 Skip: 0/42 ## EF - Bellatrix - Sanity - Blocks [Preset: minimal] ```diff + [Invalid] EF - Bellatrix - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK @@ -1080,9 +1028,9 @@ OK: 42/42 Fail: 0/42 Skip: 0/42 + [Valid] EF - Bellatrix - Sanity - Blocks - sync_committee_committee_genesis__half [Prese OK + [Valid] EF - Bellatrix - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ``` -OK: 55/55 Fail: 0/55 Skip: 0/55 ## EF - Bellatrix - Sanity - Slots [Preset: minimal] ```diff ++ EF - Bellatrix - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Bellatrix - Slots - double_empty_epoch [Preset: minimal] OK + EF - Bellatrix - Slots - empty_epoch [Preset: minimal] OK + EF - Bellatrix - Slots - historical_accumulator [Preset: minimal] OK @@ -1090,7 +1038,6 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + EF - Bellatrix - Slots - slots_1 [Preset: minimal] OK + EF - Bellatrix - Slots - slots_2 [Preset: minimal] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Bellatrix - Transition [Preset: minimal] ```diff + EF - Bellatrix - Transition - non_empty_historical_roots [Preset: minimal] OK @@ -1120,23 +1067,19 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Bellatrix - Transition - transition_with_voluntary_exit_right_after_fork [Preset: min OK + EF - Bellatrix - Transition - transition_with_voluntary_exit_right_before_fork [Preset: mi OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Capella - Epoch Processing - Effective balance updates [Preset: minimal] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Eth1 data reset [Preset: minimal] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK + Eth1 data reset - eth1_vote_reset [Preset: minimal] OK ``` 
-OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Capella - Epoch Processing - Historical summaries update [Preset: minimal] ```diff + Historical summaries update - historical_summaries_accumulator [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Inactivity [Preset: minimal] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK @@ -1161,7 +1104,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Capella - Epoch Processing - Justification & Finalization [Preset: minimal] ```diff + Justification & Finalization - 123_ok_support [Preset: minimal] OK @@ -1175,7 +1117,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: minimal] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Capella - Epoch Processing - Participation flag updates [Preset: minimal] ```diff + Participation flag updates - all_zeroed [Preset: minimal] OK @@ -1191,12 +1132,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_genesis [Preset: minimal] OK + Participation flag updates - slightly_larger_random [Preset: minimal] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Capella - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Registry updates [Preset: minimal] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: minimal] OK @@ -1215,7 +1154,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Epoch Processing - Rewards and penalties [Preset: minimal] ```diff + Rewards and penalties - almost_empty_attestations [Preset: minimal] OK @@ -1227,14 +1165,13 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: minimal] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK + Rewards and penalties - random_fill_attestations [Preset: minimal] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Epoch Processing - Slashings [Preset: minimal] ```diff + Slashings - low_penalty [Preset: minimal] OK @@ -1243,12 +1180,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: minimal] OK + Slashings - slashings_with_random_state [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Capella - Epoch Processing - Slashings reset [Preset: minimal] ```diff + Slashings reset - 
flush_slashings [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Capella - Epoch Processing - Sync committee updates [Preset: minimal] ```diff + Sync committee updates - sync_committees_no_progress_not_at_period_boundary [Preset: minim OK @@ -1257,7 +1192,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Capella - Finality [Preset: minimal] ```diff + [Valid] EF - Capella - Finality - finality_no_updates_at_genesis [Preset: minimal] OK @@ -1266,7 +1200,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Capella - Finality - finality_rule_3 [Preset: minimal] OK + [Valid] EF - Capella - Finality - finality_rule_4 [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Capella - Fork [Preset: minimal] ```diff + EF - Capella - Fork - capella_fork_random_0 [Preset: minimal] OK @@ -1284,7 +1217,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Capella - Fork - fork_random_low_balances [Preset: minimal] OK + EF - Capella - Fork - fork_random_misc_balances [Preset: minimal] OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Capella - Operations - Attestation [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -1329,7 +1261,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Valid] EF - Capella - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Capella - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Capella - Operations - Attester Slashing [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -1363,7 +1294,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Capella - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Capella - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Capella - Operations - BLS to execution change [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - BLS to execution change - invalid_already_0x01 OK @@ -1381,7 +1311,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Capella - Operations - BLS to execution change - success_not_activated OK + [Valid] EF - Capella - Operations - BLS to execution change - success_withdrawable OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Capella - Operations - Block Header [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -1391,7 +1320,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Capella - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Capella - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Capella - Operations - Deposit [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Deposit - invalid_bad_merkle_proof OK @@ -1416,7 +1344,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Capella - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Capella - Operations - Deposit - top_up__zero_balance OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Capella - Operations - Execution Payload [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Execution Payload - invalid_bad_everything_first_pay OK @@ -1446,7 +1373,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Capella - Operations - 
Execution Payload - zero_length_transaction_first_pa OK + [Valid] EF - Capella - Operations - Execution Payload - zero_length_transaction_regular_ OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Capella - Operations - Proposer Slashing [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Proposer Slashing - invalid_different_proposer_indic OK @@ -1465,7 +1391,6 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + [Valid] EF - Capella - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Capella - Operations - Proposer Slashing - slashed_and_proposer_index_the_s OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Capella - Operations - Sync Aggregate [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -1493,7 +1418,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Capella - Operations - Sync Aggregate - sync_committee_with_participating_w OK + [Valid] EF - Capella - Operations - Sync Aggregate - valid_signature_future_committee OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## EF - Capella - Operations - Voluntary Exit [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -1513,7 +1437,6 @@ OK: 24/24 Fail: 0/24 Skip: 0/24 + [Valid] EF - Capella - Operations - Voluntary Exit - voluntary_exit_with_current_fork_ve OK + [Valid] EF - Capella - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_v OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Capella - Operations - Withdrawals [Preset: minimal] ```diff + [Invalid] EF - Capella - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK @@ -1537,6 +1460,9 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Invalid] EF - Capella - Operations - Withdrawals - invalid_two_expected_partial_withdrawa OK + [Valid] EF - Capella - Operations - Withdrawals - all_withdrawal OK + [Valid] EF - Capella - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK ++ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK + [Valid] EF - Capella - Operations - Withdrawals - random_0 OK + [Valid] EF - Capella - Operations - Withdrawals - random_full_withdrawals_0 OK + [Valid] EF - Capella - Operations - Withdrawals - random_full_withdrawals_1 OK @@ -1568,7 +1494,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -OK: 51/51 Fail: 0/51 Skip: 0/51 ## EF - Capella - Random [Preset: minimal] ```diff + [Valid] EF - Capella - Random - randomized_0 [Preset: minimal] OK @@ -1588,7 +1513,6 @@ OK: 51/51 Fail: 0/51 Skip: 0/51 + [Valid] EF - Capella - Random - randomized_8 [Preset: minimal] OK + [Valid] EF - Capella - Random - randomized_9 [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Capella - Rewards [Preset: minimal] ```diff + EF - Capella - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -1626,7 +1550,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Capella - Rewards - with_slashed_validators [Preset: minimal] OK + EF - Capella - Rewards - with_slashed_validators_leak [Preset: minimal] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Capella - SSZ consensus objects [Preset: minimal] ```diff 
+ Testing AggregateAndProof OK @@ -1676,7 +1599,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -OK: 46/46 Fail: 0/46 Skip: 0/46 ## EF - Capella - Sanity - Blocks [Preset: minimal] ```diff + [Invalid] EF - Capella - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK @@ -1749,9 +1671,9 @@ OK: 46/46 Fail: 0/46 Skip: 0/46 + [Valid] EF - Capella - Sanity - Blocks - voluntary_exit [Preset: minimal] OK + [Valid] EF - Capella - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK ``` -OK: 69/69 Fail: 0/69 Skip: 0/69 ## EF - Capella - Sanity - Slots [Preset: minimal] ```diff ++ EF - Capella - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Capella - Slots - double_empty_epoch [Preset: minimal] OK + EF - Capella - Slots - empty_epoch [Preset: minimal] OK + EF - Capella - Slots - historical_accumulator [Preset: minimal] OK @@ -1759,7 +1681,6 @@ OK: 69/69 Fail: 0/69 Skip: 0/69 + EF - Capella - Slots - slots_1 [Preset: minimal] OK + EF - Capella - Slots - slots_2 [Preset: minimal] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Capella - Transition [Preset: minimal] ```diff + EF - Capella - Transition - non_empty_historical_roots [Preset: minimal] OK @@ -1789,7 +1710,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Capella - Transition - transition_with_voluntary_exit_right_after_fork [Preset: minim OK + EF - Capella - Transition - transition_with_voluntary_exit_right_before_fork [Preset: mini OK ``` -OK: 26/26 Fail: 0/26 Skip: 0/26 ## EF - Capella - Unittests - Light client - Sync protocol [Preset: minimal] ```diff + process_light_client_update_finality_updated OK @@ -1797,23 +1717,19 @@ OK: 26/26 Fail: 0/26 Skip: 0/26 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 ## EF - Deneb - Epoch Processing - Effective balance updates [Preset: minimal] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Eth1 data reset [Preset: minimal] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK + Eth1 data reset - eth1_vote_reset [Preset: minimal] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Deneb - Epoch Processing - Historical summaries update [Preset: minimal] ```diff + Historical summaries update - historical_summaries_accumulator [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Inactivity [Preset: minimal] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK @@ -1838,7 +1754,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Deneb - Epoch Processing - Justification & Finalization [Preset: minimal] ```diff + Justification & Finalization - 123_ok_support [Preset: minimal] OK @@ -1852,7 +1767,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: minimal] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Deneb - Epoch Processing - Participation flag updates [Preset: minimal] ```diff + Participation flag updates - all_zeroed [Preset: minimal] OK @@ -1868,12 +1782,10 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - 
random_genesis [Preset: minimal] OK + Participation flag updates - slightly_larger_random [Preset: minimal] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Deneb - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Registry updates [Preset: minimal] ```diff + Registry updates - activation_churn_limit__equal_to_activation_limit [Preset: minimal] OK @@ -1895,7 +1807,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK ``` -OK: 18/18 Fail: 0/18 Skip: 0/18 ## EF - Deneb - Epoch Processing - Rewards and penalties [Preset: minimal] ```diff + Rewards and penalties - almost_empty_attestations [Preset: minimal] OK @@ -1907,14 +1818,13 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: minimal] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK + Rewards and penalties - random_fill_attestations [Preset: minimal] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Deneb - Epoch Processing - Slashings [Preset: minimal] ```diff + Slashings - low_penalty [Preset: minimal] OK @@ -1923,12 +1833,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: minimal] OK + Slashings - slashings_with_random_state [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Deneb - Epoch Processing - Slashings reset [Preset: minimal] ```diff + Slashings reset - flush_slashings [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Deneb - Epoch Processing - Sync committee updates [Preset: minimal] ```diff + Sync committee updates - sync_committees_no_progress_not_at_period_boundary [Preset: minim OK @@ -1937,7 +1845,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Deneb - Finality [Preset: minimal] ```diff + [Valid] EF - Deneb - Finality - finality_no_updates_at_genesis [Preset: minimal] OK @@ -1946,7 +1853,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Deneb - Finality - finality_rule_3 [Preset: minimal] OK + [Valid] EF - Deneb - Finality - finality_rule_4 [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Deneb - Fork [Preset: minimal] ```diff + EF - Deneb - Fork - deneb_fork_random_0 [Preset: minimal] OK @@ -1964,7 +1870,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Deneb - Fork - fork_random_low_balances [Preset: minimal] OK + EF - Deneb - Fork - fork_random_misc_balances [Preset: minimal] OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Deneb - Operations - Attestation [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - 
Attestation - invalid_after_max_inclusion_slot OK @@ -2009,7 +1914,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Valid] EF - Deneb - Operations - Attestation - one_basic_attestation OK + [Valid] EF - Deneb - Operations - Attestation - previous_epoch OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Deneb - Operations - Attester Slashing [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -2043,7 +1947,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Deneb - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Deneb - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Deneb - Operations - BLS to execution change [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - BLS to execution change - invalid_already_0x01 OK @@ -2061,7 +1964,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Deneb - Operations - BLS to execution change - success_not_activated OK + [Valid] EF - Deneb - Operations - BLS to execution change - success_withdrawable OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Deneb - Operations - Block Header [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -2071,7 +1973,6 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Deneb - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Deneb - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Deneb - Operations - Deposit [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Deposit - invalid_bad_merkle_proof OK @@ -2096,7 +1997,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Deneb - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Deneb - Operations - Deposit - top_up__zero_balance OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Deneb - Operations - Execution Payload [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Execution Payload - invalid_bad_everything_first_paylo OK @@ -2123,6 +2023,8 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_1_ext OK + [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_32_ex OK + [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_empty OK ++ [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_no_blobs_but OK ++ [Valid] EF - Deneb - Operations - Execution Payload - no_commitments_for_transactions OK + [Valid] EF - Deneb - Operations - Execution Payload - no_transactions_with_commitments OK + [Valid] EF - Deneb - Operations - Execution Payload - non_empty_extra_data_first_payload OK + [Valid] EF - Deneb - Operations - Execution Payload - non_empty_extra_data_regular_paylo OK @@ -2138,7 +2040,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Deneb - Operations - Execution Payload - zero_length_transaction_regular_pa OK + [Valid] EF - Deneb - Operations - Execution Payload - zeroed_commitment OK ``` -OK: 38/38 Fail: 0/38 Skip: 0/38 ## EF - Deneb - Operations - Proposer Slashing [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Proposer Slashing - invalid_different_proposer_indices OK @@ -2157,7 +2058,6 @@ OK: 38/38 Fail: 0/38 Skip: 0/38 + [Valid] EF - Deneb - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Deneb - Operations - Proposer Slashing - slashed_and_proposer_index_the_sam OK 
``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Deneb - Operations - Sync Aggregate [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -2185,7 +2085,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Deneb - Operations - Sync Aggregate - sync_committee_with_participating_wit OK + [Valid] EF - Deneb - Operations - Sync Aggregate - valid_signature_future_committee OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## EF - Deneb - Operations - Voluntary Exit [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -2205,7 +2104,6 @@ OK: 24/24 Fail: 0/24 Skip: 0/24 + [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK + [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Deneb - Operations - Withdrawals [Preset: minimal] ```diff + [Invalid] EF - Deneb - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_few OK @@ -2229,6 +2127,9 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Invalid] EF - Deneb - Operations - Withdrawals - invalid_two_expected_partial_withdrawal_ OK + [Valid] EF - Deneb - Operations - Withdrawals - all_withdrawal OK + [Valid] EF - Deneb - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK ++ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK ++ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK + [Valid] EF - Deneb - Operations - Withdrawals - random_0 OK + [Valid] EF - Deneb - Operations - Withdrawals - random_full_withdrawals_0 OK + [Valid] EF - Deneb - Operations - Withdrawals - random_full_withdrawals_1 OK @@ -2260,7 +2161,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK + [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK ``` -OK: 51/51 Fail: 0/51 Skip: 0/51 ## EF - Deneb - Random [Preset: minimal] ```diff + [Valid] EF - Deneb - Random - randomized_0 [Preset: minimal] OK @@ -2280,7 +2180,6 @@ OK: 51/51 Fail: 0/51 Skip: 0/51 + [Valid] EF - Deneb - Random - randomized_8 [Preset: minimal] OK + [Valid] EF - Deneb - Random - randomized_9 [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Deneb - Rewards [Preset: minimal] ```diff + EF - Deneb - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -2318,7 +2217,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Deneb - Rewards - with_slashed_validators [Preset: minimal] OK + EF - Deneb - Rewards - with_slashed_validators_leak [Preset: minimal] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Deneb - SSZ consensus objects [Preset: minimal] ```diff + Testing AggregateAndProof OK @@ -2370,7 +2268,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing VoluntaryExit OK + Testing Withdrawal OK ``` -OK: 48/48 Fail: 0/48 Skip: 0/48 ## EF - Deneb - Sanity - Blocks [Preset: minimal] ```diff + [Invalid] EF - Deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK @@ -2452,9 +2349,9 @@ OK: 48/48 Fail: 0/48 Skip: 0/48 + [Valid] EF - Deneb - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK + [Valid] EF - Deneb - Sanity - Blocks - zero_blob [Preset: minimal] OK ``` -OK: 78/78 Fail: 0/78 Skip: 0/78 ## EF - Deneb - Sanity - Slots [Preset: minimal] ```diff ++ 
EF - Deneb - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Deneb - Slots - double_empty_epoch [Preset: minimal] OK + EF - Deneb - Slots - empty_epoch [Preset: minimal] OK + EF - Deneb - Slots - historical_accumulator [Preset: minimal] OK @@ -2462,7 +2359,6 @@ OK: 78/78 Fail: 0/78 Skip: 0/78 + EF - Deneb - Slots - slots_1 [Preset: minimal] OK + EF - Deneb - Slots - slots_2 [Preset: minimal] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Deneb - Transition [Preset: minimal] ```diff + EF - Deneb - Transition - higher_churn_limit_to_lower [Preset: minimal] OK @@ -2496,7 +2392,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Deneb - Transition - transition_with_voluntary_exit_right_after_fork [Preset: minimal OK + EF - Deneb - Transition - transition_with_voluntary_exit_right_before_fork [Preset: minima OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Deneb - Unittests - Light client - Sync protocol [Preset: minimal] ```diff + process_light_client_update_finality_updated OK @@ -2504,79 +2399,20 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 -## EF - EIP7594 - SSZ consensus objects [Preset: minimal] -```diff -+ Testing AggregateAndProof OK -+ Testing Attestation OK -+ Testing AttestationData OK -+ Testing AttesterSlashing OK -+ Testing BLSToExecutionChange OK -+ Testing BeaconBlock OK -+ Testing BeaconBlockBody OK -+ Testing BeaconBlockHeader OK -+ Testing BeaconState OK -+ Testing BlobIdentifier OK -+ Testing BlobSidecar OK -+ Testing Checkpoint OK -+ Testing ContributionAndProof OK -+ Testing DataColumnIdentifier OK -+ Testing DataColumnSidecar OK -+ Testing Deposit OK -+ Testing DepositData OK -+ Testing DepositMessage OK -+ Testing Eth1Block OK -+ Testing Eth1Data OK -+ Testing ExecutionPayload OK -+ Testing ExecutionPayloadHeader OK -+ Testing Fork OK -+ Testing ForkData OK -+ Testing HistoricalBatch OK -+ Testing HistoricalSummary OK -+ Testing IndexedAttestation OK -+ Testing LightClientBootstrap OK -+ Testing LightClientFinalityUpdate OK -+ Testing LightClientHeader OK -+ Testing LightClientOptimisticUpdate OK -+ Testing LightClientUpdate OK -+ Testing MatrixEntry OK -+ Testing PendingAttestation OK -+ Testing PowBlock OK -+ Testing ProposerSlashing OK -+ Testing SignedAggregateAndProof OK -+ Testing SignedBLSToExecutionChange OK -+ Testing SignedBeaconBlock OK -+ Testing SignedBeaconBlockHeader OK -+ Testing SignedContributionAndProof OK -+ Testing SignedVoluntaryExit OK -+ Testing SigningData OK -+ Testing SyncAggregate OK -+ Testing SyncAggregatorSelectionData OK -+ Testing SyncCommittee OK -+ Testing SyncCommitteeContribution OK -+ Testing SyncCommitteeMessage OK -+ Testing Validator OK -+ Testing VoluntaryExit OK -+ Testing Withdrawal OK -``` -OK: 51/51 Fail: 0/51 Skip: 0/51 ## EF - Electra - Epoch Processing - Effective balance updates [Preset: minimal] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK + Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Electra - Epoch Processing - Eth1 data reset [Preset: minimal] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK + Eth1 data reset - eth1_vote_reset [Preset: minimal] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Electra - Epoch Processing - Historical summaries update [Preset: minimal] ```diff + Historical summaries update - historical_summaries_accumulator 
[Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Electra - Epoch Processing - Inactivity [Preset: minimal] ```diff + Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK @@ -2601,7 +2437,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK + Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Electra - Epoch Processing - Justification & Finalization [Preset: minimal] ```diff + Justification & Finalization - 123_ok_support [Preset: minimal] OK @@ -2615,7 +2450,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + Justification & Finalization - 23_poor_support [Preset: minimal] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Electra - Epoch Processing - Participation flag updates [Preset: minimal] ```diff + Participation flag updates - all_zeroed [Preset: minimal] OK @@ -2631,22 +2465,27 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + Participation flag updates - random_genesis [Preset: minimal] OK + Participation flag updates - slightly_larger_random [Preset: minimal] OK ``` -OK: 12/12 Fail: 0/12 Skip: 0/12 ## EF - Electra - Epoch Processing - Pending consolidations [Preset: minimal] ```diff + Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK + Pending consolidations - basic_pending_consolidation [Preset: minimal] OK + Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: mi OK ++ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: minimal] OK + Pending consolidations - pending_consolidation_compounding_creds [Preset: minimal] OK + Pending consolidations - pending_consolidation_future_epoch [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK + Pending consolidations - pending_consolidation_with_pending_deposit [Preset: minimal] OK + Pending consolidations - skip_consolidation_when_source_slashed [Preset: minimal] OK ``` -OK: 7/7 Fail: 0/7 Skip: 0/7 ## EF - Electra - Epoch Processing - Pending deposits [Preset: minimal] ```diff + Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK + Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK + Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK + Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: minimal] OK + Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK @@ -2661,6 +2500,7 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: min OK + Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK + Pending deposits - apply_pending_deposit_over_min_activation [Preset: minimal] OK ++ 
Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: minim OK + Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: mi OK + Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: minimal] OK + Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK @@ -2677,22 +2517,22 @@ OK: 7/7 Fail: 0/7 Skip: 0/7 + Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: minima OK + Pending deposits - process_pending_deposits_limit_is_reached [Preset: minimal] OK + Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: mi OK ++ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: minimal] OK + Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK + Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK + Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: minimal] OK + Pending deposits - process_pending_deposits_multiple_skipped_deposits_exiting_validators [ OK + Pending deposits - process_pending_deposits_not_finalized [Preset: minimal] OK + Pending deposits - process_pending_deposits_preexisting_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_scaled_churn [Preset: minimal] OK + Pending deposits - process_pending_deposits_skipped_deposit_exiting_validator [Preset: min OK + Pending deposits - process_pending_deposits_withdrawable_validator [Preset: minimal] OK + Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: mi OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Electra - Epoch Processing - Registry updates [Preset: minimal] ```diff + Registry updates - activation_churn_limit__equal_to_activation_limit [Preset: minimal] OK @@ -2705,6 +2545,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - activation_queue_activation_and_ejection__scaled_churn_limit [Preset: m OK + Registry updates - activation_queue_efficiency_min [Preset: minimal] OK + Registry updates - activation_queue_efficiency_scaled [Preset: minimal] OK ++ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK ++ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK ++ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: minimal] OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK + Registry updates - activation_queue_no_activation_no_finality [Preset: minimal] OK + Registry updates - activation_queue_sorting [Preset: minimal] OK + Registry updates - activation_queue_to_activated_if_finalized [Preset: minimal] OK @@ -2714,7 +2559,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK ``` -OK: 18/18 Fail: 0/18 Skip: 0/18 ## EF - Electra - Epoch Processing - Rewards and penalties [Preset: minimal] ```diff + Rewards and penalties - almost_empty_attestations [Preset: minimal] OK @@ -2726,14 +2570,13 @@ OK: 18/18 
Fail: 0/18 Skip: 0/18 + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: minimal] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK + Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK + Rewards and penalties - random_fill_attestations [Preset: minimal] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Electra - Epoch Processing - Slashings [Preset: minimal] ```diff + Slashings - low_penalty [Preset: minimal] OK @@ -2742,12 +2585,10 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Slashings - scaled_penalties [Preset: minimal] OK + Slashings - slashings_with_random_state [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Electra - Epoch Processing - Slashings reset [Preset: minimal] ```diff + Slashings reset - flush_slashings [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Electra - Epoch Processing - Sync committee updates [Preset: minimal] ```diff + Sync committee updates - sync_committees_no_progress_not_at_period_boundary [Preset: minim OK @@ -2756,7 +2597,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK + Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Electra - Finality [Preset: minimal] ```diff + [Valid] EF - Electra - Finality - finality_no_updates_at_genesis [Preset: minimal] OK @@ -2765,7 +2605,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Electra - Finality - finality_rule_3 [Preset: minimal] OK + [Valid] EF - Electra - Finality - finality_rule_4 [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Electra - Fork [Preset: minimal] ```diff + EF - Electra - Fork - electra_fork_random_0 [Preset: minimal] OK @@ -2776,16 +2615,20 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + EF - Electra - Fork - electra_fork_random_low_balances [Preset: minimal] OK + EF - Electra - Fork - electra_fork_random_misc_balances [Preset: minimal] OK + EF - Electra - Fork - fork_base_state [Preset: minimal] OK ++ EF - Electra - Fork - fork_earliest_exit_epoch_is_max_validator_exit_epoch [Preset: minima OK ++ EF - Electra - Fork - fork_earliest_exit_epoch_less_than_current_epoch [Preset: minimal] OK ++ EF - Electra - Fork - fork_earliest_exit_epoch_no_validator_exits [Preset: minimal] OK + EF - Electra - Fork - fork_has_compounding_withdrawal_credential [Preset: minimal] OK ++ EF - Electra - Fork - fork_inactive_compounding_validator_with_excess_balance [Preset: min OK + EF - Electra - Fork - fork_many_next_epoch [Preset: minimal] OK + EF - Electra - Fork - fork_next_epoch [Preset: minimal] OK + EF - Electra - Fork - fork_next_epoch_with_block [Preset: minimal] OK ++ EF - Electra - Fork - fork_pending_deposits_are_sorted [Preset: minimal] OK + EF - Electra - Fork - fork_pre_activation [Preset: minimal] OK + EF - Electra - Fork - fork_random_large_validator_set [Preset: minimal] OK + EF - Electra - Fork - fork_random_low_balances [Preset: minimal] OK + EF - 
Electra - Fork - fork_random_misc_balances [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Electra - Operations - Attestation [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Attestation - invalid_after_max_inclusion_slot OK @@ -2793,7 +2636,7 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Invalid] EF - Electra - Operations - Attestation - invalid_attestation_signature OK + [Invalid] EF - Electra - Operations - Attestation - invalid_bad_source_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_before_inclusion_delay OK -+ [Invalid] EF - Electra - Operations - Attestation - invalid_committe_index OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_committee_index OK + [Invalid] EF - Electra - Operations - Attestation - invalid_correct_attestation_included_a OK + [Invalid] EF - Electra - Operations - Attestation - invalid_current_source_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_empty_participants_seemingly_v OK @@ -2805,14 +2648,16 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Invalid] EF - Electra - Operations - Attestation - invalid_index OK + [Invalid] EF - Electra - Operations - Attestation - invalid_mismatched_target_and_slot OK + [Invalid] EF - Electra - Operations - Attestation - invalid_new_source_epoch OK -+ [Invalid] EF - Electra - Operations - Attestation - invalid_nonset_committe_bits OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_nonset_bits_for_one_committee OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_nonset_committee_bits OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_nonset_multiple_committee_bits OK + [Invalid] EF - Electra - Operations - Attestation - invalid_old_source_epoch OK + [Invalid] EF - Electra - Operations - Attestation - invalid_old_target_epoch OK + [Invalid] EF - Electra - Operations - Attestation - invalid_previous_source_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_source_root_is_target_root OK + [Invalid] EF - Electra - Operations - Attestation - invalid_too_few_aggregation_bits OK + [Invalid] EF - Electra - Operations - Attestation - invalid_too_many_aggregation_bits OK -+ [Invalid] EF - Electra - Operations - Attestation - invalid_too_many_committe_bits OK ++ [Invalid] EF - Electra - Operations - Attestation - invalid_too_many_committee_bits OK + [Invalid] EF - Electra - Operations - Attestation - invalid_wrong_index_for_committee_sign OK + [Invalid] EF - Electra - Operations - Attestation - invalid_wrong_index_for_slot_0 OK + [Invalid] EF - Electra - Operations - Attestation - invalid_wrong_index_for_slot_1 OK @@ -2831,10 +2676,11 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Electra - Operations - Attestation - incorrect_target_included_at_min_inclu OK + [Valid] EF - Electra - Operations - Attestation - incorrect_target_included_at_sqrt_epoc OK + [Valid] EF - Electra - Operations - Attestation - multi_proposer_index_iterations OK ++ [Valid] EF - Electra - Operations - Attestation - multiple_committees OK + [Valid] EF - Electra - Operations - Attestation - one_basic_attestation OK ++ [Valid] EF - Electra - Operations - Attestation - one_committee_with_gap OK + [Valid] EF - Electra - Operations - Attestation - previous_epoch OK ``` -OK: 45/45 Fail: 0/45 Skip: 0/45 ## EF - Electra - Operations - Attester Slashing [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Attester Slashing - invalid_all_empty_indices OK @@ -2868,7 +2714,6 @@ OK: 45/45 Fail: 0/45 Skip: 0/45 + [Valid] EF 
- Electra - Operations - Attester Slashing - proposer_index_slashed OK + [Valid] EF - Electra - Operations - Attester Slashing - with_effective_balance_disparity OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Electra - Operations - BLS to execution change [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - BLS to execution change - invalid_already_0x01 OK @@ -2886,7 +2731,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Electra - Operations - BLS to execution change - success_not_activated OK + [Valid] EF - Electra - Operations - BLS to execution change - success_withdrawable OK ``` -OK: 14/14 Fail: 0/14 Skip: 0/14 ## EF - Electra - Operations - Block Header [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Block Header - invalid_multiple_blocks_single_slot OK @@ -2896,14 +2740,13 @@ OK: 14/14 Fail: 0/14 Skip: 0/14 + [Invalid] EF - Electra - Operations - Block Header - invalid_slot_block_header OK + [Valid] EF - Electra - Operations - Block Header - basic_block_header OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Electra - Operations - Consolidation Request [Preset: minimal] ```diff + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_curre OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_new_c OK ++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_source_h OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_com OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_exc OK -+ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_exc OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_ins OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_pre OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_switch_to_compounding OK @@ -2915,10 +2758,15 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exited_target OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_inactive_source OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_inactive_target OK -+ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_incorrect_source_a OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_no_source_executio OK -+ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_no_target_executio OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_not_enough_consoli OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_same_source_target OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_address OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_has_pending OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_not_active_ OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_with_bls_cr OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_target_with_bls_cr OK ++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_target_with_eth1_c OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_unknown_source_pub OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_unknown_target_pub OK + [Valid] EF - Electra - 
Operations - Consolidation Request - switch_to_compounding_exited OK @@ -2930,7 +2778,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Electra - Operations - Consolidation Request - switch_to_compounding_with_e OK + [Valid] EF - Electra - Operations - Consolidation Request - switch_to_compounding_with_p OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Electra - Operations - Deposit [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK @@ -2955,7 +2802,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Valid] EF - Electra - Operations - Deposit - top_up__max_effective_balance OK + [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK ``` -OK: 21/21 Fail: 0/21 Skip: 0/21 ## EF - Electra - Operations - Deposit Request [Preset: minimal] ```diff + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_invalid_si OK @@ -2967,7 +2813,6 @@ OK: 21/21 Fail: 0/21 Skip: 0/21 + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_max OK + [Valid] EF - Electra - Operations - Deposit Request - process_deposit_request_top_up_min OK ``` -OK: 8/8 Fail: 0/8 Skip: 0/8 ## EF - Electra - Operations - Execution Payload [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK @@ -2994,6 +2839,8 @@ OK: 8/8 Fail: 0/8 Skip: 0/8 + [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_1_e OK + [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_32_ OK + [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_emp OK ++ [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_no_blobs_b OK ++ [Valid] EF - Electra - Operations - Execution Payload - no_commitments_for_transactions OK + [Valid] EF - Electra - Operations - Execution Payload - no_transactions_with_commitments OK + [Valid] EF - Electra - Operations - Execution Payload - non_empty_extra_data_first_paylo OK + [Valid] EF - Electra - Operations - Execution Payload - non_empty_extra_data_regular_pay OK @@ -3009,7 +2856,6 @@ OK: 8/8 Fail: 0/8 Skip: 0/8 + [Valid] EF - Electra - Operations - Execution Payload - zero_length_transaction_regular_ OK + [Valid] EF - Electra - Operations - Execution Payload - zeroed_commitment OK ``` -OK: 38/38 Fail: 0/38 Skip: 0/38 ## EF - Electra - Operations - Proposer Slashing [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Proposer Slashing - invalid_different_proposer_indic OK @@ -3028,7 +2874,6 @@ OK: 38/38 Fail: 0/38 Skip: 0/38 + [Valid] EF - Electra - Operations - Proposer Slashing - block_header_from_future OK + [Valid] EF - Electra - Operations - Proposer Slashing - slashed_and_proposer_index_the_s OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Electra - Operations - Sync Aggregate [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Sync Aggregate - invalid_signature_bad_domain OK @@ -3056,7 +2901,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Electra - Operations - Sync Aggregate - sync_committee_with_participating_w OK + [Valid] EF - Electra - Operations - Sync Aggregate - valid_signature_future_committee OK ``` -OK: 24/24 Fail: 0/24 Skip: 0/24 ## EF - Electra - Operations - Voluntary Exit [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Voluntary Exit - invalid_incorrect_signature OK @@ -3080,7 +2924,6 @@ OK: 24/24 Fail: 0/24 Skip: 0/24 + [Valid] EF - Electra - Operations - Voluntary Exit - 
success_exit_queue__min_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK ``` -OK: 20/20 Fail: 0/20 Skip: 0/20 ## EF - Electra - Operations - Withdrawal Request [Preset: minimal] ```diff + [Valid] EF - Electra - Operations - Withdrawal Request - activation_epoch_less_than_shar OK @@ -3111,8 +2954,8 @@ OK: 20/20 Fail: 0/20 Skip: 0/20 + [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK + [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK + [Valid] EF - Electra - Operations - Withdrawal Request - pending_withdrawals_consume_all OK ++ [Valid] EF - Electra - Operations - Withdrawal Request - unknown_pubkey OK ``` -OK: 28/28 Fail: 0/28 Skip: 0/28 ## EF - Electra - Operations - Withdrawals [Preset: minimal] ```diff + [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK @@ -3135,8 +2978,31 @@ OK: 28/28 Fail: 0/28 Skip: 0/28 + [Invalid] EF - Electra - Operations - Withdrawals - invalid_one_of_many_incorrectly_partia OK + [Invalid] EF - Electra - Operations - Withdrawals - invalid_two_expected_partial_withdrawa OK + [Valid] EF - Electra - Operations - Withdrawals - all_withdrawal OK ++ [Valid] EF - Electra - Operations - Withdrawals - full_pending_withdrawals_but_first_ski OK ++ [Valid] EF - Electra - Operations - Withdrawals - full_pending_withdrawals_but_first_ski OK ++ [Valid] EF - Electra - Operations - Withdrawals - full_pending_withdrawals_but_first_ski OK + [Valid] EF - Electra - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_at_max OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_ OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_exiting_validator OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_low_effective_bala OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_a OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_next_epoch OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK + [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_one_skipped_one_ef OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_effective_swe OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK ++ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_with_ineffective_s OK ++ [Valid] EF - Electra - Operations - 
Withdrawals - pending_withdrawals_with_sweep_differe OK + [Valid] EF - Electra - Operations - Withdrawals - random_0 OK + [Valid] EF - Electra - Operations - Withdrawals - random_full_withdrawals_0 OK + [Valid] EF - Electra - Operations - Withdrawals - random_full_withdrawals_1 OK @@ -3172,7 +3038,6 @@ OK: 28/28 Fail: 0/28 Skip: 0/28 + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK + [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK ``` -OK: 56/56 Fail: 0/56 Skip: 0/56 ## EF - Electra - Random [Preset: minimal] ```diff + [Valid] EF - Electra - Random - randomized_0 [Preset: minimal] OK @@ -3192,7 +3057,6 @@ OK: 56/56 Fail: 0/56 Skip: 0/56 + [Valid] EF - Electra - Random - randomized_8 [Preset: minimal] OK + [Valid] EF - Electra - Random - randomized_9 [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Electra - Rewards [Preset: minimal] ```diff + EF - Electra - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -3230,7 +3094,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + EF - Electra - Rewards - with_slashed_validators [Preset: minimal] OK + EF - Electra - Rewards - with_slashed_validators_leak [Preset: minimal] OK ``` -OK: 34/34 Fail: 0/34 Skip: 0/34 ## EF - Electra - SSZ consensus objects [Preset: minimal] ```diff + Testing AggregateAndProof OK @@ -3279,6 +3142,7 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing SignedContributionAndProof OK + Testing SignedVoluntaryExit OK + Testing SigningData OK ++ Testing SingleAttestation OK + Testing SyncAggregate OK + Testing SyncAggregatorSelectionData OK + Testing SyncCommittee OK @@ -3289,7 +3153,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34 + Testing Withdrawal OK + Testing WithdrawalRequest OK ``` -OK: 55/55 Fail: 0/55 Skip: 0/55 ## EF - Electra - Sanity - Blocks [Preset: minimal] ```diff + [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK @@ -3327,10 +3190,13 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - block_transition_randomized_payload [Preset: mi OK + [Valid] EF - Electra - Sanity - Blocks - bls_change [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK ++ [Valid] EF - Electra - Sanity - Blocks - consolidation_requests_when_pending_consolidati OK + [Valid] EF - Electra - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - Electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK + [Valid] EF - Electra - Sanity - Blocks - deposit_top_up [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK ++ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits [Pres OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK + [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK @@ -3359,6 +3225,8 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK + [Valid] EF - Electra - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK ++ [Valid] EF - Electra - Sanity - 
Blocks - multiple_el_partial_withdrawal_requests_differe OK ++ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK + [Valid] EF - Electra - Sanity - Blocks - one_blob [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - one_blob_max_txs [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - one_blob_two_txs [Preset: minimal] OK @@ -3368,6 +3236,8 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - proposer_slashing [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - skipped_slots [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - Electra - Sanity - Blocks - switch_to_compounding_requests_when_pending_con OK ++ [Valid] EF - Electra - Sanity - Blocks - switch_to_compounding_requests_when_too_little_ OK + [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__empty [Preset: minima OK + [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__full [Preset: minimal OK + [Valid] EF - Electra - Sanity - Blocks - sync_committee_committee__half [Preset: minimal OK @@ -3377,20 +3247,29 @@ OK: 55/55 Fail: 0/55 Skip: 0/55 + [Valid] EF - Electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK + [Valid] EF - Electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mi OK + [Valid] EF - Electra - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_consolidation_effective_balance_ OK ++ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK ++ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validato OK ++ [Valid] EF - Electra - Sanity - Blocks - withdrawal_requests_when_pending_withdrawal_que OK + [Valid] EF - Electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK + [Valid] EF - Electra - Sanity - Blocks - zero_blob [Preset: minimal] OK ``` -OK: 87/87 Fail: 0/87 Skip: 0/87 ## EF - Electra - Sanity - Slots [Preset: minimal] ```diff ++ EF - Electra - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Electra - Slots - double_empty_epoch [Preset: minimal] OK + EF - Electra - Slots - empty_epoch [Preset: minimal] OK + EF - Electra - Slots - historical_accumulator [Preset: minimal] OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey [Preset: minimal] OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Prese OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Prese OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: minimal] OK ++ EF - Electra - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: OK + EF - Electra - Slots - over_epoch_boundary [Preset: minimal] OK ++ EF - Electra - Slots - pending_consolidation [Preset: minimal] OK + EF - Electra - Slots - slots_1 [Preset: minimal] OK + EF - Electra - Slots - slots_2 [Preset: minimal] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Electra - Transition [Preset: minimal] ```diff + EF - Electra - Transition - higher_churn_limit_to_lower [Preset: minimal] OK @@ -3407,9 +3286,12 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Electra - Transition - transition_with_attester_slashing_right_before_fork [Preset: m OK + EF - Electra - Transition - transition_with_btec_right_after_fork [Preset: minimal] OK + EF - Electra - Transition - 
transition_with_btec_right_before_fork [Preset: minimal] OK ++ EF - Electra - Transition - transition_with_consolidation_request_right_after_fork [Preset OK ++ EF - Electra - Transition - transition_with_deposit_request_right_after_fork [Preset: mini OK + EF - Electra - Transition - transition_with_deposit_right_after_fork [Preset: minimal] OK + EF - Electra - Transition - transition_with_deposit_right_before_fork [Preset: minimal] OK + EF - Electra - Transition - transition_with_finality [Preset: minimal] OK ++ EF - Electra - Transition - transition_with_full_withdrawal_request_right_after_fork [Pres OK + EF - Electra - Transition - transition_with_leaking_at_fork [Preset: minimal] OK + EF - Electra - Transition - transition_with_leaking_pre_fork [Preset: minimal] OK + EF - Electra - Transition - transition_with_no_attestations_until_after_fork [Preset: mini OK @@ -3424,7 +3306,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + EF - Electra - Transition - transition_with_voluntary_exit_right_after_fork [Preset: minim OK + EF - Electra - Transition - transition_with_voluntary_exit_right_before_fork [Preset: mini OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Electra - Unittests - Light client - Sync protocol [Preset: minimal] ```diff + process_light_client_update_finality_updated OK @@ -3432,7 +3313,798 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + test_process_light_client_update_at_period_boundary OK + test_process_light_client_update_not_timeout OK ``` -OK: 4/4 Fail: 0/4 Skip: 0/4 +## EF - Fulu - Epoch Processing - Effective balance updates [Preset: minimal] +```diff ++ Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ++ Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK +``` +## EF - Fulu - Epoch Processing - Eth1 data reset [Preset: minimal] +```diff ++ Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK ++ Eth1 data reset - eth1_vote_reset [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Historical summaries update [Preset: minimal] +```diff ++ Historical summaries update - historical_summaries_accumulator [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Inactivity [Preset: minimal] +```diff ++ Inactivity - all_zero_inactivity_scores_empty_participation [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_empty_participation_leaking [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_full_participation [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_full_participation_leaking [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_random_participation [Preset: minimal] OK ++ Inactivity - all_zero_inactivity_scores_random_participation_leaking [Preset: minimal] OK ++ Inactivity - genesis [Preset: minimal] OK ++ Inactivity - genesis_random_scores [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_empty_participation [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_empty_participation_leaking [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_full_participation [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_full_participation_leaking [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_random_participation [Preset: minimal] OK ++ Inactivity - random_inactivity_scores_random_participation_leaking [Preset: minimal] OK ++ Inactivity - randomized_state [Preset: minimal] OK ++ Inactivity - randomized_state_leaking [Preset: minimal] OK ++ Inactivity - some_exited_full_random_leaking [Preset: minimal] OK ++ 
Inactivity - some_slashed_full_random [Preset: minimal] OK ++ Inactivity - some_slashed_full_random_leaking [Preset: minimal] OK ++ Inactivity - some_slashed_zero_scores_full_participation [Preset: minimal] OK ++ Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Justification & Finalization [Preset: minimal] +```diff ++ Justification & Finalization - 123_ok_support [Preset: minimal] OK ++ Justification & Finalization - 123_poor_support [Preset: minimal] OK ++ Justification & Finalization - 12_ok_support [Preset: minimal] OK ++ Justification & Finalization - 12_ok_support_messed_target [Preset: minimal] OK ++ Justification & Finalization - 12_poor_support [Preset: minimal] OK ++ Justification & Finalization - 234_ok_support [Preset: minimal] OK ++ Justification & Finalization - 234_poor_support [Preset: minimal] OK ++ Justification & Finalization - 23_ok_support [Preset: minimal] OK ++ Justification & Finalization - 23_poor_support [Preset: minimal] OK ++ Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Participation flag updates [Preset: minimal] +```diff ++ Participation flag updates - all_zeroed [Preset: minimal] OK ++ Participation flag updates - current_epoch_zeroed [Preset: minimal] OK ++ Participation flag updates - current_filled [Preset: minimal] OK ++ Participation flag updates - filled [Preset: minimal] OK ++ Participation flag updates - large_random [Preset: minimal] OK ++ Participation flag updates - previous_epoch_zeroed [Preset: minimal] OK ++ Participation flag updates - previous_filled [Preset: minimal] OK ++ Participation flag updates - random_0 [Preset: minimal] OK ++ Participation flag updates - random_1 [Preset: minimal] OK ++ Participation flag updates - random_2 [Preset: minimal] OK ++ Participation flag updates - random_genesis [Preset: minimal] OK ++ Participation flag updates - slightly_larger_random [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Pending consolidations [Preset: minimal] +```diff ++ Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK ++ Pending consolidations - basic_pending_consolidation [Preset: minimal] OK ++ Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: mi OK ++ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_compounding_creds [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_future_epoch [Preset: minimal] OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK ++ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK ++ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK ++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: minimal] OK ++ Pending consolidations - skip_consolidation_when_source_slashed [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Pending deposits [Preset: minimal] +```diff ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK ++ Pending deposits - 
apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK ++ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK ++ Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK ++ Pending deposits - apply_pending_deposit_eth1_withdrawal_credentials [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_new_deposit [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_incorrect_sig_top_up [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_incorrect_withdrawal_credentials_top_up [Preset: OK ++ Pending deposits - apply_pending_deposit_ineffective_deposit_with_bad_fork_version [Preset OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_decompression [Preset: minim OK ++ Pending deposits - apply_pending_deposit_key_validate_invalid_subgroup [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_min_activation [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: min OK ++ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK ++ Pending deposits - apply_pending_deposit_over_min_activation [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: minim OK ++ Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: mi OK ++ Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK ++ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_under_min_activation [Preset: minimal] OK ++ Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: minimal] OK ++ Pending deposits - ineffective_deposit_with_current_fork_version [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_balance_above_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_balance_equal_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_complete [Preset: minim OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_not_applied [Preset: mi OK ++ Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: minima OK ++ Pending deposits - process_pending_deposits_limit_is_reached [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: mi OK ++ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK ++ Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_multiple_skipped_deposits_exiting_validators [ OK ++ Pending 
deposits - process_pending_deposits_not_finalized [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_preexisting_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_scaled_churn [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_skipped_deposit_exiting_validator [Preset: min OK ++ Pending deposits - process_pending_deposits_withdrawable_validator [Preset: minimal] OK ++ Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: mi OK +``` +## EF - Fulu - Epoch Processing - RANDAO mixes reset [Preset: minimal] +```diff ++ RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Registry updates [Preset: minimal] +```diff ++ Registry updates - activation_churn_limit__equal_to_activation_limit [Preset: minimal] OK ++ Registry updates - activation_churn_limit__greater_than_activation_limit [Preset: minimal] OK ++ Registry updates - activation_churn_limit__less_than_activation_limit [Preset: minimal] OK ++ Registry updates - activation_queue_activation_and_ejection__1 [Preset: minimal] OK ++ Registry updates - activation_queue_activation_and_ejection__churn_limit [Preset: minimal] OK ++ Registry updates - activation_queue_activation_and_ejection__exceed_churn_limit [Preset: m OK ++ Registry updates - activation_queue_activation_and_ejection__exceed_scaled_churn_limit [Pr OK ++ Registry updates - activation_queue_activation_and_ejection__scaled_churn_limit [Preset: m OK ++ Registry updates - activation_queue_efficiency_min [Preset: minimal] OK ++ Registry updates - activation_queue_efficiency_scaled [Preset: minimal] OK ++ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK ++ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK ++ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: minimal] OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK ++ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK ++ Registry updates - activation_queue_no_activation_no_finality [Preset: minimal] OK ++ Registry updates - activation_queue_sorting [Preset: minimal] OK ++ Registry updates - activation_queue_to_activated_if_finalized [Preset: minimal] OK ++ Registry updates - add_to_activation_queue [Preset: minimal] OK ++ Registry updates - ejection [Preset: minimal] OK ++ Registry updates - ejection_past_churn_limit_min [Preset: minimal] OK ++ Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK ++ Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Rewards and penalties [Preset: minimal] +```diff ++ Rewards and penalties - almost_empty_attestations [Preset: minimal] OK ++ Rewards and penalties - almost_empty_attestations_with_leak [Preset: minimal] OK ++ Rewards and penalties - almost_full_attestations [Preset: minimal] OK ++ Rewards and penalties - almost_full_attestations_with_leak [Preset: minimal] OK ++ Rewards and penalties - attestations_some_slashed [Preset: minimal] OK ++ Rewards and penalties - duplicate_attestation [Preset: minimal] OK ++ Rewards and penalties - full_attestation_participation [Preset: minimal] OK ++ Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK ++ Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK ++ Rewards and 
penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK ++ Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK ++ Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK ++ Rewards and penalties - no_attestations_all_penalties [Preset: minimal] OK ++ Rewards and penalties - random_fill_attestations [Preset: minimal] OK ++ Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Slashings [Preset: minimal] +```diff ++ Slashings - low_penalty [Preset: minimal] OK ++ Slashings - max_penalties [Preset: minimal] OK ++ Slashings - minimal_penalty [Preset: minimal] OK ++ Slashings - scaled_penalties [Preset: minimal] OK ++ Slashings - slashings_with_random_state [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Slashings reset [Preset: minimal] +```diff ++ Slashings reset - flush_slashings [Preset: minimal] OK +``` +## EF - Fulu - Epoch Processing - Sync committee updates [Preset: minimal] +```diff ++ Sync committee updates - sync_committees_no_progress_not_at_period_boundary [Preset: minim OK ++ Sync committee updates - sync_committees_progress_genesis [Preset: minimal] OK ++ Sync committee updates - sync_committees_progress_misc_balances_genesis [Preset: minimal] OK ++ Sync committee updates - sync_committees_progress_misc_balances_not_genesis [Preset: minim OK ++ Sync committee updates - sync_committees_progress_not_genesis [Preset: minimal] OK +``` +## EF - Fulu - Finality [Preset: minimal] +```diff ++ [Valid] EF - Fulu - Finality - finality_no_updates_at_genesis [Preset: minimal] OK ++ [Valid] EF - Fulu - Finality - finality_rule_1 [Preset: minimal] OK ++ [Valid] EF - Fulu - Finality - finality_rule_2 [Preset: minimal] OK ++ [Valid] EF - Fulu - Finality - finality_rule_3 [Preset: minimal] OK ++ [Valid] EF - Fulu - Finality - finality_rule_4 [Preset: minimal] OK +``` +## EF - Fulu - Fork [Preset: minimal] +```diff ++ EF - Fulu - Fork - fork_base_state [Preset: minimal] OK ++ EF - Fulu - Fork - fork_many_next_epoch [Preset: minimal] OK ++ EF - Fulu - Fork - fork_next_epoch [Preset: minimal] OK ++ EF - Fulu - Fork - fork_next_epoch_with_block [Preset: minimal] OK ++ EF - Fulu - Fork - fork_random_large_validator_set [Preset: minimal] OK ++ EF - Fulu - Fork - fork_random_low_balances [Preset: minimal] OK ++ EF - Fulu - Fork - fork_random_misc_balances [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_0 [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_1 [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_2 [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_3 [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_large_validator_set [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_low_balances [Preset: minimal] OK ++ EF - Fulu - Fork - fulu_fork_random_misc_balances [Preset: minimal] OK +``` +## EF - Fulu - Operations - Attestation [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_after_max_inclusion_slot OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_attestation_data_index_not_zero OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_attestation_signature OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_bad_source_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_before_inclusion_delay OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_committee_index OK ++ [Invalid] EF - Fulu - 
Operations - Attestation - invalid_correct_attestation_included_afte OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_current_source_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_empty_participants_seemingly_vali OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_empty_participants_zeroes_sig OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_future_target_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_head_and_target_include OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_head_included_after_max OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_target_included_after_m OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_index OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_mismatched_target_and_slot OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_new_source_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_nonset_bits_for_one_committee OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_nonset_committee_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_nonset_multiple_committee_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_old_source_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_old_target_epoch OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_previous_source_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_source_root_is_target_root OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_few_aggregation_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_many_aggregation_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_many_committee_bits OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_committee_signatu OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_slot_0 OK ++ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_slot_1 OK ++ [Valid] EF - Fulu - Operations - Attestation - at_max_inclusion_slot OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_max_inclu OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_min_inclu OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_one_epoch OK ++ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_sqrt_epoc OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_included_at_epo OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_included_at_sqr OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_min_inclusion_d OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_max_inclusion_ OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_min_inclusion_ OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_del OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_epoch_delay OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_min_inclusio OK ++ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_sqrt_epoch_d OK ++ [Valid] EF - Fulu - Operations - Attestation - multi_proposer_index_iterations OK ++ [Valid] EF - Fulu - Operations - Attestation - 
multiple_committees OK ++ [Valid] EF - Fulu - Operations - Attestation - one_basic_attestation OK ++ [Valid] EF - Fulu - Operations - Attestation - one_committee_with_gap OK ++ [Valid] EF - Fulu - Operations - Attestation - previous_epoch OK +``` +## EF - Fulu - Operations - Attester Slashing [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_all_empty_indices OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_bad_extra_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_bad_replaced_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_duplicate_index_double OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_duplicate_index_normal OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_empty_indices OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_high_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_bad_extra_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_bad_replaced_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_duplicate_index_double OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_duplicate_index_normal OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_empty_indices OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_high_index OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_no_double_or_surround OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_participants_already_slashe OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_same_data OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_unsorted_att_1 OK ++ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_unsorted_att_2 OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - already_exited_long_ago OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - already_exited_recent OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - attestation_from_future OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - basic_double OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - basic_surround OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - low_balances OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - misc_balances OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - proposer_index_slashed OK ++ [Valid] EF - Fulu - Operations - Attester Slashing - with_effective_balance_disparity OK +``` +## EF - Fulu - Operations - BLS to execution change [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_already_0x01 OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_bad_signature OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_current_fork_version OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_genesis_validators_ro OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_incorrect_from_bls_pu OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - 
invalid_previous_fork_version OK ++ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_val_index_out_of_rang OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - genesis_fork_version OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_exited OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_in_activation_queue OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_in_exit_queue OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_not_activated OK ++ [Valid] EF - Fulu - Operations - BLS to execution change - success_withdrawable OK +``` +## EF - Fulu - Operations - Block Header [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_multiple_blocks_single_slot OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_parent_root OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_proposer_index OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_proposer_slashed OK ++ [Invalid] EF - Fulu - Operations - Block Header - invalid_slot_block_header OK ++ [Valid] EF - Fulu - Operations - Block Header - basic_block_header OK +``` +## EF - Fulu - Operations - Consolidation Request [Preset: minimal] +```diff ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_in_current_ OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_in_new_cons OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_source_has_ OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_compou OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_excess OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_insuff OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_consolidation_with_preexi OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - basic_switch_to_compounding OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - consolidation_balance_larger_th OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - consolidation_balance_through_t OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - consolidation_churn_limit_balan OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_exceed_pending_consol OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_exited_source OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_exited_target OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_inactive_source OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_inactive_target OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_no_source_execution_w OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_not_enough_consolidat OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_same_source_target OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_address OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_has_pending_wi OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_not_active_lon OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_source_with_bls_crede OK ++ [Valid] EF - Fulu - Operations - Consolidation 
Request - incorrect_target_with_bls_crede OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_target_with_eth1_cred OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_unknown_source_pubkey OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_unknown_target_pubkey OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_exited_so OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_inactive_ OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_not_autho OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_source_bl OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_source_co OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_unknown_s OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_with_exce OK ++ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_with_pend OK +``` +## EF - Fulu - Operations - Deposit [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Deposit - invalid_bad_merkle_proof OK ++ [Invalid] EF - Fulu - Operations - Deposit - invalid_wrong_deposit_for_deposit_count OK ++ [Valid] EF - Fulu - Operations - Deposit - correct_sig_but_forked_state OK ++ [Valid] EF - Fulu - Operations - Deposit - effective_deposit_with_genesis_fork_version OK ++ [Valid] EF - Fulu - Operations - Deposit - incorrect_sig_new_deposit OK ++ [Valid] EF - Fulu - Operations - Deposit - incorrect_sig_top_up OK ++ [Valid] EF - Fulu - Operations - Deposit - incorrect_withdrawal_credentials_top_up OK ++ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_bad_fork_version OK ++ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_current_fork_version OK ++ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_previous_fork_versio OK ++ [Valid] EF - Fulu - Operations - Deposit - key_validate_invalid_decompression OK ++ [Valid] EF - Fulu - Operations - Deposit - key_validate_invalid_subgroup OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_eth1_withdrawal_credentials OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_max OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_non_versioned_withdrawal_credenti OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_over_max OK ++ [Valid] EF - Fulu - Operations - Deposit - new_deposit_under_max OK ++ [Valid] EF - Fulu - Operations - Deposit - top_up__less_effective_balance OK ++ [Valid] EF - Fulu - Operations - Deposit - top_up__max_effective_balance OK ++ [Valid] EF - Fulu - Operations - Deposit - top_up__zero_balance OK +``` +## EF - Fulu - Operations - Deposit Request [Preset: minimal] +```diff ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_invalid_sig OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_max_effective OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_min_activatio OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_set_start_ind OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_set_start_ind OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_invali OK ++ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_max_ef OK ++ [Valid] EF - Fulu 
- Operations - Deposit Request - process_deposit_request_top_up_min_ac OK +``` +## EF - Fulu - Operations - Execution Payload [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_first_payloa OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_first_paylo OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_regular_pay OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_pre_randao_regular_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_prev_randao_first_paylo OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_correct_input__execution_in OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_exceed_max_blobs_per_block OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_first_payl OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_regular_pa OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_first_payloa OK ++ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_regular_payl OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_blob_tx_type OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_block_hash OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitment OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitments_order OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_1_byte OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_1_extr OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_32_ext OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_empty OK ++ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_no_blobs_but_ OK ++ [Valid] EF - Fulu - Operations - Execution Payload - no_commitments_for_transactions OK ++ [Valid] EF - Fulu - Operations - Execution Payload - no_transactions_with_commitments OK ++ [Valid] EF - Fulu - Operations - Execution Payload - zeroed_commitment OK +``` +## EF - Fulu - Operations - Proposer Slashing [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_different_proposer_indices OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_d OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_s OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_proposer_index OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1 OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2 OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2_swap OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_2 OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_not_activated OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_slashed OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_withdrawn OK ++ [Invalid] EF - Fulu - Operations - Proposer Slashing - 
invalid_slots_of_different_epochs OK ++ [Valid] EF - Fulu - Operations - Proposer Slashing - basic OK ++ [Valid] EF - Fulu - Operations - Proposer Slashing - block_header_from_future OK ++ [Valid] EF - Fulu - Operations - Proposer Slashing - slashed_and_proposer_index_the_same OK +``` +## EF - Fulu - Operations - Sync Aggregate [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_bad_domain OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_extra_participant OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_infinite_signature_w OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_infinite_signature_w OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_missing_participant OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_no_participants OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_past_block OK ++ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_previous_committee OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - proposer_in_committee_with_participati OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - proposer_in_committee_without_particip OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_all_but_one_participating_witho OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_high_participation_without_dupl OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_low_participation_without_dupli OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_misc_balances_and_half_particip OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_only_one_participant_without_du OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - random_with_exits_without_duplicates OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_empty_participa OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_nonduplicate_co OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_not_full_partic OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_nonparticipating_e OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_nonparticipating_w OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_participating_exit OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_participating_with OK ++ [Valid] EF - Fulu - Operations - Sync Aggregate - valid_signature_future_committee OK +``` +## EF - Fulu - Operations - Voluntary Exit [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_incorrect_signature OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_already_exited OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_exit_in_future OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_has_pending_withdraw OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_incorrect_validator_ OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_not_active OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_not_active_long_enou OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_fo OK ++ [Invalid] EF - Fulu - 
Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_fo OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_vers OK ++ [Invalid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_vers OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - basic OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exit OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_above_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__min_churn OK ++ [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK +``` +## EF - Fulu - Operations - Withdrawal Request [Preset: minimal] +```diff ++ [Valid] EF - Fulu - Operations - Withdrawal Request - activation_epoch_less_than_shard_c OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_partial_withdrawal_request OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_partial_withdrawal_request_h OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_partial_withdrawal_request_l OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_comp OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_firs OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_full OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - full_exit_request_has_partial_with OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_inactive_validator OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_source_address OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_withdrawal_credential_pr OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - insufficient_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - insufficient_effective_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - no_compounding_credentials OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - no_excess_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - on_withdrawal_request_initiated_ex OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_activation_epoc OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_incorrect_sourc OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_incorrect_withd OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_on_exit_initiat OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_queue_full OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_request_with_hi OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_request_with_hi OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_request_with_lo OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_request_with_pe OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_request_with_pe OK ++ [Valid] EF - Fulu - Operations - Withdrawal 
Request - pending_withdrawals_consume_all_ex OK ++ [Valid] EF - Fulu - Operations - Withdrawal Request - unknown_pubkey OK +``` +## EF - Fulu - Operations - Withdrawals [Preset: minimal] +```diff ++ [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - full_pending_withdrawals_but_first_skippe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_e OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_m OK ++ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_m OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_at_max OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_swe OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_exiting_validator OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_low_effective_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_and_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_next_epoch OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effec OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_effective_sweep_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK ++ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_sweep_different_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_0 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_1 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_2 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_3 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_4 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_5 OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_max_partial_withdrawable OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_max_plus_one_withdrawable OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance_compounding OK ++ [Valid] 
EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance_compound OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_active_a OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited_a OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_in_exit_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_not_yet_ OK ++ [Valid] EF - Fulu - Operations - Withdrawals - success_two_partial_withdrawable OK +``` +## EF - Fulu - Random [Preset: minimal] +```diff ++ [Valid] EF - Fulu - Random - randomized_0 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_1 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_10 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_11 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_12 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_13 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_14 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_15 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_2 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_3 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_4 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_5 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_6 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_7 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_8 [Preset: minimal] OK ++ [Valid] EF - Fulu - Random - randomized_9 [Preset: minimal] OK +``` +## EF - Fulu - Rewards [Preset: minimal] +```diff ++ EF - Fulu - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK ++ EF - Fulu - Rewards - empty [Preset: minimal] OK ++ EF - Fulu - Rewards - empty_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - full_all_correct [Preset: minimal] OK ++ EF - Fulu - Rewards - full_but_partial_participation [Preset: minimal] OK ++ EF - Fulu - Rewards - full_but_partial_participation_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - full_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_0 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_1 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_2 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_3 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_4 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_low_balances_0 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_low_balances_1 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_misc_balances [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_seven_epoch_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_ten_epoch_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_without_leak_0 [Preset: minimal] OK ++ EF - Fulu - Rewards - full_random_without_leak_and_current_exit_0 [Preset: minimal] OK ++ EF - Fulu - Rewards - half_full [Preset: minimal] OK ++ EF - Fulu - Rewards - half_full_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - quarter_full [Preset: minimal] OK ++ EF - Fulu - Rewards - quarter_full_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - 
some_very_low_effective_balances_that_attested [Preset: minimal] OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_attested_leak [Preset: minimal OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_did_not_attest [Preset: minima OK ++ EF - Fulu - Rewards - some_very_low_effective_balances_that_did_not_attest_leak [Preset: m OK ++ EF - Fulu - Rewards - with_exited_validators [Preset: minimal] OK ++ EF - Fulu - Rewards - with_exited_validators_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - with_not_yet_activated_validators [Preset: minimal] OK ++ EF - Fulu - Rewards - with_not_yet_activated_validators_leak [Preset: minimal] OK ++ EF - Fulu - Rewards - with_slashed_validators [Preset: minimal] OK ++ EF - Fulu - Rewards - with_slashed_validators_leak [Preset: minimal] OK +``` +## EF - Fulu - SSZ consensus objects [Preset: minimal] +```diff ++ Testing AggregateAndProof OK ++ Testing Attestation OK ++ Testing AttestationData OK ++ Testing AttesterSlashing OK ++ Testing BLSToExecutionChange OK ++ Testing BeaconBlock OK ++ Testing BeaconBlockBody OK ++ Testing BeaconBlockHeader OK ++ Testing BeaconState OK ++ Testing BlobIdentifier OK ++ Testing BlobSidecar OK ++ Testing Checkpoint OK ++ Testing ConsolidationRequest OK ++ Testing ContributionAndProof OK ++ Testing DataColumnIdentifier OK ++ Testing DataColumnSidecar OK ++ Testing Deposit OK ++ Testing DepositData OK ++ Testing DepositMessage OK ++ Testing DepositRequest OK ++ Testing Eth1Block OK ++ Testing Eth1Data OK ++ Testing ExecutionPayload OK ++ Testing ExecutionPayloadHeader OK ++ Testing ExecutionRequests OK ++ Testing Fork OK ++ Testing ForkData OK ++ Testing HistoricalBatch OK ++ Testing HistoricalSummary OK ++ Testing IndexedAttestation OK ++ Testing LightClientBootstrap OK ++ Testing LightClientFinalityUpdate OK ++ Testing LightClientHeader OK ++ Testing LightClientOptimisticUpdate OK ++ Testing LightClientUpdate OK ++ Testing MatrixEntry OK ++ Testing PendingAttestation OK ++ Testing PendingConsolidation OK ++ Testing PendingDeposit OK ++ Testing PendingPartialWithdrawal OK ++ Testing PowBlock OK ++ Testing ProposerSlashing OK ++ Testing SignedAggregateAndProof OK ++ Testing SignedBLSToExecutionChange OK ++ Testing SignedBeaconBlock OK ++ Testing SignedBeaconBlockHeader OK ++ Testing SignedContributionAndProof OK ++ Testing SignedVoluntaryExit OK ++ Testing SigningData OK ++ Testing SingleAttestation OK ++ Testing SyncAggregate OK ++ Testing SyncAggregatorSelectionData OK ++ Testing SyncCommittee OK ++ Testing SyncCommitteeContribution OK ++ Testing SyncCommitteeMessage OK ++ Testing Validator OK ++ Testing VoluntaryExit OK ++ Testing Withdrawal OK ++ Testing WithdrawalRequest OK +``` +## EF - Fulu - Sanity - Blocks [Preset: minimal] +```diff ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: mini OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: minimal] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK ++ [Invalid] 
EF - Fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: minimal] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: minim OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: minimal] OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: minima OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: minima OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK ++ [Invalid] EF - Fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK ++ [Invalid] EF - Fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - activate_and_partial_withdrawal_max_effective_bala OK ++ [Valid] EF - Fulu - Sanity - Blocks - activate_and_partial_withdrawal_overdeposit [Prese OK ++ [Valid] EF - Fulu - Sanity - Blocks - attestation [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - attester_slashing [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: minimal OK ++ [Valid] EF - Fulu - Sanity - Blocks - bls_change [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - deposit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - deposit_in_block [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - deposit_top_up [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition_large_validator_set [Preset OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition_large_validator_set [Preset OK ++ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition_not_finalizing [Preset: min OK ++ [Valid] EF - Fulu - Sanity - Blocks - eth1_data_votes_consensus [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - eth1_data_votes_no_consensus [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - exit_and_bls_change [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_0 [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_1 [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_2 [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_3 [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - high_proposer_index [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - historical_batch [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK ++ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pres OK ++ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK ++ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK ++ [Valid] EF - Fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: mi OK ++ [Valid] EF - Fulu - Sanity - Blocks - proposer_after_inactive_index 
[Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - proposer_self_slashing [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - proposer_slashing [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - skipped_slots [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__full [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__half [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: mi OK ++ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: mi OK ++ [Valid] EF - Fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK ++ [Valid] EF - Fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: minim OK ++ [Valid] EF - Fulu - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ++ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: minimal] OK +``` +## EF - Fulu - Sanity - Slots [Preset: minimal] +```diff ++ EF - Fulu - Slots - balance_change_affects_proposer [Preset: minimal] OK ++ EF - Fulu - Slots - double_empty_epoch [Preset: minimal] OK ++ EF - Fulu - Slots - empty_epoch [Preset: minimal] OK ++ EF - Fulu - Slots - historical_accumulator [Preset: minimal] OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey [Preset: minimal] OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Preset: OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Preset: OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: minimal] OK ++ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_different_signature [Preset: min OK ++ EF - Fulu - Slots - over_epoch_boundary [Preset: minimal] OK ++ EF - Fulu - Slots - pending_consolidation [Preset: minimal] OK ++ EF - Fulu - Slots - slots_1 [Preset: minimal] OK ++ EF - Fulu - Slots - slots_2 [Preset: minimal] OK +``` +## EF - Light client - Data collection [Preset: minimal] +```diff ++ Light client - Data collection - minimal/altair/light_client/data_collection/pyspec_tests/ OK ++ Light client - Data collection - minimal/bellatrix/light_client/data_collection/pyspec_tes OK ++ Light client - Data collection - minimal/bellatrix/light_client/data_collection/pyspec_tes OK ++ Light client - Data collection - minimal/bellatrix/light_client/data_collection/pyspec_tes OK ++ Light client - Data collection - minimal/capella/light_client/data_collection/pyspec_tests OK ++ Light client - Data collection - minimal/capella/light_client/data_collection/pyspec_tests OK ++ Light client - Data collection - minimal/capella/light_client/data_collection/pyspec_tests OK ++ Light client - Data collection - minimal/deneb/light_client/data_collection/pyspec_tests/l OK ++ Light client - Data collection - minimal/electra/light_client/data_collection/pyspec_tests OK +``` ## EF - Light client - Single merkle proof [Preset: minimal] ```diff + Light client - Single merkle proof - minimal/altair/light_client/single_merkle_proof/Beaco OK @@ -3454,7 +4126,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 + Light client - Single merkle proof - 
minimal/electra/light_client/single_merkle_proof/Beac OK + Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK ``` -OK: 18/18 Fail: 0/18 Skip: 0/18 ## EF - Light client - Sync [Preset: minimal] ```diff + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/advance_finality_witho OK @@ -3462,6 +4133,7 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/deneb_store_with_legac OK + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/electra_store_with_leg OK + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/light_client_sync OK ++ Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/light_client_sync_no_f OK + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/supply_sync_committee_ OK + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/advance_finality_wi OK + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/capella_deneb_fork OK @@ -3471,6 +4143,7 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/deneb_store_with_le OK + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/electra_store_with_ OK + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/light_client_sync OK ++ Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/light_client_sync_n OK + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/supply_sync_committ OK + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/advance_finality_with OK + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/deneb_electra_fork OK @@ -3478,17 +4151,19 @@ OK: 18/18 Fail: 0/18 Skip: 0/18 + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/deneb_store_with_lega OK + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/electra_store_with_le OK + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/light_client_sync OK ++ Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/light_client_sync_no_ OK + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/supply_sync_committee OK + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/advance_finality_withou OK + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/electra_fork OK + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/electra_store_with_lega OK + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/light_client_sync OK ++ Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/light_client_sync_no_fo OK + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/supply_sync_committee_f OK + Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/advance_finality_with OK + Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/light_client_sync OK ++ Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/light_client_sync_no_ OK + Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/supply_sync_committee OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Light client - Update ranking [Preset: minimal] ```diff + Light client - Update ranking - minimal/altair/light_client/update_ranking/pyspec_tests/up OK @@ -3497,10 +4172,8 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + Light client - Update ranking - 
minimal/deneb/light_client/update_ranking/pyspec_tests/upd OK + Light client - Update ranking - minimal/electra/light_client/update_ranking/pyspec_tests/u OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Merkle proof [Preset: minimal] ```diff - Merkle proof - Single merkle proof - eip7594 Skip + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK @@ -3509,24 +4182,28 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK + Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK + Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ++ Merkle proof - Single merkle proof - minimal/fulu/merkle_proof/single_merkle_proof/BeaconB OK ``` -OK: 8/9 Fail: 0/9 Skip: 1/9 ## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: minimal] ```diff + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - Eth1 data reset [Preset: minimal] ```diff + Eth1 data reset - eth1_vote_no_reset [Preset: minimal] OK + Eth1 data reset - eth1_vote_reset [Preset: minimal] OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 ## EF - Phase 0 - Epoch Processing - Historical roots update [Preset: minimal] ```diff + Historical roots update - historical_root_accumulator [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - Justification & Finalization [Preset: minimal] ```diff + Justification & Finalization - 123_ok_support [Preset: minimal] OK @@ -3540,17 +4217,14 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Justification & Finalization - 23_poor_support [Preset: minimal] OK + Justification & Finalization - balance_threshold_with_exited_validators [Preset: minimal] OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Phase 0 - Epoch Processing - Participation record updates [Preset: minimal] ```diff + Participation record updates - updated_participation_record [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - RANDAO mixes reset [Preset: minimal] ```diff + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Epoch Processing - Registry updates [Preset: minimal] ```diff + Registry updates - activation_queue_activation_and_ejection__1 [Preset: minimal] OK @@ -3569,7 +4243,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + Registry updates - ejection_past_churn_limit_scaled [Preset: minimal] OK + Registry updates - invalid_large_withdrawable_epoch [Preset: minimal] OK ``` -OK: 15/15 Fail: 0/15 
Skip: 0/15 ## EF - Phase 0 - Epoch Processing - Rewards and penalties [Preset: minimal] ```diff + Rewards and penalties - almost_empty_attestations [Preset: minimal] OK @@ -3584,7 +4257,7 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Rewards and penalties - full_attestation_participation [Preset: minimal] OK + Rewards and penalties - full_attestation_participation_with_leak [Preset: minimal] OK + Rewards and penalties - full_attestations_misc_balances [Preset: minimal] OK -+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: minimal] OK ++ Rewards and penalties - full_attestations_one_validator_one_gwei [Preset: minimal] OK + Rewards and penalties - full_attestations_random_incorrect_fields [Preset: minimal] OK + Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: minimal] OK + Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: minimal] OK @@ -3592,7 +4265,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + Rewards and penalties - random_fill_attestations [Preset: minimal] OK + Rewards and penalties - random_fill_attestations_with_leak [Preset: minimal] OK ``` -OK: 19/19 Fail: 0/19 Skip: 0/19 ## EF - Phase 0 - Epoch Processing - Slashings [Preset: minimal] ```diff + Slashings - low_penalty [Preset: minimal] OK @@ -3601,12 +4273,10 @@ OK: 19/19 Fail: 0/19 Skip: 0/19 + Slashings - scaled_penalties [Preset: minimal] OK + Slashings - slashings_with_random_state [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Phase 0 - Epoch Processing - Slashings reset [Preset: minimal] ```diff + Slashings reset - flush_slashings [Preset: minimal] OK ``` -OK: 1/1 Fail: 0/1 Skip: 0/1 ## EF - Phase 0 - Operations - Attestation [Preset: minimal] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attestation [Preset: mi OK @@ -3651,7 +4321,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attestation [Preset: mi OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attestation [Preset: mi OK ``` -OK: 41/41 Fail: 0/41 Skip: 0/41 ## EF - Phase 0 - Operations - Attester Slashing [Preset: minimal] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attester Slashing [Pres OK @@ -3685,7 +4354,6 @@ OK: 41/41 Fail: 0/41 Skip: 0/41 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attester Slashing [Pres OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Attester Slashing [Pres OK ``` -OK: 30/30 Fail: 0/30 Skip: 0/30 ## EF - Phase 0 - Operations - Block Header [Preset: minimal] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Block Header [Preset: m OK @@ -3695,7 +4363,6 @@ OK: 30/30 Fail: 0/30 Skip: 0/30 + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Block Header [Preset: m OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Block Header [Preset: m OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Phase 0 - Operations - Deposit [Preset: minimal] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Deposit [Preset: minima OK @@ -3716,7 +4383,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Deposit [Preset: minima OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Deposit [Preset: minima OK ``` -OK: 17/17 Fail: 0/17 Skip: 0/17 ## EF - Phase 0 - Operations - Proposer Slashing [Preset: minimal] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - 
Proposer Slashing [Pres OK @@ -3735,7 +4401,6 @@ OK: 17/17 Fail: 0/17 Skip: 0/17 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Proposer Slashing [Pres OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Proposer Slashing [Pres OK ``` -OK: 15/15 Fail: 0/15 Skip: 0/15 ## EF - Phase 0 - Operations - Voluntary Exit [Preset: minimal] ```diff + [Invalid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Voluntary Exit [Preset: OK @@ -3749,7 +4414,6 @@ OK: 15/15 Fail: 0/15 Skip: 0/15 + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Voluntary Exit [Preset: OK + [Valid] EF - Phase 0 - Operations - EF - Phase 0 - Operations - Voluntary Exit [Preset: OK ``` -OK: 10/10 Fail: 0/10 Skip: 0/10 ## EF - Phase 0 - Rewards [Preset: minimal] ```diff + EF - Phase 0 - Rewards - all_balances_too_low_for_reward [Preset: minimal] OK @@ -3802,7 +4466,6 @@ OK: 10/10 Fail: 0/10 Skip: 0/10 + EF - Phase 0 - Rewards - with_slashed_validators [Preset: minimal] OK + EF - Phase 0 - Rewards - with_slashed_validators_leak [Preset: minimal] OK ``` -OK: 49/49 Fail: 0/49 Skip: 0/49 ## EF - Phase 0 - SSZ consensus objects [Preset: minimal] ```diff + Testing AggregateAndProof OK @@ -3833,9 +4496,9 @@ OK: 49/49 Fail: 0/49 Skip: 0/49 + Testing Validator OK + Testing VoluntaryExit OK ``` -OK: 27/27 Fail: 0/27 Skip: 0/27 ## EF - Phase 0 - Sanity - Slots [Preset: minimal] ```diff ++ EF - Phase 0 - Slots - balance_change_affects_proposer [Preset: minimal] OK + EF - Phase 0 - Slots - double_empty_epoch [Preset: minimal] OK + EF - Phase 0 - Slots - empty_epoch [Preset: minimal] OK + EF - Phase 0 - Slots - historical_accumulator [Preset: minimal] OK @@ -3843,7 +4506,6 @@ OK: 27/27 Fail: 0/27 Skip: 0/27 + EF - Phase 0 - Slots - slots_1 [Preset: minimal] OK + EF - Phase 0 - Slots - slots_2 [Preset: minimal] OK ``` -OK: 6/6 Fail: 0/6 Skip: 0/6 ## EF - Phase0 - Finality [Preset: minimal] ```diff + [Valid] EF - Phase0 - Finality - finality_no_updates_at_genesis [Preset: minimal] OK @@ -3852,7 +4514,6 @@ OK: 6/6 Fail: 0/6 Skip: 0/6 + [Valid] EF - Phase0 - Finality - finality_rule_3 [Preset: minimal] OK + [Valid] EF - Phase0 - Finality - finality_rule_4 [Preset: minimal] OK ``` -OK: 5/5 Fail: 0/5 Skip: 0/5 ## EF - Phase0 - Random [Preset: minimal] ```diff + [Valid] EF - Phase0 - Random - randomized_0 [Preset: minimal] OK @@ -3872,7 +4533,6 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + [Valid] EF - Phase0 - Random - randomized_8 [Preset: minimal] OK + [Valid] EF - Phase0 - Random - randomized_9 [Preset: minimal] OK ``` -OK: 16/16 Fail: 0/16 Skip: 0/16 ## EF - Phase0 - Sanity - Blocks [Preset: minimal] ```diff + [Invalid] EF - Phase0 - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK @@ -3921,7 +4581,6 @@ OK: 16/16 Fail: 0/16 Skip: 0/16 + [Valid] EF - Phase0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK + [Valid] EF - Phase0 - Sanity - Blocks - voluntary_exit [Preset: minimal] OK ``` -OK: 45/45 Fail: 0/45 Skip: 0/45 ## ForkChoice ```diff + ForkChoice - minimal/altair/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_ OK @@ -4131,15 +4790,112 @@ OK: 45/45 Fail: 0/45 Skip: 0/45 ForkChoice - minimal/deneb/fork_choice/should_override_forkchoice_update/pyspec_tests/shou Skip + ForkChoice - minimal/deneb/fork_choice/withholding/pyspec_tests/withholding_attack OK + ForkChoice - minimal/deneb/fork_choice/withholding/pyspec_tests/withholding_attack_unviabl OK ++ ForkChoice - minimal/electra/fork_choice/deposit_with_reorg/pyspec_tests/new_validator_dep OK ++ 
ForkChoice - minimal/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest OK ++ ForkChoice - minimal/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_att OK ++ ForkChoice - minimal/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/chain_no_attestations OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/discard_equivocations_on_at OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/discard_equivocations_slash OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/filtered_block_tree OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/genesis OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_w OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attest OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_ep OK ++ ForkChoice - minimal/electra/fork_choice/get_head/pyspec_tests/voting_source_within_two_ep OK + ForkChoice - minimal/electra/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip + ForkChoice - minimal/electra/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ro Skip ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/basic OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/incompatible_justification_ OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/incompatible_justification_ OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/invalid_data_unavailable OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/invalid_incorrect_proof OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/invalid_wrong_blobs_length OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/invalid_wrong_proofs_length OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justification_update_beginn OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justification_update_end_of OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justification_withholding OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justification_withholding_r OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justified_update_always_if_ OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justified_update_monotonic OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/justified_update_not_realiz OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/new_finalized_slot_is_justi OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/not_pull_up_current_epoch_b OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/on_block_before_finalized OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/on_block_checkpoints OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/on_block_finalized_skip_slo OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/on_block_finalized_skip_slo OK + ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/on_block_future_block Skip ++ ForkChoice - 
minimal/electra/fork_choice/on_block/pyspec_tests/proposer_boost OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_blo OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_sl OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/pull_up_on_tick OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/pull_up_past_epoch_block OK ++ ForkChoice - minimal/electra/fork_choice/on_block/pyspec_tests/simple_blob_data OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/delayed_justification_current_ OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/delayed_justification_previous OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/include_votes_another_empty_ch OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/include_votes_another_empty_ch OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/include_votes_another_empty_ch OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed OK ++ ForkChoice - minimal/electra/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_without OK + ForkChoice - minimal/electra/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip + ForkChoice - minimal/electra/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip ++ ForkChoice - minimal/electra/fork_choice/withholding/pyspec_tests/withholding_attack OK ++ ForkChoice - minimal/electra/fork_choice/withholding/pyspec_tests/withholding_attack_unvia OK ++ ForkChoice - minimal/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_at OK ++ ForkChoice - minimal/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_attest OK ++ ForkChoice - minimal/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/chain_no_attestations OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/discard_equivocations_on_attes OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/discard_equivocations_slashed_ OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/filtered_block_tree OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/genesis OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_weig OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestati OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_epoch OK ++ ForkChoice - minimal/fulu/fork_choice/get_head/pyspec_tests/voting_source_within_two_epoch OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/basic OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/incompatible_justification_upd OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/incompatible_justification_upd OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/justification_update_beginning OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/justification_update_end_of_ep OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/justification_withholding OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/justification_withholding_reve OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/justified_update_always_if_bet OK ++ ForkChoice - 
minimal/fulu/fork_choice/on_block/pyspec_tests/justified_update_monotonic OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/justified_update_not_realized_ OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/new_finalized_slot_is_justifie OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/not_pull_up_current_epoch_bloc OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_before_finalized OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_checkpoints OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_finalized_skip_slots OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_finalized_skip_slots_ OK + ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/on_block_future_block Skip ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/proposer_boost OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_block OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot_ OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/pull_up_on_tick OK ++ ForkChoice - minimal/fulu/fork_choice/on_block/pyspec_tests/pull_up_past_epoch_block OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/delayed_justification_current_epo OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/delayed_justification_previous_ep OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/include_votes_another_empty_chain OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/include_votes_another_empty_chain OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/include_votes_another_empty_chain OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed_ju OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed_ju OK ++ ForkChoice - minimal/fulu/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_without_en OK ++ ForkChoice - minimal/fulu/fork_choice/withholding/pyspec_tests/withholding_attack OK ++ ForkChoice - minimal/fulu/fork_choice/withholding/pyspec_tests/withholding_attack_unviable OK ``` -OK: 185/207 Fail: 0/207 Skip: 22/207 ## Sync ```diff + Sync - minimal/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - minimal/capella/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK + Sync - minimal/deneb/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ++ Sync - minimal/electra/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK ``` -OK: 3/3 Fail: 0/3 Skip: 0/3 - ----TOTAL--- -OK: 3385/3408 Fail: 0/3408 Skip: 23/3408 diff --git a/Makefile b/Makefile index 855a637ce5..a3821af672 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024 Status Research & Development GmbH. Licensed under +# Copyright (c) 2019-2025 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -72,7 +72,7 @@ TOOLS_CORE := \ ncli_testnet \ $(TOOLS_CORE_CUSTOMCOMPILE) -# This TOOLS/TOOLS_CORE decomposition is a workaroud so nimbus_beacon_node can +# The TOOLS/TOOLS_CORE decomposition is a workaround so nimbus_beacon_node can # build on its own, and if/when that becomes a non-issue, it can be recombined # to a single TOOLS list. 
TOOLS := $(TOOLS_CORE) nimbus_beacon_node @@ -113,9 +113,9 @@ ifneq ($(OS), Windows_NT) PLATFORM_SPECIFIC_TARGETS += gnosis-build endif -# We don't need these `vendor/holesky` files but fetching them -# may trigger 'This repository is over its data quota' from GitHub -GIT_SUBMODULE_CONFIG := -c lfs.fetchexclude=/public-keys/all.txt,/custom_config_data/genesis.ssz +# We don't need these `vendor/holesky` and `vendor/hoodi` files but +# fetching them may trigger 'This repository is over its data quota' from GitHub +GIT_SUBMODULE_CONFIG := -c lfs.fetchexclude=/public-keys/all.txt,/metadata/genesis.ssz,/parsed/parsedConsensusGenesis.json ifeq ($(NIM_PARAMS),) # "variables.mk" was not included, so we update the submodules. @@ -124,7 +124,7 @@ ifeq ($(NIM_PARAMS),) # with Ctrl+C after deleting the working copy and before getting a chance to # restore it in $(BUILD_SYSTEM_DIR). -# `vendor/holesky` requires Git LFS +# `vendor/holesky` and `vendor/hoodi` require Git LFS ifeq (, $(shell which git-lfs)) ifeq ($(shell uname), Darwin) $(error Git LFS not installed. Run 'brew install git-lfs' to set up) @@ -210,10 +210,10 @@ libbacktrace: # - --base-el-rpc-port + --el-port-offset * [0, --nodes + --light-clients) # - --base-el-ws-port + --el-port-offset * [0, --nodes + --light-clients) # - --base-el-auth-rpc-port + --el-port-offset * [0, --nodes + --light-clients) -UNIT_TEST_BASE_PORT := 9960 -REST_TEST_BASE_PORT := 9990 -MINIMAL_TESTNET_BASE_PORT := 5001 -MAINNET_TESTNET_BASE_PORT := 6501 +UNIT_TEST_BASE_PORT := 29960 +REST_TEST_BASE_PORT := 30990 +MINIMAL_TESTNET_BASE_PORT := 25001 +MAINNET_TESTNET_BASE_PORT := 26501 restapi-test: ./tests/simulation/restapi.sh \ @@ -235,7 +235,7 @@ local-testnet-minimal: --remote-validators-count 512 \ --signer-type $(SIGNER_TYPE) \ --deneb-fork-epoch 0 \ - --electra-fork-epoch 50 \ + --electra-fork-epoch 2 \ --stop-at-epoch 6 \ --disable-htop \ --enable-payload-builder \ @@ -264,7 +264,7 @@ local-testnet-mainnet: --data-dir $@ \ --nodes 2 \ --deneb-fork-epoch 0 \ - --electra-fork-epoch 50 \ + --electra-fork-epoch 2 \ --stop-at-epoch 6 \ --disable-htop \ --base-port $$(( $(MAINNET_TESTNET_BASE_PORT) + EXECUTOR_NUMBER * 400 + 0 )) \ diff --git a/README.md b/README.md index 8f78b4e85c..aa1717d1d7 100644 --- a/README.md +++ b/README.md @@ -32,17 +32,19 @@ Nimbus-eth2 is an extremely efficient consensus layer (eth2) client implementati You can find the information you need to run a beacon node and operate as a validator in [The Book](https://nimbus.guide/). -The [Quickstart](https://nimbus.guide/quick-start.html) in particular will help you quickly connect to either mainnet or the Prater testnet. +The [Quickstart](https://nimbus.guide/quick-start.html) in particular will help you quickly connect to either mainnet or the Hoodi testnet. ## Quickly test your tooling against Nimbus The [Nimbus REST api](https://nimbus.guide/rest-api.html) is now available from: -* http://unstable.mainnet.beacon-api.nimbus.team/ * http://testing.mainnet.beacon-api.nimbus.team/ -* http://unstable.sepolia.beacon-api.nimbus.team/ +* http://unstable.mainnet.beacon-api.nimbus.team/ +* http://testing.hoodi.beacon-api.nimbus.team/ +* http://unstable.hoodi.beacon-api.nimbus.team/ * http://testing.holesky.beacon-api.nimbus.team/ * http://unstable.holesky.beacon-api.nimbus.team/ +* http://unstable.sepolia.beacon-api.nimbus.team/ Note that right now these are very much unstable testing instances. They may be unresponsive at times - so **please do not rely on them for validating**. 
We may also disable them at any time. diff --git a/beacon_chain.nimble b/beacon_chain.nimble index 1854aba8e6..2a3bba39d3 100644 --- a/beacon_chain.nimble +++ b/beacon_chain.nimble @@ -7,13 +7,13 @@ mode = ScriptMode.Verbose -version = "1.4.2" +version = "24.12.0" author = "Status Research & Development GmbH" description = "The Nimbus beacon chain node is a highly efficient Ethereum 2.0 client" license = "MIT or Apache License 2.0" requires( - "nim >= 1.6.12", + "nim == 2.0.12", "https://github.com/status-im/NimYAML", "bearssl", "blscurve", @@ -45,13 +45,15 @@ requires( "unittest2", "web3", "zlib", + "toml_serialization", + "https://github.com/status-im/nim-kzg4844.git", "zxcvbn" ) -requires "https://gitlab.com/status-im/nimbus-security-resources.git" +requires "https://github.com/status-im/nimbus-security-resources.git" import std/tables -let namedBin = { +namedBin = { "beacon_chain/nimbus_beacon_node": "nimbus_beacon_node", "beacon_chain/nimbus_validator_client": "nimbus_validator_client", "ncli/ncli": "ncli", diff --git a/beacon_chain/beacon_chain_db.nim b/beacon_chain/beacon_chain_db.nim index 747777bf69..3a80bea4cf 100644 --- a/beacon_chain/beacon_chain_db.nim +++ b/beacon_chain/beacon_chain_db.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -20,7 +20,6 @@ import forks, presets, state_transition], - ./spec/datatypes/[phase0, altair, bellatrix], "."/[beacon_chain_db_light_client, filepath] from ./spec/datatypes/capella import BeaconState @@ -116,6 +115,8 @@ type blobs: KvStoreRef # (BlockRoot -> BlobSidecar) + columns: KvStoreRef # (BlockRoot -> DataColumnSidecar) + stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot statesNoVal: array[ConsensusFork, KvStoreRef] # StateRoot -> ForkBeaconStateNoImmutableValidators @@ -254,6 +255,13 @@ func blobkey(root: Eth2Digest, index: BlobIndex) : array[40, byte] = ret +func columnkey(root: Eth2Digest, index: ColumnIndex) : array[40, byte] = + var ret: array[40, byte] + ret[0..<8] = toBytes(index) + ret[8..<40] = root.data + + ret + template expectDb(x: auto): untyped = # There's no meaningful error handling implemented for a corrupt database or # full disk - this requires manual intervention, so we'll panic for now @@ -581,6 +589,10 @@ proc new*(T: type BeaconChainDB, var blobs = kvStore db.openKvStore("deneb_blobs").expectDb() + var columns: KvStoreRef + if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: + columns = kvStore db.openKvStore("fulu_columns").expectDb() + # Versions prior to 1.4.0 (altair) stored validators in `immutable_validators` # which stores validator keys in compressed format - this is # slow to load and has been superceded by `immutable_validators2` which uses @@ -616,6 +628,7 @@ proc new*(T: type BeaconChainDB, keyValues: keyValues, blocks: blocks, blobs: blobs, + columns: columns, stateRoots: stateRoots, statesNoVal: statesNoVal, stateDiffs: stateDiffs, @@ -783,6 +796,8 @@ proc close*(db: BeaconChainDB) = if db.db == nil: return # Close things roughly in reverse order + if not isNil(db.columns): + discard db.columns.close() if not isNil(db.blobs): discard db.blobs.close() db.lcData.close() @@ -841,6 +856,17 @@ proc delBlobSidecar*( root: Eth2Digest, index: BlobIndex): 
bool = db.blobs.del(blobkey(root, index)).expectDb() +proc putDataColumnSidecar*( + db: BeaconChainDB, + value: DataColumnSidecar) = + let block_root = hash_tree_root(value.signed_block_header.message) + db.columns.putSZSSZ(columnkey(block_root, value.index), value) + +proc delDataColumnSidecar*( + db: BeaconChainDB, + root: Eth2Digest, index: ColumnIndex): bool = + db.columns.del(columnkey(root, index)).expectDb() + proc updateImmutableValidators*( db: BeaconChainDB, validators: openArray[Validator]) = # Must be called before storing a state that references the new validators @@ -1105,6 +1131,17 @@ proc getBlobSidecar*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex, value: var BlobSidecar): bool = db.blobs.getSZSSZ(blobkey(root, index), value) == GetResult.found +proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, + index: ColumnIndex, data: var seq[byte]): bool = + let dataPtr = addr data # Short-lived + func decode(data: openArray[byte]) = + assign(dataPtr[], data) + db.columns.get(columnkey(root, index), decode).expectDb() + +proc getDataColumnSidecar*(db: BeaconChainDB, root: Eth2Digest, index: ColumnIndex, + value: var DataColumnSidecar): bool = + db.columns.getSZSSZ(columnkey(root, index), value) == GetResult.found + proc getBlockSZ*( db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], T: type phase0.TrustedSignedBeaconBlock): bool = diff --git a/beacon_chain/beacon_chain_db_immutable.nim b/beacon_chain/beacon_chain_db_immutable.nim index d51d9f8bc9..c95cd9629a 100644 --- a/beacon_chain/beacon_chain_db_immutable.nim +++ b/beacon_chain/beacon_chain_db_immutable.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -132,7 +132,7 @@ type current_sync_committee*: SyncCommittee # [New in Altair] next_sync_committee*: SyncCommittee # [New in Altair] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#beaconstate # Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ # reading and writing BellatrixBeaconStateNoImmutableValidators* = object diff --git a/beacon_chain/beacon_chain_db_light_client.nim b/beacon_chain/beacon_chain_db_light_client.nim index 3ba23b0229..e14a7b353c 100644 --- a/beacon_chain/beacon_chain_db_light_client.nim +++ b/beacon_chain/beacon_chain_db_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
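For illustration, a minimal, dependency-free sketch of the 40-byte key layout used by the `blobkey`/`columnkey` helpers added earlier in this diff: an 8-byte index encoding followed by the 32-byte block root, so all sidecars of one block share the same root suffix and differ only in the index prefix. The `Root` type and `compositeKey` name are placeholders for this sketch, and the little-endian index encoding here is an assumption of the sketch, not a claim about `toBytes`.

```nim
type
  Root = array[32, byte]   # stands in for Eth2Digest.data in this sketch

func compositeKey(root: Root, index: uint64): array[40, byte] =
  # 8-byte index (little-endian in this sketch), then the 32-byte block root.
  for i in 0 ..< 8:
    result[i] = byte(index shr (8 * i) and 0xff'u64)
  for i in 0 ..< 32:
    result[8 + i] = root[i]

when isMainModule:
  var root: Root
  root[0] = 0xab
  let key = compositeKey(root, 3)
  doAssert key[0] == 3 and key[8] == 0xab
```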
@@ -28,7 +28,7 @@ logScope: topics = "lcdata" # - Altair: ~38 KB per `SyncCommitteePeriod` (~1.0 MB per month) # - Capella: ~221 KB per `SyncCommitteePeriod` (~6.0 MB per month) # - Deneb: ~225 KB per `SyncCommitteePeriod` (~6.2 MB per month) -# - Electra: ~249 KB per `SyncCommitteePeriod` (~6.8 MB per month) +# - Electra: ~249 KB per `SyncCommitteePeriod` (~6.2 MB per month) # # `lc_xxxxx_current_branches` holds Merkle proofs needed to # construct `LightClientBootstrap` objects. @@ -76,8 +76,8 @@ logScope: topics = "lcdata" # 600 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32 # - Deneb: 256*(112+4+616+128+40)/1024*28/1024 # 616 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8 -# - Electra: 256*(112+4+712+128+40)/1024*28/1024 -# 712 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8+32+32+32 +# - Electra: 256*(112+4+616+128+40)/1024*28/1024 +# 616 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8 # # Committee branch computations: # - Altair: 256*(5*32+8)/1024*28/1024 @@ -96,9 +96,9 @@ logScope: topics = "lcdata" # # Update computations: # - Altair: (112+24624+5*32+112+6*32+112+8+9)/1024*28/1024 -# - Capella: (4+884+24624+5*32+4+884+6*32+112+8+9)/1024*28/1024 -# - Deneb: (4+900+24624+5*32+4+900+6*32+112+8+9)/1024*28/1024 -# - Electra: (4+996+24624+6*32+4+996+7*32+112+8+9)/1024*28/1024 +# - Capella: (4+844+24624+5*32+4+844+6*32+112+8+9)/1024*28/1024 +# - Deneb: (4+860+24624+5*32+4+860+6*32+112+8+9)/1024*28/1024 +# - Electra: (4+860+24624+6*32+4+860+7*32+112+8+9)/1024*28/1024 type LightClientHeaderStore = object diff --git a/beacon_chain/beacon_chain_file.nim b/beacon_chain/beacon_chain_file.nim index f19d4cf84c..46b7a4bc31 100644 --- a/beacon_chain/beacon_chain_file.nim +++ b/beacon_chain/beacon_chain_file.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -71,7 +71,7 @@ const ChainFileVersion = 1'u32 ChainFileHeaderValue = 0x424D494E'u32 ChainFileBufferSize* = 4096 - MaxChunkSize = int(GOSSIP_MAX_SIZE) + MaxChunkSize = int(MAX_PAYLOAD_SIZE) ChainFileHeaderArray = ChainFileHeaderValue.toBytesLE() IncompleteWriteError = "Unable to write data to file, disk full?" MaxForksCount* = 16384 diff --git a/beacon_chain/beacon_clock.nim b/beacon_chain/beacon_clock.nim index 3a07a6fa4d..f8915e1376 100644 --- a/beacon_chain/beacon_clock.nim +++ b/beacon_chain/beacon_clock.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -27,7 +27,7 @@ type ## which blocks are valid - in particular, blocks are not valid if they ## come from the future as seen from the local clock. 
## - ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/fork-choice.md#fork-choice + ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/fork-choice.md#fork-choice ## # TODO consider NTP and network-adjusted timestamps as outlined here: # https://ethresear.ch/t/network-adjusted-timestamps/4187 diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 0017eef2d9..04d67e1505 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -14,6 +14,7 @@ import # Nimble packages chronos, presto, bearssl/rand, + metrics, metrics/chronos_httpserver, # Local modules "."/[beacon_clock, beacon_chain_db, conf, light_client], @@ -46,11 +47,14 @@ type EventBus* = object headQueue*: AsyncEventQueue[HeadChangeInfoObject] blocksQueue*: AsyncEventQueue[EventBeaconBlockObject] - attestQueue*: AsyncEventQueue[phase0.Attestation] + blockGossipQueue*: AsyncEventQueue[EventBeaconBlockGossipObject] + phase0AttestQueue*: AsyncEventQueue[phase0.Attestation] + singleAttestQueue*: AsyncEventQueue[SingleAttestation] exitQueue*: AsyncEventQueue[SignedVoluntaryExit] blsToExecQueue*: AsyncEventQueue[SignedBLSToExecutionChange] propSlashQueue*: AsyncEventQueue[ProposerSlashing] - attSlashQueue*: AsyncEventQueue[phase0.AttesterSlashing] + phase0AttSlashQueue*: AsyncEventQueue[phase0.AttesterSlashing] + electraAttSlashQueue*: AsyncEventQueue[electra.AttesterSlashing] blobSidecarQueue*: AsyncEventQueue[BlobSidecarInfoObject] finalQueue*: AsyncEventQueue[FinalizationInfoObject] reorgQueue*: AsyncEventQueue[ReorgInfoObject] @@ -85,6 +89,7 @@ type elManager*: ELManager restServer*: RestServerRef keymanagerHost*: ref KeymanagerHost + metricsServer*: Opt[MetricsHttpServerRef] keymanagerServer*: RestServerRef keystoreCache*: KeystoreCacheRef eventBus*: EventBus diff --git a/beacon_chain/conf.nim b/beacon_chain/conf.nim index 619f6de755..d12ba63bb8 100644 --- a/beacon_chain/conf.nim +++ b/beacon_chain/conf.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -52,7 +52,7 @@ const defaultSigningNodeRequestTimeout* = 60 defaultBeaconNode* = "http://127.0.0.1:" & $defaultEth2RestPort defaultBeaconNodeUri* = parseUri(defaultBeaconNode) - defaultGasLimit* = 30_000_000 + defaultGasLimit* = 36_000_000 defaultAdminListenAddressDesc* = $defaultAdminListenAddress defaultBeaconNodeDesc = $defaultBeaconNode @@ -253,6 +253,12 @@ type desc: "Subscribe to all subnet topics when gossiping" name: "subscribe-all-subnets" .}: bool + peerdasSupernode* {. + hidden + defaultValue: false, + desc: "Subscribe to all column subnets, thereby becoming a peerdas supernode" + name: "debug-peerdas-supernode" .}: bool + slashingDbKind* {. 
hidden defaultValue: SlashingDbKind.v2 @@ -570,7 +576,7 @@ type name: "discv5" .}: bool dumpEnabled* {. - desc: "Write SSZ dumps of blocks, attestations and states to data dir" + desc: "Write SSZ dumps of blocks and states to data dir" defaultValue: false name: "dump" .}: bool @@ -1490,13 +1496,8 @@ proc engineApiUrls*(config: BeaconNodeConf): seq[EngineApiUrl] = config.jwtSecret.configJwtSecretOpt) proc loadKzgTrustedSetup*(): Result[void, string] = - const - vendorDir = currentSourcePath.parentDir.replace('\\', '/') & "/../vendor" - trustedSetup = staticRead( - vendorDir & "/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt") - static: doAssert const_preset in ["mainnet", "gnosis", "minimal"] - loadTrustedSetupFromString(trustedSetup, 0) + loadTrustedSetupFromString(kzg.trustedSetup, 0) proc loadKzgTrustedSetup*(trustedSetupPath: string): Result[void, string] = try: diff --git a/beacon_chain/consensus_object_pools/README.md b/beacon_chain/consensus_object_pools/README.md index ecc16080ac..7f3129b9f9 100644 --- a/beacon_chain/consensus_object_pools/README.md +++ b/beacon_chain/consensus_object_pools/README.md @@ -9,13 +9,13 @@ to specs: - unaggregated attestation: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#voluntary_exit - Attester slashings: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#attester_slashing -- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#proposer_slashing +- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#proposer_slashing After "gossip validation" the consensus objects can be rebroadcasted as they are optimistically good, however for internal processing further verification is needed. For blocks, this means verifying state transition and all contained cryptographic signatures (instead of just the proposer signature). For other consensus objects, it is possible that gossip validation is a superset of consensus verification (TODO). -The pools presenet in this folder are: +The pools present in this folder are: - block_pools: - block_quarantine: for seemingly valid blocks that are on a fork unknown to us. - block_clearance: to verify (state_transition + cryptography) candidate blocks. diff --git a/beacon_chain/consensus_object_pools/attestation_pool.nim b/beacon_chain/consensus_object_pools/attestation_pool.nim index ff70a1f437..af9c5e3887 100644 --- a/beacon_chain/consensus_object_pools/attestation_pool.nim +++ b/beacon_chain/consensus_object_pools/attestation_pool.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -8,7 +8,6 @@ {.push raises: [].} import - std/algorithm, # Status libraries metrics, chronicles, stew/byteutils, @@ -19,6 +18,7 @@ import ../fork_choice/fork_choice, ../beacon_clock +from std/algorithm import sort from std/sequtils import keepItIf, maxIndex export blockchain_dag, fork_choice @@ -33,11 +33,11 @@ const type OnPhase0AttestationCallback = proc(data: phase0.Attestation) {.gcsafe, raises: [].} - OnElectraAttestationCallback = - proc(data: electra.Attestation) {.gcsafe, raises: [].} + OnSingleAttestationCallback = + proc(data: SingleAttestation) {.gcsafe, raises: [].} Validation[CVBType] = object - ## Validations collect a set of signatures for a distict attestation - in + ## Validations collect a set of signatures for a distinct attestation - in ## eth2, a single bit is used to keep track of which signatures have been ## added to the aggregate meaning that only non-overlapping aggregates may ## be further combined. @@ -66,6 +66,10 @@ type ## voted on different states - this map keeps track of each vote keyed by ## getAttestationCandidateKey() + CandidateIdxType {.pure.} = enum + phase0Idx + electraIdx + AttestationPool* = object ## The attestation pool keeps track of all attestations that potentially ## could be added to a block during block production. @@ -96,7 +100,13 @@ type ## sequence based on validator indices onPhase0AttestationAdded: OnPhase0AttestationCallback - onElectraAttestationAdded: OnElectraAttestationCallback + onSingleAttestationAdded: OnSingleAttestationCallback + + CandidateKey = tuple + ## Search key for selecting the final candidates for + ## electra chain aggregates. + hash: Eth2Digest + slot: Slot logScope: topics = "attpool" @@ -106,7 +116,7 @@ declareGauge attestation_pool_block_attestation_packing_time, proc init*(T: type AttestationPool, dag: ChainDAGRef, quarantine: ref Quarantine, onPhase0Attestation: OnPhase0AttestationCallback = nil, - onElectraAttestation: OnElectraAttestationCallback = nil): T = + onSingleAttestation: OnSingleAttestationCallback = nil): T = ## Initialize an AttestationPool from the dag `headState` ## The `finalized_root` works around the finalized_checkpoint of the genesis block ## holding a zero_root. @@ -182,7 +192,7 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef, quarantine: quarantine, forkChoice: forkChoice, onPhase0AttestationAdded: onPhase0Attestation, - onElectraAttestationAdded: onElectraAttestation + onSingleAttestationAdded: onSingleAttestation ) proc addForkChoiceVotes( @@ -198,11 +208,12 @@ proc addForkChoiceVotes( # hopefully the fork choice will heal itself over time. 
error "Couldn't add attestation to fork choice, bug?", err = v.error() -func candidateIdx(pool: AttestationPool, slot: Slot, - isElectra: bool = false): Opt[int] = +func candidateIdx( + pool: AttestationPool, slot: Slot, candidateIdxType: CandidateIdxType): + Opt[int] = static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len - let poolLength = if isElectra: + let poolLength = if candidateIdxType == CandidateIdxType.electraIdx: pool.electraCandidates.lenu64 else: pool.phase0Candidates.lenu64 if slot >= pool.startingSlot and @@ -351,13 +362,12 @@ func covers( proc addAttestation( entry: var AttestationEntry, - attestation: phase0.Attestation | electra.Attestation, + attestation: phase0.Attestation | electra.Attestation, _: int, signature: CookedSig): bool = logScope: attestation = shortLog(attestation) - let - singleIndex = oneIndex(attestation.aggregation_bits) + let singleIndex = oneIndex(attestation.aggregation_bits) if singleIndex.isSome(): if singleIndex.get() in entry.singles: @@ -392,6 +402,33 @@ proc addAttestation( true +proc addAttestation( + entry: var AttestationEntry, attestation: SingleAttestation, + index_in_committee: int, + signature: CookedSig): bool = + logScope: + attestation = shortLog(attestation) + + if index_in_committee in entry.singles: + trace "SingleAttestation already seen", + singles = entry.singles.len(), + aggregates = entry.aggregates.len() + + return false + + debug "SingleAttestation resolved", + singles = entry.singles.len(), + aggregates = entry.aggregates.len() + + entry.singles[index_in_committee] = signature + + true + +func getAttestationCandidateKey( + attestationDataRoot: Eth2Digest, committee_index: CommitteeIndex): + Eth2Digest = + hash_tree_root([attestationDataRoot, hash_tree_root(committee_index.uint64)]) + func getAttestationCandidateKey( data: AttestationData, committee_index: Opt[CommitteeIndex]): Eth2Digest = @@ -403,18 +440,13 @@ func getAttestationCandidateKey( # i.e. no committees selected, so it can't be an actual Electra attestation hash_tree_root(data) else: - hash_tree_root([hash_tree_root(data), hash_tree_root(committee_index.get.uint64)]) - -func getAttestationCandidateKey( - attestationDataRoot: Eth2Digest, committee_index: CommitteeIndex): - Eth2Digest = - hash_tree_root([attestationDataRoot, hash_tree_root(committee_index.uint64)]) + getAttestationCandidateKey(hash_tree_root(data), committee_index.get) proc addAttestation*( pool: var AttestationPool, - attestation: phase0.Attestation | electra.Attestation, - attesting_indices: openArray[ValidatorIndex], - signature: CookedSig, wallTime: BeaconTime) = + attestation: phase0.Attestation | electra.Attestation | SingleAttestation, + attesting_indices: openArray[ValidatorIndex], beacon_committee_len: int, + index_in_committee: int, signature: CookedSig, wallTime: BeaconTime) = ## Add an attestation to the pool, assuming it's been validated already. 
## ## Assuming the votes in the attestation have not already been seen, the @@ -428,7 +460,14 @@ proc addAttestation*( updateCurrent(pool, wallTime.slotOrZero) - let candidateIdx = pool.candidateIdx(attestation.data.slot) + when kind(typeof(attestation)) == ConsensusFork.Electra: + let candidateIdx = pool.candidateIdx( + attestation.data.slot, CandidateIdxType.electraIdx) + elif kind(typeof(attestation)) == ConsensusFork.Phase0: + let candidateIdx = pool.candidateIdx( + attestation.data.slot, CandidateIdxType.phase0Idx) + else: + static: doAssert false if candidateIdx.isNone: debug "Skipping old attestation for block production", startingSlot = pool.startingSlot @@ -442,15 +481,21 @@ proc addAttestation*( # creating an unnecessary AttestationEntry on the hot path and avoiding # multiple lookups template addAttToPool(attCandidates: untyped, entry: untyped, committee_index: untyped) = - let attestation_data_root = getAttestationCandidateKey(entry.data, committee_index) - - attCandidates[candidateIdx.get()].withValue(attestation_data_root, entry) do: - if not addAttestation(entry[], attestation, signature): + # `AttestationData.index == 0` in Electra, but the attestation pool always + # represents an AttestationEntry regardless as having the actual committee + # index. The entry, therefore, is not the same as the AttestationData, and + # thus cannot function as the basis for deriving the hashtable key for the + # entry. Instead use the (correctly data.index == 0) attestation passed to + # addAttestation. + let candidate_key = getAttestationCandidateKey(attestation.data, committee_index) + + attCandidates[candidateIdx.get()].withValue(candidate_key, entry) do: + if not addAttestation(entry[], attestation, index_in_committee, signature): return do: if not addAttestation( - attCandidates[candidateIdx.get()].mgetOrPut(attestation_data_root, entry), - attestation, signature): + attCandidates[candidateIdx.get()].mgetOrPut(candidate_key, entry), + attestation, index_in_committee, signature): # Returns from overall function, not only template return @@ -468,8 +513,9 @@ proc addAttestation*( template addAttToPool(_: electra.Attestation) {.used.} = let - committee_index = get_committee_index_one(attestation.committee_bits).expect("TODO") - data = AttestationData( + committee_index = get_committee_index_one( + attestation.committee_bits).expect("Gossip validation requires this") + data = AttestationData( slot: attestation.data.slot, index: uint64 committee_index, beacon_block_root: attestation.data.beacon_block_root, @@ -483,9 +529,31 @@ proc addAttestation*( attestation.data.slot, attesting_indices, attestation.data.beacon_block_root, wallTime) + # There does not seem to be an SSE stream event corresponding to this, + # because both attestation and single_attestation specifically specify + # the `beacon_attestation_{subnet_id}` topic and that in not possible, + # for this type, in Electra because this case is always an aggregate. 
+ + template addAttToPool(_: SingleAttestation) {.used.} = + let + data = AttestationData( + slot: attestation.data.slot, + index: uint64 attestation.committee_index, + beacon_block_root: attestation.data.beacon_block_root, + source: attestation.data.source, + target: attestation.data.target) + newAttEntry = ElectraAttestationEntry( + data: data, committee_len: beacon_committee_len) + addAttToPool( + pool.electraCandidates, newAttEntry, + Opt.some attestation.committee_index.CommitteeIndex) + pool.addForkChoiceVotes( + attestation.data.slot, attesting_indices, + attestation.data.beacon_block_root, wallTime) + # Send notification about new attestation via callback. - if not(isNil(pool.onElectraAttestationAdded)): - pool.onElectraAttestationAdded(attestation) + if not(isNil(pool.onSingleAttestationAdded)): + pool.onSingleAttestationAdded(attestation) addAttToPool(attestation) @@ -496,11 +564,10 @@ func covers*( ## the existing aggregates, making it redundant ## the `var` attestation pool is needed to use `withValue`, else Table becomes ## unusably inefficient - let candidateIdx = pool.candidateIdx(data.slot) - if candidateIdx.isNone: + let candidateIdx = pool.candidateIdx(data.slot, CandidateIdxType.phase0Idx).valueOr: return false - pool.phase0Candidates[candidateIdx.get()].withValue( + pool.phase0Candidates[candidateIdx].withValue( getAttestationCandidateKey(data, Opt.none CommitteeIndex), entry): if entry[].covers(bits): return true @@ -509,21 +576,21 @@ func covers*( func covers*( pool: var AttestationPool, data: AttestationData, - bits: ElectraCommitteeValidatorsBits): bool = + aggregation_bits: ElectraCommitteeValidatorsBits, + committee_bits: AttestationCommitteeBits): bool = ## Return true iff the given attestation already is fully covered by one of ## the existing aggregates, making it redundant ## the `var` attestation pool is needed to use `withValue`, else Table becomes ## unusably inefficient - let candidateIdx = pool.candidateIdx(data.slot) - if candidateIdx.isNone: + let candidateIdx = pool.candidateIdx(data.slot, CandidateIdxType.electraIdx).valueOr: return false - debugComment "foo" - # needs to know more than attestationdata now - #let attestation_data_root = hash_tree_root(data) - #pool.electraCandidates[candidateIdx.get()].withValue(attestation_data_root, entry): - # if entry[].covers(bits): - # return true + pool.electraCandidates[candidateIdx].withValue( + getAttestationCandidateKey( + data, Opt.some get_committee_index_one( + committee_bits).expect("Gossip validation requires this")), entry): + if entry[].covers(aggregation_bits): + return true false @@ -549,7 +616,8 @@ iterator attestations*( committee_index: Opt[CommitteeIndex]): phase0.Attestation = let candidateIndices = if slot.isSome(): - let candidateIdx = pool.candidateIdx(slot.get()) + let candidateIdx = pool.candidateIdx( + slot.get(), CandidateIdxType.phase0Idx) if candidateIdx.isSome(): candidateIdx.get() .. candidateIdx.get() else: @@ -578,7 +646,8 @@ iterator electraAttestations*( committee_index: Opt[CommitteeIndex]): electra.Attestation = let candidateIndices = if slot.isSome(): - let candidateIdx = pool.candidateIdx(slot.get(), true) + let candidateIdx = pool.candidateIdx( + slot.get(), CandidateIdxType.electraIdx) if candidateIdx.isSome(): candidateIdx.get() .. 
candidateIdx.get() else: @@ -696,7 +765,7 @@ func score( doAssert aggregation_bits.len() == xxx[].len(), "check_attestation ensures committee length" - # How many votes were in the attestation minues the votes that are the same + # How many votes were in the attestation minus the votes that are the same return bitsScore - aggregation_bits.countOverlap(xxx[]) # Not found in cache - fresh vote meaning all attestations count @@ -751,7 +820,7 @@ proc getAttestationsForBlock*(pool: var AttestationPool, let slot = Slot(maxAttestationSlot - i) - candidateIdx = pool.candidateIdx(slot) + candidateIdx = pool.candidateIdx(slot, CandidateIdxType.phase0Idx) if candidateIdx.isNone(): # Passed the collection horizon - shouldn't happen because it's based on @@ -887,7 +956,7 @@ proc getElectraAttestationsForBlock*( let slot = Slot(maxAttestationSlot - i) - candidateIdx = pool.candidateIdx(slot) + candidateIdx = pool.candidateIdx(slot, CandidateIdxType.electraIdx) if candidateIdx.isNone(): # Passed the collection horizon - shouldn't happen because it's based on @@ -942,7 +1011,7 @@ proc getElectraAttestationsForBlock*( # For each round, we'll look for the best attestation and add it to the result # then re-score the other candidates. var - candidatesPerBlock: Table[(Eth2Digest, Slot), seq[electra.Attestation]] + candidatesPerBlock: OrderedTable[CandidateKey, seq[electra.Attestation]] let totalCandidates = candidates.len() while candidates.len > 0 and candidatesPerBlock.lenu64() < @@ -958,7 +1027,7 @@ proc getElectraAttestationsForBlock*( #TODO: Merge candidates per block structure with the candidates one # and score possible on-chain attestations while collecting candidates - # (previous loop) and reavaluate cache key definition + # (previous loop) and reevaluate cache key definition let entry2 = block: var e2 = entry.data @@ -967,10 +1036,7 @@ proc getElectraAttestationsForBlock*( key = (hash_tree_root(entry2), entry.data.slot) newAtt = entry[].toElectraAttestation(entry[].aggregates[j]) - candidatesPerBlock.withValue(key, candidate): - candidate[].add newAtt - do: - candidatesPerBlock[key] = @[newAtt] + candidatesPerBlock.mgetOrPut(key, @[]).add(newAtt) # Update cache so that the new votes are taken into account when updating # the score below @@ -1052,7 +1118,7 @@ func getElectraAggregatedAttestation*( Opt[electra.Attestation] = let - candidateIdx = pool.candidateIdx(slot) + candidateIdx = pool.candidateIdx(slot, CandidateIdxType.electraIdx) if candidateIdx.isNone: return Opt.none(electra.Attestation) @@ -1080,7 +1146,7 @@ func getElectraAggregatedAttestation*( # be used here, because otherwise they wouldn't have the same value. It thus # leaves the cross-committee aggregation for getElectraAttestationsForBlock, # which does do this. 
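As a rough illustration of the redundancy check that the `covers` helpers updated earlier in this diff perform on aggregation bitlists, here is a toy subset test over plain `HashSet[int]` values. `coveredBy` is a hypothetical name; the real pool operates on `CommitteeValidatorsBits`/`ElectraCommitteeValidatorsBits`, not integer sets.

```nim
import std/sets

# An incoming aggregate is redundant when an existing aggregate already
# includes every attesting bit it carries.
proc coveredBy(incoming, existing: HashSet[int]): bool =
  incoming <= existing   # subset test: the incoming aggregate adds no new votes

when isMainModule:
  let existing = toHashSet([1, 4, 7])
  doAssert coveredBy(toHashSet([4, 7]), existing)
  doAssert not coveredBy(toHashSet([2, 4]), existing)
```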
- let candidateIdx = pool.candidateIdx(slot) + let candidateIdx = pool.candidateIdx(slot, CandidateIdxType.electraIdx) if candidateIdx.isNone: return Opt.none(electra.Attestation) @@ -1103,7 +1169,7 @@ func getPhase0AggregatedAttestation*( pool: var AttestationPool, slot: Slot, attestation_data_root: Eth2Digest): Opt[phase0.Attestation] = let - candidateIdx = pool.candidateIdx(slot) + candidateIdx = pool.candidateIdx(slot, CandidateIdxType.phase0Idx) if candidateIdx.isNone: return Opt.none(phase0.Attestation) @@ -1124,7 +1190,7 @@ func getPhase0AggregatedAttestation*( ## Select the attestation that has the most votes going for it in the given ## slot/index ## https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#construct-aggregate - let candidateIdx = pool.candidateIdx(slot) + let candidateIdx = pool.candidateIdx(slot, CandidateIdxType.phase0Idx) if candidateIdx.isNone: return Opt.none(phase0.Attestation) @@ -1197,7 +1263,7 @@ proc selectOptimisticHead*( proc prune*(pool: var AttestationPool) = if (let v = pool.forkChoice.prune(); v.isErr): # If pruning fails, it's likely the result of a bug - this shouldn't happen - # but we'll keep running hoping that the fork chocie will recover eventually + # but we'll keep running hoping that the fork choice will recover eventually error "Couldn't prune fork choice, bug?", err = v.error() func validatorSeenAtEpoch*(pool: AttestationPool, epoch: Epoch, diff --git a/beacon_chain/consensus_object_pools/blob_quarantine.nim b/beacon_chain/consensus_object_pools/blob_quarantine.nim index 7bbffd11b6..12da2ca4ff 100644 --- a/beacon_chain/consensus_object_pools/blob_quarantine.nim +++ b/beacon_chain/consensus_object_pools/blob_quarantine.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -14,13 +14,14 @@ import from std/sequtils import mapIt from std/strutils import join -const - MaxBlobs = 3 * SLOTS_PER_EPOCH * MAX_BLOBS_PER_BLOCK - ## Same limit as `MaxOrphans` in `block_quarantine`; - ## blobs may arrive before an orphan is tagged `blobless` +func maxBlobs(MAX_BLOBS_PER_BLOCK_ELECTRA: uint64): uint64 = + # Same limit as `MaxOrphans` in `block_quarantine`; + # blobs may arrive before an orphan is tagged `blobless` + 3 * SLOTS_PER_EPOCH * MAX_BLOBS_PER_BLOCK_ELECTRA type BlobQuarantine* = object + maxBlobs: uint64 blobs*: OrderedTable[(Eth2Digest, BlobIndex, KzgCommitment), ref BlobSidecar] onBlobSidecarCallback*: OnBlobSidecarCallback @@ -39,7 +40,7 @@ func shortLog*(x: seq[BlobFetchRecord]): string = "[" & x.mapIt(shortLog(it.block_root) & shortLog(it.indices)).join(", ") & "]" func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) = - if quarantine.blobs.lenu64 >= MaxBlobs: + if quarantine.blobs.lenu64 >= quarantine.maxBlobs: # FIFO if full. For example, sync manager and request manager can race to # put blobs in at the same time, so one gets blob insert -> block resolve # -> blob insert sequence, which leaves garbage blobs. 
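A minimal, self-contained sketch of the FIFO-when-full behaviour described in the quarantine comment above, using toy string keys in place of the real `(root, index, commitment)` tuples. `ToyBlobQuarantine` and its capacity are placeholders for this sketch; in the real code the limit comes from `maxBlobs(MAX_BLOBS_PER_BLOCK_ELECTRA)`, i.e. `3 * SLOTS_PER_EPOCH * MAX_BLOBS_PER_BLOCK_ELECTRA`.

```nim
import std/tables

type
  ToyBlobQuarantine = object
    maxItems: int
    items: OrderedTable[string, int]   # stands in for key -> sidecar

proc put(q: var ToyBlobQuarantine, key: string, value: int) =
  if q.items.len >= q.maxItems:
    # OrderedTable iterates in insertion order, so the first key is the oldest.
    var oldest = ""
    for k in q.items.keys:
      oldest = k
      break
    q.items.del(oldest)
  q.items[key] = value

when isMainModule:
  var q = ToyBlobQuarantine(maxItems: 2)
  q.put("blob-a", 1)
  q.put("blob-b", 2)
  q.put("blob-c", 3)       # capacity reached: "blob-a" is evicted first
  doAssert "blob-a" notin q.items and "blob-c" in q.items
```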
@@ -61,12 +62,14 @@ func hasBlob*( quarantine: BlobQuarantine, slot: Slot, proposer_index: uint64, - index: BlobIndex): bool = + index: BlobIndex, + kzg_commitment: KzgCommitment): bool = for blob_sidecar in quarantine.blobs.values: template block_header: untyped = blob_sidecar.signed_block_header.message if block_header.slot == slot and block_header.proposer_index == proposer_index and - blob_sidecar.index == index: + blob_sidecar.index == index and + blob_sidecar.kzg_commitment == kzg_commitment: return true false @@ -104,5 +107,8 @@ func blobFetchRecord*(quarantine: BlobQuarantine, BlobFetchRecord(block_root: blck.root, indices: indices) func init*( - T: type BlobQuarantine, onBlobSidecarCallback: OnBlobSidecarCallback): T = - T(onBlobSidecarCallback: onBlobSidecarCallback) + T: type BlobQuarantine, + cfg: RuntimeConfig, + onBlobSidecarCallback: OnBlobSidecarCallback): T = + T(maxBlobs: cfg.MAX_BLOBS_PER_BLOCK_ELECTRA.maxBlobs(), + onBlobSidecarCallback: onBlobSidecarCallback) diff --git a/beacon_chain/consensus_object_pools/block_clearance.nim b/beacon_chain/consensus_object_pools/block_clearance.nim index 2792f6d4d5..b42125bc2c 100644 --- a/beacon_chain/consensus_object_pools/block_clearance.nim +++ b/beacon_chain/consensus_object_pools/block_clearance.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,10 +8,8 @@ {.push raises: [].} import - std/sequtils, chronicles, results, - stew/assign2, ../spec/[ beaconstate, forks, signatures, signatures_batch, state_transition, state_transition_epoch], @@ -266,7 +264,7 @@ proc addHeadBlockWithParent*( var cache = StateCache() # We've verified that the slot of the new block is newer than that of the - # parent, so we should now be able to create an approriate clearance state + # parent, so we should now be able to create an appropriate clearance state # onto which we can apply the new block let clearanceBlock = BlockSlotId.init(parent.bid, signedBlock.message.slot) if not updateState( @@ -504,12 +502,11 @@ proc addBackfillBlockData*( return ok() return err(error) startTick = Moment.now() - parentBlock = dag.getForkedBlock(parent.bid.root).get() - trustedStateRoot = - withBlck(parentBlock): - forkyBlck.message.state_root clearanceBlock = BlockSlotId.init(parent.bid, forkyBlck.message.slot) - updateFlags1 = dag.updateFlags + {skipLastStateRootCalculation} + updateFlags1 = dag.updateFlags + # TODO (cheatfate): {skipLastStateRootCalculation} flag here could + # improve performance by 100%, but this approach needs some + # improvements, which is unclear. 
if not updateState(dag, dag.clearanceState, clearanceBlock, true, cache, updateFlags1): @@ -517,8 +514,6 @@ proc addBackfillBlockData*( "database corrupt?", clearanceBlock = shortLog(clearanceBlock) return err(VerifierError.MissingParent) - dag.clearanceState.setStateRoot(trustedStateRoot) - let proposerVerifyTick = Moment.now() if not(isNil(onStateUpdated)): diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index d7f4c34d58..459a768a4e 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,12 +9,11 @@ import # Standard library - std/[sets, tables, hashes], + std/[tables, hashes], # Status libraries chronicles, # Internals ../spec/[signatures_batch, forks, helpers], - ../spec/datatypes/[phase0, altair, bellatrix], ".."/[beacon_chain_db, era_db], ../validators/validator_monitor, ./block_dag, block_pools_types_light_client @@ -22,11 +21,11 @@ import from ../spec/datatypes/capella import TrustedSignedBeaconBlock from ../spec/datatypes/deneb import TrustedSignedBeaconBlock -from "."/vanity_logs/vanity_logs import VanityLogs +from "."/vanity_logs/vanity_logs import LogProc, VanityLogs export sets, tables, hashes, helpers, beacon_chain_db, era_db, block_dag, - block_pools_types_light_client, validator_monitor, VanityLogs + block_pools_types_light_client, validator_monitor, LogProc, VanityLogs # ChainDAG and types related to forming a DAG of blocks, keeping track of their # relationships and allowing various forms of lookups @@ -51,6 +50,8 @@ type OnBlockCallback* = proc(data: ForkedTrustedSignedBeaconBlock) {.gcsafe, raises: [].} + OnBlockGossipCallback* = + proc(data: ForkedSignedBeaconBlock) {.gcsafe, raises: [].} OnHeadCallback* = proc(data: HeadChangeInfoObject) {.gcsafe, raises: [].} OnReorgCallback* = @@ -89,7 +90,7 @@ type ## instantiated: sync from genesis or checkpoint, and therefore, what ## features we can offer in terms of historical replay. ## - ## Beacuse the state transition is forwards-only, checkpoint sync generally + ## Because the state transition is forwards-only, checkpoint sync generally ## allows replaying states from that point onwards - anything earlier ## would require a backfill of blocks and a subsequent replay from genesis. 
## @@ -233,6 +234,8 @@ type onBlockAdded*: OnBlockCallback ## On block added callback + onBlockGossipAdded*: OnBlockGossipCallback + ## On block gossip added callback onHeadChanged*: OnHeadCallback ## On head changed callback onReorgHappened*: OnReorgCallback @@ -341,6 +344,10 @@ type block_root* {.serializedFieldName: "block".}: Eth2Digest optimistic* {.serializedFieldName: "execution_optimistic".}: Option[bool] + EventBeaconBlockGossipObject* = object + slot*: Slot + block_root* {.serializedFieldName: "block".}: Eth2Digest + template OnBlockAddedCallback*(kind: static ConsensusFork): auto = when kind == ConsensusFork.Fulu: typedesc[OnFuluBlockAdded] @@ -401,6 +408,9 @@ template setFinalizationCb*(dag: ChainDAGRef, cb: OnFinalizedCallback) = template setBlockCb*(dag: ChainDAGRef, cb: OnBlockCallback) = dag.onBlockAdded = cb +template setBlockGossipCb*(dag: ChainDAGRef, cb: OnBlockGossipCallback) = + dag.onBlockGossipAdded = cb + template setHeadCb*(dag: ChainDAGRef, cb: OnHeadCallback) = dag.onHeadChanged = cb @@ -476,3 +486,11 @@ func init*(t: typedesc[EventBeaconBlockObject], block_root: forkyBlck.root, optimistic: optimistic ) + +func init*(t: typedesc[EventBeaconBlockGossipObject], + v: ForkedSignedBeaconBlock): EventBeaconBlockGossipObject = + withBlck(v): + EventBeaconBlockGossipObject( + slot: forkyBlck.message.slot, + block_root: forkyBlck.root + ) \ No newline at end of file diff --git a/beacon_chain/consensus_object_pools/block_quarantine.nim b/beacon_chain/consensus_object_pools/block_quarantine.nim index 18b5221c9d..ccee1527b1 100644 --- a/beacon_chain/consensus_object_pools/block_quarantine.nim +++ b/beacon_chain/consensus_object_pools/block_quarantine.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,9 +8,8 @@ {.push raises: [].} import + std/tables, chronicles, - std/[options, tables], - stew/bitops2, ../spec/forks export tables, forks @@ -18,12 +17,14 @@ export tables, forks const MaxRetriesPerMissingItem = 7 ## Exponential backoff, double interval between each attempt - MaxMissingItems = 1024 + MaxMissingItems* = 1024 ## Arbitrary MaxOrphans = SLOTS_PER_EPOCH * 3 ## Enough for finalization in an alternative fork MaxBlobless = SLOTS_PER_EPOCH ## Arbitrary + MaxColumnless = SLOTS_PER_EPOCH + ## Arbitrary MaxUnviables = 16 * 1024 ## About a day of blocks - most likely not needed but it's quite cheap.. @@ -58,6 +59,12 @@ type ## block as well. A blobless block inserted into this table must ## have a resolved parent (i.e., it is not an orphan). + columnless*: OrderedTable[Eth2Digest, ForkedSignedBeaconBlock] + ## Blocks that we don't have columns for. When we have received + ## all columns for this block, we can proceed to resolving the + ## block as well. 
A columnless block inserted into this table must + ## have a resolved parent (i.e., it is not an orphan) + unviable*: OrderedTable[Eth2Digest, tuple[]] ## Unviable blocks are those that come from a history that does not ## include the finalized checkpoint we're currently following, and can @@ -132,6 +139,10 @@ func removeBlobless*( quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) = quarantine.blobless.del(signedBlock.root) +func removeColumnless*( + quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) = + quarantine.columnless.del(signedBlock.root) + func isViable( finalizedSlot: Slot, slot: Slot): bool = # The orphan must be newer than the finalization point so that its parent @@ -203,6 +214,9 @@ func removeUnviableBloblessTree( toRemove.setLen(0) func addUnviable*(quarantine: var Quarantine, root: Eth2Digest) = + # Unviable - don't try to download again! + quarantine.missing.del(root) + if root in quarantine.unviable: return @@ -236,6 +250,18 @@ func cleanupBlobless(quarantine: var Quarantine, finalizedSlot: Slot) = quarantine.addUnviable k quarantine.blobless.del k +func cleanupColumnless(quarantine: var Quarantine, finalizedSlot: Slot) = + var toDel: seq[Eth2Digest] + + for k, v in quarantine.columnless: + withBlck(v): + if not isViable(finalizedSlot, forkyBlck.message.slot): + toDel.add k + + for k in toDel: + quarantine.addUnviable k + quarantine.columnless.del k + func clearAfterReorg*(quarantine: var Quarantine) = ## Clear missing and orphans to start with a fresh slate in case of a reorg ## Unviables remain unviable and are not cleared. @@ -257,8 +283,9 @@ func addOrphan*( quarantine: var Quarantine, finalizedSlot: Slot, signedBlock: ForkedSignedBeaconBlock): Result[void, cstring] = ## Adds block to quarantine's `orphans` and `missing` lists. 
+ if not isViable(finalizedSlot, getForkedBlockField(signedBlock, slot)): - quarantine.addUnviable(signedBlock.root) + quarantine.addUnviable(signedBlock.root) # will remove from missing return err("block unviable") quarantine.cleanupOrphans(finalizedSlot) @@ -266,9 +293,14 @@ func addOrphan*( let parent_root = getForkedBlockField(signedBlock, parent_root) if parent_root in quarantine.unviable: - quarantine.unviable[signedBlock.root] = () + quarantine.addUnviable(signedBlock.root) return err("block parent unviable") + # It's no longer missing if we downloaded it - remove before adding to make + # sure parent chains get downloaded even if missing list is full (works as + # long as the orphan was in the missing list, which is likely) + quarantine.missing.del(signedBlock.root) + # Even if the quarantine is full, we need to schedule its parent for # downloading or we'll never get to the bottom of things quarantine.addMissing(parent_root) @@ -283,7 +315,6 @@ func addOrphan*( quarantine.blobless.del oldest_orphan_key[0] quarantine.orphans[(signedBlock.root, signedBlock.signature)] = signedBlock - quarantine.missing.del(signedBlock.root) ok() @@ -325,6 +356,29 @@ proc addBlobless*( quarantine.missing.del(signedBlock.root) true +proc addColumnless*( + quarantine: var Quarantine, finalizedSlot: Slot, + signedBlock: fulu.SignedBeaconBlock): bool = + + if not isViable(finalizedSlot, signedBlock.message.slot): + quarantine.addUnviable(signedBlock.root) + return false + + quarantine.cleanupColumnless(finalizedSlot) + + if quarantine.columnless.lenu64 >= MaxColumnless: + var oldest_columnless_key: Eth2Digest + for k in quarantine.columnless.keys: + oldest_columnless_key = k + break + quarantine.blobless.del oldest_columnless_key + + debug "block quarantine: Adding columnless", blck = shortLog(signedBlock) + quarantine.columnless[signedBlock.root] = + ForkedSignedBeaconBlock.init(signedBlock) + quarantine.missing.del(signedBlock.root) + true + func popBlobless*( quarantine: var Quarantine, root: Eth2Digest): Opt[ForkedSignedBeaconBlock] = @@ -334,6 +388,19 @@ func popBlobless*( else: Opt.none(ForkedSignedBeaconBlock) +func popColumnless*( + quarantine: var Quarantine, + root: Eth2Digest): Opt[ForkedSignedBeaconBlock] = + var blck: ForkedSignedBeaconBlock + if quarantine.columnless.pop(root, blck): + Opt.some(blck) + else: + Opt.none(ForkedSignedBeaconBlock) + iterator peekBlobless*(quarantine: var Quarantine): ForkedSignedBeaconBlock = for k, v in quarantine.blobless.mpairs(): yield v + +iterator peekColumnless*(quarantine: var Quarantine): ForkedSignedBeaconBlock = + for k, v in quarantine.columnless.mpairs(): + yield v diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index 746869813e..5296b80a59 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
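For illustration, a toy sketch of the `addOrphan` bookkeeping change earlier in this diff: a freshly downloaded block leaves the `missing` set before it is stored as an orphan, and its parent is scheduled for download so parent chains keep resolving. The names and types below are placeholders for this sketch, not the quarantine's API.

```nim
import std/[sets, tables]

type
  ToyBlockQuarantine = object
    missing: HashSet[string]
    orphans: Table[string, string]   # block root -> placeholder for the block

proc addMissing(q: var ToyBlockQuarantine, root: string) =
  if root notin q.orphans:
    q.missing.incl root

proc addOrphan(q: var ToyBlockQuarantine, root, parentRoot: string) =
  q.missing.excl root        # we have it now, stop trying to download it
  q.addMissing(parentRoot)   # ...but keep walking down the parent chain
  q.orphans[root] = "block"

when isMainModule:
  var q: ToyBlockQuarantine
  q.addMissing("child")
  q.addOrphan("child", "parent")
  doAssert "child" notin q.missing and "parent" in q.missing
```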
@@ -1639,11 +1639,11 @@ template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork = forkAtEpoch(dag.cfg, epoch) proc getBlockRange*( - dag: ChainDAGRef, startSlot: Slot, skipStep: uint64, + dag: ChainDAGRef, startSlot: Slot, output: var openArray[BlockId]): Natural = ## This function populates an `output` buffer of blocks ## with a slots ranging from `startSlot` up to, but not including, - ## `startSlot + skipStep * output.len`, skipping any slots that don't have + ## `startSlot + output.len`, skipping any slots that don't have ## a block. ## ## Blocks will be written to `output` from the end without gaps, even if @@ -1657,7 +1657,7 @@ proc getBlockRange*( headSlot = dag.head.slot trace "getBlockRange entered", - head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot + head = shortLog(dag.head.root), requestedCount, startSlot, headSlot if startSlot < dag.backfill.slot: debug "Got request for pre-backfill slot", @@ -1671,11 +1671,9 @@ proc getBlockRange*( runway = uint64(headSlot - startSlot) # This is the number of blocks that will follow the start block - extraSlots = min(runway div skipStep, requestedCount - 1) + extraSlots = min(runway, requestedCount - 1) - # If `skipStep` is very large, `extraSlots` should be 0 from - # the previous line, so `endSlot` will be equal to `startSlot`: - endSlot = startSlot + extraSlots * skipStep + endSlot = startSlot + extraSlots var curSlot = endSlot @@ -1687,7 +1685,7 @@ proc getBlockRange*( if bs.isSome and bs.get().isProposed(): o -= 1 output[o] = bs.get().bid - curSlot -= skipStep + curSlot -= 1 # Handle start slot separately (to avoid underflow when computing curSlot) let bs = dag.getBlockIdAtSlot(startSlot) @@ -1984,7 +1982,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) = prunedHeads = hlen - dag.heads.len, dagPruneDur = Moment.now() - startTick -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/sync/optimistic.md#helpers +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/sync/optimistic.md#helpers func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool = let blck = if bid.slot <= dag.finalizedHead.slot: @@ -2324,7 +2322,7 @@ proc loadExecutionBlockHash*( from std/packedsets import PackedSet, incl, items -func getValidatorChangeStatuses( +func getBlsToExecutionChangeStatuses( state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]): PackedSet[ValidatorIndex] = var res: PackedSet[ValidatorIndex] @@ -2340,6 +2338,7 @@ func checkBlsToExecutionChanges( # Within each fork, BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX # and never ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX. Latter # can still happen via reorgs. + # # Cases: # 1) unchanged (BLS_WITHDRAWAL_PREFIX or ETH1_ADDRESS_WITHDRAWAL_PREFIX) from # old to new head. @@ -2354,7 +2353,25 @@ func checkBlsToExecutionChanges( # Since it tracks head, it's possible reorgs trigger reporting the same # validator indices multiple times; this is fine. 
withState(state): - anyIt( vis, forkyState.data.validators[it].has_eth1_withdrawal_credential) + anyIt(vis, forkyState.data.validators[it].has_eth1_withdrawal_credential) + +func getCompoundingStatuses( + state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]): + PackedSet[ValidatorIndex] = + var res: PackedSet[ValidatorIndex] + withState(state): + for vi in vis: + if forkyState.data.validators[vi].withdrawal_credentials.data[0] != + COMPOUNDING_WITHDRAWAL_PREFIX: + res.incl vi + res + +func checkCompoundingChanges( + state: ForkedHashedBeaconState, vis: PackedSet[ValidatorIndex]): bool = + # Since it tracks head, it's possible reorgs trigger reporting the same + # validator indices multiple times; this is fine. + withState(state): + anyIt(vis, forkyState.data.validators[it].has_compounding_withdrawal_credential) proc updateHead*( dag: ChainDAGRef, newHead: BlockRef, quarantine: var Quarantine, @@ -2393,9 +2410,10 @@ proc updateHead*( let lastHeadStateRoot = getStateRoot(dag.headState) - lastHeadMergeComplete = dag.headState.is_merge_transition_complete() lastHeadKind = dag.headState.kind - lastKnownValidatorsChangeStatuses = getValidatorChangeStatuses( + lastKnownValidatorsChangeStatuses = getBlsToExecutionChangeStatuses( + dag.headState, knownValidators) + lastKnownCompoundingChangeStatuses = getCompoundingStatuses( dag.headState, knownValidators) # Start off by making sure we have the right state - updateState will try @@ -2413,32 +2431,31 @@ proc updateHead*( dag.head = newHead - if dag.headState.is_merge_transition_complete() and not - lastHeadMergeComplete and - dag.vanityLogs.onMergeTransitionBlock != nil: - dag.vanityLogs.onMergeTransitionBlock() - if dag.headState.kind > lastHeadKind: - case dag.headState.kind - of ConsensusFork.Phase0 .. ConsensusFork.Bellatrix: - discard - of ConsensusFork.Capella: - if dag.vanityLogs.onUpgradeToCapella != nil: - dag.vanityLogs.onUpgradeToCapella() - of ConsensusFork.Deneb: - if dag.vanityLogs.onUpgradeToDeneb != nil: - dag.vanityLogs.onUpgradeToDeneb() - of ConsensusFork.Electra: - if dag.vanityLogs.onUpgradeToElectra != nil: - dag.vanityLogs.onUpgradeToElectra() - of ConsensusFork.Fulu: - discard + proc logForkUpgrade(consensusFork: ConsensusFork, handler: LogProc) = + if handler != nil and + dag.headState.kind >= consensusFork and + lastHeadKind < consensusFork: + handler() + + # Policy: Retain back through Mainnet's second latest fork. 
+ ConsensusFork.Capella.logForkUpgrade( + dag.vanityLogs.onUpgradeToCapella) + ConsensusFork.Deneb.logForkUpgrade( + dag.vanityLogs.onUpgradeToDeneb) + ConsensusFork.Electra.logForkUpgrade( + dag.vanityLogs.onUpgradeToElectra) if dag.vanityLogs.onKnownBlsToExecutionChange != nil and checkBlsToExecutionChanges( dag.headState, lastKnownValidatorsChangeStatuses): dag.vanityLogs.onKnownBlsToExecutionChange() + if dag.vanityLogs.onKnownCompoundingChange != nil and + checkCompoundingChanges( + dag.headState, lastKnownCompoundingChangeStatuses): + dag.vanityLogs.onKnownCompoundingChange() + dag.db.putHeadBlock(newHead.root) updateBeaconMetrics(dag.headState, dag.head.bid, cache) @@ -2543,13 +2560,6 @@ proc updateHead*( dag.db.updateFinalizedBlocks(newFinalized) - let oldBlockHash = dag.loadExecutionBlockHash(oldFinalizedHead.blck) - if oldBlockHash.isSome and oldBlockHash.unsafeGet.isZero: - let newBlockHash = dag.loadExecutionBlockHash(dag.finalizedHead.blck) - if newBlockHash.isSome and not newBlockHash.unsafeGet.isZero: - if dag.vanityLogs.onFinalizedMergeTransitionBlock != nil: - dag.vanityLogs.onFinalizedMergeTransitionBlock() - # Pruning the block dag is required every time the finalized head changes # in order to clear out blocks that are no longer viable and should # therefore no longer be considered as part of the chain we're following diff --git a/beacon_chain/consensus_object_pools/consensus_manager.nim b/beacon_chain/consensus_object_pools/consensus_manager.nim index d6261b62bb..86528b515a 100644 --- a/beacon_chain/consensus_object_pools/consensus_manager.nim +++ b/beacon_chain/consensus_object_pools/consensus_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -221,23 +221,17 @@ proc updateExecutionClientHead*( func getKnownValidatorsForBlsChangeTracking( self: ConsensusManager, newHead: BlockRef): seq[ValidatorIndex] = - # Ensure that large nodes won't be overloaded by a nice-to-have, but + # Ensure that large nodes won't be overwhelmed by a nice-to-have, but # inessential cosmetic feature. - const MAX_CHECKED_INDICES = 64 - - if newHead.bid.slot.epoch >= self.dag.cfg.CAPELLA_FORK_EPOCH: - var res = newSeqOfCap[ValidatorIndex](min( - len(self.actionTracker.knownValidators), MAX_CHECKED_INDICES)) - for vi in self.actionTracker.knownValidators.keys(): - res.add vi - if res.len >= MAX_CHECKED_INDICES: - break - res - else: - # It is not possible for any BLS to execution changes, for any validator, - # to have been yet processed. 
- # https://github.com/nim-lang/Nim/issues/19802 - (static(@[])) + const MAX_CHECKED_INDICES = 32 + + var res = newSeqOfCap[ValidatorIndex](min( + len(self.actionTracker.knownValidators), MAX_CHECKED_INDICES)) + for vi in self.actionTracker.knownValidators.keys(): + res.add vi + if res.len >= MAX_CHECKED_INDICES: + break + res proc updateHead*(self: var ConsensusManager, newHead: BlockRef) = ## Trigger fork choice and update the DAG with the new head block @@ -466,4 +460,4 @@ proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) = # Cleanup DAG & fork choice if we have a finalized head if self.dag.needStateCachesAndForkChoicePruning(): self.dag.pruneStateCachesDAG() - self.attestationPool[].prune() + self.attestationPool[].prune() \ No newline at end of file diff --git a/beacon_chain/consensus_object_pools/data_column_quarantine.nim b/beacon_chain/consensus_object_pools/data_column_quarantine.nim index e073f10740..63c53a1ff4 100644 --- a/beacon_chain/consensus_object_pools/data_column_quarantine.nim +++ b/beacon_chain/consensus_object_pools/data_column_quarantine.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -27,7 +27,7 @@ type supernode*: bool custody_columns*: seq[ColumnIndex] onDataColumnSidecarCallback*: OnDataColumnSidecarCallback - + DataColumnFetchRecord* = object block_root*: Eth2Digest indices*: seq[ColumnIndex] @@ -48,7 +48,7 @@ func put*(quarantine: var DataColumnQuarantine, # insert -> block resolve -> data column insert, which leaves # garbage data columns. # - # This also therefore automatically garbage-collects otherwise valid + # This also therefore automatically garbage-collects otherwise valid # data columns that are correctly signed, point to either correct block # root which isn't ever seen, and then for any reason simply never used. 
var oldest_column_key: DataColumnIdentifier @@ -56,7 +56,7 @@ func put*(quarantine: var DataColumnQuarantine, oldest_column_key = k break quarantine.data_columns.del(oldest_column_key) - let block_root = + let block_root = hash_tree_root(dataColumnSidecar.signed_block_header.message) discard quarantine.data_columns.hasKeyOrPut( DataColumnIdentifier(block_root: block_root, @@ -78,7 +78,7 @@ func hasDataColumn*( false func peekColumnIndices*(quarantine: DataColumnQuarantine, - blck: electra.SignedBeaconBlock): + blck: fulu.SignedBeaconBlock): seq[ColumnIndex] = # Peeks into the currently received column indices # from quarantine, necessary data availability checks @@ -91,18 +91,17 @@ func peekColumnIndices*(quarantine: DataColumnQuarantine, indices func gatherDataColumns*(quarantine: DataColumnQuarantine, - digest: Eth2Digest): - seq[ref DataColumnSidecar] = - # Returns the current data columns quried by a - # block header + digest: Eth2Digest): + seq[ref DataColumnSidecar] = + # Returns the current data columns queried by a block header var columns: seq[ref DataColumnSidecar] for i in quarantine.custody_columns: - let dc_identifier = + let dc_identifier = DataColumnIdentifier( block_root: digest, index: i) if quarantine.data_columns.hasKey(dc_identifier): - let value = + let value = quarantine.data_columns.getOrDefault(dc_identifier, default(ref DataColumnSidecar)) columns.add(value) @@ -110,7 +109,7 @@ func gatherDataColumns*(quarantine: DataColumnQuarantine, func popDataColumns*( quarantine: var DataColumnQuarantine, digest: Eth2Digest, - blck: electra.SignedBeaconBlock): + blck: fulu.SignedBeaconBlock): seq[ref DataColumnSidecar] = var r: DataColumnSidecars for idx in quarantine.custody_columns: @@ -123,7 +122,7 @@ func popDataColumns*( r func hasMissingDataColumns*(quarantine: DataColumnQuarantine, - blck: electra.SignedBeaconBlock): bool = + blck: fulu.SignedBeaconBlock): bool = # `hasMissingDataColumns` consists of the data columns that, # have been missed over gossip, also in case of a supernode, # the method would return missing columns when the supernode @@ -134,7 +133,7 @@ func hasMissingDataColumns*(quarantine: DataColumnQuarantine, # root request columns over RPC. var col_counter = 0 for idx in quarantine.custody_columns: - let dc_identifier = + let dc_identifier = DataColumnIdentifier( block_root: blck.root, index: idx) @@ -149,13 +148,13 @@ func hasMissingDataColumns*(quarantine: DataColumnQuarantine, return true func hasEnoughDataColumns*(quarantine: DataColumnQuarantine, - blck: electra.SignedBeaconBlock): bool = + blck: fulu.SignedBeaconBlock): bool = # `hasEnoughDataColumns` dictates whether there is `enough` # data columns for a block to be enqueued, ideally for a supernode # if it receives atleast 50%+ gossip and RPC # Once 50%+ columns are available we can use this function to - # check it, and thereby check column reconstructability, right from + # check it, and thereby check column reconstructability, right from # gossip validation, consequently populating the quarantine with # rest of the data columns. 
if quarantine.supernode: @@ -165,7 +164,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine, return true else: for i in quarantine.custody_columns: - let dc_identifier = + let dc_identifier = DataColumnIdentifier( block_root: blck.root, index: i) @@ -175,7 +174,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine, return true func dataColumnFetchRecord*(quarantine: DataColumnQuarantine, - blck: electra.SignedBeaconBlock): + blck: fulu.SignedBeaconBlock): DataColumnFetchRecord = var indices: seq[ColumnIndex] for i in quarantine.custody_columns: @@ -187,4 +186,4 @@ func dataColumnFetchRecord*(quarantine: DataColumnQuarantine, if not quarantine.data_columns.hasKey( dc_id): indices.add(idx) - DataColumnFetchRecord(block_root: blck.root, indices: indices) \ No newline at end of file + DataColumnFetchRecord(block_root: blck.root, indices: indices) diff --git a/beacon_chain/consensus_object_pools/spec_cache.nim b/beacon_chain/consensus_object_pools/spec_cache.nim index c6bca31f83..e014317047 100644 --- a/beacon_chain/consensus_object_pools/spec_cache.nim +++ b/beacon_chain/consensus_object_pools/spec_cache.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,18 +8,18 @@ {.push raises: [].} import - std/sequtils, results, chronicles, - ../extras, - ../spec/[beaconstate, helpers, network, signatures, validator], + ../spec/[beaconstate, helpers, signatures, validator], ../spec/datatypes/base, ./block_pools_types, blockchain_dag +from std/sequtils import anyIt from ../spec/datatypes/electra import shortLog +from ../spec/network import compute_subnet_for_attestation export - base, extras, block_pools_types, results + base, block_pools_types, results logScope: topics = "spec_cache" @@ -99,21 +99,30 @@ iterator get_attesting_indices*(shufflingRef: ShufflingRef, if bits[index_in_committee]: yield validator_index -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#modified-get_attesting_indices -iterator get_attesting_indices*(shufflingRef: ShufflingRef, - slot: Slot, - committee_bits: AttestationCommitteeBits, - aggregation_bits: ElectraCommitteeValidatorsBits, on_chain: static bool): - ValidatorIndex = +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#modified-get_attesting_indices +iterator get_attesting_indices*( + shufflingRef: ShufflingRef, slot: Slot, + committee_bits: AttestationCommitteeBits, + aggregation_bits: ElectraCommitteeValidatorsBits, on_chain: static bool): + ValidatorIndex = when on_chain: - var pos = 0 - for committee_index in get_committee_indices(committee_bits): - for _, validator_index in get_beacon_committee( - shufflingRef, slot, committee_index): - - if aggregation_bits[pos]: - yield validator_index - pos += 1 + var committee_offset = 0 + for committee_index in committee_bits.oneIndices: + if not (committee_index.uint64 < + get_committee_count_per_slot(shufflingRef)): + continue # invalid attestation, but found in check_attestation() + let committee = get_beacon_committee( + shufflingRef, slot, committee_index.CommitteeIndex) + + if aggregation_bits.len < committee_offset + len(committee): + # Would 
overflow, invalid attestation caught in check_attestation() + continue + + for i, attester_index in committee: + if aggregation_bits[committee_offset + i]: + yield attester_index + + committee_offset += len(committee) else: let committee_index = get_committee_index_one(committee_bits) for validator_index in get_attesting_indices( @@ -173,13 +182,14 @@ iterator get_attesting_indices*( yield validator iterator get_attesting_indices*( - dag: ChainDAGRef, attestation: electra.TrustedAttestation, + dag: ChainDAGRef, + attestation: electra.Attestation | electra.TrustedAttestation, on_chain: static bool): ValidatorIndex = block gaiBlock: # `return` is not allowed in an inline iterator let slot = check_attestation_slot_target(attestation.data).valueOr: - warn "Invalid attestation slot in trusted attestation", + warn "Invalid attestation slot in attestation", attestation = shortLog(attestation) doAssert strictVerification notin dag.updateFlags break gaiBlock @@ -187,7 +197,7 @@ iterator get_attesting_indices*( dag.getBlockRef(attestation.data.beacon_block_root).valueOr: # Attestation block unknown - this is fairly common because we # discard alternative histories on restart - debug "Pruned block in trusted attestation", + debug "Pruned block in attestation", attestation = shortLog(attestation) break gaiBlock target = @@ -196,7 +206,7 @@ iterator get_attesting_indices*( # leading to the case where the attestation block root is the # finalized head (exists as BlockRef) but its target vote has # already been pruned - notice "Pruned target in trusted attestation", + notice "Pruned target in attestation", blck = shortLog(blck), attestation = shortLog(attestation) doAssert strictVerification notin dag.updateFlags @@ -260,9 +270,9 @@ func get_attesting_indices*(shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex, bits: ElectraCommitteeValidatorsBits, - on_chain: static bool): + on_chain: static bool = true): seq[ValidatorIndex] = - static: doAssert not on_chain, "only on_chain supported" + static: doAssert on_chain, "only on_chain supported" for idx in get_attesting_indices(shufflingRef, slot, committee_index, bits): result.add(idx) @@ -283,7 +293,7 @@ func makeAttestationData*( doAssert current_epoch == epochRef.epoch - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#attestation-data + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#attestation-data AttestationData( slot: slot, index: committee_index.asUInt64, diff --git a/beacon_chain/consensus_object_pools/sync_committee_msg_pool.nim b/beacon_chain/consensus_object_pools/sync_committee_msg_pool.nim index e0b5ae5222..e6884d82dc 100644 --- a/beacon_chain/consensus_object_pools/sync_committee_msg_pool.nim +++ b/beacon_chain/consensus_object_pools/sync_committee_msg_pool.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
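The modified `get_attesting_indices` above walks the committees selected by `committee_bits` and indexes one flat `aggregation_bits` array through a running `committee_offset`. A minimal sketch of that offset bookkeeping, using made-up committee sizes and plain `int`/`bool` stand-ins for the real `ValidatorIndex` and bit-list types:

    # Two hypothetical committees of sizes 3 and 2 share one flat bit array:
    # bits 0..2 belong to the first committee, bits 3..4 to the second.
    let
      committees = @[@[10, 11, 12], @[20, 21]]              # made-up validator indices
      aggregation_bits = @[true, false, true, false, true]
    var
      committee_offset = 0
      attesters: seq[int]
    for committee in committees:
      for i, validator_index in committee:
        if aggregation_bits[committee_offset + i]:
          attesters.add validator_index
      committee_offset += committee.len                     # same step as in the iterator above
    doAssert attesters == @[10, 12, 21]

The length check in the iterator (`aggregation_bits.len < committee_offset + len(committee)`) guards exactly this indexing; an attestation whose bit list is too short is skipped rather than read out of range.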
@@ -364,7 +364,7 @@ proc produceSyncAggregate*( proc isEpochLeadTime*( pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool = - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#sync-committee-subnet-stability + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#sync-committee-subnet-stability # This ensures a uniform distribution without requiring additional state: # (1/4) = 1/4, 4 slots out # (3/4) * (1/3) = 1/4, 3 slots out diff --git a/beacon_chain/consensus_object_pools/vanity_logs/electra/blink.ans b/beacon_chain/consensus_object_pools/vanity_logs/electra/blink.ans new file mode 100644 index 0000000000..3c652fac49 --- /dev/null +++ b/beacon_chain/consensus_object_pools/vanity_logs/electra/blink.ans @@ -0,0 +1,28 @@ + + ..-|\_/| + .-'..d :.: $ + (_: __. :::::$ + `-' ? :::: $ + \ :::: $ + .--..---..-. .-..---..---..-. .\::::::$ .-. .-..--. .-..-. .-. .--. + | .-'| | || \./ || | || | || | | \ :::::$ | \| || | || || \| || .-' + | `-.| | || |`.'| || .-'| | || |_|| |:.::: $ | |`. || | || || |`. || |"| + `---'`---'`-' `-'`-' `---'`----' `| :::: $..__ `'`--' `-'`-' `-'`---' + .---..-. .-..-. `.::::::.::.`-. + | | || \| ||_| |:::::::::::::`--. + | | || |`. |.-. `.::::.:::::::::::`---..... + `---'`-' `-'`-' `.:::::::::::::::::::::::`\ + |:::::::::::::::::::::::::| + |:::::::::::::::::::::::|:| + |::::::::::::|::::::::::|:| + ':::)___..----\ :::::| .`. \ + |:::| | :| `.::::|: : `.`. + () () O O |:::| |. | |:::| `.: |:| + \ / \ / |:::| | :| `.:::`. .:`.`. + () ()() () () O () |:::| |: | ::::| |::|`.`. + \ / \ / / / / |:::| | :| |:::| | :| ))) + () () O() O () () O () |:::||.:.| () ()::| |:..;((( () +. \. /. \. // O. /. . \. /. \ /. . \ .'::.'|::'|. /. . /.|::|. ` :.\ `/ . +....()O...O()...\()()....()O...O.()....()|:::| |_M()()....()..:::|.()|: :()..() +\../...\./...\../....\../...\./....\../..|:::|\../....\../...|:::<...\_M/|..... + ():::::O:::::()::::::():::::O::::::():::|_N_|:()::::::()::::|_N_|::::()::::::: diff --git a/beacon_chain/consensus_object_pools/vanity_logs/electra/color.ans b/beacon_chain/consensus_object_pools/vanity_logs/electra/color.ans index aea0a7ad97..49ee1fcbab 100644 --- a/beacon_chain/consensus_object_pools/vanity_logs/electra/color.ans +++ b/beacon_chain/consensus_object_pools/vanity_logs/electra/color.ans @@ -1,28 +1,28 @@ - -..-|\_/| -  text .-'..d :.: $ -(_: __. :::::$ -`-' ? :::: $ -\ :::: $ -\::::::$ -area\ :::::$ -|:.::: $ - `| :::: $..__ -`.::::::.::.`-. -|:::::::::::::`--. -here `.::::.:::::::::::`---..... -  `.:::::::::::::::::::::::`\ - |:::::::::::::::::::::::::| - |:::::::::::::::::::::::|:| -  |::::::::::::|::::::::::|:| -':::)___..----\ :::::| .`. \ -|:::| | :|`.::::|: : `.`. -() () O  O |:::| |. ||:::| `.: |:| -\ / \ /   |:::| | :|`.:::`. .:`.`. -() ()()  ()  () O () |:::| |: |::::| |::|`.`. -\  / \ // / / |:::| | :||:::| | :| ))) - () ()  O() O ()  () O  () |:::||.:.| () ()::| |:..;((( () -. \. /. \. // O. /. . \. /. \ /. . \ .'::.'|::'|. /. . /.|::|. ` :.\ `/ . -....()O...O()...\()()....()O...O.()....()|:::| |_M()()....()..:::|.()|: :()..() -\../...\./...\../....\../...\./....\../..|:::|\../....\../...|:::<...\_M/|..... - ():::::O:::::()::::::():::::O::::::():::|_N_|:()::::::()::::|_N_|::::()::::::: + + ..-|\_/| + .-'..d :.: $ + (_: __. :::::$ + `-' ? :::: $ + \ :::: $ + .--..---..-. .-..---..---..-. .\::::::$ .-. .-..--. .-..-. .-. .--. + | .-'| | || \./ || | || | || | | \ :::::$ | \| || | || || \| || .-' + | `-.| | || |`.'| || .-'| | || |_|| |:.::: $ | |`. 
|| | || || |`. || |"| + `---'`---'`-' `-'`-' `---'`----' `| :::: $..__ `'`--' `-'`-' `-'`---' + .---..-. .-..-. `.::::::.::.`-. + | | || \| ||_| |:::::::::::::`--. + | | || |`. |.-. `.::::.:::::::::::`---..... + `---'`-' `-'`-' `.:::::::::::::::::::::::`\ + |:::::::::::::::::::::::::| + |:::::::::::::::::::::::|:| + |::::::::::::|::::::::::|:| + ':::)___..----\ :::::| .`. \ + |:::| | :| `.::::|: : `.`. + () () O O |:::| |. | |:::| `.: |:| + \ / \ / |:::| | :| `.:::`. .:`.`. + () ()() () () O () |:::| |: | ::::| |::|`.`. + \ / \ / / / / |:::| | :| |:::| | :| ))) + () () O() O () () O () |:::||.:.| () ()::| |:..;((( () +. \. /. \. // O. /. . \. /. \ /. . \ .'::.'|::'|. /. . /.|::|. ` :.\ `/ . +....()O...O()...\()()....()O...O.()....()|:::| |_M()()....()..:::|.()|: :()..() +\../...\./...\../....\../...\./....\../..|:::|\../....\../...|:::<...\_M/|..... + ():::::O:::::()::::::():::::O::::::():::|_N_|:()::::::()::::|_N_|::::()::::::: diff --git a/beacon_chain/consensus_object_pools/vanity_logs/electra/mono.txt b/beacon_chain/consensus_object_pools/vanity_logs/electra/mono.txt index bfe25591e6..bd0ac175f2 100644 --- a/beacon_chain/consensus_object_pools/vanity_logs/electra/mono.txt +++ b/beacon_chain/consensus_object_pools/vanity_logs/electra/mono.txt @@ -1,17 +1,17 @@ ..-|\_/| - text .-'..d :.: $ + .-'..d :.: $ (_: __. :::::$ `-' ? :::: $ \ :::: $ - \::::::$ - area \ :::::$ - |:.::: $ - `| :::: $..__ - `.::::::.::.`-. - |:::::::::::::`--. - here `.::::.:::::::::::`---..... - `.:::::::::::::::::::::::`\ + .--..---..-. .-..---..---..-. .\::::::$ .-. .-..--. .-..-. .-. .--. + | .-'| | || \./ || | || | || | | \ :::::$ | \| || | || || \| || .-' + | `-.| | || |`.'| || .-'| | || |_|| |:.::: $ | |`. || | || || |`. || |"| + `---'`---'`-' `-'`-' `---'`----' `| :::: $..__ `'`--' `-'`-' `-'`---' + .---..-. .-..-. `.::::::.::.`-. + | | || \| ||_| |:::::::::::::`--. + | | || |`. |.-. `.::::.:::::::::::`---..... + `---'`-' `-'`-' `.:::::::::::::::::::::::`\ |:::::::::::::::::::::::::| |:::::::::::::::::::::::|:| |::::::::::::|::::::::::|:| diff --git a/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim b/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim index 23f32f0fe1..e3e9a1a797 100644 --- a/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim +++ b/beacon_chain/consensus_object_pools/vanity_logs/vanity_logs.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -7,21 +7,14 @@ {.push raises: [].} -import - std/os, - chronicles +import chronicles + +from std/os import `/` type - LogProc = proc() {.gcsafe, raises: [].} + LogProc* = proc() {.gcsafe, raises: [].} VanityLogs* = object - # Upon the merge activating, these get displayed, at least once when the - # head becomes post-merge and then when the merge is finalized. If chain - # reorgs happen around the initial merge onMergeTransitionBlock might be - # called several times. - onMergeTransitionBlock*: LogProc - onFinalizedMergeTransitionBlock*: LogProc - # Gets displayed on upgrade to Capella. May be displayed multiple times # in case of chain reorgs around the upgrade. 
onUpgradeToCapella*: LogProc @@ -38,12 +31,14 @@ type # in case of chain reorgs around the upgrade. onUpgradeToElectra*: LogProc -# Created by http://beatscribe.com/ (beatscribe#1008 on Discord) -# These need to be the main body of the log not to be reformatted or escaped. + # Gets displayed on a change to compounding for a validator known to the + # known in a head block. + onKnownCompoundingChange*: LogProc -proc bellatrixMono*() = notice "\n" & staticRead("bellatrix" / "mono.txt") -proc bellatrixColor*() = notice "\n" & staticRead("bellatrix" / "color.ans") -proc bellatrixBlink*() = notice "\n" & staticRead("bellatrix" / "blink.ans") +# Created by https://beatscribe.com (beatscribe#1008 on Discord) +# These need to be the main body of the log not to be reformatted or escaped. +# +# Policy: Retain retired art files in the directory, but don't link them anymore proc capellaMono*() = notice "\n" & staticRead("capella" / "mono.txt") proc capellaColor*() = notice "\n" & staticRead("capella" / "color.ans") @@ -54,3 +49,4 @@ proc denebColor*() = notice "\n" & staticRead("deneb" / "color.ans") proc electraMono*() = notice "\n" & staticRead("electra" / "mono.txt") proc electraColor*() = notice "\n" & staticRead("electra" / "color.ans") +proc electraBlink*() = notice "\n" & staticRead("electra" / "blink.ans") diff --git a/beacon_chain/deposits.nim b/beacon_chain/deposits.nim index bf822ee509..040d90db2b 100644 --- a/beacon_chain/deposits.nim +++ b/beacon_chain/deposits.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -27,6 +27,16 @@ type of ValidatorStorageKind.Identifier: ident: ValidatorIdent +static: doAssert(high(ConsensusFork) == ConsensusFork.Fulu, + "Update OptionalForks constant!") +const + OptionalForks* = {ConsensusFork.Electra, ConsensusFork.Fulu} + ## When a new ConsensusFork is added and before this fork is activated on + ## `mainnet`, it should be part of `OptionalForks`. + ## In this case, the client will ignore missing _VERSION + ## and _EPOCH constants from the data reported by BN via + ## `/eth/v1/config/spec` API call. + proc getSignedExitMessage( config: BeaconNodeConf, storage: ValidatorStorage, @@ -233,7 +243,8 @@ proc restValidatorExit(config: BeaconNodeConf) {.async.} = let signingFork = try: let response = await client.getSpecVC() if response.status == 200: - let forkConfig = response.data.data.getConsensusForkConfig() + let forkConfig = + response.data.data.getConsensusForkConfig(OptionalForks) if forkConfig.isErr: raise newException(RestError, "Invalid config: " & forkConfig.error) let diff --git a/beacon_chain/el/el_manager.nim b/beacon_chain/el/el_manager.nim index 5b6884d5de..c88defa1ec 100644 --- a/beacon_chain/el/el_manager.nim +++ b/beacon_chain/el/el_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
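The `OptionalForks` constant introduced in deposits.nim above is tied to `high(ConsensusFork)` by a `static: doAssert`, so compilation fails the moment a new fork enum member appears until someone reconsiders which forks may legitimately be missing from a BN's `/eth/v1/config/spec` response. A small self-contained sketch of that compile-time guard pattern (`DemoFork` and `OptionalDemoForks` are illustrative names, not part of this change):

    type DemoFork = enum
      Alpha, Bravo, Charlie
    # Fails to compile as soon as a member is appended after Charlie,
    # forcing a review of the optional-fork set below.
    static: doAssert(high(DemoFork) == DemoFork.Charlie,
      "Update OptionalDemoForks when adding a new fork!")
    const OptionalDemoForks = {DemoFork.Bravo, DemoFork.Charlie}
    doAssert DemoFork.Charlie in OptionalDemoForks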
@@ -194,7 +194,7 @@ type depositContractSyncStatus: DepositContractSyncStatus ## Are we sure that this EL has synced the deposit contract? - lastPayloadId: Opt[PayloadID] + lastPayloadId: Opt[Bytes8] FullBlockId* = object number: Eth1BlockNumber @@ -485,7 +485,7 @@ func areSameAs(expectedParams: Option[NextExpectedPayloadParams], expectedParams.get.safeBlockHash == latestSafe and expectedParams.get.finalizedBlockHash == latestFinalized and expectedParams.get.payloadAttributes.timestamp.uint64 == timestamp and - expectedParams.get.payloadAttributes.prevRandao.bytes == randomData.data and + expectedParams.get.payloadAttributes.prevRandao.data == randomData.data and expectedParams.get.payloadAttributes.suggestedFeeRecipient == feeRecipient and expectedParams.get.payloadAttributes.withdrawals == withdrawals @@ -855,7 +855,7 @@ proc sendNewPayloadToSingleEL( payload: engine_api.ExecutionPayloadV3, versioned_hashes: seq[engine_api.VersionedHash], parent_beacon_block_root: FixedBytes[32], - executionRequests: array[3, seq[byte]] + executionRequests: seq[seq[byte]] ): Future[PayloadStatusV1] {.async: (raises: [CatchableError]).} = let rpcClient = await connection.connectedRpcClient() await rpcClient.engine_newPayloadV4( @@ -918,11 +918,13 @@ func compareStatuses( type ELConsensusViolationDetector = object selectedResponse: Opt[int] + selectedStatus: Opt[PayloadExecutionStatus] disagreementAlreadyDetected: bool func init(T: type ELConsensusViolationDetector): T = ELConsensusViolationDetector( selectedResponse: Opt.none(int), + selectedStatus: Opt.none(PayloadExecutionStatus), disagreementAlreadyDetected: false ) @@ -939,11 +941,13 @@ proc processResponse( let status = requests[idx].value().status if d.selectedResponse.isNone: d.selectedResponse = Opt.some(idx) + d.selectedStatus = Opt.some(status) elif not d.disagreementAlreadyDetected: let prevStatus = requests[d.selectedResponse.get].value().status case compareStatuses(status, prevStatus) of newStatusIsPreferable: d.selectedResponse = Opt.some(idx) + d.selectedStatus = Opt.some(status) of oldStatusIsOk: discard of disagreement: @@ -955,6 +959,21 @@ proc processResponse( url2 = connections[idx].engineUrl.url, status2 = status +proc couldBeBetter(d: ELConsensusViolationDetector): bool = + const + SyncingOrAccepted = { + PayloadExecutionStatus.syncing, + PayloadExecutionStatus.accepted + } + if d.disagreementAlreadyDetected: + return false + if d.selectedStatus.isNone(): + return true + if d.selectedStatus.get() in SyncingOrAccepted: + true + else: + false + proc lazyWait(futures: seq[FutureBase]) {.async: (raises: []).} = block: let pending = futures.filterIt(not(it.finished())) @@ -995,15 +1014,30 @@ proc sendNewPayload*( let req = when typeof(blck).kind >= ConsensusFork.Electra: # https://github.com/ethereum/execution-apis/blob/4140e528360fea53c34a766d86a000c6c039100e/src/engine/prague.md#engine_newpayloadv4 - let versioned_hashes = mapIt( - blck.body.blob_kzg_commitments, - engine_api.VersionedHash(kzg_commitment_to_versioned_hash(it))) + let + versioned_hashes = mapIt( + blck.body.blob_kzg_commitments, + engine_api.VersionedHash(kzg_commitment_to_versioned_hash(it))) + # https://github.com/ethereum/execution-apis/blob/7c9772f95c2472ccfc6f6128dc2e1b568284a2da/src/engine/prague.md#request + # "Each list element is a `requests` byte array as defined by + # EIP-7685. The first byte of each element is the `request_type` + # and the remaining bytes are the `request_data`. Elements of + # the list MUST be ordered by `request_type` in ascending order. 
+ # Elements with empty `request_data` MUST be excluded from the + # list." + execution_requests = block: + var requests: seq[seq[byte]] + for request_type, request_data in + [SSZ.encode(blck.body.execution_requests.deposits), + SSZ.encode(blck.body.execution_requests.withdrawals), + SSZ.encode(blck.body.execution_requests.consolidations)]: + if request_data.len > 0: + requests.add @[request_type.byte] & request_data + requests + sendNewPayloadToSingleEL( it, payload, versioned_hashes, - FixedBytes[32] blck.parent_root.data, - [SSZ.encode(blck.body.execution_requests.deposits), - SSZ.encode(blck.body.execution_requests.withdrawals), - SSZ.encode(blck.body.execution_requests.consolidations)]) + FixedBytes[32] blck.parent_root.data, execution_requests) elif typeof(blck).kind == ConsensusFork.Deneb: # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/deneb/beacon-chain.md#process_execution_payload # Verify the execution payload is valid @@ -1055,11 +1089,14 @@ proc sendNewPayload*( await noCancel allFutures(pending) return PayloadExecutionStatus.invalid elif responseProcessor.selectedResponse.isSome(): - # We spawn task which will wait for all other responses which are - # still pending, after 30.seconds all pending requests will be - # cancelled. - asyncSpawn lazyWait(pendingRequests.mapIt(FutureBase(it))) - return requests[responseProcessor.selectedResponse.get].value().status + if (len(pendingRequests) == 0) or + not(responseProcessor.couldBeBetter()): + # We spawn task which will wait for all other responses which are + # still pending, after 30.seconds all pending requests will be + # cancelled. + asyncSpawn lazyWait(pendingRequests.mapIt(FutureBase(it))) + return + requests[responseProcessor.selectedResponse.get].value().status if timeoutExceeded: # Timeout exceeded, cancelling all pending requests. @@ -1130,7 +1167,7 @@ proc forkchoiceUpdated*( # block hash provided by this event is stubbed with # `0x0000000000000000000000000000000000000000000000000000000000000000`." # and - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/validator.md#executionpayload + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/validator.md#executionpayload # notes "`finalized_block_hash` is the hash of the latest finalized execution # payload (`Hash32()` if none yet finalized)" @@ -1305,9 +1342,10 @@ proc exchangeConfigWithSingleEL( # https://chainid.network/ expectedChain = case m.eth1Network.get - of mainnet: 1.Quantity - of sepolia: 11155111.Quantity - of holesky: 17000.Quantity + of mainnet: 1.u256 + of sepolia: 11155111.u256 + of holesky: 17000.u256 + of hoodi: 560048.u256 if expectedChain != providerChain: warn "The specified EL client is connected to a different chain", url = connection.engineUrl, diff --git a/beacon_chain/el/eth1_chain.nim b/beacon_chain/el/eth1_chain.nim index 27808bf403..5383bafe1f 100644 --- a/beacon_chain/el/eth1_chain.nim +++ b/beacon_chain/el/eth1_chain.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
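The `execution_requests` block added to `sendNewPayload` above flattens the typed request lists into the `seq[seq[byte]]` shape passed to `engine_newPayloadV4`: each element gets its request type as a one-byte prefix and empty lists are dropped, which keeps the result ordered by type as the EIP-7685 text quoted in the comment requires. A hedged, standalone sketch of the same loop with made-up payload bytes (types 0/1/2 standing for deposits/withdrawals/consolidations):

    let encoded_by_type = [
      @[byte 0xaa, 0xbb],   # deposits (request_type 0), made-up bytes
      newSeq[byte](0),      # withdrawals (request_type 1), empty - excluded
      @[byte 0xcc]]         # consolidations (request_type 2), made-up bytes
    var requests: seq[seq[byte]]
    for request_type, request_data in encoded_by_type:
      if request_data.len > 0:
        requests.add @[request_type.byte] & request_data
    doAssert requests == @[@[byte 0x00, 0xaa, 0xbb], @[byte 0x02, 0xcc]]

Only the non-empty entries survive, each carrying its ascending type prefix, which is why the empty withdrawals list does not appear in the result.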
@@ -82,11 +82,11 @@ type deposits*: seq[Deposit] hasMissingDeposits*: bool -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#get_eth1_data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 = genesis_time + slot * SECONDS_PER_SLOT -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#get_eth1_data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data func voting_period_start_time(state: ForkedHashedBeaconState): uint64 = let eth1_voting_period_start_slot = getStateField(state, slot) - getStateField(state, slot) mod @@ -94,7 +94,7 @@ func voting_period_start_time(state: ForkedHashedBeaconState): uint64 = compute_time_at_slot( getStateField(state, genesis_time), eth1_voting_period_start_slot) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#get_eth1_data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data func is_candidate_block(cfg: RuntimeConfig, blk: Eth1Block, period_start: uint64): bool = @@ -274,7 +274,7 @@ proc trackFinalizedState*(chain: var Eth1Chain, if result: chain.pruneOldBlocks(finalizedStateDepositIndex) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#get_eth1_data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#get_eth1_data proc getBlockProposalData*(chain: var Eth1Chain, state: ForkedHashedBeaconState, finalizedEth1Data: Eth1Data, @@ -333,7 +333,7 @@ proc getBlockProposalData*(chain: var Eth1Chain, totalDepositsInNewBlock = withState(state): when consensusFork >= ConsensusFork.Electra: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/electra/validator.md#deposits + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/validator.md#deposits let eth1_deposit_index_limit = min( forkyState.data.eth1_data.deposit_count, forkyState.data.deposit_requests_start_index) diff --git a/beacon_chain/el/merkle_minimal.nim b/beacon_chain/el/merkle_minimal.nim index 65c4f76e5a..e7c90d3345 100644 --- a/beacon_chain/el/merkle_minimal.nim +++ b/beacon_chain/el/merkle_minimal.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
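For the eth1 voting-period helpers shown above, the period start time is simply the genesis time plus the period's first slot times the slot duration. A small worked example, assuming mainnet-style constants (SECONDS_PER_SLOT = 12, SLOTS_PER_EPOCH = 32, EPOCHS_PER_ETH1_VOTING_PERIOD = 64) and an illustrative genesis timestamp:

    const
      SECONDS_PER_SLOT = 12'u64
      SLOTS_PER_EPOCH = 32'u64
      EPOCHS_PER_ETH1_VOTING_PERIOD = 64'u64
    let
      genesis_time = 1_606_824_023'u64   # illustrative value, not from this diff
      slot = 10_000'u64
      slots_per_period = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH   # 2048
      eth1_voting_period_start_slot = slot - slot mod slots_per_period     # 8192
      period_start_time =
        genesis_time + eth1_voting_period_start_slot * SECONDS_PER_SLOT
    doAssert eth1_voting_period_start_slot == 8192'u64
    doAssert period_start_time == genesis_time + 8192'u64 * SECONDS_PER_SLOT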
@@ -7,7 +7,7 @@ {.push raises: [].} -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/tests/core/pyspec/eth2spec/utils/merkle_minimal.py +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/tests/core/pyspec/eth2spec/utils/merkle_minimal.py # Merkle tree helpers # --------------------------------------------------------------- diff --git a/beacon_chain/era_db.nim b/beacon_chain/era_db.nim index cf43f8501f..51828fbc84 100644 --- a/beacon_chain/era_db.nim +++ b/beacon_chain/era_db.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -12,7 +12,6 @@ import chronicles, results, snappy, taskpools, ../ncli/era, - ./spec/datatypes/[altair, bellatrix, phase0], ./spec/[beaconstate, forks, signatures_batch], ./consensus_object_pools/block_dag # TODO move to somewhere else to avoid circular deps diff --git a/beacon_chain/fork_choice/fork_choice.nim b/beacon_chain/fork_choice/fork_choice.nim index f77cecae74..338e6fdcbc 100644 --- a/beacon_chain/fork_choice/fork_choice.nim +++ b/beacon_chain/fork_choice/fork_choice.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -22,7 +22,6 @@ import export results, fork_choice_types export proto_array.len -# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/fork-choice.md # This is a port of https://github.com/sigp/lighthouse/pull/804 # which is a port of "Proto-Array": https://github.com/protolambda/lmd-ghost # See also: @@ -109,7 +108,7 @@ proc update_justified( self.update_justified(dag, blck, justified.epoch) ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/fork-choice.md#update_checkpoints +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/fork-choice.md#update_checkpoints proc update_checkpoints( self: var Checkpoints, dag: ChainDAGRef, checkpoints: FinalityCheckpoints): FcResult[void] = @@ -373,7 +372,7 @@ proc get_head*(self: var ForkChoice, self.checkpoints.justified.balances, self.checkpoints.proposer_boost_root) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/fork_choice/safe-block.md#get_safe_beacon_block_root +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/fork_choice/safe-block.md#get_safe_beacon_block_root func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest = # Use most recent justified block as a stopgap self.checkpoints.justified.checkpoint.root diff --git a/beacon_chain/fork_choice/proto_array.nim b/beacon_chain/fork_choice/proto_array.nim index 2f105a8717..7b344f5090 100644 --- a/beacon_chain/fork_choice/proto_array.nim +++ b/beacon_chain/fork_choice/proto_array.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -592,7 +592,7 @@ func propagateInvalidity*( if parentPhysicalIdx < 0 or parentPhysicalIdx >= self.nodes.len: continue - # Invalidity transmits to all descendents + # Invalidity transmits to all descendants if self.nodes.buf[parentPhysicalIdx].invalid: self.nodes.buf[nodePhysicalIdx].invalid = true diff --git a/beacon_chain/gossip_processing/README.md b/beacon_chain/gossip_processing/README.md index dbb8e00178..1cf74f4ed2 100644 --- a/beacon_chain/gossip_processing/README.md +++ b/beacon_chain/gossip_processing/README.md @@ -10,10 +10,10 @@ This folder holds a collection of modules to: Gossip validation is different from consensus verification in particular for blocks. 
- Blocks: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block -- Attestations (aggregated): https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof -- Attestations (unaggregated): https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#attestation-subnets +- Attestations (aggregated): https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof +- Attestations (unaggregated): https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnets - Voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#voluntary_exit -- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#proposer_slashing +- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#proposer_slashing - Attester slashing: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#attester_slashing There are multiple consumers of validated consensus objects: diff --git a/beacon_chain/gossip_processing/batch_validation.nim b/beacon_chain/gossip_processing/batch_validation.nim index 5c98cf10c7..bd700996da 100644 --- a/beacon_chain/gossip_processing/batch_validation.nim +++ b/beacon_chain/gossip_processing/batch_validation.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). @@ -144,7 +144,7 @@ type proc new*( T: type BatchCrypto, rng: ref HmacDrbgContext, - eager: Eager, genesis_validators_root: Eth2Digest, taskpool: TaskPoolPtr): + eager: Eager, genesis_validators_root: Eth2Digest, taskpool: Taskpool): Result[ref BatchCrypto, string] = let res = (ref BatchCrypto)( rng: rng, taskpool: taskpool, @@ -419,8 +419,9 @@ proc scheduleAttestationCheck*( proc scheduleAggregateChecks*( batchCrypto: ref BatchCrypto, fork: Fork, - signedAggregateAndProof: phase0.SignedAggregateAndProof, dag: ChainDAGRef, - attesting_indices: openArray[ValidatorIndex] + signedAggregateAndProof: + phase0.SignedAggregateAndProof | electra.SignedAggregateAndProof, + dag: ChainDAGRef, attesting_indices: openArray[ValidatorIndex] ): Result[tuple[ aggregatorFut, slotFut, aggregateFut: FutureBatchResult, sig: CookedSig], cstring] = diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index c8a009c7e5..aab58d0df1 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -351,7 +351,7 @@ proc getExecutionValidity( of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash: # Blocks come either from gossip or request manager requests. In the - # former case, they've passed libp2p gosisp validation which implies + # former case, they've passed libp2p gossip validation which implies # correct signature for correct proposer,which makes spam expensive, # while for the latter, spam is limited by the request manager. info "execution payload invalid from EL client newPayload", @@ -898,7 +898,7 @@ proc processBlock( # - MUST NOT optimistically import the block. # - MUST NOT apply the block to the fork choice store. # - MAY queue the block for later processing. - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/sync/optimistic.md#execution-engine-errors + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/sync/optimistic.md#execution-engine-errors await sleepAsync(chronos.seconds(1)) self[].enqueueBlock( entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized, diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 0c7a963ee4..3d1382c4de 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -169,7 +169,7 @@ proc new*(T: type Eth2Processor, blobQuarantine: ref BlobQuarantine, rng: ref HmacDrbgContext, getBeaconTime: GetBeaconTimeFn, - taskpool: TaskPoolPtr + taskpool: Taskpool ): ref Eth2Processor = (ref Eth2Processor)( doppelgangerDetectionEnabled: doppelgangerDetectionEnabled, @@ -236,6 +236,9 @@ proc processSignedBeaconBlock*( # propagation of seemingly good blocks trace "Block validated" + if not(isNil(self.dag.onBlockGossipAdded)): + self.dag.onBlockGossipAdded(ForkedSignedBeaconBlock.init(signedBlock)) + let blobs = when typeof(signedBlock).kind >= ConsensusFork.Deneb: if self.blobQuarantine[].hasBlobs(signedBlock): @@ -338,7 +341,7 @@ func clearDoppelgangerProtection*(self: var Eth2Processor) = proc checkForPotentialDoppelganger( self: var Eth2Processor, - attestation: phase0.Attestation | electra.Attestation, + attestation: phase0.Attestation | electra.Attestation | SingleAttestation, attesterIndices: openArray[ValidatorIndex]) = # Only check for attestations after node launch. 
There might be one slot of # overlap in quick intra-slot restarts so trade off a few true negatives in @@ -360,8 +363,8 @@ proc checkForPotentialDoppelganger( proc processAttestation*( self: ref Eth2Processor, src: MsgSource, - attestation: phase0.Attestation | electra.Attestation, subnet_id: SubnetId, - checkSignature, checkValidator: bool + attestation: phase0.Attestation | SingleAttestation, + subnet_id: SubnetId, checkSignature, checkValidator: bool ): Future[ValidationRes] {.async: (raises: [CancelledError]).} = var wallTime = self.getCurrentBeaconTime() let (afterGenesis, wallSlot) = wallTime.toSlot() @@ -380,14 +383,14 @@ proc processAttestation*( debug "Attestation received", delay # Now proceed to validation - let v = - await self.attestationPool.validateAttestation( - self.batchCrypto, attestation, wallTime, subnet_id, checkSignature) + let v = await self.attestationPool.validateAttestation( + self.batchCrypto, attestation, wallTime, subnet_id, checkSignature) return if v.isOk(): # Due to async validation the wallTime here might have changed wallTime = self.getCurrentBeaconTime() - let (attester_index, sig) = v.get() + let (attester_index, beacon_committee_len, index_in_committee, sig) = + v.get() if checkValidator and (attester_index in self.validatorPool[]): warn "A validator client has attempted to send an attestation from " & @@ -400,7 +403,8 @@ proc processAttestation*( trace "Attestation validated" self.attestationPool[].addAttestation( - attestation, [attester_index], sig, wallTime) + attestation, [attester_index], beacon_committee_len, + index_in_committee, sig, wallTime) self.validatorMonitor[].registerAttestation( src, wallTime, attestation, attester_index) @@ -456,8 +460,11 @@ proc processSignedAggregateAndProof*( trace "Aggregate validated" + # -1 here is the notional index in committee for which the attestation pool + # only requires external input regarding SingleAttestation messages. self.attestationPool[].addAttestation( - signedAggregateAndProof.message.aggregate, attesting_indices, sig, + signedAggregateAndProof.message.aggregate, attesting_indices, + signedAggregateAndProof.message.aggregate.aggregation_bits.len, -1, sig, wallTime) self.validatorMonitor[].registerAggregate( diff --git a/beacon_chain/gossip_processing/gossip_validation.nim b/beacon_chain/gossip_processing/gossip_validation.nim index d42d1c2be2..e0f3e7e851 100644 --- a/beacon_chain/gossip_processing/gossip_validation.nim +++ b/beacon_chain/gossip_processing/gossip_validation.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). 
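The attestation-processing changes above now hand the attestation pool a beacon-committee length and an index-in-committee alongside the attester index, with -1 serving as the placeholder when the source is an aggregate rather than a `SingleAttestation`. A hedged illustration of that convention (the object and proc names here are made up, not the pool's actual API):

    type AttestationSource = object
      attester_index: int
      beacon_committee_len: int
      index_in_committee: int   # -1 for aggregates, real position for SingleAttestation

    func fromSingleAttestation(src: AttestationSource): bool =
      # Only subnet (single) attestations carry a meaningful committee position.
      src.index_in_committee >= 0

    doAssert AttestationSource(
      attester_index: 7, beacon_committee_len: 64,
      index_in_committee: 5).fromSingleAttestation
    doAssert not AttestationSource(
      attester_index: 7, beacon_committee_len: 64,
      index_in_committee: -1).fromSingleAttestation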
@@ -16,7 +16,7 @@ import # Internals ../spec/[ beaconstate, state_transition_block, forks, - helpers, network, signatures, eip7594_helpers], + helpers, network, signatures, peerdas_helpers], ../consensus_object_pools/[ attestation_pool, blockchain_dag, blob_quarantine, block_quarantine, data_column_quarantine, spec_cache, light_client_pool, sync_committee_msg_pool, @@ -95,7 +95,7 @@ func check_propagation_slot_range( return ok(msgSlot) if consensusFork < ConsensusFork.Deneb: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#configuration + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#configuration # The spec value of ATTESTATION_PROPAGATION_SLOT_RANGE is 32, but it can # retransmit attestations on the cusp of being out of spec, and which by # the time they reach their destination might be out of spec. @@ -293,18 +293,22 @@ template checkedReject( pool: ValidatorChangePool, error: ValidationError): untyped = pool.dag.checkedReject(error) +func getMaxBlobsPerBlock(cfg: RuntimeConfig, slot: Slot): uint64 = + if slot >= cfg.ELECTRA_FORK_EPOCH.start_slot: + cfg.MAX_BLOBS_PER_BLOCK_ELECTRA + else: + cfg.MAX_BLOBS_PER_BLOCK + template validateBeaconBlockBellatrix( - signed_beacon_block: phase0.SignedBeaconBlock | altair.SignedBeaconBlock, - parent: BlockRef): untyped = + _: phase0.SignedBeaconBlock | altair.SignedBeaconBlock, + _: BlockRef): untyped = discard # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block template validateBeaconBlockBellatrix( signed_beacon_block: - bellatrix.SignedBeaconBlock | - capella.SignedBeaconBlock | - deneb.SignedBeaconBlock | - electra.SignedBeaconBlock | + bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock | + deneb.SignedBeaconBlock | electra.SignedBeaconBlock | fulu.SignedBeaconBlock, parent: BlockRef): untyped = # If the execution is enabled for the block -- i.e. @@ -354,6 +358,29 @@ template validateBeaconBlockBellatrix( # cannot occur here, because Nimbus's optimistic sync waits for either # `ACCEPTED` or `SYNCING` from the EL to get this far. +template validateBeaconBlockDeneb( + _: ChainDAGRef, + _: + phase0.SignedBeaconBlock | altair.SignedBeaconBlock | + bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock, + _: BeaconTime): untyped = + discard + +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/p2p-interface.md#beacon_block +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/electra/p2p-interface.md#beacon_block +template validateBeaconBlockDeneb( + dag: ChainDAGRef, + signed_beacon_block: + deneb.SignedBeaconBlock | electra.SignedBeaconBlock | + fulu.SignedBeaconBlock, + wallTime: BeaconTime): untyped = + # [REJECT] The length of KZG commitments is less than or equal to the + # limitation defined in Consensus Layer -- i.e. 
validate that + # len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK + if not (lenu64(signed_beacon_block.message.body.blob_kzg_commitments) <= + dag.cfg.getMaxBlobsPerBlock(signed_beacon_block.message.slot)): + return dag.checkedReject("validateBeaconBlockDeneb: too many blob commitments") + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id proc validateBlobSidecar*( dag: ChainDAGRef, quarantine: ref Quarantine, @@ -367,12 +394,13 @@ proc validateBlobSidecar*( # [REJECT] The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK` # -- i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK` - if not (blob_sidecar.index < MAX_BLOBS_PER_BLOCK): + if not (blob_sidecar.index < dag.cfg.getMaxBlobsPerBlock(block_header.slot)): return dag.checkedReject("BlobSidecar: index inconsistent") # [REJECT] The sidecar is for the correct subnet -- i.e. # `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`. - if not (compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id): + if not (dag.cfg.compute_subnet_for_blob_sidecar( + block_header.slot, blob_sidecar.index) == subnet_id): return dag.checkedReject("BlobSidecar: subnet incorrect") # [IGNORE] The sidecar is not from a future slot (with a @@ -395,8 +423,20 @@ proc validateBlobSidecar*( let block_root = hash_tree_root(block_header) if dag.getBlockRef(block_root).isSome(): return errIgnore("BlobSidecar: already have block") + + # This adds KZG commitment matching to the spec gossip validation. It's an + # IGNORE condition, so it shouldn't affect Nimbus's scoring, and when some + # (slashable) double proposals happen with blobs present, without this one + # or the other block, or potentially both, won't get its full set of blobs + # through gossip validation and have to backfill them later. There is some + # cost in slightly more outgoing bandwidth on such double-proposals but it + # remains insignificant compared with other bandwidth usage. + # + # It would be good to fix this more properly, but this has come up often on + # Pectra devnet-6. if blobQuarantine[].hasBlob( - block_header.slot, block_header.proposer_index, blob_sidecar.index): + block_header.slot, block_header.proposer_index, blob_sidecar.index, + blob_sidecar.kzg_commitment): return errIgnore("BlobSidecar: already have valid blob from same proposer") # [REJECT] The sidecar's inclusion proof is valid as verified by @@ -493,10 +533,10 @@ proc validateBlobSidecar*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/_features/eip7594/p2p-interface.md#data_column_sidecar_subnet_id +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id proc validateDataColumnSidecar*( dag: ChainDAGRef, quarantine: ref Quarantine, - dataColumnQuarantine: ref DataColumnQuarantine, + dataColumnQuarantine: ref DataColumnQuarantine, data_column_sidecar: DataColumnSidecar, wallTime: BeaconTime, subnet_id: uint64): Result[void, ValidationError] = @@ -508,14 +548,14 @@ proc validateDataColumnSidecar*( if not (data_column_sidecar.index < NUMBER_OF_COLUMNS): return dag.checkedReject("DataColumnSidecar: The sidecar's index should be consistent with NUMBER_OF_COLUMNS") - # [REJECT] The sidecar is for the correct subnet + # [REJECT] The sidecar is for the correct subnet # -- i.e. `compute_subnet_for_data_column_sidecar(blob_sidecar.index) == subnet_id`. 
if not (compute_subnet_for_data_column_sidecar(data_column_sidecar.index) == subnet_id): return dag.checkedReject("DataColumnSidecar: The sidecar is not for the correct subnet") - # [IGNORE] The sidecar is not from a future slot - # (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that - # `block_header.slot <= current_slot`(a client MAY queue future sidecars for + # [IGNORE] The sidecar is not from a future slot + # (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + # `block_header.slot <= current_slot`(a client MAY queue future sidecars for # processing at the appropriate slot). if not (block_header.slot <= (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero): @@ -608,7 +648,7 @@ proc validateDataColumnSidecar*( data_column_sidecar.signed_block_header.signature): return dag.checkedReject("DataColumnSidecar: Invalid proposer signature") - # [REJECT] The sidecar's column data is valid as + # [REJECT] The sidecar's column data is valid as # verified by `verify_data_column_kzg_proofs(sidecar)` block: let r = check_data_column_sidecar_kzg_proofs(data_column_sidecar) @@ -760,6 +800,8 @@ proc validateBeaconBlock*( # validation. validateBeaconBlockBellatrix(signed_beacon_block, parent) + dag.validateBeaconBlockDeneb(signed_beacon_block, wallTime) + # [REJECT] The block is from a higher slot than its parent. if not (signed_beacon_block.message.slot > parent.bid.slot): return dag.checkedReject( @@ -824,7 +866,8 @@ proc validateAttestation*( wallTime: BeaconTime, subnet_id: SubnetId, checkSignature: bool): Future[Result[ - tuple[attesting_index: ValidatorIndex, sig: CookedSig], + tuple[attesting_index: ValidatorIndex, beacon_committee_len: int, + index_in_committee: int, sig: CookedSig], ValidationError]] {.async: (raises: [CancelledError]).} = # Some of the checks below have been reordered compared to the spec, to # perform the cheap checks first - in particular, we want to avoid loading @@ -882,13 +925,12 @@ proc validateAttestation*( # defined by attestation.data.beacon_block_root -- i.e. # get_checkpoint_block(store, attestation.data.beacon_block_root, # store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root - let - shufflingRef = - pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: - # Target is verified - shouldn't happen - warn "No shuffling for attestation - report bug", - attestation = shortLog(attestation), target = shortLog(target) - return errIgnore("Attestation: no shuffling") + let shufflingRef = + pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: + # Target is verified - shouldn't happen + warn "No shuffling for attestation - report bug", + attestation = shortLog(attestation), target = shortLog(target) + return errIgnore("Attestation: no shuffling") # [REJECT] The committee index is within the expected range -- i.e. # data.index < get_committee_count_per_slot(state, data.target.epoch). @@ -949,7 +991,6 @@ proc validateAttestation*( return errIgnore("Attestation: cannot find validator pubkey") # [REJECT] The signature of `attestation` is valid. - # In the spec, is_valid_indexed_attestation is used to verify the signature - # here, we do a batch verification instead let sig = @@ -984,17 +1025,26 @@ proc validateAttestation*( pool.nextAttestationEpoch[validator_index].subnet = attestation.data.target.epoch + 1 - return ok((validator_index, sig)) + # -1 is a placeholder; it's filled in by processAttestation(), which has + # access to the required information. 
+ ok((validator_index, attestation.aggregation_bits.len, -1, sig)) +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/electra/p2p-interface.md#beacon_attestation_subnet_id proc validateAttestation*( pool: ref AttestationPool, batchCrypto: ref BatchCrypto, - attestation: electra.Attestation, + attestation: SingleAttestation, wallTime: BeaconTime, subnet_id: SubnetId, checkSignature: bool): Future[Result[ - tuple[attesting_index: ValidatorIndex, sig: CookedSig], + tuple[attesting_index: ValidatorIndex, beacon_committee_len: int, + index_in_committee: int, sig: CookedSig], ValidationError]] {.async: (raises: [CancelledError]).} = + # Some of the checks below have been reordered compared to the spec, to + # perform the cheap checks first - in particular, we want to avoid loading + # an `EpochRef` and checking signatures. This reordering might lead to + # different IGNORE/REJECT results in turn affecting gossip scores. + # [REJECT] The attestation's epoch matches its target -- i.e. # attestation.data.target.epoch == # compute_epoch_at_slot(attestation.data.slot) @@ -1004,6 +1054,25 @@ proc validateAttestation*( return pool.checkedReject(v.error()) v.get() + # attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE + # slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. + # attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot + # >= attestation.data.slot (a client MAY queue future attestations for + # processing at the appropriate slot). + # + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id + # modifies this for Deneb and newer forks. + block: + let v = check_propagation_slot_range( + pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot, + wallTime) + if v.isErr(): # [IGNORE] + return err(v.error()) + + # [REJECT] attestation.data.index == 0 + if not (attestation.data.index == 0): + return pool.checkedReject("SingleAttestation: attestation.data.index != 0") + # The block being voted for (attestation.data.beacon_block_root) has been seen # (via both gossip and non-gossip sources) (a client MAY queue attestations # for processing once block is retrieved). @@ -1016,6 +1085,48 @@ proc validateAttestation*( return pool.checkedResult(v.error) v.get() + if attestation.attester_index > high(ValidatorIndex).uint64: + return errReject("SingleAttestation: attester index too high") + let validator_index = attestation.attester_index.ValidatorIndex + + # [REJECT] The signature of `attestation` is valid. 
+ # In the spec, is_valid_indexed_attestation is used to verify the signature - + # here, we do a batch verification instead + var sigchecked = false + var sig: CookedSig + template doSigCheck: untyped = + let + fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch) + pubkey = pool.dag.validatorKey(validator_index).valueOr: + # can't happen, in theory, because we checked the aggregator index above + return errIgnore("Attestation: cannot find validator pubkey") + + sigchecked = true + sig = + if checkSignature: + # Attestation signatures are batch-verified + let deferredCrypto = batchCrypto + .scheduleAttestationCheck( + fork, attestation.data, pubkey, + attestation.signature) + if deferredCrypto.isErr(): + return pool.checkedReject(deferredCrypto.error) + + let (cryptoFut, sig) = deferredCrypto.get() + # Await the crypto check + let x = (await cryptoFut) + case x + of BatchResult.Invalid: + return pool.checkedReject("Attestation: invalid signature") + of BatchResult.Timeout: + beacon_attestations_dropped_queue_full.inc() + return errIgnore("Attestation: timeout checking signature") + of BatchResult.Valid: + sig # keep going only in this case + else: + attestation.signature.load().valueOr: + return pool.checkedReject("Attestation: unable to load signature") + # The following rule follows implicitly from that we clear out any # unviable blocks from the chain dag: # @@ -1023,40 +1134,71 @@ proc validateAttestation*( # defined by attestation.data.beacon_block_root -- i.e. # get_checkpoint_block(store, attestation.data.beacon_block_root, # store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root - let - shufflingRef = + let shufflingRef = + pool.dag.findShufflingRef(target.blck.bid, target.slot.epoch).valueOr: + # getShufflingRef might be slow here, so first try to eliminate by + # signature check + doSigCheck() pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: # Target is verified - shouldn't happen - warn "No shuffling for attestation - report bug", + warn "No shuffling for SingleAttestation - report bug", attestation = shortLog(attestation), target = shortLog(target) - return errIgnore("Attestation: no shuffling") + return errIgnore("SingleAttestation: no shuffling") + + # [REJECT] The committee index is within the expected range -- i.e. + # data.index < get_committee_count_per_slot(state, data.target.epoch). + let committee_index = block: + let idx = shufflingRef.get_committee_index(attestation.committee_index) + if idx.isErr(): + return pool.checkedReject( + "Attestation: committee index not within expected range") + idx.get() - let attesting_index = get_attesting_indices_one( - shufflingRef, slot, attestation.committee_bits, - attestation.aggregation_bits, false) + # [REJECT] The attester is a member of the committee -- i.e. + # attestation.attester_index in + # get_beacon_committee(state, attestation.data.slot, index). + let + beacon_committee = get_beacon_committee( + shufflingRef, attestation.data.slot, committee_index) + index_in_committee = find(beacon_committee, validator_index) + if index_in_committee < 0: + return pool.checkedReject("SingleAttestation: attester index not in beacon committee") - # The number of aggregation bits matches the committee size, which ensures - # this condition holds. - doAssert attesting_index.isSome(), - "We've checked bits length and one count already" - let validator_index = attesting_index.get() + # [REJECT] The attestation is for the correct subnet -- i.e. 
+ # compute_subnet_for_attestation(committees_per_slot, + # attestation.data.slot, attestation.data.index) == subnet_id, where + # committees_per_slot = get_committee_count_per_slot(state, + # attestation.data.target.epoch), which may be pre-computed along with the + # committee information for the signature check. + block: + let v = check_attestation_subnet( + shufflingRef, attestation.data.slot, committee_index, subnet_id) + if v.isErr(): # [REJECT] + return pool.checkedReject(v.error) # In the spec, is_valid_indexed_attestation is used to verify the signature - # here, we do a batch verification instead - let sig = - attestation.signature.load().valueOr: - return pool.checkedReject("Attestation: unable to load signature") + if not sigchecked: + # findShufflingRef did find a cached ShufflingRef, which means the early + # signature check was skipped, so do it now. + doSigCheck() - return ok((validator_index, sig)) + # Only valid attestations go in the list, which keeps validator_index + # in range + if not (pool.nextAttestationEpoch.lenu64 > validator_index.uint64): + pool.nextAttestationEpoch.setLen(validator_index.int + 1) + pool.nextAttestationEpoch[validator_index].subnet = + attestation.data.target.epoch + 1 + ok((validator_index, beacon_committee.len, index_in_committee, sig)) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/p2p-interface.md#beacon_aggregate_and_proof proc validateAggregate*( - pool: ref AttestationPool, - batchCrypto: ref BatchCrypto, - signedAggregateAndProof: phase0.SignedAggregateAndProof, - wallTime: BeaconTime, - checkSignature = true, checkCover = true): + pool: ref AttestationPool, batchCrypto: ref BatchCrypto, + signedAggregateAndProof: + phase0.SignedAggregateAndProof | electra.SignedAggregateAndProof, + wallTime: BeaconTime, checkSignature = true, checkCover = true): Future[Result[ tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig], ValidationError]] {.async: (raises: [CancelledError]).} = @@ -1076,6 +1218,11 @@ proc validateAggregate*( return pool.checkedReject(v.error) v.get() + # [REJECT] aggregate.data.index == 0 + when signedAggregateAndProof is electra.SignedAggregateAndProof: + if not(aggregate.data.index == 0): + return pool.checkedReject("Aggregate: Electra aggregate.data.index != 0") + # [IGNORE] aggregate.data.slot is within the last # ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a # MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot + @@ -1140,23 +1287,38 @@ proc validateAggregate*( # [REJECT] The committee index is within the expected range -- i.e. # data.index < get_committee_count_per_slot(state, data.target.epoch). 
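The `SingleAttestation` path above re-derives the beacon committee, checks that the attester index appears in it, and then verifies the gossip subnet. A self-contained sketch of those two checks follows, using the phase0 `compute_subnet_for_attestation` formula; the helper names and the plain-seq committee representation are this sketch's own, not the Nimbus API.

```nim
# Sketch of the membership + subnet checks; constants and helpers are
# simplified stand-ins.
const
  SLOTS_PER_EPOCH = 32'u64
  ATTESTATION_SUBNET_COUNT = 64'u64

type SubnetId = distinct uint64

func computeSubnetForAttestation(
    committeesPerSlot, slot, committeeIndex: uint64): SubnetId =
  ## Per-spec mapping: attestations for a given (slot, committee) pair all
  ## land on the same subnet.
  let
    slotsSinceEpochStart = slot mod SLOTS_PER_EPOCH
    committeesSinceEpochStart = committeesPerSlot * slotsSinceEpochStart
  SubnetId((committeesSinceEpochStart + committeeIndex) mod
    ATTESTATION_SUBNET_COUNT)

func checkMembershipAndSubnet(
    beaconCommittee: seq[int], attesterIndex: int,
    committeesPerSlot, slot, committeeIndex, wireSubnetId: uint64):
    tuple[indexInCommittee: int, ok: bool] =
  ## [REJECT]-style checks: the attester must be a member of the committee,
  ## and the message must have arrived on the subnet derived from slot and
  ## committee index.
  let idx = beaconCommittee.find(attesterIndex)
  if idx < 0:
    return (indexInCommittee: -1, ok: false)
  let expected = computeSubnetForAttestation(
    committeesPerSlot, slot, committeeIndex)
  (indexInCommittee: idx, ok: uint64(expected) == wireSubnetId)

when isMainModule:
  let committee = @[11, 42, 77]
  let r = checkMembershipAndSubnet(committee, 42, 4, 33, 2, 6)
  doAssert r.indexInCommittee == 1 and r.ok  # (4 * (33 mod 32) + 2) mod 64 == 6
  doAssert not checkMembershipAndSubnet(committee, 99, 4, 33, 2, 6).ok
```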
let committee_index = block: - let idx = shufflingRef.get_committee_index(aggregate.data.index) + when kind(typeof(signedAggregateAndProof)) == ConsensusFork.Electra: + # [REJECT] len(committee_indices) == 1, where committee_indices = + # get_committee_indices(aggregate) + let agg_idx = get_committee_index_one(aggregate.committee_bits).valueOr: + return pool.checkedReject("Aggregate: got multiple committee bits") + let idx = shufflingRef.get_committee_index(agg_idx.uint64) + elif kind(typeof(signedAggregateAndProof)) == ConsensusFork.Phase0: + let idx = shufflingRef.get_committee_index(aggregate.data.index) + else: + static: doAssert false if idx.isErr(): return pool.checkedReject( - "Attestation: committee index not within expected range") + "Aggregate: committee index not within expected range") idx.get() if not aggregate.aggregation_bits.compatible_with_shuffling( shufflingRef, slot, committee_index): return pool.checkedReject( "Aggregate: number of aggregation bits and committee size mismatch") - if checkCover and - pool[].covers(aggregate.data, aggregate.aggregation_bits): - # [IGNORE] A valid aggregate attestation defined by - # `hash_tree_root(aggregate.data)` whose `aggregation_bits` is a non-strict - # superset has _not_ already been seen. - # https://github.com/ethereum/consensus-specs/pull/2847 - return errIgnore("Aggregate: already covered") + # [IGNORE] A valid aggregate attestation defined by + # `hash_tree_root(aggregate.data)` whose `aggregation_bits` is a non-strict + # superset has _not_ already been seen. + # https://github.com/ethereum/consensus-specs/pull/2847 + when kind(typeof(signedAggregateAndProof)) == ConsensusFork.Electra: + if checkCover and + pool[].covers(aggregate.data, aggregate.aggregation_bits, + aggregate.committee_bits): + return errIgnore("Aggregate: already covered") + else: + if checkCover and + pool[].covers(aggregate.data, aggregate.aggregation_bits): + return errIgnore("Aggregate: already covered") # [REJECT] aggregate_and_proof.selection_proof selects the validator as an # aggregator for the slot -- i.e. is_aggregator(state, aggregate.data.slot, @@ -1263,61 +1425,7 @@ proc validateAggregate*( return ok((attesting_indices, sig)) -proc validateAggregate*( - pool: ref AttestationPool, - batchCrypto: ref BatchCrypto, - signedAggregateAndProof: electra.SignedAggregateAndProof, - wallTime: BeaconTime, - checkSignature = true, checkCover = true): - Future[Result[ - tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig], - ValidationError]] {.async: (raises: [CancelledError]).} = - template aggregate_and_proof: untyped = signedAggregateAndProof.message - template aggregate: untyped = aggregate_and_proof.aggregate - - # [REJECT] The aggregate attestation's epoch matches its target -- i.e. - # `aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot)` - let slot = block: - let v = check_attestation_slot_target(aggregate.data) - if v.isErr(): - return pool.checkedReject(v.error) - v.get() - - # [REJECT] The block being voted for (aggregate.data.beacon_block_root) - # passes validation. 
- # [IGNORE] if block is unseen so far and enqueue it in missing blocks - let target = block: - let v = check_beacon_and_target_block(pool[], aggregate.data) - if v.isErr(): # [IGNORE/REJECT] - return pool.checkedResult(v.error) - v.get() - - let - shufflingRef = - pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: - # Target is verified - shouldn't happen - warn "No shuffling for attestation - report bug", - aggregate = shortLog(aggregate), target = shortLog(target) - return errIgnore("Aggregate: no shuffling") - - # [REJECT] The committee index is within the expected range -- i.e. - # data.index < get_committee_count_per_slot(state, data.target.epoch). - let committee_index = block: - let idx = shufflingRef.get_committee_index(aggregate.data.index) - if idx.isErr(): - return pool.checkedReject( - "Attestation: committee index not within expected range") - idx.get() - let - attesting_indices = get_attesting_indices( - shufflingRef, slot, committee_index, aggregate.aggregation_bits, false) - sig = - aggregate.signature.load().valueOr: - return pool.checkedReject("Aggregate: unable to load signature") - - ok((attesting_indices, sig)) - -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#bls_to_execution_change +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/p2p-interface.md#bls_to_execution_change proc validateBlsToExecutionChange*( pool: ValidatorChangePool, batchCrypto: ref BatchCrypto, signed_address_change: SignedBLSToExecutionChange, @@ -1371,7 +1479,7 @@ proc validateBlsToExecutionChange*( return ok() -# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attester_slashing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#attester_slashing proc validateAttesterSlashing*( pool: ValidatorChangePool, attester_slashing: phase0.AttesterSlashing | electra.AttesterSlashing): @@ -1403,7 +1511,7 @@ proc validateAttesterSlashing*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#proposer_slashing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#proposer_slashing proc validateProposerSlashing*( pool: ValidatorChangePool, proposer_slashing: ProposerSlashing): Result[void, ValidationError] = @@ -1462,7 +1570,7 @@ proc validateVoluntaryExit*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/p2p-interface.md#sync_committee_subnet_id +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#sync_committee_subnet_id proc validateSyncCommitteeMessage*( dag: ChainDAGRef, quarantine: ref Quarantine, diff --git a/beacon_chain/gossip_processing/light_client_processor.nim b/beacon_chain/gossip_processing/light_client_processor.nim index f70e437037..666c3bc1e4 100644 --- a/beacon_chain/gossip_processing/light_client_processor.nim +++ b/beacon_chain/gossip_processing/light_client_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
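The separate Electra `validateAggregate` overload removed above is folded into the generic proc earlier in the hunk, which branches on the aggregate's fork at compile time with `when`. A toy illustration of that pattern, using stand-in types rather than the real `SignedAggregateAndProof` variants:

```nim
# Toy illustration only: one generic proc accepts either fork's aggregate
# and branches at compile time, so fork-specific fields are only touched
# in the branch where they exist.
type
  Phase0Aggregate = object
    dataIndex: uint64              # pre-Electra: committee index lives here
  ElectraAggregate = object
    dataIndex: uint64              # must be 0 post-Electra
    committeeBits: array[4, bool]  # committee index encoded as a bitfield

func committeeIndex(agg: Phase0Aggregate | ElectraAggregate): int =
  when agg is ElectraAggregate:
    # Electra-only checks and fields are confined to this branch.
    doAssert agg.dataIndex == 0, "Electra aggregates keep data.index == 0"
    agg.committeeBits.find(true)
  else:
    int(agg.dataIndex)

when isMainModule:
  doAssert committeeIndex(Phase0Aggregate(dataIndex: 3)) == 3
  doAssert committeeIndex(ElectraAggregate(
    dataIndex: 0, committeeBits: [false, false, true, false])) == 2
```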
@@ -564,7 +564,7 @@ proc processLightClientFinalityUpdate*( self.latestFinalityUpdate = finality_update.toOptimistic v -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update proc processLightClientOptimisticUpdate*( self: var LightClientProcessor, src: MsgSource, optimistic_update: ForkedLightClientOptimisticUpdate diff --git a/beacon_chain/gossip_processing/optimistic_processor.nim b/beacon_chain/gossip_processing/optimistic_processor.nim index 15fd127256..9721a10bb1 100644 --- a/beacon_chain/gossip_processing/optimistic_processor.nim +++ b/beacon_chain/gossip_processing/optimistic_processor.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). @@ -94,7 +94,7 @@ proc processSignedBeaconBlock*( # Block validation is delegated to the sync committee and is done with delay. # If we forward invalid spam blocks, we may be disconnected + IP banned, # so we avoid accepting any blocks. Since we don't meaningfully contribute - # to the blocks gossip, we may also accummulate negative peer score over time. + # to the blocks gossip, we may also accumulate negative peer score over time. # However, we are actively contributing to other topics, so some of the # negative peer score may be offset through those different topics. # The practical impact depends on the actually deployed scoring heuristics. diff --git a/beacon_chain/libnimbus_lc/libnimbus_lc.h b/beacon_chain/libnimbus_lc/libnimbus_lc.h index 5b3e896147..a1f8e009b1 100644 --- a/beacon_chain/libnimbus_lc/libnimbus_lc.h +++ b/beacon_chain/libnimbus_lc/libnimbus_lc.h @@ -1,6 +1,6 @@ /** * beacon_chain - * Copyright (c) 2023-2024 Status Research & Development GmbH + * Copyright (c) 2023-2025 Status Research & Development GmbH * Licensed and distributed under either of * * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). * * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -94,7 +94,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig; * based on the given `config.yaml` file content - If successful. * @return `NULL` - If the given `config.yaml` is malformed or incompatible. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/configs/README.md + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/configs/README.md */ ETH_RESULT_USE_CHECK ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent); @@ -149,11 +149,11 @@ typedef struct ETHBeaconState ETHBeaconState; * representation - If successful. * @return `NULL` - If the given `sszBytes` is malformed. 
* - * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#beaconstate + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#beaconstate + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/configs/README.md + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#beaconstate + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/configs/README.md */ ETH_RESULT_USE_CHECK ETHBeaconState *_Nullable ETHBeaconStateCreateFromSsz( @@ -325,8 +325,8 @@ typedef struct ETHLightClientStore ETHLightClientStore; * * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/light-client.md - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md#weak-subjectivity-period + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/weak-subjectivity.md#weak-subjectivity-period */ ETH_RESULT_USE_CHECK ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap( @@ -579,7 +579,7 @@ typedef struct ETHLightClientHeader ETHLightClientHeader; * * @return Latest finalized header. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader */ ETH_RESULT_USE_CHECK const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader( @@ -598,7 +598,7 @@ const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader( * @return Whether or not the next sync committee is currently known. * * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/light-client.md + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md */ ETH_RESULT_USE_CHECK bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store); @@ -614,7 +614,7 @@ bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *stor * * @return Latest optimistic header. 
* - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader */ ETH_RESULT_USE_CHECK const ETHLightClientHeader *ETHLightClientStoreGetOptimisticHeader( @@ -672,7 +672,7 @@ void ETHLightClientHeaderDestroy(ETHLightClientHeader *header); * * @return Pointer to a copy of the given header's beacon block root. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#hash_tree_root + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#hash_tree_root */ ETH_RESULT_USE_CHECK ETHRoot *ETHLightClientHeaderCopyBeaconRoot( @@ -695,7 +695,7 @@ typedef struct ETHBeaconBlockHeader ETHBeaconBlockHeader; * * @return Beacon block header. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#beaconblockheader + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#beaconblockheader */ ETH_RESULT_USE_CHECK const ETHBeaconBlockHeader *ETHLightClientHeaderGetBeacon( @@ -775,7 +775,7 @@ const ETHRoot *ETHBeaconBlockHeaderGetBodyRoot(const ETHBeaconBlockHeader *beaco * * @return Pointer to a copy of the given header's execution block hash. * - * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#executionpayloadheader + * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayloadheader */ ETH_RESULT_USE_CHECK ETHRoot *ETHLightClientHeaderCopyExecutionHash( @@ -1206,6 +1206,11 @@ const ETHTransaction *ETHTransactionsGet( ETH_RESULT_USE_CHECK const ETHRoot *ETHTransactionGetHash(const ETHTransaction *transaction); +/** + * Chain ID. + */ +typedef ETHUInt256 ETHChainId; + /** * Obtains the chain ID of a transaction. * @@ -1218,7 +1223,7 @@ const ETHRoot *ETHTransactionGetHash(const ETHTransaction *transaction); * @return Chain ID. */ ETH_RESULT_USE_CHECK -const uint64_t *ETHTransactionGetChainId(const ETHTransaction *transaction); +const ETHChainId *ETHTransactionGetChainId(const ETHTransaction *transaction); /** * Obtains the from address of a transaction. @@ -1570,7 +1575,7 @@ const ETHAuthorization *ETHAuthorizationListGet( * @return Chain ID. */ ETH_RESULT_USE_CHECK -const uint64_t *ETHAuthorizationGetChainId( +const ETHChainId *ETHAuthorizationGetChainId( const ETHAuthorization *authorization); /** diff --git a/beacon_chain/libnimbus_lc/libnimbus_lc.nim b/beacon_chain/libnimbus_lc/libnimbus_lc.nim index 7a63ccbd3e..b18b1a32a6 100644 --- a/beacon_chain/libnimbus_lc/libnimbus_lc.nim +++ b/beacon_chain/libnimbus_lc/libnimbus_lc.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -77,7 +77,7 @@ proc ETHConsensusConfigCreateFromYaml( ## * `NULL` - If the given `config.yaml` is malformed or incompatible. 
## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/configs/README.md + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/configs/README.md let cfg = RuntimeConfig.new() try: cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0] @@ -142,10 +142,10 @@ proc ETHBeaconStateCreateFromSsz( ## ## See: ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate - ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/configs/README.md + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/configs/README.md let consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: return nil @@ -328,8 +328,8 @@ proc ETHLightClientStoreCreateFromBootstrap( ## See: ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/light-client.md - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md#weak-subjectivity-period + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md#weak-subjectivity-period let mediaType = MediaType.init($mediaType) consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: @@ -735,7 +735,7 @@ func ETHLightClientStoreGetFinalizedHeader( ## * Latest finalized header. ## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader addr store[].finalized_header func ETHLightClientStoreIsNextSyncCommitteeKnown( @@ -754,8 +754,8 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown( ## * Whether or not the next sync committee is currently known. ## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/light-client.md + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md store[].is_next_sync_committee_known func ETHLightClientStoreGetOptimisticHeader( @@ -774,7 +774,7 @@ func ETHLightClientStoreGetOptimisticHeader( ## * Latest optimistic header. 
## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader addr store[].optimistic_header func ETHLightClientStoreGetSafetyThreshold( @@ -795,7 +795,7 @@ func ETHLightClientStoreGetSafetyThreshold( ## * Light client store safety threshold. ## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#get_safety_threshold + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#get_safety_threshold store[].get_safety_threshold.cint proc ETHLightClientHeaderCreateCopy( @@ -841,7 +841,7 @@ proc ETHLightClientHeaderCopyBeaconRoot( ## * Pointer to a copy of the given header's beacon block root. ## ## See: - ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#hash_tree_root + ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#hash_tree_root discard cfg # Future-proof against new fields, see `get_lc_execution_root`. let root = Eth2Digest.new() root[] = header[].beacon.hash_tree_root() @@ -1305,18 +1305,13 @@ proc ETHExecutionBlockHeaderCreateFromJson( doAssert data.withdrawalsRoot.isSome # Checked above wds = newSeqOfCap[ETHWithdrawal](data.withdrawals.get.len) - for data in data.withdrawals.get: + for wd in data.withdrawals.get: # Check fork consistency - static: doAssert totalSerializedFields(WithdrawalObject) == 4, + static: doAssert totalSerializedFields(eth_types.EthWithdrawal) == 4, "Only update this number once code is adjusted to check new fields!" 
# Construct withdrawal let - wd = eth_types.EthWithdrawal( - index: distinctBase(data.index), - validatorIndex: distinctBase(data.validatorIndex), - address: distinctBase(data.address).to(EthAddress), - amount: distinctBase(data.amount)) rlpBytes = try: rlp.encode(wd) @@ -1426,7 +1421,7 @@ type storageKeys: seq[Eth2Digest] ETHAuthorization = object - chainId: uint64 + chainId: ChainId address: ExecutionAddress nonce: uint64 authority: ExecutionAddress @@ -1434,7 +1429,7 @@ type ETHTransaction = object hash: Eth2Digest - chainId: uint64 + chainId: ChainId `from`: ExecutionAddress nonce: uint64 maxPriorityFeePerGas: uint64 @@ -1544,8 +1539,6 @@ proc ETHTransactionsCreateFromJson( # Construct transaction static: - doAssert sizeof(uint64) == sizeof(ChainId) - doAssert sizeof(uint64) == sizeof(data.chainId.get) doAssert sizeof(uint64) == sizeof(data.gas) doAssert sizeof(uint64) == sizeof(data.gasPrice) doAssert sizeof(uint64) == sizeof(data.maxPriorityFeePerGas.get) @@ -1561,13 +1554,12 @@ proc ETHTransactionsCreateFromJson( return nil if data.authorizationList.isSome: for authorization in data.authorizationList.get: - static: doAssert sizeof(uint64) == sizeof(authorization.chainId) - if distinctBase(authorization.v) > uint8.high: + if authorization.v > uint8.high: return nil let tx = eth_types.EthTransaction( txType: txType, - chainId: data.chainId.get(0.Quantity).ChainId, + chainId: data.chainId.get(0.chainId), nonce: distinctBase(data.nonce), gasPrice: data.gasPrice.GasInt, maxPriorityFeePerGas: @@ -1583,9 +1575,7 @@ proc ETHTransactionsCreateFromJson( payload: data.input, accessList: if data.accessList.isSome: - data.accessList.get.mapIt(AccessPair( - address: distinctBase(it.address).to(EthAddress), - storageKeys: it.storageKeys.mapIt(distinctBase(it).to(Bytes32)))) + data.accessList.get else: @[], maxFeePerBlobGas: @@ -1598,13 +1588,7 @@ proc ETHTransactionsCreateFromJson( @[], authorizationList: if data.authorizationList.isSome: - data.authorizationList.get.mapIt(Authorization( - chainId: it.chainId.ChainId, - address: distinctBase(it.address).to(EthAddress), - nonce: distinctBase(it.nonce), - v: distinctBase(it.v), - r: it.r, - s: it.s)) + data.authorizationList.get else: @[], V: distinctBase(data.v), @@ -1615,7 +1599,7 @@ proc ETHTransactionsCreateFromJson( rlp.encode(tx) except RlpError: raiseAssert "Unreachable" - hash = keccakHash(rlpBytes) + hash = keccak256(rlpBytes) if data.hash.asEth2Digest != hash: return nil @@ -1664,7 +1648,7 @@ proc ETHTransactionsCreateFromJson( of DestinationType.Regular: tx.to.get of DestinationType.Create: - let hash = keccakHash(rlp.encodeList(fromAddress, tx.nonce)) + let hash = keccak256(rlp.encodeList(fromAddress, tx.nonce)) hash.to(EthAddress) # Compute authorizations @@ -1676,15 +1660,15 @@ proc ETHTransactionsCreateFromJson( authority = recoverSignerAddress(sig, auth.rlpHashForSigning).valueOr: return nil authorizationList.add ETHAuthorization( - chainId: distinctBase(auth.chainId), + chainId: auth.chainId, address: ExecutionAddress(data: auth.address.data), nonce: auth.nonce, authority: ExecutionAddress(data: authority), signature: @sig) txs.add ETHTransaction( - hash: keccakHash(rlpBytes), - chainId: distinctBase(tx.chainId), + hash: keccak256(rlpBytes), + chainId: tx.chainId, `from`: ExecutionAddress(data: fromAddress), nonce: tx.nonce, maxPriorityFeePerGas: tx.maxPriorityFeePerGas.uint64, @@ -1768,7 +1752,7 @@ func ETHTransactionGetHash( addr transaction[].hash func ETHTransactionGetChainId( - transaction: ptr ETHTransaction): ptr uint64 
{.exported.} = + transaction: ptr ETHTransaction): ptr ChainId {.exported.} = ## Obtains the chain ID of a transaction. ## ## * The returned value is allocated in the given transaction. @@ -2128,7 +2112,7 @@ func ETHAuthorizationListGet( addr authorizationList[][authorizationIndex.int] func ETHAuthorizationGetChainId( - authorization: ptr ETHAuthorization): ptr uint64 {.exported.} = + authorization: ptr ETHAuthorization): ptr ChainId {.exported.} = ## Obtains the chain ID of an authorization tuple. ## ## * The returned value is allocated in the given authorization tuple. @@ -2376,8 +2360,6 @@ proc ETHReceiptsCreateFromJson( return nil if log.blockNumber.get != data.blockNumber: return nil - if log.data.len mod 32 != 0: - return nil if log.topics.len > 4: return nil diff --git a/beacon_chain/libnimbus_lc/test_libnimbus_lc.c b/beacon_chain/libnimbus_lc/test_libnimbus_lc.c index 825fc71584..c47dd29508 100644 --- a/beacon_chain/libnimbus_lc/test_libnimbus_lc.c +++ b/beacon_chain/libnimbus_lc/test_libnimbus_lc.c @@ -1,6 +1,6 @@ /** * beacon_chain - * Copyright (c) 2023-2024 Status Research & Development GmbH + * Copyright (c) 2023-2025 Status Research & Development GmbH * Licensed and distributed under either of * * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). * * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -464,7 +464,7 @@ int main(void) printHexString(transactionHash, sizeof *transactionHash); printf("\n"); - const uint64_t *transactionChainId = ETHTransactionGetChainId(transaction); + const ETHChainId *transactionChainId = ETHTransactionGetChainId(transaction); printf(" - chain_id: "); printHexStringReversed(transactionChainId, sizeof *transactionChainId); printf("\n"); @@ -558,7 +558,7 @@ int main(void) printHexString(authority, sizeof *authority); printf("\n"); - const uint64_t *chainId = ETHAuthorizationGetChainId(authorization); + const ETHChainId *chainId = ETHAuthorizationGetChainId(authorization); printf(" - chain_id: "); printHexStringReversed(chainId, sizeof *chainId); printf("\n"); diff --git a/beacon_chain/light_client.nim b/beacon_chain/light_client.nim index 94b9a3dbe6..3270e115a8 100644 --- a/beacon_chain/light_client.nim +++ b/beacon_chain/light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -389,8 +389,8 @@ proc updateGossipStatus*( currentEpochTargetGossipState = getTargetGossipState( epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, FAR_FUTURE_EPOCH, - isBehind) + cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, + cfg.FULU_FORK_EPOCH, isBehind) targetGossipState = if lcBehind or epoch < 1: currentEpochTargetGossipState @@ -400,8 +400,8 @@ proc updateGossipStatus*( # Therefore, LC topic subscriptions are kept for 1 extra epoch. 
let previousEpochTargetGossipState = getTargetGossipState( epoch - 1, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, FAR_FUTURE_EPOCH, - isBehind) + cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, + cfg.FULU_FORK_EPOCH, isBehind) currentEpochTargetGossipState + previousEpochTargetGossipState template currentGossipState(): auto = lightClient.gossipState @@ -440,4 +440,4 @@ proc updateGossipStatus*( getLightClientOptimisticUpdateTopic(forkDigest), basicParams) - lightClient.gossipState = targetGossipState + lightClient.gossipState = targetGossipState \ No newline at end of file diff --git a/beacon_chain/networking/README.md b/beacon_chain/networking/README.md index 8c04a02ead..257ce21500 100644 --- a/beacon_chain/networking/README.md +++ b/beacon_chain/networking/README.md @@ -1,10 +1,10 @@ # Networking -This folders hold a collection of modules to: +These folders hold a collection of modules to: - configure the Eth2 P2P network - discover, connect, and maintain quality Eth2 peers -Data received is handed other to the `../gossip_processing` modules for validation. +Data received is handed over to the `../gossip_processing` modules for validation. ## Security concerns diff --git a/beacon_chain/networking/eth2_discovery.nim b/beacon_chain/networking/eth2_discovery.nim index 8bb648dbab..359513c29f 100644 --- a/beacon_chain/networking/eth2_discovery.nim +++ b/beacon_chain/networking/eth2_discovery.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -11,7 +11,7 @@ import std/[algorithm, sequtils], chronos, chronicles, eth/p2p/discoveryv5/[enr, protocol, node, random2], - ../spec/datatypes/altair, + ../spec/datatypes/[altair, fulu], ../spec/eth2_ssz_serialization, ".."/[conf, conf_light_client] @@ -127,6 +127,7 @@ proc queryRandom*( forkId: ENRForkID, wantedAttnets: AttnetBits, wantedSyncnets: SyncnetBits, + wantedCgcnets: CgcBits, minScore: int): Future[seq[Node]] {.async: (raises: [CancelledError]).} = ## Perform a discovery query for a random target ## (forkId) and matching at least one of the attestation subnets. 
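`queryRandom` now also takes the wanted custody-group bits and, in the next hunk, awards a discovered node an extra point when its advertised `cgc` ENR value matches what the local node is looking for. A simplified, self-contained model of that scoring is below; the toy record type and the set-based attnet representation are stand-ins for the real discv5/ENR machinery.

```nim
# Simplified scoring sketch (toy types, not the discv5 API): a discovered
# node earns one point per wanted capability it advertises, and callers
# only dial nodes reaching a minimum score.
import std/options

type ToyNodeRecord = object
  attnets: set[uint8]               # advertised attestation subnets
  custodyGroupCount: Option[uint8]  # advertised `cgc`-style ENR value

func score(rec: ToyNodeRecord,
           wantedAttnets: set[uint8],
           wantedCustodyGroups: uint8): int =
  if card(rec.attnets * wantedAttnets) > 0:
    inc result                      # covers at least one wanted attnet
  if rec.custodyGroupCount.isSome and
      rec.custodyGroupCount.get() == wantedCustodyGroups:
    inc result                      # advertises the custody count we want

when isMainModule:
  let node = ToyNodeRecord(
    attnets: {3'u8, 17'u8}, custodyGroupCount: some(8'u8))
  doAssert score(node, wantedAttnets = {17'u8}, wantedCustodyGroups = 8) == 2
  doAssert score(node, wantedAttnets = {40'u8}, wantedCustodyGroups = 4) == 0
```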
@@ -151,13 +152,26 @@ proc queryRandom*( if not forkId.isCompatibleForkId(peerForkId): continue + let cgcCountBytes = n.record.get(enrCustodySubnetCountField, seq[byte]) + if cgcCountBytes.isOk(): + let cgcCountNode = + try: + SSZ.decode(cgcCountBytes.get(), uint8) + except SerializationError as e: + debug "Could not decode the cgc ENR field of peer", + peer = n.record.toURI(), exception = e.name, msg = e.msg + continue + + if wantedCgcnets.countOnes().uint8 == cgcCountNode: + score += 1 + let attnetsBytes = n.record.get(enrAttestationSubnetsField, seq[byte]) if attnetsBytes.isOk(): let attnetsNode = try: SSZ.decode(attnetsBytes.get(), AttnetBits) except SerializationError as e: - debug "Could not decode the attnets ERN bitfield of peer", + debug "Could not decode the attnets ENR bitfield of peer", peer = n.record.toURI(), exception = e.name, msg = e.msg continue diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 9e4c2a7996..66b03dd651 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -69,7 +69,7 @@ type protocols: seq[ProtocolInfo] ## Protocols managed by the DSL and mounted on the switch protocolStates*: seq[RootRef] - metadata*: altair.MetaData + metadata*: fulu.MetaData connectTimeout*: chronos.Duration seenThreshold*: chronos.Duration connQueue: AsyncQueue[PeerAddr] @@ -108,7 +108,7 @@ type lastReqTime*: Moment connections*: int enr*: Opt[enr.Record] - metadata*: Opt[altair.MetaData] + metadata*: Opt[fulu.MetaData] failedMetadataRequests: int lastMetadataTime*: Moment direction*: PeerType @@ -135,7 +135,8 @@ type ## Protocol requests using this type will produce request-making ## client-side procs that return `NetRes[MsgType]` - MultipleChunksResponse*[MsgType; maxLen: static Limit] = distinct UntypedResponse + MultipleChunksResponse*[ + MsgType; maxLen: static Limit] = distinct UntypedResponse ## Protocol requests using this type will produce request-making ## client-side procs that return `NetRes[List[MsgType, maxLen]]`. ## In the future, such procs will return an `InputStream[NetRes[MsgType]]`. @@ -171,7 +172,6 @@ type NetworkStateInitializer* = proc(network: Eth2Node): RootRef {.gcsafe, raises: [].} OnPeerConnectedHandler* = proc(peer: Peer, incoming: bool): Future[void] {.async: (raises: [CancelledError]).} OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.async: (raises: [CancelledError]).} - ThunkProc* = LPProtoHandler MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [].} MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].} @@ -284,9 +284,6 @@ declareGauge nbc_peers, declareCounter nbc_successful_discoveries, "Number of successful discoveries" -declareCounter nbc_failed_discoveries, - "Number of failed discoveries" - declareCounter nbc_cycling_kicked_peers, "Number of peers kicked for peer cycling" @@ -483,15 +480,12 @@ func netKbps*(peer: Peer): float {.inline.} = ## Returns current network throughput average value in Kbps for peer ``peer``. 
round(((peer.netThroughput.average / 1024) * 10_000) / 10_000) -# /!\ Must be exported to be seen by `peerCmp` -func `<`*(a, b: Peer): bool = - ## Comparison function indicating `true` if peer `a` ranks worse than peer `b` - if a.score != b.score: - a.score < b.score - elif a.netThroughput.average != b.netThroughput.average: - a.netThroughput.average < b.netThroughput.average +# /!\ Must be exported to be seen by `peerpool`. +func cmp*(a, b: Peer): int = + if a.score == b.score: + cmp(a.netThroughput.average, b.netThroughput.average) else: - system.`<`(a, b) + cmp(a.score, b.score) const maxRequestQuota = 1000000 @@ -847,8 +841,8 @@ func chunkMaxSize[T](): uint32 = when isFixedSize(T): uint32 fixedPortionSize(T) else: - static: doAssert MAX_CHUNK_SIZE < high(uint32).uint64 - MAX_CHUNK_SIZE.uint32 + static: doAssert MAX_PAYLOAD_SIZE < high(uint32).uint64 + MAX_PAYLOAD_SIZE.uint32 template gossipMaxSize(T: untyped): uint32 = const maxSize = static: @@ -857,20 +851,20 @@ template gossipMaxSize(T: untyped): uint32 = elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock or T is fulu.SignedBeaconBlock: - GOSSIP_MAX_SIZE + MAX_PAYLOAD_SIZE # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all # have lists bounded at MAX_VALIDATORS_PER_COMMITTEE (2048) items, thus - # having max sizes significantly smaller than GOSSIP_MAX_SIZE. + # having max sizes significantly smaller than MAX_PAYLOAD_SIZE. elif T is phase0.Attestation or T is phase0.AttesterSlashing or T is phase0.SignedAggregateAndProof or T is phase0.SignedBeaconBlock or T is electra.SignedAggregateAndProof or T is electra.Attestation or T is electra.AttesterSlashing or T is altair.SignedBeaconBlock or T is SomeForkyLightClientObject: - GOSSIP_MAX_SIZE + MAX_PAYLOAD_SIZE else: {.fatal: "unknown type " & name(T).} - static: doAssert maxSize <= GOSSIP_MAX_SIZE + static: doAssert maxSize <= MAX_PAYLOAD_SIZE maxSize.uint32 proc readVarint2(conn: Connection): Future[NetRes[uint64]] {. @@ -904,7 +898,7 @@ proc readChunkPayload*(conn: Connection, peer: Peer, if size == 0: return neterr ZeroSizePrefix - # The `size.int` conversion is safe because `size` is bounded to `MAX_CHUNK_SIZE` + # The `size.int` conversion is safe because `size` is bounded to `MAX_PAYLOAD_SIZE` let dataRes = await conn.uncompressFramedStream(size.int) data = dataRes.valueOr: @@ -955,15 +949,17 @@ proc readResponseChunk( return await readChunkPayload(conn, peer, MsgType) -proc readResponse(conn: Connection, peer: Peer, - MsgType: type, timeout: Duration): Future[NetRes[MsgType]] - {.async: (raises: [CancelledError]).} = +proc readResponse( + conn: Connection, peer: Peer, + MsgType: type, maxResponseItems: Limit, + timeout: Duration +): Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} = when MsgType is List: type E = MsgType.T var results: MsgType while true: # Because we interleave networking with response processing, it may - # happen that reading all chunks takes longer than a strict dealine + # happen that reading all chunks takes longer than a strict deadline # timeout would allow, so we allow each chunk a new timeout instead. # The problem is exacerbated by the large number of round-trips to the # poll loop that each future along the way causes. 
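The `readResponse` rework in the next hunk threads a per-request `maxResponseItems` limit through to the chunk-reading loop, so a peer cannot push more list items than the request allows even when the SSZ `List` type bound is larger. A toy sketch of the bounding logic, using plain seqs and a hypothetical error enum in place of the real `NetRes` machinery:

```nim
# Toy sketch of the chunk-bounding logic; types and error names are
# stand-ins for illustration.
type
  ChunkError = enum
    ceNone, ceOverflow

func addChunks[T](results: var seq[T], incoming: openArray[T],
                  maxResponseItems, maxLen: int): ChunkError =
  for chunk in incoming:
    # Mirrors `results.len >= maxResponseItems or not results.add(...)`:
    # the per-request budget is checked before the structural list bound.
    if results.len >= maxResponseItems or results.len >= maxLen:
      return ceOverflow
    results.add chunk
  ceNone

when isMainModule:
  var blocks: seq[int]
  doAssert addChunks(blocks, [1, 2, 3],
    maxResponseItems = 2, maxLen = 128) == ceOverflow
  doAssert blocks == @[1, 2]
```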
@@ -982,18 +978,20 @@ proc readResponse(conn: Connection, peer: Peer, return err nextRes.error else: trace "Got chunk", conn - if not results.add nextRes.value: + if results.len >= maxResponseItems or not results.add nextRes.value: return neterr(ResponseChunkOverflow) else: + discard maxResponseItems # Always set to 1 for non-`List` responses let nextFut = conn.readResponseChunk(peer, MsgType) if not await nextFut.withTimeout(timeout): return neterr(ReadResponseTimeout) return await nextFut # Guaranteed to complete without waiting -proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: seq[byte], - ResponseMsg: type, - timeout: Duration): Future[NetRes[ResponseMsg]] - {.async: (raises: [CancelledError]).} = +proc doMakeEth2Request( + peer: Peer, protocolId: string, requestBytes: seq[byte], + ResponseMsg: type, maxResponseItems: Limit, + timeout: Duration +): Future[NetRes[ResponseMsg]] {.async: (raises: [CancelledError]).} = let deadline = sleepAsync timeout streamRes = @@ -1020,7 +1018,8 @@ proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: seq[byte], nbc_reqresp_messages_sent.inc(1, [shortProtocolId(protocolId)]) # Read the response - let res = await readResponse(stream, peer, ResponseMsg, timeout) + let res = await readResponse( + stream, peer, ResponseMsg, maxResponseItems, timeout) if res.isErr(): if res.error().kind in ProtocolViolations: peer.updateScore(PeerScoreInvalidRequest) @@ -1039,6 +1038,31 @@ proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: seq[byte], debug "Unexpected error while closing stream", peer, protocolId, exc = exc.msg +proc makeEth2Request( + peer: Peer, protocolId: string, requestBytes: seq[byte], + ResponseMsg: type, + timeout: Duration +): Future[NetRes[ResponseMsg]] {. + async: (raises: [CancelledError], raw: true).} = + when ResponseMsg is List: + doMakeEth2Request( + peer, protocolId, requestBytes, ResponseMsg, ResponseMsg.maxLen, timeout) + else: + doMakeEth2Request( + peer, protocolId, requestBytes, ResponseMsg, 1.Limit, timeout) + +proc makeEth2Request( + peer: Peer, protocolId: string, requestBytes: seq[byte], + ResponseMsg: type, maxResponseItems: Limit, + timeout: Duration +): Future[NetRes[ResponseMsg]] {. 
+ async: (raises: [CancelledError], raw: true).} = + when ResponseMsg is List: + doMakeEth2Request( + peer, protocolId, requestBytes, ResponseMsg, maxResponseItems, timeout) + else: + static: raiseAssert $ResponseMsg & " does not support `maxResponseItems`" + func init*(T: type MultipleChunksResponse, peer: Peer, conn: Connection): T = T(UntypedResponse(peer: peer, stream: conn)) @@ -1101,7 +1125,7 @@ func setEventHandlers(p: ProtocolInfo, p.onPeerConnected = onPeerConnected p.onPeerDisconnected = onPeerDisconnected -proc implementSendProcBody(sendProc: SendProc) = +proc implementSendProcBody(sendProc: SendProc, isChunkStream: bool) = let msg = sendProc.msg UntypedResponse = bindSym "UntypedResponse" @@ -1112,9 +1136,16 @@ proc implementSendProcBody(sendProc: SendProc) = case msg.kind of msgRequest: let ResponseRecord = msg.response.recName - quote: - makeEth2Request(`peer`, `msgProto`, `bytes`, - `ResponseRecord`, `timeoutVar`) + if isChunkStream: + quote: + makeEth2Request( + `peer`, `msgProto`, `bytes`, + `ResponseRecord`, maxResponseItems, `timeoutVar`) + else: + quote: + makeEth2Request( + `peer`, `msgProto`, `bytes`, + `ResponseRecord`, `timeoutVar`) else: quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`) else: @@ -1508,7 +1539,8 @@ proc trimConnections(node: Eth2Node, count: int) = inc(nbc_cycling_kicked_peers) if toKick <= 0: return -proc getLowSubnets(node: Eth2Node, epoch: Epoch): (AttnetBits, SyncnetBits) = +proc getLowSubnets(node: Eth2Node, epoch: Epoch): + (AttnetBits, SyncnetBits, CgcBits) = # Returns the subnets required to have a healthy mesh # The subnets are computed, to, in order: # - Have 0 subnet with < `dLow` peers from topic subscription @@ -1573,7 +1605,11 @@ proc getLowSubnets(node: Eth2Node, epoch: Epoch): (AttnetBits, SyncnetBits) = if epoch + 1 >= node.cfg.ALTAIR_FORK_EPOCH: findLowSubnets(getSyncCommitteeTopic, SyncSubcommitteeIndex, SYNC_COMMITTEE_SUBNET_COUNT) else: - default(SyncnetBits) + default(SyncnetBits), + if epoch >= node.cfg.FULU_FORK_EPOCH: + findLowSubnets(getDataColumnSidecarTopic, uint64, (DATA_COLUMN_SIDECAR_SUBNET_COUNT).int) + else: + default(CgcBits) ) proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} = @@ -1582,23 +1618,29 @@ proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} = while true: let currentEpoch = node.getBeaconTime().slotOrZero.epoch - (wantedAttnets, wantedSyncnets) = node.getLowSubnets(currentEpoch) + (wantedAttnets, wantedSyncnets, wantedCgcnets) = node.getLowSubnets(currentEpoch) wantedAttnetsCount = wantedAttnets.countOnes() wantedSyncnetsCount = wantedSyncnets.countOnes() + wantedCgcnetsCount = wantedCgcnets.countOnes() outgoingPeers = node.peerPool.lenCurrent({PeerType.Outgoing}) targetOutgoingPeers = max(node.wantedPeers div 10, 3) if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0 or - outgoingPeers < targetOutgoingPeers: + wantedCgcnetsCount > 0 or outgoingPeers < targetOutgoingPeers: let minScore = - if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0: + if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0 or + wantedCgcnetsCount > 0: 1 else: 0 discoveredNodes = await node.discovery.queryRandom( - node.discoveryForkId, wantedAttnets, wantedSyncnets, minScore) + node.discoveryForkId, + wantedAttnets, + wantedSyncnets, + wantedCgcnets, + minScore) let newPeers = block: var np = newSeq[PeerAddr]() @@ -1647,6 +1689,15 @@ proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} = # Also, give some time to dial the discovered nodes and update 
stats etc await sleepAsync(5.seconds) +proc fetchNodeIdFromPeerId*(peer: Peer): NodeId= + # Convert peer id to node id by extracting the peer's public key + let nodeId = + block: + var key: PublicKey + discard peer.peerId.extractPublicKey(key) + keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId() + nodeId + proc resolvePeer(peer: Peer) = # Resolve task which performs searching of peer's public key and recovery of # ENR using discovery5. We only resolve ENR for peers we know about to avoid @@ -1803,7 +1854,7 @@ proc new(T: type Eth2Node, let connectTimeout = chronos.seconds(10) seenThreshold = chronos.seconds(10) - type MetaData = altair.MetaData # Weird bug without this.. + type MetaData = fulu.MetaData # Weird bug without this.. # Versions up to v22.3.0 would write an empty `MetaData` to #`data-dir/node-metadata.json` which would then be reloaded on startup - don't @@ -1841,7 +1892,10 @@ proc new(T: type Eth2Node, quota: TokenBucket.new(maxGlobalQuota, fullReplenishTime) ) - proc peerHook(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe.} = + proc peerHook( + peerId: PeerId, + event: ConnEvent + ): Future[void] {.async: (raises: [CancelledError], raw: true), gcsafe.} = onConnEvent(node, peerId, event) switch.addConnEventHandler(peerHook, ConnEventKind.Connected) @@ -2002,7 +2056,9 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = ## initialize the network object by creating handlers bound to the ## specific network. ## - var userHandlerCall = newTree(nnkDiscardStmt) + var + userHandlerCall = newTree(nnkDiscardStmt) + maxResponseItems: Opt[NimNode] if msg.userHandler != nil: var OutputParamType = if msg.kind == msgRequest: msg.outputParamType @@ -2020,6 +2076,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = let isChunkStream = eqIdent(OutputParamType[0], "MultipleChunksResponse") msg.response.recName = if isChunkStream: + maxResponseItems.ok OutputParamType[2] newTree(nnkBracketExpr, ident"List", OutputParamType[1], OutputParamType[2]) else: OutputParamType[1] @@ -2039,10 +2096,13 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = `userHandlerCall` proc `protocolMounterName`(`networkVar`: `Eth2Node`) {.raises: [].} = - proc snappyThunk(`streamVar`: `Connection`, - `protocolVar`: string): Future[void] {.gcsafe.} = - return handleIncomingStream(`networkVar`, `streamVar`, `protocolVar`, - `MsgStrongRecName`) + proc snappyThunk( + `streamVar`: `Connection`, + `protocolVar`: string + ): Future[void] {. 
+ async: (raises: [CancelledError], raw: true), gcsafe.} = + handleIncomingStream( + `networkVar`, `streamVar`, `protocolVar`, `MsgStrongRecName`) try: mount `networkVar`.switch, @@ -2057,7 +2117,16 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = ## var sendProc = msg.createSendProc() - implementSendProcBody sendProc + if maxResponseItems.isSome: + sendProc.def.params.insert( + sendProc.def.params.len - 1, # Insert before implicit `timeout` param + newTree( + nnkIdentDefs, + ident"maxResponseItems", + bindSym"Limit", + maxResponseItems.get)) + + implementSendProcBody(sendProc, maxResponseItems.isSome) protocol.outProcRegistrations.add( newCall(registerMsg, @@ -2082,12 +2151,33 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = import ./peer_protocol export peer_protocol +func updateMetadataV2ToV3(metadataRes: NetRes[altair.MetaData]): + NetRes[fulu.MetaData] = + if metadataRes.isOk: + let metadata = metadataRes.get + ok(fulu.MetaData(seq_number: metadata.seq_number, + attnets: metadata.attnets, + syncnets: metadata.syncnets)) + else: + err(metadataRes.error) + +proc getMetadata_vx(node: Eth2Node, peer: Peer): + Future[NetRes[fulu.MetaData]] + {.async: (raises: [CancelledError]).} = + let + res = + if node.getBeaconTime().slotOrZero.epoch >= node.cfg.FULU_FORK_EPOCH: + # Directly fetch fulu metadata if available + await getMetadata_v3(peer) + else: + updateMetadataV2ToV3(await getMetadata_v2(peer)) + return res + proc updatePeerMetadata(node: Eth2Node, peerId: PeerId) {.async: (raises: [CancelledError]).} = trace "updating peer metadata", peerId - let peer = node.getPeer(peerId) - newMetadataRes = await peer.getMetadata_v2() + newMetadataRes = await node.getMetadata_vx(peer) newMetadata = newMetadataRes.valueOr: debug "Failed to retrieve metadata from peer!", peerId, error = newMetadataRes.error peer.failedMetadataRequests.inc() @@ -2228,8 +2318,8 @@ proc getPersistentNetKeys*( func gossipId( data: openArray[byte], phase0Prefix, topic: string): seq[byte] = - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#topics-and-messages - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/p2p-interface.md#topics-and-messages + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#topics-and-messages + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#topics-and-messages const MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00] let messageDigest = withEth2Hash: h.update(MESSAGE_DOMAIN_VALID_SNAPPY) @@ -2329,7 +2419,7 @@ proc createEth2Node*(rng: ref HmacDrbgContext, try: # This doesn't have to be a tight bound, just enough to avoid denial of # service attacks. 
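The rename from `GOSSIP_MAX_SIZE`/`MAX_CHUNK_SIZE` to `MAX_PAYLOAD_SIZE` in this file means a single constant now bounds req/resp chunk sizes, the snappy decode limit below, and the pubsub `maxMessageSize`. A minimal sketch of that single-bound idea; the 10 MiB figure is an assumption of the sketch, not a value read out of this patch.

```nim
# Minimal sketch: one shared payload bound. The constant is an assumed
# value for illustration.
const MAX_PAYLOAD_SIZE = 10 * 1024 * 1024

func chunkMaxSize(fixedPortion: int): int =
  ## SSZ types with a fixed encoding are bounded by that encoding;
  ## variable-size types fall back to the shared payload bound.
  if fixedPortion > 0: fixedPortion
  else: MAX_PAYLOAD_SIZE

func acceptUncompressed(len: int): bool =
  ## Gossip messages are rejected once the snappy-decompressed payload
  ## exceeds the same bound.
  len <= MAX_PAYLOAD_SIZE

when isMainModule:
  doAssert chunkMaxSize(fixedPortion = 112) == 112
  doAssert chunkMaxSize(fixedPortion = 0) == MAX_PAYLOAD_SIZE
  doAssert acceptUncompressed(1024)
  doAssert not acceptUncompressed(MAX_PAYLOAD_SIZE + 1)
```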
- let decoded = snappy.decode(m.data, static(GOSSIP_MAX_SIZE.uint32)) + let decoded = snappy.decode(m.data, static(MAX_PAYLOAD_SIZE.uint32)) ok(gossipId(decoded, phase0Prefix, m.topic)) except CatchableError: err(ValidationResult.Reject) @@ -2376,7 +2466,7 @@ proc createEth2Node*(rng: ref HmacDrbgContext, sign = false, verifySignature = false, anonymize = true, - maxMessageSize = static(GOSSIP_MAX_SIZE.int), + maxMessageSize = static(MAX_PAYLOAD_SIZE.int), parameters = params) switch.mount(pubsub) @@ -2397,6 +2487,33 @@ func announcedENR*(node: Eth2Node): enr.Record = doAssert node.discovery != nil, "The Eth2Node must be initialized" node.discovery.localNode.record +proc lookupCgcFromPeer*(peer: Peer): uint64 = + # Fetches the custody column count from a remote peer. + # If the peer advertises their custody column count via the `cgc` ENR field, + # that value is returned. Otherwise, the default value `CUSTODY_REQUIREMENT` + # is assumed. + + let metadata = peer.metadata + if metadata.isOk: + return metadata.get.custody_group_count + + # Try getting the custody count from ENR if metadata fetch fails. + debug "Could not get cgc from metadata, trying from ENR", + peer_id = peer.peerId + let enrOpt = peer.enr + if not enrOpt.isNone: + let enr = enrOpt.get + let enrFieldOpt = enr.get(enrCustodySubnetCountField, seq[byte]) + if enrFieldOpt.isOk: + try: + let cgc = SSZ.decode(enrFieldOpt.get, uint8) + return cgc.uint64 + except SszError, SerializationError: + discard # Ignore decoding errors and fallback to default + + # Return default value if no valid custody subnet count is found. + return CUSTODY_REQUIREMENT.uint64 + func shortForm*(id: NetKeyPair): string = $PeerId.init(id.pubkey) @@ -2489,7 +2606,7 @@ func gossipEncode(msg: auto): seq[byte] = let uncompressed = SSZ.encode(msg) # This function only for messages we create. A message this large amounts to # an internal logic error. 
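lookupCgcFromPeer above resolves a peer's custody group count with a three-step fallback: the value from the peer's metadata when available, else the `cgc` field from its ENR, else the spec minimum CUSTODY_REQUIREMENT. A small sketch of the same decision chain follows; PeerCustodyInfo, decodeCgc and the sample values are hypothetical stand-ins for the real Peer, ENR lookup and SSZ decoding:

import std/options

const CUSTODY_REQUIREMENT = 4'u64   # spec default assumed for this sketch

type
  # Hypothetical view of what may be known about a peer.
  PeerCustodyInfo = object
    metadataCgc: Option[uint64]     # from a successful metadata exchange
    enrCgcField: Option[seq[byte]]  # raw `cgc` ENR field, if advertised

func decodeCgc(raw: seq[byte]): Option[uint64] =
  ## Stand-in for SSZ-decoding a uint8 custody group count.
  if raw.len == 1:
    some(uint64(raw[0]))
  else:
    none(uint64)

func lookupCgc(info: PeerCustodyInfo): uint64 =
  # 1. Prefer the count the peer reported in its metadata.
  if info.metadataCgc.isSome:
    return info.metadataCgc.get()
  # 2. Fall back to the ENR `cgc` field, ignoring undecodable values.
  if info.enrCgcField.isSome:
    let decoded = decodeCgc(info.enrCgcField.get())
    if decoded.isSome:
      return decoded.get()
  # 3. Otherwise assume the minimum every node must custody.
  CUSTODY_REQUIREMENT

when isMainModule:
  doAssert lookupCgc(PeerCustodyInfo()) == CUSTODY_REQUIREMENT
  doAssert lookupCgc(PeerCustodyInfo(enrCgcField: some(@[8'u8]))) == 8
  doAssert lookupCgc(PeerCustodyInfo(metadataCgc: some(128'u64))) == 128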
- doAssert uncompressed.lenu64 <= GOSSIP_MAX_SIZE + doAssert uncompressed.lenu64 <= MAX_PAYLOAD_SIZE snappy.encode(uncompressed) @@ -2519,7 +2636,7 @@ proc broadcast(node: Eth2Node, topic: string, msg: auto): proc subscribeAttestationSubnets*( node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) = - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#attestations-and-aggregation + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#attestations-and-aggregation # Nimbus won't score attestation subnets for now, we just rely on block and # aggregate which are more stable and reliable @@ -2530,10 +2647,7 @@ proc subscribeAttestationSubnets*( proc unsubscribeAttestationSubnets*( node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) = - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#attestations-and-aggregation - # Nimbus won't score attestation subnets for now; we just rely on block and - # aggregate which are more stable and reliable - + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#attestations-and-aggregation for subnet_id, enabled in subnets: if enabled: node.unsubscribe(getAttestationTopic(forkDigest, SubnetId(subnet_id))) @@ -2546,7 +2660,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) = node.metadata.seq_number += 1 node.metadata.attnets = attnets - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#attestation-subnet-subscription + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#attestation-subnet-subscription # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#attestation-subnet-bitfield let res = node.discovery.updateRecord({ enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets) @@ -2558,8 +2672,22 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) = else: debug "Stability subnets changed; updated ENR attnets", attnets +proc loadCgcnetMetadataAndEnr*(node: Eth2Node, cgcnets: CgcCount) = + node.metadata.custody_group_count = cgcnets.uint64 + let res = + node.discovery.updateRecord({ + enrCustodySubnetCountField: SSZ.encode(cgcnets) + }) + + if res.isErr: + # This should not occur in this scenario as the private key would always + # be the correct one and the ENR will not increase in size + warn "Failed to update the ENR cgc field", error = res.error + else: + debug "Updated ENR cgc", cgcnets + proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) = - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#sync-committee-subnet-stability + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/validator.md#sync-committee-subnet-stability if node.metadata.syncnets == syncnets: return @@ -2598,10 +2726,10 @@ proc getWallEpoch(node: Eth2Node): Epoch = proc broadcastAttestation*( node: Eth2Node, subnet_id: SubnetId, - attestation: phase0.Attestation | electra.Attestation): + attestation: phase0.Attestation | SingleAttestation): Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = # Regardless of the contents of the attestation, - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/p2p-interface.md#transitioning-the-gossip + # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/p2p-interface.md#transitioning-the-gossip # implies that pre-fork, messages using post-fork digests might be # ignored, whilst post-fork, there is effectively a seen_ttl-based # timer unsubscription point that means no new pre-fork-forkdigest diff --git a/beacon_chain/networking/eth2_protocol_dsl.nim b/beacon_chain/networking/eth2_protocol_dsl.nim index 0d2668ccd4..36473bdb06 100644 --- a/beacon_chain/networking/eth2_protocol_dsl.nim +++ b/beacon_chain/networking/eth2_protocol_dsl.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -132,7 +132,6 @@ let responseVar* {.compileTime.} = ident "response" streamVar* {.compileTime.} = ident "stream" protocolVar* {.compileTime.} = ident "protocol" - deadlineVar* {.compileTime.} = ident "deadline" timeoutVar* {.compileTime.} = ident "timeout" currentProtocolSym* {.compileTime.} = ident "CurrentProtocol" resultIdent* {.compileTime.} = ident "result" @@ -904,4 +903,3 @@ macro emitForAllBackends(backendSyms: typed, options: untyped, body: untyped): u template p2pProtocol*(options: untyped, body: untyped) {.dirty.} = bind emitForAllBackends emitForAllBackends(p2pProtocolBackendImpl, options, body) - diff --git a/beacon_chain/networking/network_metadata.nim b/beacon_chain/networking/network_metadata.nim index 7fd47d9660..f3731c6269 100644 --- a/beacon_chain/networking/network_metadata.nim +++ b/beacon_chain/networking/network_metadata.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -18,7 +18,7 @@ import from std/sequtils import deduplicate, filterIt, mapIt from std/strutils import - escape, parseBiggestUInt, replace, splitLines, startsWith, strip, + endsWith, escape, parseBiggestUInt, replace, splitLines, startsWith, strip, toLowerAscii # TODO(zah): @@ -49,6 +49,7 @@ type mainnet sepolia holesky + hoodi GenesisMetadataKind* = enum NoGenesis @@ -91,23 +92,41 @@ type func hasGenesis*(metadata: Eth2NetworkMetadata): bool = metadata.genesis.kind != NoGenesis -proc readBootstrapNodes*(path: string): seq[string] {.raises: [IOError].} = +proc readBootstrapNodes(path: string): seq[string] {.raises: [IOError].} = # Read a list of ENR values from a YAML file containing a flat list of entries + var res: seq[string] if fileExists(path): - splitLines(readFile(path)). - filterIt(it.startsWith("enr:")). 
- mapIt(it.strip()) - else: - @[] + for line in splitLines(readFile(path)): + let line = line.strip() + if line.startsWith("enr:"): + res.add line + elif line.len == 0 or line.startsWith("#"): + discard + else: + when nimvm: + raiseAssert "Bootstrap node invalid (" & path & "): " & line + else: + warn "Ignoring invalid bootstrap node", path, bootstrapNode = line + res -proc readBootEnr*(path: string): seq[string] {.raises: [IOError].} = +proc readBootEnr(path: string): seq[string] {.raises: [IOError].} = # Read a list of ENR values from a YAML file containing a flat list of entries + var res: seq[string] if fileExists(path): - splitLines(readFile(path)). - filterIt(it.startsWith("- enr:")). - mapIt(it[2..^1].strip()) - else: - @[] + for line in splitLines(readFile(path)): + let line = line.strip() + if line.startsWith("- enr:"): + res.add line[2 .. ^1] + elif line.startsWith("- \"enr:") and line.endsWith("\""): + res.add line[3 .. ^2] # Gnosis Chiado `boot_enr.yaml` + elif line.len == 0 or line.startsWith("#"): + discard + else: + when nimvm: + raiseAssert "Bootstrap ENR invalid (" & path & "): " & line + else: + warn "Ignoring invalid bootstrap ENR", path, bootstrapEnr = line + res proc loadEth2NetworkMetadata*( path: string, @@ -126,7 +145,8 @@ proc loadEth2NetworkMetadata*( deployBlockPath = path & "/deploy_block.txt" depositContractBlockPath = path & "/deposit_contract_block.txt" depositContractBlockHashPath = path & "/deposit_contract_block_hash.txt" - bootstrapNodesPath = path & "/bootstrap_nodes.txt" + bootstrapNodesLegacyPath = path & "/bootstrap_nodes.txt" # <= Dec 2024 + bootstrapNodesPath = path & "/bootstrap_nodes.yaml" bootEnrPath = path & "/boot_enr.yaml" runtimeConfig = if fileExists(configPath): let (cfg, unknowns) = readRuntimeConfig(configPath) @@ -178,7 +198,8 @@ proc loadEth2NetworkMetadata*( default(Eth2Digest) bootstrapNodes = deduplicate( - readBootstrapNodes(bootstrapNodesPath) & + readBootstrapNodes(bootstrapNodesLegacyPath) & + readBootEnr(bootstrapNodesPath) & readBootEnr(bootEnrPath)) ok Eth2NetworkMetadata( @@ -265,14 +286,10 @@ when const_preset == "gnosis": static: for network in [gnosisMetadata, chiadoMetadata]: checkForkConsistency(network.cfg) - - for network in [gnosisMetadata, chiadoMetadata]: - doAssert network.cfg.DENEB_FORK_EPOCH < FAR_FUTURE_EPOCH - doAssert network.cfg.ELECTRA_FORK_EPOCH == FAR_FUTURE_EPOCH + doAssert network.cfg.ELECTRA_FORK_EPOCH < FAR_FUTURE_EPOCH doAssert network.cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH doAssert ConsensusFork.high == ConsensusFork.Fulu - elif const_preset == "mainnet": when incbinEnabled: # Nim is very inefficent at loading large constants from binary files so we @@ -304,6 +321,11 @@ elif const_preset == "mainnet": Opt.some mainnet, useBakedInGenesis = Opt.some "mainnet") + sepoliaMetadata = loadCompileTimeNetworkMetadata( + vendorDir & "/sepolia/metadata", + Opt.some sepolia, + useBakedInGenesis = Opt.some "sepolia") + holeskyMetadata = loadCompileTimeNetworkMetadata( vendorDir & "/holesky/metadata", Opt.some holesky, @@ -311,18 +333,38 @@ elif const_preset == "mainnet": url: "https://github.com/status-im/nimbus-eth2/releases/download/v23.9.1/holesky-genesis.ssz.sz", digest: Eth2Digest.fromHex "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0")) - sepoliaMetadata = loadCompileTimeNetworkMetadata( - vendorDir & "/sepolia/metadata", - Opt.some sepolia, - useBakedInGenesis = Opt.some "sepolia") + # File can be reproduced by `cd vendor/hoodi`, then `git lfs install` and + # `git lfs pull`, and then 
from repo root: + # + # let + # orig = io2.readAllBytes("./vendor/hoodi/metadata/genesis.ssz").get + # enc = encodeFramed(orig) + # discard secureWriteFile("hoodi-genesis.ssz.sz", enc) + # let + # dec = io2.readAllBytes("hoodi-genesis.ssz.sz").get + # res = decodeFramed(dec) + # state = newClone(readSszForkedHashedBeaconState( + # getMetadataForNetwork("hoodi").cfg, res)) + # withState(state[]): + # echo $forkyState.root + # + # Uploading as release is recommended according to guidance from Github: + # > We don't limit the total size of the binary files in the release or the + # bandwidth used to deliver them. However, each individual file must be + # smaller than 2 GiB. + # - https://docs.github.com/en/repositories/working-with-files/managing-large-files/about-large-files-on-github#distributing-large-binaries + hoodiMetadata = loadCompileTimeNetworkMetadata( + vendorDir & "/hoodi/metadata", + Opt.some hoodi, + downloadGenesisFrom = Opt.some DownloadInfo( + url: "https://github.com/eth-clients/hoodi/releases/download/genesis/hoodi-genesis.ssz.sz", + digest: Eth2Digest.fromHex "0x2683ebc120f91f740c7bed4c866672d01e1ba51b4cc360297138465ee5df40f0")) static: - for network in [mainnetMetadata, sepoliaMetadata, holeskyMetadata]: + for network in [ + mainnetMetadata, sepoliaMetadata, holeskyMetadata, hoodiMetadata]: checkForkConsistency(network.cfg) - - for network in [mainnetMetadata, sepoliaMetadata, holeskyMetadata]: - doAssert network.cfg.DENEB_FORK_EPOCH < FAR_FUTURE_EPOCH - doAssert network.cfg.ELECTRA_FORK_EPOCH == FAR_FUTURE_EPOCH + doAssert network.cfg.ELECTRA_FORK_EPOCH < FAR_FUTURE_EPOCH doAssert network.cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH doAssert ConsensusFork.high == ConsensusFork.Fulu @@ -366,6 +408,8 @@ proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata = case toLowerAscii(networkName) of "mainnet": mainnetMetadata + of "hoodi": + hoodiMetadata of "holesky": holeskyMetadata of "sepolia": @@ -461,4 +505,4 @@ else: raiseAssert "Baked genesis states are not available in the current build mode" func bakedGenesisValidatorsRoot*(metadata: Eth2NetworkMetadata): Opt[Eth2Digest] = - Opt.none Eth2Digest + Opt.none Eth2Digest \ No newline at end of file diff --git a/beacon_chain/networking/network_metadata_downloads.nim b/beacon_chain/networking/network_metadata_downloads.nim index adeaa65e64..490743a995 100644 --- a/beacon_chain/networking/network_metadata_downloads.nim +++ b/beacon_chain/networking/network_metadata_downloads.nim @@ -1,10 +1,12 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +{.push raises: [].} + import std/uri, stew/io2, chronos, chronos/apps/http/httpclient, snappy, @@ -41,7 +43,7 @@ proc fetchGenesisBytes*( result = await downloadFile(genesisStateUrlOverride.get(parseUri metadata.genesis.url)) # Under the built-in default URL, we serve a snappy-encoded BeaconState in order # to reduce the size of the downloaded file with roughly 50% (this precise ratio - # depends on the number of validator recors). The user is still free to provide + # depends on the number of validator records). 
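The readBootstrapNodes/readBootEnr rewrite earlier in this file replaces the old filterIt/mapIt one-liners with an explicit loop, so that blank lines and comments are skipped, quoted `- "enr:..."` entries (as found in Gnosis Chiado's boot_enr.yaml) are unwrapped, and anything unrecognised is reported rather than silently dropped. Here is a reduced sketch of that parsing over an in-memory string, without the file I/O or the compile-time/runtime warning split; parseBootEnr and the sample entries are illustrative only:

import std/strutils

proc parseBootEnr(contents: string): tuple[enrs: seq[string], rejected: seq[string]] =
  ## Accepts flat "enr:" lines as well as YAML-style `- enr:...` and
  ## `- "enr:..."` entries; blank lines and comments are ignored.
  for rawLine in splitLines(contents):
    let line = rawLine.strip()
    if line.startsWith("enr:"):
      result.enrs.add line
    elif line.startsWith("- enr:"):
      result.enrs.add line[2 .. ^1]
    elif line.startsWith("- \"enr:") and line.endsWith("\""):
      result.enrs.add line[3 .. ^2]
    elif line.len == 0 or line.startsWith("#"):
      discard
    else:
      result.rejected.add line

when isMainModule:
  let (enrs, rejected) = parseBootEnr("""
# comment
- enr:-abc
- "enr:-def"
enr:-ghi
not-an-enr
""")
  doAssert enrs == @["enr:-abc", "enr:-def", "enr:-ghi"]
  doAssert rejected == @["not-an-enr"]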
The user is still free to provide # any URL which may serve an uncompressed state (e.g. a Beacon API endpoint) # # Since a SSZ-encoded BeaconState will start with a LittleEndian genesis time @@ -80,3 +82,9 @@ when isMainModule: "holesky-genesis.ssz", waitFor holeskyMetadata.fetchGenesisBytes() ).expect("success") + + let hoodiMetadata = getMetadataForNetwork("hoodi") + io2.writeFile( + "hoodi-genesis.ssz", + waitFor hoodiMetadata.fetchGenesisBytes() + ).expect("success") diff --git a/beacon_chain/networking/peer_pool.nim b/beacon_chain/networking/peer_pool.nim index 611c86e304..a03342bf59 100644 --- a/beacon_chain/networking/peer_pool.nim +++ b/beacon_chain/networking/peer_pool.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -7,7 +7,7 @@ {.push raises: [].} -import std/[tables, heapqueue] +import std/[tables, heapqueue, algorithm, sequtils, typetraits] import chronos export tables @@ -19,9 +19,6 @@ type PeerFlags = enum Acquired, DeleteOnRelease - EventType = enum - NotEmptyEvent, NotFullEvent - PeerStatus* = enum Success, ## Peer was successfully added to PeerPool. DuplicateError, ## Peer is already present in PeerPool. @@ -29,15 +26,15 @@ type LowScoreError, ## Peer has too low score. DeadPeerError ## Peer is already dead. + PeerIndex = distinct int + # Distinct type is important here, because we are using custom sorting + # functions which are not compatible with integer behavior. + PeerItem[T] = object data: T peerType: PeerType flags: set[PeerFlags] - index: int - - PeerIndex = object - data: int - cmp: proc(a, b: PeerIndex): bool {.gcsafe, raises: [].} + index: PeerIndex PeerScoreCheckCallback*[T] = proc(peer: T): bool {.gcsafe, raises: [].} @@ -45,16 +42,14 @@ type PeerOnDeleteCallback*[T] = proc(peer: T) {.gcsafe, raises: [].} + PeerCustomFilterCallback*[T] = proc(peer: T): bool {.gcsafe, raises: [].} + PeerPool*[A, B] = ref object - incNotEmptyEvent*: AsyncEvent - outNotEmptyEvent*: AsyncEvent - incNotFullEvent*: AsyncEvent - outNotFullEvent*: AsyncEvent - incQueue: HeapQueue[PeerIndex] - outQueue: HeapQueue[PeerIndex] - registry: Table[B, PeerIndex] + changeEvent: AsyncEvent storage: seq[PeerItem[A]] - cmp: proc(a, b: PeerIndex): bool {.gcsafe, raises: [].} + registry: Table[B, PeerIndex] + sorted: seq[PeerIndex] + empties: seq[PeerIndex] scoreCheck: PeerScoreCheckCallback[A] onDeletePeer: PeerOnDeleteCallback[A] peerCounter: PeerCounterCallback @@ -68,91 +63,48 @@ type PeerPoolError* = object of CatchableError -proc `<`*(a, b: PeerIndex): bool = - ## PeerIndex ``a`` holds reference to ``cmp()`` procedure which has captured - ## PeerPool instance. 
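The comment above is the reason PeerIndex became a `distinct int`: an index must never be ordered by its integer value, only through the pool's own comparison of the peers it refers to, so only the operations that are explicitly borrowed or defined exist on it. A tiny illustration of the pattern with a hypothetical ItemIndex:

type
  ItemIndex = distinct int   # hypothetical stand-in for PeerIndex

func `==`(a, b: ItemIndex): bool {.borrow.}
func `$`(a: ItemIndex): string {.borrow.}

when isMainModule:
  let a = ItemIndex(1)
  doAssert a == ItemIndex(1)
  doAssert $a == "1"
  # Ordering is deliberately unavailable: `a < ItemIndex(2)` does not compile,
  # so ranking has to go through a comparison of the referenced peers instead.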
- a.cmp(b, a) - -proc fireNotEmptyEvent[A, B](pool: PeerPool[A, B], - item: PeerItem[A]) = - case item.peerType: - of PeerType.Incoming: - pool.incNotEmptyEvent.fire() - of PeerType.Outgoing: - pool.outNotEmptyEvent.fire() - -proc fireNotFullEvent[A, B](pool: PeerPool[A, B], - item: PeerItem[A]) = - case item.peerType: - of PeerType.Incoming: - pool.incNotFullEvent.fire() - of PeerType.Outgoing: - pool.outNotFullEvent.fire() +func `==`*(a, b: PeerIndex): bool {.borrow.} iterator pairs*[A, B](pool: PeerPool[A, B]): (B, A) = - for peerId, peerIdx in pool.registry: - yield (peerId, pool.storage[peerIdx.data].data) - -template incomingEvent(eventType: EventType): AsyncEvent = - case eventType - of EventType.NotEmptyEvent: - pool.incNotEmptyEvent - of EventType.NotFullEvent: - pool.incNotFullEvent - -template outgoingEvent(eventType: EventType): AsyncEvent = - case eventType - of EventType.NotEmptyEvent: - pool.outNotEmptyEvent - of EventType.NotFullEvent: - pool.outNotFullEvent - -proc waitForEvent[A, B](pool: PeerPool[A, B], eventType: EventType, - filter: set[PeerType]) {.async: (raises: [CancelledError]).} = - if filter == {PeerType.Incoming, PeerType.Outgoing} or filter == {}: - var fut1 = incomingEvent(eventType).wait() - var fut2 = outgoingEvent(eventType).wait() - try: - try: - discard await one(fut1, fut2) - except ValueError: - raiseAssert "one precondition satisfied" - if fut1.finished(): - if not(fut2.finished()): - await fut2.cancelAndWait() - incomingEvent(eventType).clear() - else: - if not(fut1.finished()): - await fut1.cancelAndWait() - outgoingEvent(eventType).clear() - except CancelledError as exc: - var pending: seq[FutureBase] - if not(fut1.finished()): - pending.add(fut1.cancelAndWait()) - if not(fut2.finished()): - pending.add(fut2.cancelAndWait()) - await noCancel allFutures(pending) - raise exc - elif PeerType.Incoming in filter: - await incomingEvent(eventType).wait() - incomingEvent(eventType).clear() - elif PeerType.Outgoing in filter: - await outgoingEvent(eventType).wait() - outgoingEvent(eventType).clear() - -proc waitNotEmptyEvent[A, B](pool: PeerPool[A, B], - filter: set[PeerType]) {.async: (raises: [CancelledError], raw: true).} = - pool.waitForEvent(EventType.NotEmptyEvent, filter) - -proc waitNotFullEvent[A, B](pool: PeerPool[A, B], - filter: set[PeerType]){.async: (raises: [CancelledError], raw: true).} = - pool.waitForEvent(EventType.NotFullEvent, filter) - -proc newPeerPool*[A, B](maxPeers = -1, maxIncomingPeers = -1, - maxOutgoingPeers = -1, - scoreCheckCb: PeerScoreCheckCallback[A] = nil, - peerCounterCb: PeerCounterCallback = nil, - onDeleteCb: PeerOnDeleteCallback[A] = nil): PeerPool[A, B] = + for peerId, pindex in pool.registry: + yield (peerId, pool.storage[distinctBase(pindex)].data) + +proc resort[A, B]( + pool: PeerPool[A, B], + unsorted: var openArray[PeerIndex] +) = + mixin `cmp` + proc pcmp(a, b: PeerIndex): int {.closure, raises: [].} = + cmp(pool.storage[distinctBase(a)].data, pool.storage[distinctBase(b)].data) + unsorted.sort(pcmp, order = SortOrder.Descending) + +proc resorted[A, B]( + pool: PeerPool[A, B], + unsorted: openArray[PeerIndex] +): seq[PeerIndex] = + var res = @unsorted + pool.resort(res) + res + +proc addToStorage[A, B](pool: PeerPool[A, B], item: PeerItem[A]): PeerIndex = + var indexedItem = item + if len(pool.empties) > 0: + indexedItem.index = pool.empties[0] + pool.storage[distinctBase(indexedItem.index)] = indexedItem + pool.empties.del(0) + else: + indexedItem.index = PeerIndex(len(pool.storage)) + 
pool.storage.add(indexedItem) + indexedItem.index + +proc newPeerPool*[A, B]( + maxPeers = -1, + maxIncomingPeers = -1, + maxOutgoingPeers = -1, + scoreCheckCb: PeerScoreCheckCallback[A] = nil, + peerCounterCb: PeerCounterCallback = nil, + onDeleteCb: PeerOnDeleteCallback[A] = nil +): PeerPool[A, B] = ## Create new PeerPool. ## ## ``maxPeers`` - maximum number of peers allowed. All the peers which @@ -178,40 +130,35 @@ proc newPeerPool*[A, B](maxPeers = -1, maxIncomingPeers = -1, ## ## Please note, that if ``maxPeers`` is positive non-zero value, then equation ## ``maxPeers >= maxIncomingPeers + maxOutgoingPeers`` must be ``true``. - var res = PeerPool[A, B]() if maxPeers != -1: doAssert(maxPeers >= maxIncomingPeers + maxOutgoingPeers) - res.maxPeersCount = if maxPeers < 0: high(int) else: maxPeers - res.maxIncPeersCount = - if maxIncomingPeers < 0: - high(int) - else: - maxIncomingPeers - res.maxOutPeersCount = - if maxOutgoingPeers < 0: - high(int) - else: - maxOutgoingPeers - - res.incNotEmptyEvent = newAsyncEvent() - res.outNotEmptyEvent = newAsyncEvent() - res.incNotFullEvent = newAsyncEvent() - res.outNotFullEvent = newAsyncEvent() - res.incQueue = initHeapQueue[PeerIndex]() - res.outQueue = initHeapQueue[PeerIndex]() - res.registry = initTable[B, PeerIndex]() - res.scoreCheck = scoreCheckCb - res.peerCounter = peerCounterCb - res.onDeletePeer = onDeleteCb - res.storage = newSeq[PeerItem[A]]() - - proc peerCmp(a, b: PeerIndex): bool {.closure, gcsafe.} = - let p1 = res.storage[a.data].data - let p2 = res.storage[b.data].data - p1 < p2 - - res.cmp = peerCmp + let + maxPeersCount = if maxPeers < 0: high(int) else: maxPeers + maxIncPeersCount = + if maxIncomingPeers < 0: + high(int) + else: + maxIncomingPeers + maxOutPeersCount = + if maxOutgoingPeers < 0: + high(int) + else: + maxOutgoingPeers + res = PeerPool[A, B]( + changeEvent: newAsyncEvent(), + registry: initTable[B, PeerIndex](), + scoreCheck: scoreCheckCb, + peerCounter: peerCounterCb, + onDeletePeer: onDeleteCb, + maxPeersCount: maxPeersCount, + maxIncPeersCount: maxIncPeersCount, + maxOutPeersCount: maxOutPeersCount, + curIncPeersCount: 0, + curOutPeersCount: 0, + acqIncPeersCount: 0, + acqOutPeersCount: 0 + ) res proc len*[A, B](pool: PeerPool[A, B]): int = @@ -221,50 +168,86 @@ proc len*[A, B](pool: PeerPool[A, B]): int = proc lenCurrent*[A, B](pool: PeerPool[A, B], filter = {PeerType.Incoming, - PeerType.Outgoing}): int {.inline.} = + PeerType.Outgoing}): int = ## Returns number of registered peers in PeerPool ``pool`` which satisfies ## filter ``filter``. (if PeerType.Incoming in filter: pool.curIncPeersCount else: 0) + (if PeerType.Outgoing in filter: pool.curOutPeersCount else: 0) -proc lenAvailable*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): int {.inline.} = - ## Returns number of available peers in PeerPool ``pool`` which satisfies - ## filter ``filter``. - (if PeerType.Incoming in filter: len(pool.incQueue) else: 0) + - (if PeerType.Outgoing in filter: len(pool.outQueue) else: 0) +proc lenAvailable*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): int = + ## Returns number of peers available for acquisition in PeerPool + ## ``pool`` which satisfies filter ``filter``. 
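resort and addToStorage above replace the two heap queues with one flat storage seq plus a free-list of vacated slots, and keep a separate `sorted` view of indices that is re-sorted (best peer first) through a closure whenever membership changes. The following self-contained sketch shows that bookkeeping, with Pool, Entry and SlotIndex as simplified stand-ins and an integer score in place of the peer comparison:

import std/[algorithm, sequtils]

type
  SlotIndex = distinct int
  Entry = object
    score: int
    used: bool
  Pool = ref object
    storage: seq[Entry]       # flat storage, slots are reused
    sorted: seq[SlotIndex]    # live items, best score first
    empties: seq[SlotIndex]   # vacated slots available for reuse

func `==`(a, b: SlotIndex): bool {.borrow.}

proc resort(pool: Pool) =
  proc byScore(a, b: SlotIndex): int =
    cmp(pool.storage[int(a)].score, pool.storage[int(b)].score)
  pool.sorted.sort(byScore, order = SortOrder.Descending)

proc add(pool: Pool, score: int): SlotIndex =
  ## Reuse a vacated slot when one exists, otherwise grow the storage.
  if pool.empties.len > 0:
    result = pool.empties[0]
    pool.empties.del(0)
    pool.storage[int(result)] = Entry(score: score, used: true)
  else:
    result = SlotIndex(pool.storage.len)
    pool.storage.add(Entry(score: score, used: true))
  pool.sorted.add(result)
  pool.resort()

proc remove(pool: Pool, idx: SlotIndex) =
  pool.storage[int(idx)] = Entry()     # reset the slot ...
  pool.empties.add(idx)                # ... and put it on the free-list
  let s = pool.sorted.find(idx)
  if s >= 0:
    pool.sorted.delete(s)

when isMainModule:
  let pool = Pool()
  let a = pool.add(10)
  discard pool.add(30)
  discard pool.add(20)
  doAssert pool.sorted.mapIt(pool.storage[int(it)].score) == @[30, 20, 10]
  pool.remove(a)
  doAssert pool.add(5) == a            # the vacated slot gets reused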
+ (if PeerType.Incoming in filter: + pool.curIncPeersCount - pool.acqIncPeersCount + else: + 0) + + (if PeerType.Outgoing in filter: + pool.curOutPeersCount - pool.acqOutPeersCount + else: + 0) + +proc lenAvailable*[A, B]( + pool: PeerPool[A, B], + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): int = + ## Returns number of peers available for acquisition in PeerPool + ## ``pool`` which satisfies filter ``filter`` and custom filter + ## ``customFilter``. + ## Note: This is O(n) operation. + let available = pool.lenAvailable(filter) + var res = 0 + for pindex in pool.sorted.items(): + let item = addr(pool.storage[distinctBase(pindex)]) + if (PeerFlags.Acquired notin item[].flags) and + (item[].peerType in filter) and + (isNil(customFilter) or customFilter(item[].data)): + inc(res) + if res == available: + # Number of customly filtered items could not be higher than number of + # peers of specific directions. + break + res -proc lenAcquired*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): int {.inline.} = +proc lenAcquired*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): int = ## Returns number of acquired peers in PeerPool ``pool`` which satisifies ## filter ``filter``. (if PeerType.Incoming in filter: pool.acqIncPeersCount else: 0) + (if PeerType.Outgoing in filter: pool.acqOutPeersCount else: 0) -proc lenSpace*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): int {.inline.} = +proc lenSpace*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): int = ## Returns number of available space for peers in PeerPool ``pool`` which ## satisfies filter ``filter``. - let curPeersCount = pool.curIncPeersCount + pool.curOutPeersCount - let totalSpace = pool.maxPeersCount - curPeersCount - let incoming = min(totalSpace, pool.maxIncPeersCount - pool.curIncPeersCount) - let outgoing = min(totalSpace, pool.maxOutPeersCount - pool.curOutPeersCount) + let + curPeersCount = pool.curIncPeersCount + pool.curOutPeersCount + spaceAvailable = pool.maxPeersCount - curPeersCount + incoming = min(spaceAvailable, + pool.maxIncPeersCount - pool.curIncPeersCount) + outgoing = min(spaceAvailable, + pool.maxOutPeersCount - pool.curOutPeersCount) if filter == {PeerType.Incoming, PeerType.Outgoing}: # To avoid overflow check we need to check by ourself. if uint64(incoming) + uint64(outgoing) > uint64(high(int)): - min(totalSpace, high(int)) + min(spaceAvailable, high(int)) else: - min(totalSpace, incoming + outgoing) + min(spaceAvailable, incoming + outgoing) elif PeerType.Incoming in filter: incoming else: outgoing proc shortLogAvailable*[A, B](pool: PeerPool[A, B]): string = - $len(pool.incQueue) & "/" & $len(pool.outQueue) + $pool.lenAvailable({PeerType.Incoming}) & "/" & + $pool.lenAvailable({PeerType.Outgoing}) proc shortLogAcquired*[A, B](pool: PeerPool[A, B]): string = $pool.acqIncPeersCount & "/" & $pool.acqOutPeersCount @@ -293,6 +276,25 @@ proc peerDeleted[A, B](pool: PeerPool[A, B], peer: A) = if not(isNil(pool.onDeletePeer)): pool.onDeletePeer(peer) +proc deletePeerImpl[A, B]( + pool: PeerPool[A, B], + peer: A, + key: B, + pindex: PeerIndex +) = + let sindex = pool.sorted.find(pindex) + pool.storage[distinctBase(pindex)] = PeerItem[A](index: PeerIndex(-1)) + pool.empties.add(pindex) + pool.registry.del(key) + if sindex >= 0: + # sindex == -1 when deleting peer which was acquired (not in `sorted` array). 
+ pool.sorted.delete(sindex) + + # Indicate that we have an empty space + pool.changeEvent.fire() + pool.peerDeleted(peer) + pool.peerCountChanged() + proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool = ## Remove ``peer`` from PeerPool ``pool``. ## @@ -300,78 +302,60 @@ proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool = ## be deleted only when peer will be released. You can change this behavior ## with ``force`` option. mixin getKey - let key = getKey(peer) - if pool.registry.hasKey(key): - let pindex = try: pool.registry[key].data - except KeyError: raiseAssert "checked with hasKey" - var item = addr(pool.storage[pindex]) - if (PeerFlags.Acquired in item[].flags): - if not(force): - item[].flags.incl(PeerFlags.DeleteOnRelease) - else: - if item[].peerType == PeerType.Incoming: - dec(pool.curIncPeersCount) - dec(pool.acqIncPeersCount) - elif item[].peerType == PeerType.Outgoing: - dec(pool.curOutPeersCount) - dec(pool.acqOutPeersCount) - - # Indicate that we have an empty space - pool.fireNotFullEvent(item[]) - # Cleanup storage with default item, and removing key from hashtable. - pool.storage[pindex] = PeerItem[A]() - pool.registry.del(key) - pool.peerDeleted(peer) - pool.peerCountChanged() + let + key = peer.getKey() + pindex = + block: + let res = pool.registry.getOrDefault(key, PeerIndex(-1)) + if res == PeerIndex(-1): + return false + res + + var item = addr(pool.storage[distinctBase(pindex)]) + if (PeerFlags.Acquired in item[].flags): + if not(force): + item[].flags.incl(PeerFlags.DeleteOnRelease) else: - if item[].peerType == PeerType.Incoming: - # If peer is available, then its copy present in heapqueue, so we need - # to remove it. - for i in 0 ..< len(pool.incQueue): - if pool.incQueue[i].data == pindex: - pool.incQueue.del(i) - break + case item[].peerType + of PeerType.Incoming: + dec(pool.acqIncPeersCount) dec(pool.curIncPeersCount) - elif item[].peerType == PeerType.Outgoing: - # If peer is available, then its copy present in heapqueue, so we need - # to remove it. - for i in 0 ..< len(pool.outQueue): - if pool.outQueue[i].data == pindex: - pool.outQueue.del(i) - break + of PeerType.Outgoing: + dec(pool.acqOutPeersCount) dec(pool.curOutPeersCount) - - # Indicate that we have an empty space - pool.fireNotFullEvent(item[]) - # Cleanup storage with default item, and removing key from hashtable. 
- pool.storage[pindex] = PeerItem[A]() - pool.registry.del(key) - pool.peerDeleted(peer) - pool.peerCountChanged() - true + pool.deletePeerImpl(peer, key, pindex) else: - false + case item[].peerType + of PeerType.Incoming: + dec(pool.curIncPeersCount) + of PeerType.Outgoing: + dec(pool.curOutPeersCount) + pool.deletePeerImpl(peer, key, pindex) + + true proc addPeerImpl[A, B](pool: PeerPool[A, B], peer: A, peerKey: B, peerType: PeerType) = + mixin getFuture proc onPeerClosed(udata: pointer) {.gcsafe, raises: [].} = discard pool.deletePeer(peer) - let item = PeerItem[A](data: peer, peerType: peerType, - index: len(pool.storage)) - pool.storage.add(item) - var pitem = addr(pool.storage[^1]) - let pindex = PeerIndex(data: item.index, cmp: pool.cmp) + let + item = PeerItem[A](data: peer, peerType: peerType) + pindex = pool.addToStorage(item) + pitem = addr(pool.storage[distinctBase(pindex)]) + pool.registry[peerKey] = pindex + pool.sorted.add(pindex) + pool.resort(pool.sorted) + pitem[].data.getFuture().addCallback(onPeerClosed) - if peerType == PeerType.Incoming: + case peerType + of PeerType.Incoming: inc(pool.curIncPeersCount) - pool.incQueue.push(pindex) - pool.incNotEmptyEvent.fire() - elif peerType == PeerType.Outgoing: + of PeerType.Outgoing: inc(pool.curOutPeersCount) - pool.outQueue.push(pindex) - pool.outNotEmptyEvent.fire() + pool.changeEvent.fire() pool.peerCountChanged() proc checkPeer*[A, B](pool: PeerPool[A, B], peer: A): PeerStatus {.inline.} = @@ -395,8 +379,11 @@ proc checkPeer*[A, B](pool: PeerPool[A, B], peer: A): PeerStatus {.inline.} = else: PeerStatus.DuplicateError -proc addPeerNoWait*[A, B](pool: PeerPool[A, B], - peer: A, peerType: PeerType): PeerStatus = +proc addPeerNoWait*[A, B]( + pool: PeerPool[A, B], + peer: A, + peerType: PeerType +): PeerStatus = ## Add peer ``peer`` of type ``peerType`` to PeerPool ``pool``. ## ## Procedure returns ``PeerStatus`` @@ -428,41 +415,21 @@ proc addPeerNoWait*[A, B](pool: PeerPool[A, B], else: PeerStatus.NoSpaceError -proc getPeerSpaceMask[A, B](pool: PeerPool[A, B], - peerType: PeerType): set[PeerType] {.inline.} = - ## This procedure returns set of events which you need to wait to get empty - ## space for peer type ``peerType``. This set can be used for call to - ## ``waitNotFullEvent()``. - case peerType: - of PeerType.Incoming: - if pool.maxIncPeersCount >= pool.maxPeersCount: - # If maximum number of `incoming` peers is only limited by - # maximum number of peers, then we could wait for both events. - # It means that we do not care about what peer will left pool. - {PeerType.Incoming, PeerType.Outgoing} - else: - # Otherwise we could wait only for `incoming` event - {PeerType.Incoming} - of PeerType.Outgoing: - if pool.maxOutPeersCount >= pool.maxPeersCount: - # If maximum number of `outgoing` peers is only limited by - # maximum number of peers, then we could wait for both events. - # It means that we do not care about what peer will left pool. - {PeerType.Incoming, PeerType.Outgoing} - else: - # Otherwise we could wait only for `outgoing` event - {PeerType.Outgoing} - -proc waitForEmptySpace*[A, B](pool: PeerPool[A, B], - peerType: PeerType) {.async: (raises: [CancelledError]).} = +proc waitForEmptySpace*[A, B]( + pool: PeerPool[A, B], + peerType: PeerType +) {.async: (raises: [CancelledError]).} = ## This procedure will block until ``pool`` will have an empty space for peer ## of type ``peerType``. 
- let mask = pool.getPeerSpaceMask(peerType) while pool.lenSpace({peerType}) == 0: - await pool.waitNotFullEvent(mask) + await pool.changeEvent.wait() + pool.changeEvent.clear() -proc addPeer*[A, B](pool: PeerPool[A, B], - peer: A, peerType: PeerType): Future[PeerStatus] {.async: (raises: [CancelledError]).} = +proc addPeer*[A, B]( + pool: PeerPool[A, B], + peer: A, + peerType: PeerType +): Future[PeerStatus] {.async: (raises: [CancelledError]).} = ## Add peer ``peer`` of type ``peerType`` to PeerPool ``pool``. ## ## This procedure will wait for an empty space in PeerPool ``pool``, if @@ -474,134 +441,174 @@ proc addPeer*[A, B](pool: PeerPool[A, B], ## ## Procedure returns (PeerStatus.Success) on success. mixin getKey - let res = + + template check(peer: untyped) = + let res = pool.checkPeer(peer) + if res != PeerStatus.Success: + return res + + while pool.lenSpace({peerType}) == 0: + peer.check() + await pool.changeEvent.wait() + pool.changeEvent.clear() + + # Because we could wait for a long time we need to check peer one more + # time to avoid race condition. + peer.check() + + pool.addPeerImpl(peer, peer.getKey(), peerType) + PeerStatus.Success + +proc acquireItemImpl[A, B]( + pool: PeerPool[A, B], + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] = nil +): A = + let (sindex, pitem) = block: - let res1 = pool.checkPeer(peer) - if res1 != PeerStatus.Success: - res1 - else: - let mask = pool.getPeerSpaceMask(peerType) - # We going to block here until ``pool`` will not have free space, - # for our type of peer. - while pool.lenSpace({peerType}) == 0: - await pool.waitNotFullEvent(mask) - # Because we could wait for a long time we need to check peer one more - # time to avoid race condition. - let res2 = pool.checkPeer(peer) - if res2 == PeerStatus.Success: - let peerKey = peer.getKey() - pool.addPeerImpl(peer, peerKey, peerType) - PeerStatus.Success - else: - res2 - return res - -proc acquireItemImpl[A, B](pool: PeerPool[A, B], - filter: set[PeerType]): A {.inline.} = - doAssert((len(pool.outQueue) > 0) or (len(pool.incQueue) > 0)) - let pindex = - if filter == {PeerType.Incoming, PeerType.Outgoing}: - if len(pool.outQueue) > 0 and len(pool.incQueue) > 0: - # `<` here is the `PeerIndex` implementation (`HeapQueue` uses `<`), - # which then flips the arguments to rank `>` on `A` using `pool.cmp` - if pool.incQueue[0] < pool.outQueue[0]: - inc(pool.acqIncPeersCount) - let item = pool.incQueue.pop() - item.data - else: - inc(pool.acqOutPeersCount) - let item = pool.outQueue.pop() - item.data - else: - if len(pool.outQueue) > 0: - inc(pool.acqOutPeersCount) - let item = pool.outQueue.pop() - item.data - else: - inc(pool.acqIncPeersCount) - let item = pool.incQueue.pop() - item.data - else: - if PeerType.Outgoing in filter: - inc(pool.acqOutPeersCount) - let item = pool.outQueue.pop() - item.data - else: - inc(pool.acqIncPeersCount) - let item = pool.incQueue.pop() - item.data - var pitem = addr(pool.storage[pindex]) - doAssert(PeerFlags.Acquired notin pitem[].flags) + var + rindex = -1 + res: ptr PeerItem[A] = nil + for sindex, pindex in pool.sorted.pairs(): + res = addr(pool.storage[distinctBase(pindex)]) + if (PeerFlags.Acquired notin res[].flags) and + (res[].peerType in filter) and + (isNil(customFilter) or customFilter(res[].data)): + rindex = sindex + break + (rindex, res) + + doAssert(sindex >= 0) + + case pitem[].peerType + of PeerType.Incoming: + inc(pool.acqIncPeersCount) + of PeerType.Outgoing: + inc(pool.acqOutPeersCount) + + pool.sorted.delete(sindex) + 
pitem[].flags.incl(PeerFlags.Acquired) pitem[].data -proc acquire*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): Future[A] {.async: (raises: [CancelledError]).} = +proc acquire*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): Future[A] {.async: (raises: [CancelledError]).} = ## Acquire peer from PeerPool ``pool``, which match the filter ``filter``. + ## This procedure will wait for peer which satisfy filter will become + ## available for acquisition. mixin getKey doAssert(filter != {}, "Filter must not be empty") while true: if pool.lenAvailable(filter) == 0: - await pool.waitNotEmptyEvent(filter) + await pool.changeEvent.wait() + pool.changeEvent.clear() else: - return pool.acquireItemImpl(filter) - -proc acquireNoWait*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing} - ): A {.raises: [PeerPoolError].} = + return pool.acquireItemImpl(filter, nil) + +proc acquire*[A, B]( + pool: PeerPool[A, B], + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): Future[A] {.async: (raises: [CancelledError]).} = + ## Acquire peer from PeerPool ``pool``, which match the filter ``filter`` and + ## custom filter ``customFilter``. This procedure will wait for peer which + ## satisfy filters will become available for acquisition. + mixin getKey + doAssert(filter != {}, "Filter must not be empty") + while true: + if pool.lenAvailable(filter, customFilter) == 0: + await pool.changeEvent.wait() + pool.changeEvent.clear() + else: + return pool.acquireItemImpl(filter, customFilter) + +proc acquireNoWait*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): A {.raises: [PeerPoolError].} = + ## Acquire peer from PeerPool ``pool``, which match the filter ``filter`` + ## without waiting, this procedure will raise PeerPoolError if no peers + ## which satisfy filters are available for acquisition. doAssert(filter != {}, "Filter must not be empty") if pool.lenAvailable(filter) < 1: raise newException(PeerPoolError, "Not enough peers in pool") - pool.acquireItemImpl(filter) + pool.acquireItemImpl(filter, nil) + +proc acquireNoWait*[A, B]( + pool: PeerPool[A, B], + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): A {.raises: [PeerPoolError].} = + ## Acquire peer from PeerPool ``pool``, which match the filter ``filter`` and + ## custom filter ``customFilter`` without waiting, this procedure will raise + ## PeerPoolError if no peers which satisfy filters are available for + ## acquisition. + doAssert(filter != {}, "Filter must not be empty") + if pool.lenAvailable(filter, customFilter) < 1: + raise newException(PeerPoolError, "Not enough peers in pool") + pool.acquireItemImpl(filter, customFilter) proc release*[A, B](pool: PeerPool[A, B], peer: A) = ## Release peer ``peer`` back to PeerPool ``pool`` mixin getKey - let key = getKey(peer) - var titem = pool.registry.getOrDefault(key, PeerIndex(data: -1)) - if titem.data >= 0: - let pindex = titem.data - var item = addr(pool.storage[pindex]) - if PeerFlags.Acquired in item[].flags: - if not(pool.checkPeerScore(peer)): - item[].flags.incl(DeleteOnRelease) - if PeerFlags.DeleteOnRelease in item[].flags: - # We do not care about result here because peer is present in registry - # and has all proper flags set. 
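All waiting paths in the rewritten pool now share a single changeEvent: whenever membership or availability changes the pool fires it, and each waiter re-checks its own predicate after wait()/clear(), replacing the four direction-specific not-empty/not-full events. A minimal chronos sketch of that wait-then-recheck loop follows; AsyncEvent, wait, clear, fire and sleepAsync are the same chronos calls the pool relies on, while Counter and its predicate are hypothetical:

import chronos

type
  Counter = ref object
    value: int
    changeEvent: AsyncEvent

proc waitUntilAtLeast(c: Counter, n: int) {.async.} =
  ## Re-check the predicate after every wakeup: the shared event may have
  ## been fired for a change this particular waiter does not care about.
  while c.value < n:
    await c.changeEvent.wait()
    c.changeEvent.clear()

proc producer(c: Counter) {.async.} =
  for _ in 0 ..< 3:
    await sleepAsync(10.milliseconds)
    inc c.value
    c.changeEvent.fire()     # wake all waiters; each re-evaluates its condition

when isMainModule:
  let c = Counter(changeEvent: newAsyncEvent())
  let prod = producer(c)     # runs concurrently once the dispatcher is active
  waitFor c.waitUntilAtLeast(3)
  waitFor prod
  doAssert c.value == 3

Because the event stays set until cleared and every waiter loops back to its own condition, a wakeup that arrives "too early" or for an unrelated change is harmless.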
- discard pool.deletePeer(peer, force = true) - else: - item[].flags.excl(PeerFlags.Acquired) - case item[].peerType - of PeerType.Incoming: - pool.incQueue.push(titem) - dec(pool.acqIncPeersCount) - of PeerType.Outgoing: - pool.outQueue.push(titem) - dec(pool.acqOutPeersCount) - pool.fireNotEmptyEvent(item[]) - -proc release*[A, B](pool: PeerPool[A, B], peers: openArray[A]) {.inline.} = + let + key = peer.getKey() + pindex = + block: + let res = pool.registry.getOrDefault(key, PeerIndex(-1)) + if res == PeerIndex(-1): + return + res + item = addr(pool.storage[distinctBase(pindex)]) + + if PeerFlags.Acquired in item[].flags: + if not(pool.checkPeerScore(peer)): + item[].flags.incl(DeleteOnRelease) + if PeerFlags.DeleteOnRelease in item[].flags: + case item[].peerType + of PeerType.Incoming: + dec(pool.acqIncPeersCount) + dec(pool.curIncPeersCount) + of PeerType.Outgoing: + dec(pool.acqOutPeersCount) + dec(pool.curOutPeersCount) + pool.deletePeerImpl(peer, key, pindex) + else: + item[].flags.excl(PeerFlags.Acquired) + case item[].peerType + of PeerType.Incoming: + dec(pool.acqIncPeersCount) + of PeerType.Outgoing: + dec(pool.acqOutPeersCount) + + pool.sorted.add(pindex) + pool.resort(pool.sorted) + pool.changeEvent.fire() + +proc release*[A, B](pool: PeerPool[A, B], peers: openArray[A]) = ## Release array of peers ``peers`` back to PeerPool ``pool``. for item in peers: pool.release(item) -proc acquire*[A, B](pool: PeerPool[A, B], - number: int, - filter = {PeerType.Incoming, - PeerType.Outgoing}): Future[seq[A]] {.async: (raises: [CancelledError]).} = +proc acquire*[A, B]( + pool: PeerPool[A, B], + number: int, + filter = {PeerType.Incoming, PeerType.Outgoing} +): Future[seq[A]] {.async: (raises: [CancelledError]).} = ## Acquire ``number`` number of peers from PeerPool ``pool``, which match the ## filter ``filter``. doAssert(filter != {}, "Filter must not be empty") - var peers = newSeq[A]() + var peers: seq[A] try: if number > 0: while true: if len(peers) >= number: break if pool.lenAvailable(filter) == 0: - await pool.waitNotEmptyEvent(filter) + await pool.changeEvent.wait() + pool.changeEvent.clear() else: peers.add(pool.acquireItemImpl(filter)) except CancelledError as exc: @@ -611,96 +618,239 @@ proc acquire*[A, B](pool: PeerPool[A, B], pool.release(item) peers.setLen(0) raise exc - return peers + peers + +proc acquire*[A, B]( + pool: PeerPool[A, B], + number: int, + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): Future[seq[A]] {.async: (raises: [CancelledError]).} = + ## Acquire ``number`` number of peers from PeerPool ``pool``, which match the + ## filter ``filter`` and custom filter ``customFilter``. This procedure will + ## wait for ``number`` of peers which satisfy filter will become available + ## and acquired. + doAssert(filter != {}, "Filter must not be empty") + var peers: seq[A] + try: + if number > 0: + while true: + if len(peers) >= number: + break + if pool.lenAvailable(filter, customFilter) == 0: + await pool.changeEvent.wait() + pool.changeEvent.clear() + else: + peers.add(pool.acquireItemImpl(filter, customFilter)) + except CancelledError as exc: + # If we got cancelled, we need to return all the acquired peers back to + # pool. 
+ for item in peers: + pool.release(item) + peers.setLen(0) + raise exc + peers -proc acquireNoWait*[A, B](pool: PeerPool[A, B], - number: int, - filter = {PeerType.Incoming, - PeerType.Outgoing}): seq[A] = +proc acquireNoWait*[A, B]( + pool: PeerPool[A, B], + number: int, + filter = {PeerType.Incoming, PeerType.Outgoing} +): seq[A] = ## Acquire ``number`` number of peers from PeerPool ``pool``, which match the - ## filter ``filter``. + ## filter ``filter``. This procedure does not wait for peers, it will raise + ## `PeerPoolError` if peers matching the filters are not available. doAssert(filter != {}, "Filter must not be empty") - var peers = newSeq[A]() + var peers: seq[A] if pool.lenAvailable(filter) < number: raise newException(PeerPoolError, "Not enough peers in pool") for i in 0 ..< number: peers.add(pool.acquireItemImpl(filter)) - return peers + peers + +proc acquireNoWait*[A, B]( + pool: PeerPool[A, B], + number: int, + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): seq[A] = + ## Acquire ``number`` number of peers from PeerPool ``pool``, which match the + ## filter ``filter`` and custom filter ``filter``. This procedure does not + ## wait for peers, it will raise `PeerPoolError` if peers matching the + ## filters are not available. + doAssert(filter != {}, "Filter must not be empty") + var peers: seq[A] + if pool.lenAvailable(filter, customFilter) < number: + raise newException(PeerPoolError, "Not enough peers in pool") + for i in 0 ..< number: + peers.add(pool.acquireItemImpl(filter, customFilter)) + peers -proc acquireIncomingPeer*[A, B](pool: PeerPool[A, B]): Future[A] {.inline.} = +proc acquireIncomingPeer*[A, B]( + pool: PeerPool[A, B] +): Future[A] {.async: (raises: [CancelledError], raw: true).} = ## Acquire single incoming peer from PeerPool ``pool``. pool.acquire({PeerType.Incoming}) -proc acquireOutgoingPeer*[A, B](pool: PeerPool[A, B]): Future[A] {.inline.} = +proc acquireOutgoingPeer*[A, B]( + pool: PeerPool[A, B] +): Future[A] {.async: (raises: [CancelledError], raw: true).} = ## Acquire single outgoing peer from PeerPool ``pool``. pool.acquire({PeerType.Outgoing}) -proc acquireIncomingPeers*[A, B](pool: PeerPool[A, B], - number: int): Future[seq[A]] {.inline.} = +proc acquireIncomingPeers*[A, B]( + pool: PeerPool[A, B], + number: int +): Future[seq[A]] {.async: (raises: [CancelledError], raw: true).} = ## Acquire ``number`` number of incoming peers from PeerPool ``pool``. pool.acquire(number, {PeerType.Incoming}) -proc acquireOutgoingPeers*[A, B](pool: PeerPool[A, B], - number: int): Future[seq[A]] {.inline.} = +proc acquireOutgoingPeers*[A, B]( + pool: PeerPool[A, B], + number: int +): Future[seq[A]] {.async: (raises: [CancelledError], raw: true).} = ## Acquire ``number`` number of outgoing peers from PeerPool ``pool``. pool.acquire(number, {PeerType.Outgoing}) -iterator peers*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): A = +iterator peers*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): A = + ## Iterate over sorted list of peers. + ## + ## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values + ## will be first. + ## + ## NOTE: While it safe to use this iterator in combination with await calls, + ## consider that right after `await` call, PeerPool could become different + ## from the snapshot this iterator provides. 
+ var unsorted: seq[PeerIndex] + for pindex in pool.registry.values(): + if pool.storage[distinctBase(pindex)].peerType in filter: + unsorted.add(pindex) + + # We allocate new sequence here to avoid problems with missing indices when + # await operation could be part of iteration. + let sortedPeers = + pool.resorted(unsorted).mapIt(pool.storage[distinctBase(it)].data) + for peer in sortedPeers: + yield peer + +iterator peers*[A, B]( + pool: PeerPool[A, B], + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): A = ## Iterate over sorted list of peers. ## ## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values ## will be first. - var sorted = initHeapQueue[PeerIndex]() - for peerIdx in pool.registry.values(): - if pool.storage[peerIdx.data].peerType in filter: - sorted.push(peerIdx) - while len(sorted) > 0: - let peerIdx = sorted.pop() - yield pool.storage[peerIdx.data].data - -iterator availablePeers*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): A = + ## + ## NOTE: While it safe to use this iterator in combination with await calls, + ## consider that right after `await` call, PeerPool could become different + ## from the snapshot this iterator provides. + var unsorted: seq[PeerIndex] + for pindex in pool.registry.values(): + let item = addr(pool.storage[distinctBase(pindex)]) + if (item[].peerType in filter) and + (isNil(customFilter) or customFilter(item[].data)): + unsorted.add(pindex) + + # We allocate new sequence here to avoid problems with missing indices when + # await operation could be part of iteration. + let sortedPeers = + pool.resorted(unsorted).mapIt(pool.storage[distinctBase(it)].data) + for peer in sortedPeers: + yield peer + +iterator availablePeers*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): A = ## Iterate over sorted list of available peers. ## ## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values ## will be first. - var sorted = initHeapQueue[PeerIndex]() - for peerIdx in pool.registry.values(): - if (PeerFlags.Acquired notin pool.storage[peerIdx.data].flags) and - (pool.storage[peerIdx.data].peerType in filter): - sorted.push(peerIdx) - while len(sorted) > 0: - let peerIdx = sorted.pop() - yield pool.storage[peerIdx.data].data - -iterator acquiredPeers*[A, B](pool: PeerPool[A, B], - filter = {PeerType.Incoming, - PeerType.Outgoing}): A = + ## + ## NOTE: While it safe to use this iterator in combination with await calls, + ## consider that right after `await` call, PeerPool could become different + ## from the snapshot this iterator provides. + + # We allocate new sequence here to avoid problems with missing indices when + # await operation could be part of iteration. + let sortedPeers = + pool.sorted.filterIt( + (PeerFlags.Acquired notin pool.storage[distinctBase(it)].flags) and + (pool.storage[distinctBase(it)].peerType in filter)). + mapIt(pool.storage[distinctBase(it)].data) + + for peer in sortedPeers: + yield peer + +iterator availablePeers*[A, B]( + pool: PeerPool[A, B], + filter: set[PeerType], + customFilter: PeerCustomFilterCallback[A] +): A = + ## Iterate over sorted list of available peers. + ## + ## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values + ## will be first. + ## + ## NOTE: While it safe to use this iterator in combination with await calls, + ## consider that right after `await` call, PeerPool could become different + ## from the snapshot this iterator provides. 
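As the NOTE above spells out, the rewritten iterators materialise a snapshot before yielding: candidate indices are collected, sorted, and mapped into a fresh seq, so a loop body that awaits keeps iterating over a stable copy even if the pool changes underneath it (the copy can of course go stale). A reduced sketch of that snapshot-then-yield shape; Peer, Registry and rankedPeers are illustrative only:

import std/[algorithm, sequtils]

type
  Peer = object
    score: int
  Registry = ref object
    peers: seq[Peer]          # hypothetical stand-in for the pool's storage

iterator rankedPeers(reg: Registry): Peer =
  # Take a snapshot first: copy and sort, then yield only from the copy, so
  # mutations of `reg.peers` during (possibly awaiting) loop bodies cannot
  # invalidate the iteration.
  var snapshot = reg.peers
  snapshot.sort(proc(a, b: Peer): int = cmp(a.score, b.score),
                order = SortOrder.Descending)
  for peer in snapshot:
    yield peer

when isMainModule:
  let reg = Registry(peers: @[Peer(score: 1), Peer(score: 9), Peer(score: 5)])
  var seen: seq[int]
  for peer in reg.rankedPeers():
    seen.add peer.score
    reg.peers.add Peer(score: 100)   # mutation does not affect this loop
  doAssert seen == @[9, 5, 1]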
+ + # We allocate new sequence here to avoid problems with missing indices when + # await operation could be part of iteration. + let sortedPeers = + pool.sorted.filterIt( + (PeerFlags.Acquired notin pool.storage[distinctBase(it)].flags) and + (pool.storage[distinctBase(it)].peerType in filter) and + (isNil(customFilter) or + customFilter(pool.storage[distinctBase(it)].data))). + mapIt(pool.storage[distinctBase(it)].data) + + for peer in sortedPeers: + yield peer + +iterator acquiredPeers*[A, B]( + pool: PeerPool[A, B], + filter = {PeerType.Incoming, PeerType.Outgoing} +): A = ## Iterate over sorted list of acquired (non-available) peers. ## ## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values ## will be first. - var sorted = initHeapQueue[PeerIndex]() - for peerIdx in pool.registry.values(): - if (PeerFlags.Acquired in pool.storage[peerIdx.data].flags) and - (pool.storage[peerIdx.data].peerType in filter): - sorted.push(peerIdx) - while len(sorted) > 0: - let peerIdx = sorted.pop() - yield pool.storage[peerIdx.data].data - -proc `[]`*[A, B](pool: PeerPool[A, B], key: B): A {.inline, raises: [KeyError].} = + ## + ## NOTE: While it safe to use this iterator in combination with await calls, + ## consider that right after `await` call, PeerPool could become different + ## from the snapshot this iterator provides. + var unsorted: seq[PeerIndex] + for pindex in pool.registry.values(): + if (PeerFlags.Acquired in pool.storage[distinctBase(pindex)].flags) and + (pool.storage[distinctBase(pindex)].peerType in filter): + unsorted.add(pindex) + + # We allocate new sequence here to avoid problems with missing indices when + # await operation could be part of iteration. + let sortedPeers = + pool.resorted(unsorted).mapIt(pool.storage[distinctBase(it)].data) + for peer in sortedPeers: + yield peer + +proc `[]`*[A, B]( + pool: PeerPool[A, B], + key: B +): A {.inline, raises: [KeyError].} = ## Retrieve peer with key ``key`` from PeerPool ``pool``. - let pindex = pool.registry[key] - pool.storage[pindex.data] + pool.storage[distinctBase(pool.registry[key])].data -proc `[]`*[A, B](pool: var PeerPool[A, B], key: B): var A {.inline, raises: [KeyError].} = +proc `[]`*[A, B]( + pool: var PeerPool[A, B], + key: B +): var A {.inline, raises: [KeyError].} = ## Retrieve peer with key ``key`` from PeerPool ``pool``. - let pindex = pool.registry[key] - pool.storage[pindex.data].data + pool.storage[distinctBase(pool.registry[key])].data proc hasPeer*[A, B](pool: PeerPool[A, B], key: B): bool {.inline.} = ## Returns ``true`` if peer with ``key`` present in PeerPool ``pool``. @@ -710,9 +860,9 @@ proc getOrDefault*[A, B](pool: PeerPool[A, B], key: B): A {.inline.} = ## Retrieves the peer from PeerPool ``pool`` using key ``key``. If peer is ## not present, default initialization value for type ``A`` is returned ## (e.g. 0 for any integer type). - let pindex = pool.registry.getOrDefault(key, PeerIndex(data: -1)) - if pindex.data >= 0: - pool.storage[pindex.data].data + let pindex = pool.registry.getOrDefault(key, PeerIndex(-1)) + if pindex != PeerIndex(-1): + pool.storage[distinctBase(pindex)].data else: A() @@ -720,29 +870,32 @@ proc getOrDefault*[A, B](pool: PeerPool[A, B], key: B, default: A): A {.inline.} = ## Retrieves the peer from PeerPool ``pool`` using key ``key``. If peer is ## not present, default value ``default`` is returned. 
- let pindex = pool.registry.getOrDefault(key, PeerIndex(data: -1)) - if pindex.data >= 0: - pool.storage[pindex.data].data + let pindex = pool.registry.getOrDefault(key, PeerIndex(-1)) + if pindex != PeerIndex(-1): + pool.storage[distinctBase(pindex)].data else: default proc clear*[A, B](pool: PeerPool[A, B]) = ## Performs PeerPool's ``pool`` storage and counters reset. - pool.incQueue.clear() - pool.outQueue.clear() pool.registry.clear() + + pool.sorted.reset() for i in 0 ..< len(pool.storage): pool.storage[i] = PeerItem[A]() - pool.storage.setLen(0) + pool.empties.reset() + pool.storage.reset() pool.curIncPeersCount = 0 pool.curOutPeersCount = 0 pool.acqIncPeersCount = 0 pool.acqOutPeersCount = 0 -proc clearSafe*[A, B](pool: PeerPool[A, B]) {.async: (raises: [CancelledError]).} = +proc clearSafe*[A, B]( + pool: PeerPool[A, B] +) {.async: (raises: [CancelledError]).} = ## Performs "safe" clear. Safe means that it first acquires all the peers ## in PeerPool, and only after that it will reset storage. - var acquired = newSeq[A]() + var acquired: seq[A] while len(pool.registry) > len(acquired): var peers = await pool.acquire(len(pool.registry) - len(acquired)) for item in peers: diff --git a/beacon_chain/networking/peer_protocol.nim b/beacon_chain/networking/peer_protocol.nim index 429e6ccdff..f81ad3b68d 100644 --- a/beacon_chain/networking/peer_protocol.nim +++ b/beacon_chain/networking/peer_protocol.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -54,7 +54,7 @@ func forkDigestAtEpoch(state: PeerSyncNetworkState, epoch: Epoch): ForkDigest = state.forkDigests[].atEpoch(epoch, state.cfg) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#status +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/p2p-interface.md#status proc getCurrentStatus(state: PeerSyncNetworkState): StatusMsg = let dag = state.dag @@ -169,6 +169,14 @@ p2pProtocol PeerSync(version = 1, proc getMetadata_v2(peer: Peer): altair.MetaData {.libp2pProtocol("metadata", 2).} = + let altair_metadata = altair.MetaData( + seq_number: peer.network.metadata.seq_number, + attnets: peer.network.metadata.attnets, + syncnets: peer.network.metadata.syncnets) + altair_metadata + + proc getMetadata_v3(peer: Peer): fulu.MetaData + {. libp2pProtocol("metadata", 3).} = peer.network.metadata proc goodbye(peer: Peer, reason: uint64) {. @@ -241,7 +249,7 @@ proc init*(T: type PeerSync.NetworkState, getBeaconTime: getBeaconTime, ) -proc init*(T: type PeerSync.NetworkState, +func init*(T: type PeerSync.NetworkState, cfg: RuntimeConfig, forkDigests: ref ForkDigests, genesisBlockRoot: Eth2Digest, diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 1d6edc040c..782a76244e 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} import - std/[os, random, terminal, times], + std/[os, random, terminal, times, exitprocs], chronos, chronicles, metrics, metrics/chronos_httpserver, stew/[byteutils, io2], @@ -21,7 +21,7 @@ import ./spec/datatypes/[altair, bellatrix, phase0], ./spec/[ deposit_snapshots, engine_authentication, weak_subjectivity, - eip7594_helpers], + peerdas_helpers], ./sync/[sync_protocol, light_client_protocol, sync_overseer], ./validators/[keystore_management, beacon_validators], "."/[ @@ -146,26 +146,20 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = of StdoutLogKind.Auto: raiseAssert "inadmissable here" of StdoutLogKind.Colors: VanityLogs( - onMergeTransitionBlock: bellatrixColor, - onFinalizedMergeTransitionBlock: bellatrixBlink, onUpgradeToCapella: capellaColor, onKnownBlsToExecutionChange: capellaBlink, onUpgradeToDeneb: denebColor, - onUpgradeToElectra: electraColor) + onUpgradeToElectra: electraColor, + onKnownCompoundingChange: electraBlink) of StdoutLogKind.NoColors: VanityLogs( - onMergeTransitionBlock: bellatrixMono, - onFinalizedMergeTransitionBlock: bellatrixMono, onUpgradeToCapella: capellaMono, onKnownBlsToExecutionChange: capellaMono, onUpgradeToDeneb: denebMono, - onUpgradeToElectra: electraMono) + onUpgradeToElectra: electraMono, + onKnownCompoundingChange: electraMono) of StdoutLogKind.Json, StdoutLogKind.None: VanityLogs( - onMergeTransitionBlock: - (proc() = notice "🐼 Proof of Stake Activated 🐼"), - onFinalizedMergeTransitionBlock: - (proc() = notice "🐼 Proof of Stake Finalized 🐼"), onUpgradeToCapella: (proc() = notice "🦉 Withdrowls now available 🦉"), onKnownBlsToExecutionChange: @@ -173,12 +167,14 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = onUpgradeToDeneb: (proc() = notice "🐟 Proto-Danksharding is ON 🐟"), onUpgradeToElectra: - (proc() = notice "🦒 [PH] Electra 🦒")) + (proc() = notice "🦒 Compounding is available 🦒"), + onKnownCompoundingChange: + (proc() = notice "🦒 Compounding is activated 🦒")) func getVanityMascot(consensusFork: ConsensusFork): string = case consensusFork of ConsensusFork.Fulu: - "not decided yet?" 
+ "❓" of ConsensusFork.Electra: "🦒" of ConsensusFork.Deneb: @@ -278,7 +274,7 @@ proc checkWeakSubjectivityCheckpoint( from ./spec/state_transition_block import kzg_commitment_to_versioned_hash -proc isSlotWithinWeakSubjectivityPeriod(dag: ChainDagRef, slot: Slot): bool = +proc isSlotWithinWeakSubjectivityPeriod(dag: ChainDAGRef, slot: Slot): bool = let checkpoint = Checkpoint( epoch: epoch(getStateField(dag.headState, slot)), @@ -291,14 +287,14 @@ proc initFullNode( rng: ref HmacDrbgContext, dag: ChainDAGRef, clist: ChainListRef, - taskpool: TaskPoolPtr, + taskpool: Taskpool, getBeaconTime: GetBeaconTimeFn) {.async.} = template config(): auto = node.config proc onPhase0AttestationReceived(data: phase0.Attestation) = - node.eventBus.attestQueue.emit(data) - proc onElectraAttestationReceived(data: electra.Attestation) = - debugComment "electra attestation queue" + node.eventBus.phase0AttestQueue.emit(data) + proc onSingleAttestationReceived(data: SingleAttestation) = + node.eventBus.singleAttestQueue.emit(data) proc onSyncContribution(data: SignedContributionAndProof) = node.eventBus.contribQueue.emit(data) proc onVoluntaryExitAdded(data: SignedVoluntaryExit) = @@ -308,9 +304,9 @@ proc initFullNode( proc onProposerSlashingAdded(data: ProposerSlashing) = node.eventBus.propSlashQueue.emit(data) proc onPhase0AttesterSlashingAdded(data: phase0.AttesterSlashing) = - node.eventBus.attSlashQueue.emit(data) + node.eventBus.phase0AttSlashQueue.emit(data) proc onElectraAttesterSlashingAdded(data: electra.AttesterSlashing) = - debugComment "electra att slasher queue" + node.eventBus.electraAttSlashQueue.emit(data) proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = node.eventBus.blobSidecarQueue.emit(data) proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = @@ -321,6 +317,9 @@ proc initFullNode( none[bool]() node.eventBus.blocksQueue.emit( EventBeaconBlockObject.init(data, optimistic)) + proc onBlockGossipAdded(data: ForkedSignedBeaconBlock) = + node.eventBus.blockGossipQueue.emit( + EventBeaconBlockGossipObject.init(data)) proc onHeadChanged(data: HeadChangeInfoObject) = let eventData = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: @@ -405,7 +404,7 @@ proc initFullNode( Quarantine.init()) attestationPool = newClone(AttestationPool.init( dag, quarantine, onPhase0AttestationReceived, - onElectraAttestationReceived)) + onSingleAttestationReceived)) syncCommitteeMsgPool = newClone( SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) lightClientPool = newClone( @@ -414,14 +413,19 @@ proc initFullNode( dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded, onProposerSlashingAdded, onPhase0AttesterSlashingAdded, onElectraAttesterSlashingAdded)) - blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded)) + blobQuarantine = newClone(BlobQuarantine.init( + dag.cfg, onBlobSidecarAdded)) dataColumnQuarantine = newClone(DataColumnQuarantine.init()) - supernode = node.config.subscribeAllSubnets - localCustodySubnets = + supernode = node.config.peerdasSupernode + localCustodyGroups = if supernode: - DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64 + NUMBER_OF_CUSTODY_GROUPS.uint64 else: CUSTODY_REQUIREMENT.uint64 + custody_columns_set = + node.network.nodeId.resolve_column_sets_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, + localCustodyGroups)) consensusManager = ConsensusManager.new( dag, attestationPool, quarantine, node.elManager, ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), @@ -478,6 +482,13 @@ proc initFullNode( Opt.some 
blob_sidecar else: Opt.none(ref BlobSidecar) + rmanDataColumnLoader = proc( + columnId: DataColumnIdentifier): Opt[ref DataColumnSidecar] = + var data_column_sidecar = DataColumnSidecar.new() + if dag.db.getDataColumnSidecar(columnId.block_root, columnId.index, data_column_sidecar[]): + Opt.some data_column_sidecar + else: + Opt.none(ref DataColumnSidecar) processor = Eth2Processor.new( config.doppelgangerDetection, @@ -491,7 +502,10 @@ proc initFullNode( {} syncManager = newSyncManager[Peer, PeerId]( node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, SyncQueueKind.Forward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, @@ -500,7 +514,10 @@ proc initFullNode( flags = syncManagerFlags) backfiller = newSyncManager[Peer, PeerId]( node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, SyncQueueKind.Backward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, @@ -514,7 +531,10 @@ proc initFullNode( getLocalWallSlot() untrustedManager = newSyncManager[Peer, PeerId]( node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, SyncQueueKind.Backward, getLocalHeadSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getUntrustedBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod, @@ -525,34 +545,41 @@ proc initFullNode( processor: processor, network: node.network) requestManager = RequestManager.init( - node.network, dag.cfg.DENEB_FORK_EPOCH, getBeaconTime, - (proc(): bool = syncManager.inProgress), - quarantine, blobQuarantine, rmanBlockVerifier, - rmanBlockLoader, rmanBlobLoader) - - # As per EIP 7594, the BN is now categorised into a + node.network, supernode, custody_columns_set, dag.cfg.DENEB_FORK_EPOCH, + getBeaconTime, (proc(): bool = syncManager.inProgress), + quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier, + rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader) + + # As per EIP 7594, the BN is now categorised into a # `Fullnode` and a `Supernode`, the fullnodes custodies a # given set of data columns, and hence ONLY subcribes to those # data column subnet topics, however, the supernodes subscribe # to all of the topics. This in turn keeps our `data column quarantine` # really variable. Whenever the BN is a supernode, column quarantine - # essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the + # essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the # spec. However, in terms of fullnode, quarantine is really dependent - # on the randomly assigned columns, by `get_custody_columns`. + # on the randomly assigned columns, by `resolve_columns_from_custody_groups`. # Hence, in order to keep column quarantine accurate and error proof # the custody columns are computed once as the BN boots. Then the values - # are used globally around the codebase. + # are used globally around the codebase. 
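A minimal sketch of the custody sizing just shown: a supernode targets every custody group while a regular full node takes the minimum requirement, floored by the per-slot sampling count. The constant values below are illustrative placeholders; the real ones come from the preset, and the node then derives its concrete column set from its node ID via resolve_column_sets_from_custody_groups.

const
  NumberOfCustodyGroups = 128'u64   # illustrative only
  CustodyRequirement = 4'u64        # illustrative only
  SamplesPerSlot = 8'u64            # illustrative only

func targetCustodyGroupCount(supernode: bool): uint64 =
  # A supernode custodies every group; a full node only the minimum
  # requirement, but never fewer groups than it must sample per slot.
  let base =
    if supernode: NumberOfCustodyGroups
    else: CustodyRequirement
  max(SamplesPerSlot, base)

when isMainModule:
  doAssert targetCustodyGroupCount(true) == 128
  doAssert targetCustodyGroupCount(false) == 8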
- # `get_custody_columns` is not a very expensive function, but there - # are multiple instances of computing custody columns, especially + # `resolve_columns_from_custody_groups` is not a very expensive function, + # but there are multiple instances of computing custody columns, especially # during peer selection, sync with columns, and so on. That is why, # the rationale of populating it at boot and using it gloabally. dataColumnQuarantine[].supernode = supernode - dataColumnQuarantine[].custody_columns = - node.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64, - localCustodySubnets)) + dataColumnQuarantine[].custody_columns = + node.network.nodeId.resolve_columns_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, + localCustodyGroups)) + + if node.config.peerdasSupernode: + node.network.loadCgcnetMetadataAndEnr(NUMBER_OF_CUSTODY_GROUPS.uint8) + else: + node.network.loadCgcnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8) + if node.config.lightClientDataServe: proc scheduleSendingLightClientUpdates(slot: Slot) = if node.lightClientPool[].broadcastGossipFut != nil: @@ -570,6 +597,7 @@ proc initFullNode( dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) dag.setBlockCb(onBlockAdded) + dag.setBlockGossipCb(onBlockGossipAdded) dag.setHeadCb(onHeadChanged) dag.setReorgCb(onChainReorg) @@ -654,7 +682,6 @@ proc init*(T: type BeaconNode, metadata: Eth2NetworkMetadata): Future[BeaconNode] {.async.} = var - taskpool: TaskPoolPtr genesisState: ref ForkedHashedBeaconState = nil template cfg: auto = metadata.cfg @@ -690,18 +717,20 @@ proc init*(T: type BeaconNode, altair_fork_epoch = metadata.cfg.ALTAIR_FORK_EPOCH quit 1 - try: - if config.numThreads < 0: - fatal "The number of threads --numThreads cannot be negative." + let taskpool = + try: + if config.numThreads < 0: + fatal "The number of threads --num-threads cannot be negative." 
+ quit 1 + elif config.numThreads == 0: + Taskpool.new(numThreads = min(countProcessors(), 16)) + else: + Taskpool.new(numThreads = config.numThreads) + except CatchableError as e: + fatal "Cannot start taskpool", err = e.msg quit 1 - elif config.numThreads == 0: - taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16)) - else: - taskpool = TaskPoolPtr.new(numThreads = config.numThreads) - info "Threadpool started", numThreads = taskpool.numThreads - except Exception: - raise newException(Defect, "Failure in taskpool initialization.") + info "Threadpool started", numThreads = taskpool.numThreads if metadata.genesis.kind == BakedIn: if config.genesisState.isSome: @@ -714,11 +743,14 @@ proc init*(T: type BeaconNode, eventBus = EventBus( headQueue: newAsyncEventQueue[HeadChangeInfoObject](), blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), - attestQueue: newAsyncEventQueue[phase0.Attestation](), + blockGossipQueue: newAsyncEventQueue[EventBeaconBlockGossipObject](), + phase0AttestQueue: newAsyncEventQueue[phase0.Attestation](), + singleAttestQueue: newAsyncEventQueue[SingleAttestation](), exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](), propSlashQueue: newAsyncEventQueue[ProposerSlashing](), - attSlashQueue: newAsyncEventQueue[phase0.AttesterSlashing](), + phase0AttSlashQueue: newAsyncEventQueue[phase0.AttesterSlashing](), + electraAttSlashQueue: newAsyncEventQueue[electra.AttesterSlashing](), blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), finalQueue: newAsyncEventQueue[FinalizationInfoObject](), reorgQueue: newAsyncEventQueue[ReorgInfoObject](), @@ -971,7 +1003,7 @@ proc init*(T: type BeaconNode, withState(dag.headState): getValidator(forkyState().data.validators.asSeq(), pubkey) - func getCapellaForkVersion(): Opt[Version] = + func getCapellaForkVersion(): Opt[presets.Version] = Opt.some(cfg.CAPELLA_FORK_VERSION) func getDenebForkEpoch(): Opt[Epoch] = @@ -1149,7 +1181,7 @@ proc updateBlocksGossipStatus*( targetGossipState = getTargetGossipState( slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, - isBehind) + cfg.FULU_FORK_EPOCH, isBehind) template currentGossipState(): auto = node.blocksGossipState if currentGossipState == targetGossipState: @@ -1270,15 +1302,22 @@ proc addCapellaMessageHandlers( node.addAltairMessageHandlers(forkDigest, slot) node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), basicParams) -proc addDenebMessageHandlers( - node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = +proc doAddDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot, + blobSidecarSubnetCount: uint64) = node.addCapellaMessageHandlers(forkDigest, slot) - for topic in blobSidecarTopics(forkDigest): + for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): node.network.subscribe(topic, basicParams) +proc addDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.doAddDenebMessageHandlers( + forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT) + proc addElectraMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addDenebMessageHandlers(forkDigest, slot) + node.doAddDenebMessageHandlers( + forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) proc addFuluMessageHandlers( node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = @@ -1299,13 +1338,19 @@ proc removeCapellaMessageHandlers(node: 
BeaconNode, forkDigest: ForkDigest) = node.removeAltairMessageHandlers(forkDigest) node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest)) -proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = +proc doRemoveDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, blobSidecarSubnetCount: uint64) = node.removeCapellaMessageHandlers(forkDigest) - for topic in blobSidecarTopics(forkDigest): + for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): node.network.unsubscribe(topic) +proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.doRemoveDenebMessageHandlers( + forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT) + proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeDenebMessageHandlers(forkDigest) + node.doRemoveDenebMessageHandlers( + forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = node.removeElectraMessageHandlers(forkDigest) @@ -1477,6 +1522,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = node.dag.cfg.CAPELLA_FORK_EPOCH, node.dag.cfg.DENEB_FORK_EPOCH, node.dag.cfg.ELECTRA_FORK_EPOCH, + node.dag.cfg.FULU_FORK_EPOCH, isBehind) doAssert targetGossipState.card <= 2 @@ -1572,7 +1618,7 @@ proc pruneBlobs(node: BeaconNode, slot: Slot) = var blocks: array[SLOTS_PER_EPOCH.int, BlockId] var count = 0 let startIndex = node.dag.getBlockRange( - blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) + blobPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) for i in startIndex..= ConsensusFork.Electra: node.network.addValidator( @@ -2011,7 +2057,7 @@ proc installMessageValidators(node: BeaconNode) = MsgSource.gossip, proposerSlashing))) # voluntary_exit - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#voluntary_exit node.network.addValidator( getVoluntaryExitsTopic(digest), proc ( signedVoluntaryExit: SignedVoluntaryExit @@ -2035,7 +2081,7 @@ proc installMessageValidators(node: BeaconNode) = MsgSource.gossip, msg, idx))) # sync_committee_contribution_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof node.network.addAsyncValidator( getSyncCommitteeContributionAndProofTopic(digest), proc ( msg: SignedContributionAndProof @@ -2045,7 +2091,7 @@ proc installMessageValidators(node: BeaconNode) = MsgSource.gossip, msg))) when consensusFork >= ConsensusFork.Capella: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#bls_to_execution_change + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/p2p-interface.md#bls_to_execution_change node.network.addAsyncValidator( getBlsToExecutionChangeTopic(digest), proc ( msg: SignedBLSToExecutionChange @@ -2057,7 +2103,12 @@ proc installMessageValidators(node: BeaconNode) = when consensusFork >= ConsensusFork.Deneb: # blob_sidecar_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id - for it in BlobId: + let subnetCount = + when consensusFork >= ConsensusFork.Electra: + 
node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA + else: + node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT + for it in 0.BlobId ..< subnetCount.BlobId: closureScope: # Needed for inner `proc`; don't lift it out of loop. let subnet_id = it node.network.addValidator( @@ -2083,6 +2134,8 @@ proc stop(node: BeaconNode) = except CatchableError as exc: warn "Couldn't stop network", msg = exc.msg + waitFor node.metricsServer.stopMetricsServer() + node.attachedValidators[].slashingProtection.close() node.attachedValidators[].close() node.db.close() @@ -2138,7 +2191,7 @@ var gPidFile: string proc createPidFile(filename: string) {.raises: [IOError].} = writeFile filename, $os.getCurrentProcessId() gPidFile = filename - addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile) + addExitProc proc {.noconv.} = discard io2.removeFile(gPidFile) proc initializeNetworking(node: BeaconNode) {.async.} = node.installMessageValidators() @@ -2350,21 +2403,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai config.createDumpDirs() - if config.metricsEnabled: - let metricsAddress = config.metricsAddress - notice "Starting metrics HTTP server", - url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics" - try: - startMetricsHttpServer($metricsAddress, config.metricsPort) - except CatchableError as exc: - raise exc - except Exception as exc: - raiseAssert exc.msg # TODO fix metrics - - # Nim GC metrics (for the main thread) will be collected in onSecond(), but - # we disable piggy-backing on other metrics here. - setSystemMetricsAutomaticUpdate(false) - # There are no managed event loops in here, to do a graceful shutdown, but # letting the default Ctrl+C handler exit is safe, since we only read from # the db. @@ -2407,6 +2445,15 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai let node = waitFor BeaconNode.init(rng, config, metadata) + let metricsServer = (waitFor config.initMetricsServer()).valueOr: + return + + # Nim GC metrics (for the main thread) will be collected in onSecond(), but + # we disable piggy-backing on other metrics here. + setSystemMetricsAutomaticUpdate(false) + + node.metricsServer = metricsServer + if bnStatus == BeaconNodeStatus.Stopping: return @@ -2558,8 +2605,8 @@ programMain: # permissions are insecure. quit QuitFailure - setupFileLimits() setupLogging(config.logLevel, config.logStdout, config.logFile) + setupFileLimits() ## This Ctrl+C handler exits the program in non-graceful way. ## It's responsible for handling Ctrl+C in sub-commands such @@ -2594,4 +2641,4 @@ programMain: else: handleStartUpCmd(config) else: - handleStartUpCmd(config) + handleStartUpCmd(config) \ No newline at end of file diff --git a/beacon_chain/nimbus_binary_common.nim b/beacon_chain/nimbus_binary_common.nim index a5acbd1596..73d6839101 100644 --- a/beacon_chain/nimbus_binary_common.nim +++ b/beacon_chain/nimbus_binary_common.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
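The createPidFile change above swaps the deprecated addQuitProc for std/exitprocs. A small stdlib-only sketch of the same cleanup registration (the file path and removal helper are placeholders; the node itself removes the PID file via io2.removeFile):

import std/[exitprocs, os]

let pidFile = getTempDir() / "example.pid"
writeFile(pidFile, $getCurrentProcessId())
# Register cleanup that runs on normal process exit.
addExitProc(proc() {.noconv.} = discard tryRemoveFile(pidFile))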
@@ -16,7 +16,7 @@ import # Nimble packages chronos, confutils, presto, toml_serialization, metrics, chronicles, chronicles/helpers as chroniclesHelpers, chronicles/topics_registry, - stew/io2, + stew/io2, metrics/chronos_httpserver, # Local modules ./spec/[helpers, keystore], @@ -448,6 +448,40 @@ proc initKeymanagerServer*( KeymanagerInitResult(server: keymanagerServer, token: token) +proc initMetricsServer*( + config: AnyConf +): Future[Result[Opt[MetricsHttpServerRef], string]] {. + async: (raises: [CancelledError]).} = + if config.metricsEnabled: + let + metricsAddress = config.metricsAddress + metricsPort = config.metricsPort + url = "http://" & $metricsAddress & ":" & $metricsPort & "/metrics" + + info "Starting metrics HTTP server", url = url + + let server = MetricsHttpServerRef.new($metricsAddress, metricsPort).valueOr: + fatal "Could not start metrics HTTP server", + url = url, reason = error + return err($error) + + try: + await server.start() + except MetricsError as exc: + fatal "Could not start metrics HTTP server", + url = url, reason = exc.msg + return err(exc.msg) + + ok(Opt.some(server)) + else: + ok(Opt.none(MetricsHttpServerRef)) + +proc stopMetricsServer*(v: Opt[MetricsHttpServerRef]) {. + async: (raises: []).} = + if v.isSome(): + info "Shutting down metrics HTTP server" + await v.get().close() + proc quitDoppelganger*() = # Avoid colliding with # https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes diff --git a/beacon_chain/nimbus_light_client.nim b/beacon_chain/nimbus_light_client.nim index 519b1e89db..bce64960b7 100644 --- a/beacon_chain/nimbus_light_client.nim +++ b/beacon_chain/nimbus_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -162,7 +162,7 @@ programMain: db.putSyncCommittee(period, syncCommittee) db.putLatestFinalizedHeader(finalizedHeader) - var optimisticFcuFut: Future[(PayloadExecutionStatus, Opt[BlockHash])] + var optimisticFcuFut: Future[(PayloadExecutionStatus, Opt[Hash32])] .Raising([CancelledError]) proc onOptimisticHeader( lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) = @@ -242,8 +242,8 @@ programMain: targetGossipState = getTargetGossipState( slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, FAR_FUTURE_EPOCH, - isBehind) + cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, + cfg.FULU_FORK_EPOCH, isBehind) template currentGossipState(): auto = blocksGossipState if currentGossipState == targetGossipState: diff --git a/beacon_chain/nimbus_signing_node.nim b/beacon_chain/nimbus_signing_node.nim index 0c0cfca2aa..3d357fa91c 100644 --- a/beacon_chain/nimbus_signing_node.nim +++ b/beacon_chain/nimbus_signing_node.nim @@ -1,5 +1,5 @@ # nimbus_signing_node -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
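initMetricsServer/stopMetricsServer above centralise the metrics lifecycle so the beacon node and the validator client can share it. A stdlib-only sketch of the control flow under assumed simplified types (the real helper returns Result[Opt[MetricsHttpServerRef], string] and also reports start() failures so the caller can abort startup):

import std/options

type MetricsServer = object   # stand-in for MetricsHttpServerRef
  url: string

proc initMetricsServerSketch(
    enabled: bool, address: string, port: int
): Option[MetricsServer] =
  if not enabled:
    return none(MetricsServer)          # metrics disabled: nothing to run
  let url = "http://" & address & ":" & $port & "/metrics"
  # The real helper can fail while constructing or starting the server and
  # surfaces that as an error; this sketch omits the failure path.
  some(MetricsServer(url: url))

when isMainModule:
  echo initMetricsServerSketch(true, "127.0.0.1", 8008).isSome   # true
  echo initMetricsServerSketch(false, "127.0.0.1", 8008).isSome  # false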
@@ -414,7 +414,7 @@ proc asyncInit(sn: SigningNodeRef) {.async: (raises: [SigningNodeError]).} = raise newException(SigningNodeError, "") SigningNodeServer(kind: SigningNodeKind.NonSecure, nserver: res.get()) -proc asyncRun*(sn: SigningNodeRef) {.async: (raises: []).} = +proc asyncRun*(sn: SigningNodeRef) {.async: (raises: [SigningNodeError]).} = sn.runKeystoreCachePruningLoopFut = runKeystoreCachePruningLoop(sn.keystoreCache) sn.installApiHandlers() @@ -429,6 +429,11 @@ proc asyncRun*(sn: SigningNodeRef) {.async: (raises: []).} = warn "Main loop failed with unexpected error", err_name = $exc.name, reason = $exc.msg + # This is trick to fool `asyncraises` from generating warning: + # No exceptions possible with this operation, `error` always returns nil. + if false: + raise newException(SigningNodeError, "This error should never happen") + debug "Stopping main processing loop" var pending: seq[Future[void]] if not(sn.runKeystoreCachePruningLoopFut.finished()): diff --git a/beacon_chain/nimbus_validator_client.nim b/beacon_chain/nimbus_validator_client.nim index 73533ba130..843a2fef4a 100644 --- a/beacon_chain/nimbus_validator_client.nim +++ b/beacon_chain/nimbus_validator_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -161,38 +161,6 @@ proc initClock( current_slot = currentSlot, current_epoch = currentEpoch res -proc initMetrics( - vc: ValidatorClientRef -): Future[bool] {.async: (raises: [CancelledError]).} = - if vc.config.metricsEnabled: - let - metricsAddress = vc.config.metricsAddress - metricsPort = vc.config.metricsPort - url = "http://" & $metricsAddress & ":" & $metricsPort & "/metrics" - info "Starting metrics HTTP server", url = url - let server = - block: - let res = MetricsHttpServerRef.new($metricsAddress, metricsPort) - if res.isErr(): - error "Could not start metrics HTTP server", url = url, - error_msg = res.error() - return false - res.get() - vc.metricsServer = Opt.some(server) - try: - await server.start() - except MetricsError as exc: - error "Could not start metrics HTTP server", url = url, - error_msg = exc.msg, error_name = exc.name - return false - true - -proc shutdownMetrics(vc: ValidatorClientRef) {.async: (raises: []).} = - if vc.config.metricsEnabled: - if vc.metricsServer.isSome(): - info "Shutting down metrics HTTP server" - await vc.metricsServer.get().close() - proc shutdownSlashingProtection(vc: ValidatorClientRef) = info "Closing slashing protection", path = vc.config.validatorsDir() vc.attachedValidators[].slashingProtection.close() @@ -351,7 +319,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {. vc.beaconClock = await vc.initClock() - if not(await initMetrics(vc)): + vc.metricsServer = (await vc.config.initMetricsServer()).valueOr: raise newException(ValidatorClientError, "Could not initialize metrics server") @@ -368,7 +336,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {. 
vc.attachedValidators = validatorPool if not(await initValidators(vc)): - await vc.shutdownMetrics() + await vc.metricsServer.stopMetricsServer() raise newException(ValidatorClientError, "Could not initialize local validators") @@ -432,7 +400,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {. ) except CancelledError: debug "Initialization process interrupted" - await vc.shutdownMetrics() + await vc.metricsServer.stopMetricsServer() vc.shutdownSlashingProtection() return @@ -522,7 +490,7 @@ proc asyncRun*( except CancelledError: debug "Main loop interrupted" - await vc.shutdownMetrics() + await vc.metricsServer.stopMetricsServer() vc.shutdownSlashingProtection() if doppelEventFut.completed(): diff --git a/beacon_chain/rpc/rest_beacon_api.nim b/beacon_chain/rpc/rest_beacon_api.nim index 1457cf9819..512d37e204 100644 --- a/beacon_chain/rpc/rest_beacon_api.nim +++ b/beacon_chain/rpc/rest_beacon_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -1445,8 +1445,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = let dres = decodeBody(seq[AttestationType], contentBody.get()) if dres.isErr(): return RestApiResponse.jsonError(Http400, - InvalidAttestationObjectError, - $dres.error) + InvalidAttestationObjectError, + $dres.error) # Since our validation logic supports batch processing, we will submit all # attestations for validation. for attestation in dres.get(): @@ -1456,7 +1456,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = of ConsensusFork.Phase0 .. ConsensusFork.Deneb: decodeAttestations(phase0.Attestation) of ConsensusFork.Electra .. ConsensusFork.Fulu: - decodeAttestations(electra.Attestation) + decodeAttestations(electra.SingleAttestation) let failures = block: @@ -1515,7 +1515,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = let contextFork = node.dag.cfg.consensusForkAtEpoch(node.currentSlot.epoch) - + withConsensusFork(contextFork): when consensusFork < ConsensusFork.Electra: RestApiResponse.jsonResponseWVersion( @@ -1702,7 +1702,12 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = res.get() # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/deneb/blob_sidecar.yaml#L2-L28 - let data = newClone(default(List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK])) + # The merkleization limit of the list is `MAX_BLOB_COMMITMENTS_PER_BLOCK`, + # the serialization limit is configurable and is: + # - `MAX_BLOBS_PER_BLOCK` from Deneb onward + # - `MAX_BLOBS_PER_BLOCK_ELECTRA` from Electra. 
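The comment above distinguishes the fixed merkleization limit from the fork-dependent serialization bound. A tiny sketch of that selection follows; the fork enum and config values are illustrative placeholders, not the node's actual RuntimeConfig:

type ConsensusForkSketch = enum   # hypothetical, mirrors the fork enum shape
  Deneb, Electra, Fulu

const
  MaxBlobsPerBlock = 6'u64          # illustrative Deneb value
  MaxBlobsPerBlockElectra = 9'u64   # illustrative Electra value

func maxBlobsPerBlock(fork: ConsensusForkSketch): uint64 =
  # The SSZ list limit (MAX_BLOB_COMMITMENTS_PER_BLOCK) stays fixed for
  # merkleization; only the serialization/validation bound changes per fork.
  if fork >= Electra: MaxBlobsPerBlockElectra
  else: MaxBlobsPerBlock

when isMainModule:
  doAssert maxBlobsPerBlock(Deneb) == 6
  doAssert maxBlobsPerBlock(Fulu) == 9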
+ let data = newClone(default( + List[BlobSidecar, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK])) if indices.isErr: return RestApiResponse.jsonError(Http400, @@ -1710,7 +1715,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = let indexFilter = indices.get.toHashSet - for blobIndex in 0'u64 ..< MAX_BLOBS_PER_BLOCK: + for blobIndex in 0'u64 ..< node.dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA: if indexFilter.len > 0 and blobIndex notin indexFilter: continue @@ -1727,3 +1732,63 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) = RestApiResponse.jsonResponse(data) else: RestApiResponse.jsonError(Http500, InvalidAcceptError) + + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.0.0#/Beacon/getPendingDeposits + router.metricsApi2( + MethodGet, "/eth/v1/beacon/states/{state_id}/pending_deposits", + {RestServerMetricsType.Status, Response}) do ( + state_id: StateIdent) -> RestApiResponse: + let + sid = state_id.valueOr: + return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, + $error) + bslot = node.getBlockSlotId(sid).valueOr: + if sid.kind == StateQueryKind.Root: + # TODO (cheatfate): Its impossible to retrieve state by `state_root` + # in current version of database. + return RestApiResponse.jsonError(Http500, NoImplementationError) + return RestApiResponse.jsonError(Http404, StateNotFoundError, + $error) + + node.withStateForBlockSlotId(bslot): + return withState(state): + when consensusFork >= ConsensusFork.Electra: + RestApiResponse.jsonResponseFinalized( + forkyState.data.pending_deposits, + node.getStateOptimistic(state), + node.dag.isFinalized(bslot.bid)) + else: + RestApiResponse.jsonError(Http400, SlotFromTheIncorrectForkError, + $error) + + RestApiResponse.jsonError(Http404, StateNotFoundError) + + # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v3.0.0#/Beacon/getPendingPartialWithdrawals + router.metricsApi2( + MethodGet, "/eth/v1/beacon/states/{state_id}/pending_partial_withdrawals", + {RestServerMetricsType.Status, Response}) do ( + state_id: StateIdent) -> RestApiResponse: + let + sid = state_id.valueOr: + return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, + $error) + bslot = node.getBlockSlotId(sid).valueOr: + if sid.kind == StateQueryKind.Root: + # TODO (cheatfate): Its impossible to retrieve state by `state_root` + # in current version of database. + return RestApiResponse.jsonError(Http500, NoImplementationError) + return RestApiResponse.jsonError(Http404, StateNotFoundError, + $error) + + node.withStateForBlockSlotId(bslot): + return withState(state): + when consensusFork >= ConsensusFork.Electra: + RestApiResponse.jsonResponseFinalized( + forkyState.data.pending_partial_withdrawals, + node.getStateOptimistic(state), + node.dag.isFinalized(bslot.bid)) + else: + RestApiResponse.jsonError(Http400, SlotFromTheIncorrectForkError, + $error) + + RestApiResponse.jsonError(Http404, StateNotFoundError) diff --git a/beacon_chain/rpc/rest_config_api.nim b/beacon_chain/rpc/rest_config_api.nim index 1720bb88fe..817e88668d 100644 --- a/beacon_chain/rpc/rest_config_api.nim +++ b/beacon_chain/rpc/rest_config_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -24,7 +24,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = cachedConfigSpec = RestApiResponse.prepareJsonResponse( ( - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/presets/mainnet/phase0.yaml + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/phase0.yaml MAX_COMMITTEES_PER_SLOT: Base10.toString(MAX_COMMITTEES_PER_SLOT), TARGET_COMMITTEE_SIZE: @@ -39,14 +39,14 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(HYSTERESIS_DOWNWARD_MULTIPLIER), HYSTERESIS_UPWARD_MULTIPLIER: Base10.toString(HYSTERESIS_UPWARD_MULTIPLIER), + MIN_DEPOSIT_AMOUNT: Base10.toString(MIN_DEPOSIT_AMOUNT), MAX_EFFECTIVE_BALANCE: Base10.toString(MAX_EFFECTIVE_BALANCE), - MAX_EFFECTIVE_BALANCE_ELECTRA: - Base10.toString(static(MAX_EFFECTIVE_BALANCE_ELECTRA.uint64)), EFFECTIVE_BALANCE_INCREMENT: Base10.toString(EFFECTIVE_BALANCE_INCREMENT), + MIN_ATTESTATION_INCLUSION_DELAY: Base10.toString(MIN_ATTESTATION_INCLUSION_DELAY), SLOTS_PER_EPOCH: @@ -61,6 +61,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(SLOTS_PER_HISTORICAL_ROOT), MIN_EPOCHS_TO_INACTIVITY_PENALTY: Base10.toString(MIN_EPOCHS_TO_INACTIVITY_PENALTY), + EPOCHS_PER_HISTORICAL_VECTOR: Base10.toString(EPOCHS_PER_HISTORICAL_VECTOR), EPOCHS_PER_SLASHINGS_VECTOR: @@ -69,6 +70,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(HISTORICAL_ROOTS_LIMIT), VALIDATOR_REGISTRY_LIMIT: Base10.toString(VALIDATOR_REGISTRY_LIMIT), + BASE_REWARD_FACTOR: Base10.toString(BASE_REWARD_FACTOR), WHISTLEBLOWER_REWARD_QUOTIENT: @@ -81,6 +83,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(MIN_SLASHING_PENALTY_QUOTIENT), PROPORTIONAL_SLASHING_MULTIPLIER: Base10.toString(PROPORTIONAL_SLASHING_MULTIPLIER), + MAX_PROPOSER_SLASHINGS: Base10.toString(MAX_PROPOSER_SLASHINGS), MAX_ATTESTER_SLASHINGS: @@ -92,29 +95,32 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = MAX_VOLUNTARY_EXITS: Base10.toString(MAX_VOLUNTARY_EXITS), - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/altair.yaml + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/altair.yaml INACTIVITY_PENALTY_QUOTIENT_ALTAIR: Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR), MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: Base10.toString(MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR), PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: Base10.toString(PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR), + SYNC_COMMITTEE_SIZE: Base10.toString(uint64(SYNC_COMMITTEE_SIZE)), EPOCHS_PER_SYNC_COMMITTEE_PERIOD: Base10.toString(EPOCHS_PER_SYNC_COMMITTEE_PERIOD), + MIN_SYNC_COMMITTEE_PARTICIPANTS: Base10.toString(uint64(MIN_SYNC_COMMITTEE_PARTICIPANTS)), UPDATE_TIMEOUT: Base10.toString(UPDATE_TIMEOUT), - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/bellatrix.yaml + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/bellatrix.yaml INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX), MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: Base10.toString(MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX), PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: Base10.toString(PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX), + MAX_BYTES_PER_TRANSACTION: 
Base10.toString(uint64(MAX_BYTES_PER_TRANSACTION)), MAX_TRANSACTIONS_PER_PAYLOAD: @@ -124,7 +130,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = MAX_EXTRA_DATA_BYTES: Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)), - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/capella.yaml + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/capella.yaml MAX_BLS_TO_EXECUTION_CHANGES: Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)), MAX_WITHDRAWALS_PER_PAYLOAD: @@ -132,27 +138,27 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: Base10.toString(uint64(MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)), - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/deneb.yaml + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/deneb.yaml FIELD_ELEMENTS_PER_BLOB: Base10.toString(deneb_preset.FIELD_ELEMENTS_PER_BLOB), MAX_BLOB_COMMITMENTS_PER_BLOCK: Base10.toString(MAX_BLOB_COMMITMENTS_PER_BLOCK), - MAX_BLOBS_PER_BLOCK: - Base10.toString(MAX_BLOBS_PER_BLOCK), KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: Base10.toString(uint64(KZG_COMMITMENT_INCLUSION_PROOF_DEPTH)), - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/configs/mainnet.yaml + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/configs/mainnet.yaml PRESET_BASE: cfg.PRESET_BASE, CONFIG_NAME: cfg.name(), + TERMINAL_TOTAL_DIFFICULTY: toString(cfg.TERMINAL_TOTAL_DIFFICULTY), TERMINAL_BLOCK_HASH: $cfg.TERMINAL_BLOCK_HASH, TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: Base10.toString(uint64(cfg.TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH)), + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: Base10.toString(cfg.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT), MIN_GENESIS_TIME: @@ -161,6 +167,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = "0x" & $cfg.GENESIS_FORK_VERSION, GENESIS_DELAY: Base10.toString(cfg.GENESIS_DELAY), + ALTAIR_FORK_VERSION: "0x" & $cfg.ALTAIR_FORK_VERSION, ALTAIR_FORK_EPOCH: @@ -181,6 +188,11 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = "0x" & $cfg.ELECTRA_FORK_VERSION, ELECTRA_FORK_EPOCH: Base10.toString(uint64(cfg.ELECTRA_FORK_EPOCH)), + FULU_FORK_VERSION: + "0x" & $cfg.FULU_FORK_VERSION, + FULU_FORK_EPOCH: + Base10.toString(uint64(cfg.FULU_FORK_EPOCH)), + SECONDS_PER_SLOT: Base10.toString(SECONDS_PER_SLOT), SECONDS_PER_ETH1_BLOCK: @@ -191,6 +203,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(cfg.SHARD_COMMITTEE_PERIOD), ETH1_FOLLOW_DISTANCE: Base10.toString(cfg.ETH1_FOLLOW_DISTANCE), + INACTIVITY_SCORE_BIAS: Base10.toString(cfg.INACTIVITY_SCORE_BIAS), INACTIVITY_SCORE_RECOVERY_RATE: @@ -203,6 +216,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(cfg.CHURN_LIMIT_QUOTIENT), MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: Base10.toString(cfg.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT), + PROPOSER_SCORE_BOOST: Base10.toString(PROPOSER_SCORE_BOOST), REORG_HEAD_WEIGHT_THRESHOLD: @@ -211,22 +225,22 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(REORG_PARENT_WEIGHT_THRESHOLD), REORG_MAX_EPOCHS_SINCE_FINALIZATION: Base10.toString(uint64(REORG_MAX_EPOCHS_SINCE_FINALIZATION)), + DEPOSIT_CHAIN_ID: Base10.toString(cfg.DEPOSIT_CHAIN_ID), DEPOSIT_NETWORK_ID: Base10.toString(cfg.DEPOSIT_NETWORK_ID), DEPOSIT_CONTRACT_ADDRESS: $cfg.DEPOSIT_CONTRACT_ADDRESS, - 
GOSSIP_MAX_SIZE: - Base10.toString(GOSSIP_MAX_SIZE), + + MAX_PAYLOAD_SIZE: + Base10.toString(MAX_PAYLOAD_SIZE), MAX_REQUEST_BLOCKS: Base10.toString(MAX_REQUEST_BLOCKS), EPOCHS_PER_SUBNET_SUBSCRIPTION: Base10.toString(EPOCHS_PER_SUBNET_SUBSCRIPTION), MIN_EPOCHS_FOR_BLOCK_REQUESTS: Base10.toString(cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS), - MAX_CHUNK_SIZE: - Base10.toString(MAX_CHUNK_SIZE), TTFB_TIMEOUT: Base10.toString(TTFB_TIMEOUT), RESP_TIMEOUT: @@ -247,14 +261,49 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(ATTESTATION_SUBNET_EXTRA_BITS), ATTESTATION_SUBNET_PREFIX_BITS: Base10.toString(ATTESTATION_SUBNET_PREFIX_BITS), + MAX_REQUEST_BLOCKS_DENEB: Base10.toString(MAX_REQUEST_BLOCKS_DENEB), - MAX_REQUEST_BLOB_SIDECARS: - Base10.toString(MAX_REQUEST_BLOB_SIDECARS), MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: Base10.toString(cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS), BLOB_SIDECAR_SUBNET_COUNT: - Base10.toString(BLOB_SIDECAR_SUBNET_COUNT), + Base10.toString(cfg.BLOB_SIDECAR_SUBNET_COUNT), + MAX_BLOBS_PER_BLOCK: + Base10.toString(cfg.MAX_BLOBS_PER_BLOCK), + MAX_REQUEST_BLOB_SIDECARS: + Base10.toString(cfg.MAX_REQUEST_BLOB_SIDECARS), + + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: + Base10.toString(cfg.MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA), + MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: + Base10.toString(cfg.MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT), + BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: + Base10.toString(cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA), + MAX_BLOBS_PER_BLOCK_ELECTRA: + Base10.toString(cfg.MAX_BLOBS_PER_BLOCK_ELECTRA), + MAX_REQUEST_BLOB_SIDECARS_ELECTRA: + Base10.toString(cfg.MAX_REQUEST_BLOB_SIDECARS_ELECTRA), + + NUMBER_OF_COLUMNS: + Base10.toString(NUMBER_OF_COLUMNS.uint64), + NUMBER_OF_CUSTODY_GROUPS: + Base10.toString(NUMBER_OF_CUSTODY_GROUPS.uint64), + DATA_COLUMN_SIDECAR_SUBNET_COUNT: + Base10.toString(DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64), + MAX_REQUEST_DATA_COLUMN_SIDECARS: + Base10.toString(MAX_REQUEST_DATA_COLUMN_SIDECARS), + SAMPLES_PER_SLOT: + Base10.toString(SAMPLES_PER_SLOT.uint64), + CUSTODY_REQUIREMENT: + Base10.toString(CUSTODY_REQUIREMENT.uint64), + VALIDATOR_CUSTODY_REQUIREMENT: + Base10.toString(VALIDATOR_CUSTODY_REQUIREMENT.uint64), + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: + Base10.toString(BALANCE_PER_ADDITIONAL_CUSTODY_GROUP), + # MAX_BLOBS_PER_BLOCK_FULU: + # Base10.toString(cfg.MAX_BLOBS_PER_BLOCK_FULU), + # MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: + # Base10.toString(cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS), # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#constants # GENESIS_SLOT @@ -323,6 +372,48 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) = Base10.toString(uint64(TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)), SYNC_COMMITTEE_SUBNET_COUNT: Base10.toString(uint64(SYNC_COMMITTEE_SUBNET_COUNT)), + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/electra/beacon-chain.md + UNSET_DEPOSIT_REQUESTS_START_INDEX: + Base10.toString(UNSET_DEPOSIT_REQUESTS_START_INDEX), + FULL_EXIT_REQUEST_AMOUNT: + Base10.toString(FULL_EXIT_REQUEST_AMOUNT), + COMPOUNDING_WITHDRAWAL_PREFIX: + to0xHex([byte(COMPOUNDING_WITHDRAWAL_PREFIX)]), + DEPOSIT_REQUEST_TYPE: + to0xHex([byte(DEPOSIT_REQUEST_TYPE)]), + WITHDRAWAL_REQUEST_TYPE: + to0xHex([byte(WITHDRAWAL_REQUEST_TYPE)]), + CONSOLIDATION_REQUEST_TYPE: + to0xHex([byte(CONSOLIDATION_REQUEST_TYPE)]), + MIN_ACTIVATION_BALANCE: + Base10.toString(uint64(MIN_ACTIVATION_BALANCE)), + 
MAX_EFFECTIVE_BALANCE_ELECTRA: + Base10.toString(uint64(MAX_EFFECTIVE_BALANCE_ELECTRA)), + MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: + Base10.toString(MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA), + WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: + Base10.toString(WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA), + PENDING_DEPOSITS_LIMIT: + Base10.toString(PENDING_DEPOSITS_LIMIT), + PENDING_PARTIAL_WITHDRAWALS_LIMIT: + Base10.toString(PENDING_PARTIAL_WITHDRAWALS_LIMIT), + PENDING_CONSOLIDATIONS_LIMIT: + Base10.toString(PENDING_CONSOLIDATIONS_LIMIT), + MAX_ATTESTER_SLASHINGS_ELECTRA: + Base10.toString(MAX_ATTESTER_SLASHINGS_ELECTRA), + MAX_ATTESTATIONS_ELECTRA: + Base10.toString(MAX_ATTESTATIONS_ELECTRA), + MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: + Base10.toString(uint64(MAX_DEPOSIT_REQUESTS_PER_PAYLOAD)), + MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: + Base10.toString(uint64(MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD)), + MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: + Base10.toString(MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD), + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: + Base10.toString(uint64(MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP)), + MAX_PENDING_DEPOSITS_PER_EPOCH: + Base10.toString(uint64(MAX_PENDING_DEPOSITS_PER_EPOCH)) ) ) cachedDepositContract = diff --git a/beacon_chain/rpc/rest_constants.nim b/beacon_chain/rpc/rest_constants.nim index ca1f8a510b..dfa155d102 100644 --- a/beacon_chain/rpc/rest_constants.nim +++ b/beacon_chain/rpc/rest_constants.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -229,12 +229,22 @@ const DeprecatedRemovalGetDebugChainHeadsV1* = "v1/debug/beacon/heads endpoint was deprecated and replaced by v2: " & "https://github.com/ethereum/beacon-APIs/pull/319" + DeprecatedRemovalProduceBlindedBlockV1* = + "v1/validator/blinded_blocks/{slot} was deprecated, removed, and replaced " & + "by produceBlockV3: https://github.com/ethereum/beacon-APIs/pull/466" + DeprecatedRemovalValidatorBlocksV2* = + "v2/validator/blocks/{slot} was deprecated, removed, and replaced " & + "by produceBlockV3: https://github.com/ethereum/beacon-APIs/pull/466" BlockIncorrectFork* = "Block has incorrect fork" ValidatorNotActive* = "Validator inactive" + BlocksUnavailable* = + "Requested block slot is unavailable" BlobsOutOfRange* = "Requested slot is outside of blobs window" + DataColumnsOutOfRange* = + "Requested slot is outside of data columns window" InvalidBlsToExecutionChangeObjectError* = "Unable to decode BLS to execution change object(s)" BlsToExecutionChangeValidationError* = diff --git a/beacon_chain/rpc/rest_event_api.nim b/beacon_chain/rpc/rest_event_api.nim index e331b2c09d..a6aa4f9e4b 100644 --- a/beacon_chain/rpc/rest_event_api.nim +++ b/beacon_chain/rpc/rest_event_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -125,10 +125,18 @@ proc installEventApiHandlers*(router: var RestRouter, node: BeaconNode) = let handler = response.eventHandler(node.eventBus.blocksQueue, "block") res.add(handler) + if EventTopic.BlockGossip in eventTopics: + let handler = response.eventHandler(node.eventBus.blockGossipQueue, + "block_gossip") + res.add(handler) if EventTopic.Attestation in eventTopics: - let handler = response.eventHandler(node.eventBus.attestQueue, + let handler = response.eventHandler(node.eventBus.phase0AttestQueue, "attestation") res.add(handler) + if EventTopic.Attestation in eventTopics: + let handler = response.eventHandler(node.eventBus.singleAttestQueue, + "single_attestation") + res.add(handler) if EventTopic.VoluntaryExit in eventTopics: let handler = response.eventHandler(node.eventBus.exitQueue, "voluntary_exit") @@ -142,9 +150,15 @@ proc installEventApiHandlers*(router: var RestRouter, node: BeaconNode) = "proposer_slashing") res.add(handler) if EventTopic.AttesterSlashing in eventTopics: - let handler = response.eventHandler(node.eventBus.attSlashQueue, - "attester_slashing") - res.add(handler) + block: + let handler = response.eventHandler(node.eventBus.phase0AttSlashQueue, + "attester_slashing") + res.add(handler) + + block: + let handler = response.eventHandler(node.eventBus.electraAttSlashQueue, + "attester_slashing") + res.add(handler) if EventTopic.BlobSidecar in eventTopics: let handler = response.eventHandler(node.eventBus.blobSidecarQueue, "blob_sidecar") @@ -177,8 +191,8 @@ proc installEventApiHandlers*(router: var RestRouter, node: BeaconNode) = discard await race(handlers) except ValueError: raiseAssert "There should be more than one event handler at this point!" - # One of the handlers finished, it means that connection has been droped, so - # we cancelling all other handlers. + # One of the handlers finished, it means that connection has been dropped, so + # we are cancelling all other handlers. let pending = handlers.filterIt(not(it.finished())).mapIt(it.cancelAndWait()) await noCancel allFutures(pending) diff --git a/beacon_chain/rpc/rest_key_management_api.nim b/beacon_chain/rpc/rest_key_management_api.nim index ca4e789662..93f3a79d2b 100644 --- a/beacon_chain/rpc/rest_key_management_api.nim +++ b/beacon_chain/rpc/rest_key_management_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
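Back in rest_event_api.nim above, the event-stream teardown races the per-topic handlers and then cancels whichever are still running. A reduced sketch of that pattern, assuming chronos and using placeholder sleep futures in place of real handlers:

import std/sequtils
import chronos

proc runHandlers() {.async.} =
  var handlers: seq[FutureBase]
  handlers.add sleepAsync(100.milliseconds)
  handlers.add sleepAsync(2.seconds)
  handlers.add sleepAsync(3.seconds)
  discard await race(handlers)          # completes when any handler finishes
  let pending = handlers
    .filterIt(not(it.finished()))
    .mapIt(it.cancelAndWait())
  await noCancel allFutures(pending)    # let the cancellations settle

when isMainModule:
  waitFor runHandlers()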
@@ -667,15 +667,10 @@ proc installKeymanagerHandlers*(router: var RestRouter, host: KeymanagerHost) = decodeBody(SetGraffitiRequest, contentBody.get()).valueOr: return keymanagerApiError(Http400, InvalidGraffitiRequestError) - if not(host.checkValidatorKeystoreDir(pubkey)): - return keymanagerApiError(Http404, ValidatorNotFoundError) + host.setGraffiti(pubkey, GraffitiBytes.init(req.graffiti)).isOkOr: + return keymanagerApiError(Http500, "Failed to set graffiti: " & error) - let status = host.setGraffiti(pubkey, GraffitiBytes.init(req.graffiti)) - if status.isOk: - RestApiResponse.response(Http202) - else: - keymanagerApiError( - Http500, "Failed to set graffiti: " & status.error) + RestApiResponse.response(Http202) # https://ethereum.github.io/keymanager-APIs/?urls.primaryName=dev#/Graffiti/deleteGraffiti router.api2(MethodDelete, "/eth/v1/validator/{pubkey}/graffiti") do ( diff --git a/beacon_chain/rpc/rest_node_api.nim b/beacon_chain/rpc/rest_node_api.nim index b96e0724be..ceb4bbc7c0 100644 --- a/beacon_chain/rpc/rest_node_api.nim +++ b/beacon_chain/rpc/rest_node_api.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -34,6 +34,17 @@ RestJson.useDefaultSerializationFor( RestNodePeerCount, ) +proc normalize*(address: MultiAddress, value: PeerId): MaResult[MultiAddress] = + ## Checks if `address` has `p2p` suffix, and if not add it. + let + protos = ? address.protocols() + index = protos.find(multiCodec("p2p")) + if index == -1: + let suffix = ? MultiAddress.init(multiCodec("p2p"), value) + concat(address, suffix) + else: + ok(address) + proc validateState(states: seq[PeerStateKind]): Result[ConnectionStateSet, cstring] = var res: set[ConnectionState] @@ -98,14 +109,13 @@ proc toString(direction: PeerType): string = "outbound" proc getLastSeenAddress(node: BeaconNode, id: PeerId): string = - # TODO (cheatfate): We need to provide filter here, which will be able to - # filter such multiaddresses like `/ip4/0.0.0.0` or local addresses or - # addresses with peer ids. 
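The normalize helper added to rest_node_api.nim appends a /p2p/<peer-id> suffix only when it is missing. At the string level the idea reduces to the following (illustration only; the real proc works on libp2p MultiAddress values and returns MaResult):

import std/strutils

func normalizeSketch(address, peerId: string): string =
  # Keep the address as-is if it already carries a p2p component,
  # otherwise append the peer id.
  if "/p2p/" in address:
    address
  else:
    address & "/p2p/" & peerId

when isMainModule:
  doAssert normalizeSketch("/ip4/127.0.0.1/tcp/9000", "peerA") ==
    "/ip4/127.0.0.1/tcp/9000/p2p/peerA"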
- let addrs = node.network.switch.peerStore[AddressBook][id] - if len(addrs) > 0: - $addrs[len(addrs) - 1] - else: - "" + let + address = node.network.switch.peerStore[LastSeenBook][id].valueOr: + return "" + normalized = address.normalize(id).valueOr: + return "" + $normalized + proc getDiscoveryAddresses(node: BeaconNode): seq[string] = let typedRec = TypedRecord.fromRecord(node.network.enrRecord()) @@ -154,7 +164,8 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) = metadata: ( seq_number: node.network.metadata.seq_number, syncnets: to0xHex(node.network.metadata.syncnets.bytes), - attnets: to0xHex(node.network.metadata.attnets.bytes) + attnets: to0xHex(node.network.metadata.attnets.bytes), + custody_group_count: node.network.metadata.custody_group_count ) ) ) diff --git a/beacon_chain/rpc/rest_rewards_api.nim b/beacon_chain/rpc/rest_rewards_api.nim index 44f5f9eb3e..dbc38897bb 100644 --- a/beacon_chain/rpc/rest_rewards_api.nim +++ b/beacon_chain/rpc/rest_rewards_api.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -169,11 +169,11 @@ proc installRewardsApiHandlers*(router: var RestRouter, node: BeaconNode) = let response = withState(tmpState[]): - let total_active_balance = - get_total_active_balance(forkyState.data, cache) var resp: seq[RestSyncCommitteeReward] when consensusFork > ConsensusFork.Phase0: let + total_active_balance = + get_total_active_balance(forkyState.data, cache) keys = block: var res: HashSet[ValidatorPubKey] diff --git a/beacon_chain/rpc/rest_validator_api.nim b/beacon_chain/rpc/rest_validator_api.nim index 671ead2f84..df34536e3a 100644 --- a/beacon_chain/rpc/rest_validator_api.nim +++ b/beacon_chain/rpc/rest_validator_api.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -14,12 +14,8 @@ import ".."/[beacon_chain_db, beacon_node], attestation_pool, sync_committee_msg_pool], ".."/validators/beacon_validators, ".."/spec/[beaconstate, forks, network, state_transition_block], - ".."/spec/datatypes/[phase0, altair], "."/[rest_utils, state_ttl_cache] -from ".."/spec/datatypes/bellatrix import ExecutionPayload -from ".."/spec/datatypes/capella import ExecutionPayload - export rest_utils logScope: topics = "rest_validatorapi" @@ -315,245 +311,25 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = RestApiResponse.jsonError(Http404, StateNotFoundError) - # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlock router.api2(MethodGet, "/eth/v1/validator/blocks/{slot}") do ( slot: Slot, randao_reveal: Option[ValidatorSig], graffiti: Option[GraffitiBytes]) -> RestApiResponse: RestApiResponse.jsonError( Http410, DeprecatedRemovalValidatorBlocksV1) - # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlockV2 router.api(MethodGet, "/eth/v2/validator/blocks/{slot}") do ( slot: Slot, randao_reveal: Option[ValidatorSig], graffiti: Option[GraffitiBytes], skip_randao_verification: Option[string]) -> RestApiResponse: - let - contentType = preferredContentType(jsonMediaType, sszMediaType).valueOr: - return RestApiResponse.jsonError(Http406, ContentNotAcceptableError) - let message = - block: - let qslot = block: - if slot.isErr(): - return RestApiResponse.jsonError(Http400, InvalidSlotValueError, - $slot.error()) - let res = slot.get() - - if res <= node.dag.finalizedHead.slot: - return RestApiResponse.jsonError(Http400, InvalidSlotValueError, - "Slot already finalized") - let - wallTime = node.beaconClock.now() + MAXIMUM_GOSSIP_CLOCK_DISPARITY - if res > wallTime.slotOrZero: - return RestApiResponse.jsonError(Http400, InvalidSlotValueError, - "Slot cannot be in the future") - res - let qskip_randao_verification = - if skip_randao_verification.isNone(): - false - else: - let res = skip_randao_verification.get() - if res.isErr() or res.get() != "": - return RestApiResponse.jsonError( - Http400, InvalidSkipRandaoVerificationValue) - true - let qrandao = - if randao_reveal.isNone(): - return RestApiResponse.jsonError(Http400, MissingRandaoRevealValue) - else: - let res = randao_reveal.get() - if res.isErr(): - return RestApiResponse.jsonError(Http400, - InvalidRandaoRevealValue, - $res.error()) - res.get() - let qgraffiti = - if graffiti.isNone(): - defaultGraffitiBytes() - else: - let res = graffiti.get() - if res.isErr(): - return RestApiResponse.jsonError(Http400, - InvalidGraffitiBytesValue, - $res.error()) - res.get() - let qhead = - block: - let res = node.getSyncedHead(qslot) - if res.isErr(): - return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError, - $res.error()) - let tres = res.get() - if not tres.executionValid: - return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError) - tres - let - proposer = node.dag.getProposer(qhead, qslot).valueOr: - return RestApiResponse.jsonError(Http400, ProposerNotFoundError) - - if not node.verifyRandao( - qslot, proposer, qrandao, qskip_randao_verification): - return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue) - - let res = withConsensusFork( - node.dag.cfg.consensusForkAtEpoch(qslot.epoch)): - when consensusFork >= ConsensusFork.Bellatrix: - await makeBeaconBlockForHeadAndSlot( - consensusFork.ExecutionPayloadForSigning, - node, qrandao, proposer, qgraffiti, qhead, qslot) - else: - return RestApiResponse.jsonError(Http400, InvalidSlotValueError) - if 
res.isErr(): - return RestApiResponse.jsonError(Http400, res.error()) - res.get - return - withBlck(message.blck): - let data = - when consensusFork >= ConsensusFork.Fulu: - let blobsBundle = message.blobsBundleOpt.get() - fulu.BlockContents( - `block`: forkyBlck, - kzg_proofs: blobsBundle.proofs, - blobs: blobsBundle.blobs) - elif consensusFork >= ConsensusFork.Electra: - let blobsBundle = message.blobsBundleOpt.get() - electra.BlockContents( - `block`: forkyBlck, - kzg_proofs: blobsBundle.proofs, - blobs: blobsBundle.blobs) - elif consensusFork >= ConsensusFork.Deneb: - let blobsBundle = message.blobsBundleOpt.get() - deneb.BlockContents( - `block`: forkyBlck, - kzg_proofs: blobsBundle.proofs, - blobs: blobsBundle.blobs) - else: - forkyBlck - if contentType == sszMediaType: - let headers = [("eth-consensus-version", consensusFork.toString())] - RestApiResponse.sszResponse(data, headers) - elif contentType == jsonMediaType: - RestApiResponse.jsonResponseWVersion(data, consensusFork) - else: - raiseAssert "preferredContentType() returns invalid content type" + RestApiResponse.jsonError( + Http410, DeprecatedRemovalValidatorBlocksV2) - # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlindedBlock - # https://github.com/ethereum/beacon-APIs/blob/c097f1a62c9a12c30e8175a39f205f92d3b931a9/apis/validator/blinded_block.yaml router.api(MethodGet, "/eth/v1/validator/blinded_blocks/{slot}") do ( slot: Slot, randao_reveal: Option[ValidatorSig], graffiti: Option[GraffitiBytes], skip_randao_verification: Option[string]) -> RestApiResponse: - ## Requests a beacon node to produce a valid blinded block, which can then - ## be signed by a validator. A blinded block is a block with only a - ## transactions root, rather than a full transactions list. - ## - ## Metadata in the response indicates the type of block produced, and the - ## supported types of block will be added to as forks progress. 
- let contentType = - block: - let res = preferredContentType(jsonMediaType, - sszMediaType) - if res.isErr(): - return RestApiResponse.jsonError(Http406, ContentNotAcceptableError) - res.get() - let qslot = block: - if slot.isErr(): - return RestApiResponse.jsonError(Http400, InvalidSlotValueError, - $slot.error()) - let res = slot.get() - - if res <= node.dag.finalizedHead.slot: - return RestApiResponse.jsonError(Http400, InvalidSlotValueError, - "Slot already finalized") - let - wallTime = node.beaconClock.now() + MAXIMUM_GOSSIP_CLOCK_DISPARITY - if res > wallTime.slotOrZero: - return RestApiResponse.jsonError(Http400, InvalidSlotValueError, - "Slot cannot be in the future") - res - let qskip_randao_verification = - if skip_randao_verification.isNone(): - false - else: - let res = skip_randao_verification.get() - if res.isErr() or res.get() != "": - return RestApiResponse.jsonError(Http400, - InvalidSkipRandaoVerificationValue) - true - let qrandao = - if randao_reveal.isNone(): - return RestApiResponse.jsonError(Http400, MissingRandaoRevealValue) - else: - let res = randao_reveal.get() - if res.isErr(): - return RestApiResponse.jsonError(Http400, - InvalidRandaoRevealValue, - $res.error()) - res.get() - let qgraffiti = - if graffiti.isNone(): - defaultGraffitiBytes() - else: - let res = graffiti.get() - if res.isErr(): - return RestApiResponse.jsonError(Http400, - InvalidGraffitiBytesValue, - $res.error()) - res.get() - let qhead = - block: - let res = node.getSyncedHead(qslot) - if res.isErr(): - return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError, - $res.error()) - let tres = res.get() - if not tres.executionValid: - return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError) - tres - let proposer = node.dag.getProposer(qhead, qslot).valueOr: - return RestApiResponse.jsonError(Http400, ProposerNotFoundError) - - if not node.verifyRandao( - qslot, proposer, qrandao, qskip_randao_verification): - return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue) - - template responseVersioned( - response: untyped, contextFork: ConsensusFork): untyped = - if contentType == sszMediaType: - let headers = [("eth-consensus-version", contextFork.toString())] - RestApiResponse.sszResponse(response, headers) - elif contentType == jsonMediaType: - RestApiResponse.jsonResponseWVersion(response, contextFork) - else: - RestApiResponse.jsonError(Http500, InvalidAcceptError) - - let - payloadBuilderClient = node.getPayloadBuilderClient( - proposer.distinctBase).valueOr: - return RestApiResponse.jsonError( - Http500, "Unable to initialize payload builder client: " & $error) - contextFork = node.dag.cfg.consensusForkAtEpoch(node.currentSlot.epoch) - - withConsensusFork(contextFork): - when consensusFork >= ConsensusFork.Deneb: - let res = await makeBlindedBeaconBlockForHeadAndSlot[ - consensusFork.BlindedBeaconBlock]( - node, payloadBuilderClient, qrandao, - proposer, qgraffiti, qhead, qslot) - if res.isErr(): - return RestApiResponse.jsonError(Http400, res.error()) - return responseVersioned(res.get().blindedBlckPart, contextFork) - elif consensusFork >= ConsensusFork.Bellatrix: - return RestApiResponse.jsonError( - Http400, "Pre-Deneb builder API unsupported") - else: - # Pre-Bellatrix, this endpoint will return a BeaconBlock - let res = await makeBeaconBlockForHeadAndSlot( - bellatrix.ExecutionPayloadForSigning, node, qrandao, - proposer, qgraffiti, qhead, qslot) - if res.isErr(): - return RestApiResponse.jsonError(Http400, res.error()) - withBlck(res.get().blck): - return 
responseVersioned(forkyBlck, contextFork) + RestApiResponse.jsonError( + Http410, DeprecatedRemovalProduceBlindedBlockV1) func getMaybeBlindedHeaders( consensusFork: ConsensusFork, @@ -753,7 +529,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = return RestApiResponse.jsonError(Http400, InvalidCommitteeIndexValueError, $res.error()) - res.get() + if node.dag.cfg.consensusForkAtEpoch(qslot.epoch) >= ConsensusFork.Electra: + 0.CommitteeIndex + else: + res.get() let qhead = block: let res = node.getSyncedHead(qslot) @@ -1215,25 +994,21 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = router.api2(MethodPost, "/eth/v1/validator/register_validator") do ( contentBody: Option[ContentBody]) -> RestApiResponse: + if contentBody.isNone(): + return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) let - body = - block: - if contentBody.isNone(): - return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) - let dres = decodeBody(seq[SignedValidatorRegistrationV1], contentBody.get()) - if dres.isErr(): - return RestApiResponse.jsonError(Http400, - InvalidPrepareBeaconProposerError) - dres.get() + body = decodeBodyJsonOrSsz(seq[SignedValidatorRegistrationV1], + contentBody.get()).valueOr: + return RestApiResponse.jsonError(error) - for signedValidatorRegistration in body: + for registration in body: # Don't validate beyond syntactically, because # "requests containing currently inactive or unknown validator pubkeys # will be accepted, as they may become active at a later epoch". Along # these lines, even if it's adding a validator the BN already has as a # local validator, the keymanager API might remove that from the BN. - node.externalBuilderRegistrations[signedValidatorRegistration.message.pubkey] = - signedValidatorRegistration + node.externalBuilderRegistrations[registration.message.pubkey] = + registration RestApiResponse.response(Http200) @@ -1326,4 +1101,4 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = # able to use it when a feature flag is turned on, the intercepting # middleware can handle and swallow the request. I suggest a CL either # returns 501 Not Implemented [or] 400 Bad Request." - RestApiResponse.jsonError(Http501, AggregationSelectionNotImplemented) + RestApiResponse.jsonError(Http501, AggregationSelectionNotImplemented) \ No newline at end of file diff --git a/beacon_chain/spec/beacon_time.nim b/beacon_chain/spec/beacon_time.nim index 99c6ee5164..19a919f776 100644 --- a/beacon_chain/spec/beacon_time.nim +++ b/beacon_chain/spec/beacon_time.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
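The reworked register_validator handler in the rest_validator_api diff above decodes the body (JSON or SSZ) and then simply overwrites the stored registration per pubkey, since only syntactic validation is required. A small sketch of that upsert loop, with simplified stand-in types in place of SignedValidatorRegistrationV1:

    import std/tables

    type
      Pubkey = string          # stand-in for ValidatorPubKey
      Registration = object
        pubkey: Pubkey
        gasLimit: uint64       # illustrative field only

    proc upsertRegistrations(
        store: var Table[Pubkey, Registration],
        body: openArray[Registration]) =
      # Later registrations for the same pubkey replace earlier ones; unknown
      # or inactive pubkeys are accepted as-is, matching the comment above.
      for registration in body:
        store[registration.pubkey] = registration
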
@@ -43,13 +43,15 @@ const GENESIS_SLOT* = Slot(0) GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/fork-choice.md#constant + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/fork-choice.md#constant INTERVALS_PER_SLOT* = 3 - FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high()) - NANOSECONDS_PER_SLOT* = SECONDS_PER_SLOT * 1_000_000_000'u64 + # Ensure all representable slots are complete + FAR_FUTURE_BEACON_TIME* = + BeaconTime(ns_since_genesis: int64.high() - NANOSECONDS_PER_SLOT.int64) + template ethTimeUnit*(typ: type) {.dirty.} = func `+`*(x: typ, y: uint64): typ {.borrow.} func `-`*(x: typ, y: uint64): typ {.borrow.} @@ -133,22 +135,22 @@ template `+`*(a: TimeDiff, b: Duration): TimeDiff = const # Offsets from the start of the slot to when the corresponding message should # be sent - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#attesting + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#attesting attestationSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#broadcast-aggregate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#broadcast-aggregate aggregateSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#prepare-sync-committee-message + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#prepare-sync-committee-message syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#broadcast-sync-committee-contribution + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#broadcast-sync-committee-contribution syncContributionSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/p2p-interface.md#sync-committee + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#sync-committee lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/p2p-interface.md#sync-committee + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#sync-committee lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) @@ -188,7 +190,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH else: Epoch(slot div SLOTS_PER_EPOCH) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/fork-choice.md#compute_slots_since_epoch_start +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/fork-choice.md#compute_slots_since_epoch_start func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start ## How many slots since 
the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`) (slot mod SLOTS_PER_EPOCH) @@ -196,7 +198,7 @@ func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_st template is_epoch*(slot: Slot): bool = slot.since_epoch_start == 0 -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch ## Return the start slot of ``epoch``. const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH) @@ -216,7 +218,7 @@ iterator slots*(epoch: Epoch): Slot = for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH: yield slot -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#sync-committee +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#sync-committee template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod = if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 48c2a7a212..a4809f6753 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -48,13 +48,13 @@ func is_compounding_withdrawal_credential*( withdrawal_credentials: Eth2Digest): bool = withdrawal_credentials.data[0] == COMPOUNDING_WITHDRAWAL_PREFIX -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential func has_compounding_withdrawal_credential*(validator: Validator): bool = ## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal ## credential. is_compounding_withdrawal_credential(validator.withdrawal_credentials) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-get_max_effective_balance +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/electra/beacon-chain.md#new-get_max_effective_balance func get_max_effective_balance*(validator: Validator): Gwei = ## Get max effective balance for ``validator``. if has_compounding_withdrawal_credential(validator): @@ -86,7 +86,7 @@ func get_validator_from_deposit*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#deposits func get_validator_from_deposit*( - _: electra.BeaconState | fulu.BeaconState, + _: electra.BeaconState | fulu.BeaconState, pubkey: ValidatorPubKey, withdrawal_credentials: Eth2Digest, amount: Gwei): Validator = var validator = Validator( @@ -136,7 +136,7 @@ func compute_activation_exit_epoch*(epoch: Epoch): Epoch = ## ``epoch`` take effect. 
epoch + 1 + MAX_SEED_LOOKAHEAD -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#get_validator_churn_limit +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#get_validator_churn_limit func get_validator_churn_limit*( cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache): uint64 = @@ -155,7 +155,7 @@ func get_validator_activation_churn_limit*( cfg.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT, get_validator_churn_limit(cfg, state, cache)) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#initiate_validator_exit +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#initiate_validator_exit func get_state_exit_queue_info*( state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState): ExitQueueInfo = @@ -185,7 +185,7 @@ func get_state_exit_queue_info*( ExitQueueInfo( exit_queue_epoch: exit_queue_epoch, exit_queue_churn: exit_queue_churn) -func get_state_exit_queue_info*(state: electra.BeaconState | +func get_state_exit_queue_info*(state: electra.BeaconState | fulu.BeaconState): ExitQueueInfo = # Electra initiate_validator_exit doesn't have same quadratic aspect given @@ -247,7 +247,7 @@ func get_balance_churn_limit( ) churn - churn mod EFFECTIVE_BALANCE_INCREMENT.Gwei -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-get_activation_exit_churn_limit +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#new-get_activation_exit_churn_limit func get_activation_exit_churn_limit*( cfg: RuntimeConfig, state: electra.BeaconState | fulu.BeaconState, cache: var StateCache): Gwei = @@ -370,8 +370,8 @@ func get_slashing_penalty*( else: {.fatal: "invalid BeaconState type".} -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#slash_validator -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#modified-slash_validator func get_whistleblower_reward*( state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | @@ -381,12 +381,12 @@ func get_whistleblower_reward*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-slash_validator func get_whistleblower_reward*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState, validator_effective_balance: Gwei): Gwei = validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#modified-slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#modified-slash_validator func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei = when state is phase0.BeaconState: @@ -398,9 +398,9 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G else: {.fatal: "invalid BeaconState type".} -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#modified-slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/bellatrix/beacon-chain.md#modified-slash_validator proc slash_validator*( cfg: RuntimeConfig, state: var ForkyBeaconState, slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo, @@ -449,7 +449,7 @@ func genesis_time_from_eth1_timestamp( cfg: RuntimeConfig, eth1_timestamp: uint64): uint64 = eth1_timestamp + cfg.GENESIS_DELAY -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#genesis-block +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#genesis-block func get_initial_beacon_block*(state: phase0.HashedBeaconState): phase0.TrustedSignedBeaconBlock = # The genesis block is implicitly trusted @@ -461,7 +461,7 @@ func get_initial_beacon_block*(state: phase0.HashedBeaconState): phase0.TrustedSignedBeaconBlock( message: message, root: hash_tree_root(message)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors func get_initial_beacon_block*(state: altair.HashedBeaconState): altair.TrustedSignedBeaconBlock = # The genesis block is implicitly trusted @@ -599,7 +599,7 @@ func is_eligible_for_activation*( # Has not yet been activated validator.activation_epoch == FAR_FUTURE_EPOCH -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#is_valid_indexed_attestation +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#is_valid_indexed_attestation proc is_valid_indexed_attestation*( state: ForkyBeaconState, # phase0.SomeIndexedAttestation | electra.SomeIndexedAttestation: @@ -806,7 +806,7 @@ func check_attestation_target_epoch( # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-process_attestation -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#modified-process_attestation +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#modified-process_attestation func check_attestation_inclusion( consensusFork: static ConsensusFork, attestation_slot: Slot, current_slot: Slot): Result[void, cstring] = @@ -835,7 +835,7 @@ func 
check_attestation_index( Result[CommitteeIndex, cstring] = check_attestation_index(data.index, committees_per_slot) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices func get_attestation_participation_flag_indices( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState, data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] = @@ -908,7 +908,7 @@ func get_attestation_participation_flag_indices( # TODO these duplicate some stuff in state_transition_epoch which uses TotalBalances # better to centralize around that if feasible -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#get_total_active_balance +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#get_total_active_balance func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache): Gwei = ## Return the combined effective balance of the active validators. ## Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei @@ -924,7 +924,7 @@ func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache): cache.total_active_balance[epoch] = tab return tab -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#get_base_reward_per_increment +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_base_reward_per_increment func get_base_reward_per_increment_sqrt( total_active_balance_sqrt: uint64): Gwei = EFFECTIVE_BALANCE_INCREMENT.Gwei * BASE_REWARD_FACTOR div @@ -935,7 +935,7 @@ func get_base_reward_per_increment*( get_base_reward_per_increment_sqrt( integer_squareroot(distinctBase(total_active_balance))) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_base_reward +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_base_reward func get_base_reward( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState, @@ -982,6 +982,7 @@ proc check_attestation*( ok() +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/electra/beacon-chain.md#modified-process_attestation proc check_attestation*( state: electra.BeaconState | fulu.BeaconState, attestation: electra.Attestation | electra.TrustedAttestation, @@ -1002,15 +1003,32 @@ proc check_attestation*( return err("Electra attestation data index not 0") when on_chain: - var participants_count = 0'u64 - for index in attestation.committee_bits.oneIndices: - if not (index.uint64 < get_committee_count_per_slot( + var committee_offset = 0 + for committee_index in attestation.committee_bits.oneIndices: + if not (committee_index.uint64 < get_committee_count_per_slot( state, data.target.epoch, cache)): return err("attestation wrong committee index len") - participants_count += - get_beacon_committee_len(state, data.slot, index.CommitteeIndex, cache) - - if not (lenu64(attestation.aggregation_bits) == participants_count): + let committee = get_beacon_committee( + state, data.slot, committee_index.CommitteeIndex, cache) + + if attestation.aggregation_bits.len < committee_offset + len(committee): + # This would overflow; see invalid_too_many_committee_bits test case + 
return err("Electra attestation has too many committee bits") + + # This construction modified slightly from spec version to early-exit and + # not create the actual set, but the result is it uses a flag variable to + # look similar. + var committee_attesters_nonzero = false + for i, attester_index in committee: + if attestation.aggregation_bits[committee_offset + i]: + committee_attesters_nonzero = true + break + if not committee_attesters_nonzero: + return err("Electra attestation committee not present in aggregated bits") + + committee_offset += len(committee) + + if not (len(attestation.aggregation_bits) == committee_offset): return err("attestation wrong aggregation bit length") else: let @@ -1032,7 +1050,7 @@ proc check_attestation*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#new-process_bls_to_execution_change +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#new-process_bls_to_execution_change proc check_bls_to_execution_change*( genesisFork: Fork, state: capella.BeaconState | deneb.BeaconState | electra.BeaconState | @@ -1193,12 +1211,10 @@ proc process_attestation*( ok(proposer_reward) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#get_next_sync_committee_indices -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_next_sync_committee_indices func get_next_sync_committee_keys( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | - deneb.BeaconState | electra.BeaconState | fulu.BeaconState): - array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] = + deneb.BeaconState): array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] = ## Return the sequence of sync committee indices, with possible duplicates, ## for the next sync committee. # The sync committe depends on seed and effective balance - it can @@ -1226,30 +1242,68 @@ func get_next_sync_committee_keys( candidate_index = active_validator_indices[shuffled_index] random_byte = eth2digest(hash_buffer).data[i mod 32] effective_balance = state.validators[candidate_index].effective_balance - const meb = - when typeof(state).kind >= ConsensusFork.Electra: - MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei # [Modified in Electra:EIP7251] - else: - MAX_EFFECTIVE_BALANCE.Gwei + if effective_balance * MAX_RANDOM_BYTE >= + MAX_EFFECTIVE_BALANCE.Gwei * random_byte: + res[index] = state.validators[candidate_index].pubkey + inc index + i += 1'u64 + res + +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices +func get_next_sync_committee_keys( + state: electra.BeaconState | fulu.BeaconState): + array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] = + ## Return the sequence of sync committee indices, with possible duplicates, + ## for the next sync committee. 
+ # The sync committe depends on seed and effective balance - it can + # thus only be computed for the current epoch of the state, after balance + # updates have been performed + + let epoch = get_current_epoch(state) + 1 - if effective_balance * MAX_RANDOM_BYTE >= meb * random_byte: + const MAX_RANDOM_VALUE = 65536 - 1 # [Modified in Electra] + let + active_validator_indices = get_active_validator_indices(state, epoch) + active_validator_count = uint64(len(active_validator_indices)) + seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) + var + i = 0'u64 + index = 0 + res: array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] + hash_buffer: array[40, byte] + rv_buf: array[8, byte] + hash_buffer[0..31] = seed.data + while index < SYNC_COMMITTEE_SIZE: + hash_buffer[32..39] = uint_to_bytes(uint64(i div 16)) # [Modified in Electra] + let + shuffled_index = compute_shuffled_index( + uint64(i mod active_validator_count), active_validator_count, seed) + candidate_index = active_validator_indices[shuffled_index] + random_bytes = eth2digest(hash_buffer).data + offset = (i mod 16) * 2 + effective_balance = state.validators[candidate_index].effective_balance + rv_buf[0 .. 1] = random_bytes.toOpenArray(offset, offset + 1) + let random_value = bytes_to_uint64(rv_buf) + # [Modified in Electra:EIP7251] + if effective_balance * MAX_RANDOM_VALUE >= + MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei * random_value: res[index] = state.validators[candidate_index].pubkey inc index i += 1'u64 res -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential func has_eth1_withdrawal_credential*(validator: Validator): bool = ## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-has_execution_withdrawal_credential +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-has_execution_withdrawal_credential func has_execution_withdrawal_credential*(validator: Validator): bool = ## Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential. 
has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#is_fully_withdrawable_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#is_fully_withdrawable_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator func is_fully_withdrawable_validator( fork: static ConsensusFork, validator: Validator, balance: Gwei, @@ -1304,7 +1358,7 @@ func queue_excess_active_balance( signature: ValidatorSig.infinity, slot: GENESIS_SLOT)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-switch_to_compounding_validator +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#new-switch_to_compounding_validator func switch_to_compounding_validator*( state: var (electra.BeaconState | fulu.BeaconState), index: ValidatorIndex) = @@ -1312,13 +1366,13 @@ func switch_to_compounding_validator*( validator.withdrawal_credentials.data[0] = COMPOUNDING_WITHDRAWAL_PREFIX queue_excess_active_balance(state, index.uint64) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw func get_pending_balance_to_withdraw*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState, validator_index: ValidatorIndex): Gwei = var pending_balance: Gwei for withdrawal in state.pending_partial_withdrawals: - if withdrawal.index == validator_index: + if withdrawal.validator_index == validator_index: pending_balance += withdrawal.amount pending_balance @@ -1409,7 +1463,7 @@ func get_expected_withdrawals*( # to cleanly treat the results of get_expected_withdrawals as a seq[Withdrawal] # are valuable enough to make that the default version of this spec function. 
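The Electra variant of get_next_sync_committee_keys a few hunks above draws a 16-bit random value from two bytes of the hash output instead of a single byte. A simplified sketch of just that acceptance test, with illustrative constants standing in for the real spec presets:

    const
      MaxRandomValue = 65535'u64                                  # 2^16 - 1
      MaxEffectiveBalanceElectra = 2048'u64 * 1_000_000_000'u64   # Gwei

    func sampleAccepted(effectiveBalance: uint64,
                        randomBytes: array[32, byte], i: uint64): bool =
      # Bytes 2*(i mod 16) and 2*(i mod 16)+1, read little-endian, form the
      # random value; the candidate is kept when its effective balance scaled
      # by the maximum random value reaches the max balance times that value.
      let
        offset = int(i mod 16) * 2
        randomValue = uint64(randomBytes[offset]) or
                      (uint64(randomBytes[offset + 1]) shl 8)
      effectiveBalance * MaxRandomValue >=
        MaxEffectiveBalanceElectra * randomValue
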
template get_expected_withdrawals_with_partial_count_aux*( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState, epoch: Epoch, fetch_balance: untyped): (seq[Withdrawal], uint64) = doAssert epoch - get_current_epoch(state) in [0'u64, 1'u64] @@ -1417,7 +1471,7 @@ template get_expected_withdrawals_with_partial_count_aux*( var withdrawal_index = state.next_withdrawal_index withdrawals: seq[Withdrawal] = @[] - partial_withdrawals_count: uint64 = 0 + processed_partial_withdrawals_count: uint64 = 0 # [New in Electra:EIP7251] Consume pending partial withdrawals for withdrawal in state.pending_partial_withdrawals: @@ -1426,10 +1480,10 @@ template get_expected_withdrawals_with_partial_count_aux*( break let - validator = state.validators.item(withdrawal.index) + validator = state.validators.item(withdrawal.validator_index) # Keep a uniform variable name available for injected code - validator_index {.inject.} = withdrawal.index + validator_index {.inject.} = withdrawal.validator_index # Here, can't use the pre-stored effective balance because this template # might be called on the next slot and therefore next epoch, after which @@ -1453,13 +1507,13 @@ template get_expected_withdrawals_with_partial_count_aux*( withdrawal.amount) var w = Withdrawal( index: withdrawal_index, - validator_index: withdrawal.index, + validator_index: withdrawal.validator_index, amount: withdrawable_balance) w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] withdrawals.add w withdrawal_index += 1 - partial_withdrawals_count += 1 + processed_partial_withdrawals_count += 1 let bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) @@ -1470,7 +1524,14 @@ template get_expected_withdrawals_with_partial_count_aux*( for _ in 0 ..< bound: let validator = state.validators.item(validator_index) - balance = fetch_balance + # [Modified in Electra:EIP7251] + partially_withdrawn_balance = block: + var subtot: Gwei + for withdrawal in withdrawals: + if withdrawal.validator_index == validator_index: + subtot += withdrawal.amount + subtot + balance = fetch_balance - partially_withdrawn_balance if is_fully_withdrawable_validator( typeof(state).kind, validator, balance, epoch): var w = Withdrawal( @@ -1494,7 +1555,7 @@ template get_expected_withdrawals_with_partial_count_aux*( break validator_index = (validator_index + 1) mod num_validators - (withdrawals, partial_withdrawals_count) + (withdrawals, processed_partial_withdrawals_count) template get_expected_withdrawals_with_partial_count*( state: electra.BeaconState | fulu.BeaconState): (seq[Withdrawal], uint64) = @@ -1502,7 +1563,7 @@ template get_expected_withdrawals_with_partial_count*( state, get_current_epoch(state)) do: state.balances.item(validator_index) -func get_expected_withdrawals*(state: electra.BeaconState | fulu.BeaconState): +func get_expected_withdrawals*(state: electra.BeaconState | fulu.BeaconState): seq[Withdrawal] = get_expected_withdrawals_with_partial_count(state)[0] @@ -1973,7 +2034,7 @@ func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState): # historical_summaries initialized to correct default automatically ) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/fork.md#upgrading-the-state +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/deneb/fork.md#upgrading-the-state func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState): ref deneb.BeaconState = let @@ -2083,17 +2144,13 @@ func upgrade_to_electra*( 
blob_gas_used: pre.latest_execution_payload_header.blob_gas_used, excess_blob_gas: pre.latest_execution_payload_header.excess_blob_gas) - var max_exit_epoch = FAR_FUTURE_EPOCH + var earliest_exit_epoch = + compute_activation_exit_epoch(get_current_epoch(pre)) for v in pre.validators: if v.exit_epoch != FAR_FUTURE_EPOCH: - max_exit_epoch = - if max_exit_epoch == FAR_FUTURE_EPOCH: - v.exit_epoch - else: - max(max_exit_epoch, v.exit_epoch) - if max_exit_epoch == FAR_FUTURE_EPOCH: - max_exit_epoch = get_current_epoch(pre) - let earliest_exit_epoch = max_exit_epoch + 1 + if v.exit_epoch > earliest_exit_epoch: + earliest_exit_epoch = v.exit_epoch + earliest_exit_epoch += 1 let post = (ref electra.BeaconState)( # Versioning @@ -2229,18 +2286,6 @@ func upgrade_to_fulu*( blob_gas_used: pre.latest_execution_payload_header.blob_gas_used, excess_blob_gas: pre.latest_execution_payload_header.excess_blob_gas) - var max_exit_epoch = FAR_FUTURE_EPOCH - for v in pre.validators: - if v.exit_epoch != FAR_FUTURE_EPOCH: - max_exit_epoch = - if max_exit_epoch == FAR_FUTURE_EPOCH: - v.exit_epoch - else: - max(max_exit_epoch, v.exit_epoch) - if max_exit_epoch == FAR_FUTURE_EPOCH: - max_exit_epoch = get_current_epoch(pre) - let earliest_exit_epoch = max_exit_epoch + 1 - let post = (ref fulu.BeaconState)( # Versioning genesis_time: pre.genesis_time, @@ -2301,57 +2346,22 @@ func upgrade_to_fulu*( historical_summaries: pre.historical_summaries, # [New in Electra:EIP6110] - deposit_requests_start_index: UNSET_DEPOSIT_REQUESTS_START_INDEX, + deposit_requests_start_index: pre.deposit_requests_start_index, # [New in Electra:EIP7251] - deposit_balance_to_consume: 0.Gwei, - exit_balance_to_consume: 0.Gwei, - earliest_exit_epoch: earliest_exit_epoch, - consolidation_balance_to_consume: 0.Gwei, - earliest_consolidation_epoch: - compute_activation_exit_epoch(get_current_epoch(pre)) - - # pending_balance_deposits, pending_partial_withdrawals, and - # pending_consolidations are default empty lists + deposit_balance_to_consume: pre.deposit_balance_to_consume, + exit_balance_to_consume: pre.exit_balance_to_consume, + earliest_exit_epoch: pre.earliest_exit_epoch, + consolidation_balance_to_consume: pre.consolidation_balance_to_consume, + earliest_consolidation_epoch: pre.earliest_consolidation_epoch, + pending_deposits: pre.pending_deposits, + pending_partial_withdrawals: pre.pending_partial_withdrawals, + pending_consolidations: pre.pending_consolidations ) - post.exit_balance_to_consume = - get_activation_exit_churn_limit(cfg, post[], cache) - post.consolidation_balance_to_consume = - get_consolidation_churn_limit(cfg, post[], cache) - - # [New in Electra:EIP7251] - # add validators that are not yet active to pending balance deposits - var pre_activation: seq[(Epoch, uint64)] - for index, validator in post.validators: - if validator.activation_epoch == FAR_FUTURE_EPOCH: - pre_activation.add((validator.activation_eligibility_epoch, index.uint64)) - sort(pre_activation) - - for (_, index) in pre_activation: - let balance = post.balances.item(index) - post.balances[index] = 0.Gwei - let validator = addr post.validators.mitem(index) - validator[].effective_balance = 0.Gwei - validator[].activation_eligibility_epoch = FAR_FUTURE_EPOCH - # Use bls.G2_POINT_AT_INFINITY as a signature field placeholder and - # GENESIS_SLOT to distinguish from a pending deposit request - discard post.pending_deposits.add PendingDeposit( - pubkey: validator[].pubkey, - withdrawal_credentials: validator[].withdrawal_credentials, - amount: balance, - 
signature: ValidatorSig.infinity, - slot: GENESIS_SLOT) - - # Ensure early adopters of compounding credentials go through the activation - # churn - for index, validator in post.validators: - if has_compounding_withdrawal_credential(validator): - queue_excess_active_balance(post[], index.uint64) - post -func latest_block_root(state: ForkyBeaconState, state_root: Eth2Digest): +func latest_block_root*(state: ForkyBeaconState, state_root: Eth2Digest): Eth2Digest = # The root of the last block that was successfully applied to this state - # normally, when a block is applied, the data from the header is stored in @@ -2474,4 +2484,4 @@ func can_advance_slots( target_slot >= state.data.slot and block_root == state.latest_block_root func can_advance_slots*( state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool = - withState(state): forkyState.can_advance_slots(block_root, target_slot) + withState(state): forkyState.can_advance_slots(block_root, target_slot) \ No newline at end of file diff --git a/beacon_chain/spec/crypto.nim b/beacon_chain/spec/crypto.nim index c164d797ec..09d6095217 100644 --- a/beacon_chain/spec/crypto.nim +++ b/beacon_chain/spec/crypto.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -7,10 +7,6 @@ {.push raises: [].} -# At the time of writing, the exact definitions of what should be used for -# cryptography in the spec is in flux, with sizes and test vectors still being -# hashed out. This layer helps isolate those chagnes. - # BLS signatures can be combined such that multiple signatures are aggregated. # Each time a new signature is added, the corresponding public key must be # added to the verification key as well - if a key signs twice, it must be added @@ -207,7 +203,7 @@ func finish*(agg: AggregateSignature): CookedSig {.inline.} = sig.finish(agg) CookedSig(sig) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#bls-signatures +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#bls-signatures func blsVerify*( pubkey: CookedPubKey, message: openArray[byte], signature: CookedSig): bool = diff --git a/beacon_chain/spec/datatypes/altair.nim b/beacon_chain/spec/datatypes/altair.nim index 30bb4bf08c..9862224bf2 100644 --- a/beacon_chain/spec/datatypes/altair.nim +++ b/beacon_chain/spec/datatypes/altair.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
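Unlike upgrade_to_electra, the Fulu upgrade above no longer recomputes churn and exit bookkeeping: the Electra pre-state already maintains those fields, so they are carried over verbatim. A toy sketch of that carry-over, using heavily reduced stand-in state types:

    type
      ElectraChurn = object
        earliest_exit_epoch: uint64
        exit_balance_to_consume: uint64

      FuluChurn = object
        earliest_exit_epoch: uint64
        exit_balance_to_consume: uint64

    func carryOver(pre: ElectraChurn): FuluChurn =
      # No recomputation from the validator set is needed at the Fulu fork;
      # the pre-state values are simply preserved.
      FuluChurn(
        earliest_exit_epoch: pre.earliest_exit_epoch,
        exit_balance_to_consume: pre.exit_balance_to_consume)

This is why the earliest_exit_epoch scan is removed from upgrade_to_fulu while a simplified form of it remains in upgrade_to_electra.
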
@@ -34,13 +34,13 @@ type static: # Verify that ordinals follow spec values (the spec uses these as shifts for bit flags) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#participation-flag-indices + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#participation-flag-indices doAssert ord(TIMELY_SOURCE_FLAG_INDEX) == 0 doAssert ord(TIMELY_TARGET_FLAG_INDEX) == 1 doAssert ord(TIMELY_HEAD_FLAG_INDEX) == 2 const - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#incentivization-weights + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#incentivization-weights TIMELY_SOURCE_WEIGHT* = 14 TIMELY_TARGET_WEIGHT* = 26 TIMELY_HEAD_WEIGHT* = 14 @@ -51,7 +51,7 @@ const PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] = [uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#misc + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#misc TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16 SYNC_COMMITTEE_SUBNET_COUNT* = 4 @@ -78,7 +78,7 @@ static: doAssert TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + type ### New types - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#custom-types ParticipationFlags* = uint8 EpochParticipationFlags* = @@ -96,12 +96,12 @@ type sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE] sync_committee_signature*: TrustedSig - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#synccommittee + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#synccommittee SyncCommittee* = object pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey] aggregate_pubkey*: ValidatorPubKey - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#synccommitteemessage + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/validator.md#synccommitteemessage SyncCommitteeMessage* = object slot*: Slot ## Slot to which this contribution pertains @@ -115,7 +115,7 @@ type signature*: ValidatorSig ## Signature by the validator over the block root of `slot` - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#synccommitteecontribution + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#synccommitteecontribution SyncCommitteeAggregationBits* = BitArray[SYNC_SUBCOMMITTEE_SIZE] @@ -137,18 +137,18 @@ type signature*: ValidatorSig ## Signature by the validator(s) over the block root of `slot` - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#contributionandproof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#contributionandproof ContributionAndProof* = object aggregator_index*: uint64 # `ValidatorIndex` after validation contribution*: SyncCommitteeContribution selection_proof*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#signedcontributionandproof + # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#signedcontributionandproof SignedContributionAndProof* = object message*: ContributionAndProof signature*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#syncaggregatorselectiondata + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#syncaggregatorselectiondata SyncAggregatorSelectionData* = object slot*: Slot subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation @@ -164,7 +164,7 @@ type NextSyncCommitteeBranch* = array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/sync-protocol.md#lightclientheader + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientheader LightClientHeader* = object beacon*: BeaconBlockHeader ## Beacon block header @@ -178,7 +178,7 @@ type ## Current sync committee corresponding to `header.beacon.state_root` current_sync_committee_branch*: CurrentSyncCommitteeBranch - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientupdate LightClientUpdate* = object attested_header*: LightClientHeader ## Header attested to by the sync committee @@ -197,7 +197,7 @@ type signature_slot*: Slot ## Slot at which the aggregate signature was created (untrusted) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate LightClientFinalityUpdate* = object # Header attested to by the sync committee attested_header*: LightClientHeader @@ -211,7 +211,7 @@ type # Slot at which the aggregate signature was created (untrusted) signature_slot*: Slot - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate LightClientOptimisticUpdate* = object # Header attested to by the sync committee attested_header*: LightClientHeader @@ -346,7 +346,7 @@ type data*: BeaconState root*: Eth2Digest # hash_tree_root(data) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#beaconblock BeaconBlock* = object ## For each slot, a proposer is chosen from the validator pool to propose ## a new block. 
Once the block as been proposed, it is transmitted to @@ -403,7 +403,7 @@ type state_root*: Eth2Digest body*: TrustedBeaconBlockBody - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#beaconblockbody BeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -480,7 +480,7 @@ type # [New in Altair] sync_aggregate*: TrustedSyncAggregate - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#signedbeaconblock SignedBeaconBlock* = object message*: BeaconBlock signature*: ValidatorSig @@ -668,7 +668,7 @@ chronicles.formatIt SyncCommitteeContribution: shortLog(it) chronicles.formatIt ContributionAndProof: shortLog(it) chronicles.formatIt SignedContributionAndProof: shortLog(it) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header func is_valid_light_client_header*( header: LightClientHeader, cfg: RuntimeConfig): bool = true diff --git a/beacon_chain/spec/datatypes/base.nim b/beacon_chain/spec/datatypes/base.nim index 01ce1cb703..8497cef6f6 100644 --- a/beacon_chain/spec/datatypes/base.nim +++ b/beacon_chain/spec/datatypes/base.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -74,7 +74,7 @@ export tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto, digest, presets -const SPEC_VERSION* = "1.5.0-alpha.8" +const SPEC_VERSION* = "1.5.0-beta.5" ## Spec version we're aiming to be compatible with, right now const @@ -144,7 +144,7 @@ template ethAmountUnit*(typ: type) {.dirty.} = func u256*(n: typ): UInt256 {.borrow.} - proc toString*(B: typedesc[Base10], value: typ): string {.borrow.} + func toString*(B: typedesc[Base10], value: typ): string {.borrow.} proc writeValue*(writer: var JsonWriter, value: typ) {.raises: [IOError].} = writer.writeValue(distinctBase(value)) @@ -201,16 +201,17 @@ type BlobId* = distinct uint8 ## The blob id maps which gossip subscription to use to publish a - ## blob sidecar - it is distinct from the CommitteeIndex in particular + ## blob sidecar - it is distinct from the BlobIndex in particular ## ## The `BlobId` type is constrained to values in the range - ## `[0, BLOB_SIDECAR_SUBNET_COUNT)` during initialization. + ## `[0, MAX_SUPPORTED_BLOB_SIDECAR_SUBNET_COUNT)` during initialization. + ## The network configuration may impose further restrictions on the count! 
# BitVector[4] in the spec, ie 4 bits which end up encoded as a byte for # SSZ / hashing purposes JustificationBits* = distinct uint8 - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#proposerslashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#proposerslashing ProposerSlashing* = object signed_header_1*: SignedBeaconBlockHeader signed_header_2*: SignedBeaconBlockHeader @@ -231,7 +232,7 @@ type current_version*: Version genesis_validators_root*: Eth2Digest - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#checkpoint + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#checkpoint Checkpoint* = object epoch*: Epoch root*: Eth2Digest @@ -262,7 +263,7 @@ type withdrawal_credentials*: Eth2Digest amount*: Gwei - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#depositdata + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#depositdata DepositData* = object pubkey*: ValidatorPubKey withdrawal_credentials*: Eth2Digest @@ -272,7 +273,7 @@ type signature*: ValidatorSig ## Signing over DepositMessage - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#voluntaryexit + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#voluntaryexit VoluntaryExit* = object epoch*: Epoch ## Earliest epoch when voluntary exit can be processed @@ -335,12 +336,12 @@ type proposer_index*: uint64 # `ValidatorIndex` after validation - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#historicalbatch + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#historicalbatch HistoricalBatch* = object block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#fork + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#fork Fork* = object previous_version*: Version current_version*: Version @@ -354,7 +355,7 @@ type deposit_count*: uint64 block_hash*: Eth2Digest - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#signedvoluntaryexit + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#signedvoluntaryexit SignedVoluntaryExit* = object message*: VoluntaryExit signature*: ValidatorSig @@ -400,7 +401,7 @@ type sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache] # This matches the mutable state of the Solidity deposit contract - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/solidity_deposit_contract/deposit_contract.sol + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/solidity_deposit_contract/deposit_contract.sol DepositContractState* = object branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] deposit_count*: array[32, byte] # Uint256 @@ -612,7 +613,7 @@ template makeLimitedU64*(T: untyped, limit: uint64) = makeLimitedU64(CommitteeIndex, MAX_COMMITTEES_PER_SLOT) makeLimitedU64(SubnetId, ATTESTATION_SUBNET_COUNT) -makeLimitedU64(BlobId, BLOB_SIDECAR_SUBNET_COUNT) +makeLimitedU64(BlobId, MAX_SUPPORTED_BLOB_SIDECAR_SUBNET_COUNT) const validatorIndexLimit = min(uint64(int32.high), 
VALIDATOR_REGISTRY_LIMIT) @@ -960,7 +961,7 @@ func checkForkConsistency*(cfg: RuntimeConfig) = let forkVersions = [cfg.GENESIS_FORK_VERSION, cfg.ALTAIR_FORK_VERSION, cfg.BELLATRIX_FORK_VERSION, cfg.CAPELLA_FORK_VERSION, - cfg.DENEB_FORK_VERSION, cfg.ELECTRA_FORK_VERSION, + cfg.DENEB_FORK_VERSION, cfg.ELECTRA_FORK_VERSION, cfg.FULU_FORK_VERSION] for i in 0 ..< forkVersions.len: @@ -988,7 +989,5 @@ func ofLen*[T, N](ListType: type List[T, N], n: int): ListType = else: raise newException(SszSizeMismatchError) -template debugComment*(s: string) = discard - # Specifically has the `Fulu` naming, for easy debugging. -template debugFuluComment* (s: string) = discard +template debugFuluComment* (s: string) = discard \ No newline at end of file diff --git a/beacon_chain/spec/datatypes/bellatrix.nim b/beacon_chain/spec/datatypes/bellatrix.nim index 610ba436e8..50565a8ef2 100644 --- a/beacon_chain/spec/datatypes/bellatrix.nim +++ b/beacon_chain/spec/datatypes/bellatrix.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -96,7 +96,7 @@ type ExecutePayload* = proc( execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/fork-choice.md#powblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/fork-choice.md#powblock PowBlock* = object block_hash*: Eth2Digest parent_hash*: Eth2Digest diff --git a/beacon_chain/spec/datatypes/capella.nim b/beacon_chain/spec/datatypes/capella.nim index 57dad11c1e..cdc4f720ff 100644 --- a/beacon_chain/spec/datatypes/capella.nim +++ b/beacon_chain/spec/datatypes/capella.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
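# Reviewer note (sketch only, not part of this patch): the loop that follows
# in `checkForkConsistency` boils down to a pairwise distinctness check over
# the configured fork versions, roughly as in the standalone helper below
# (plain `array[4, byte]` stands in for `Version` to keep the sketch
# self-contained).
func forkVersionsAllDistinct(versions: openArray[array[4, byte]]): bool =
  for i in 0 ..< versions.len:
    for j in (i + 1) ..< versions.len:
      if versions[i] == versions[j]:
        return false  # two forks must not share a version
  true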
@@ -40,7 +40,7 @@ type SignedBLSToExecutionChangeList* = List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#withdrawal + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#withdrawal Withdrawal* = object index*: WithdrawalIndex validator_index*: uint64 @@ -53,12 +53,12 @@ type from_bls_pubkey*: ValidatorPubKey to_execution_address*: ExecutionAddress - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#signedblstoexecutionchange + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#signedblstoexecutionchange SignedBLSToExecutionChange* = object message*: BLSToExecutionChange signature*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#historicalsummary + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#historicalsummary HistoricalSummary* = object # `HistoricalSummary` matches the components of the phase0 # `HistoricalBatch` making the two hash_tree_root-compatible. @@ -95,7 +95,7 @@ type executionPayload*: ExecutionPayload blockValue*: Wei - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#executionpayloadheader + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#executionpayloadheader ExecutionPayloadHeader* = object # Execution block header fields parent_hash*: Eth2Digest @@ -124,7 +124,7 @@ type ExecutionBranch* = array[log2trunc(EXECUTION_PAYLOAD_GINDEX), Eth2Digest] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/sync-protocol.md#modified-lightclientheader + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader LightClientHeader* = object beacon*: BeaconBlockHeader ## Beacon block header @@ -133,7 +133,7 @@ type ## Execution payload header corresponding to `beacon.body_root` (from Capella onward) execution_branch*: ExecutionBranch - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientbootstrap LightClientBootstrap* = object header*: LightClientHeader ## Header matching the requested beacon block root @@ -142,7 +142,7 @@ type ## Current sync committee corresponding to `header.beacon.state_root` current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientupdate LightClientUpdate* = object attested_header*: LightClientHeader ## Header attested to by the sync committee @@ -301,7 +301,7 @@ type data*: BeaconState root*: Eth2Digest # hash_tree_root(data) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#beaconblock BeaconBlock* = object ## For each slot, a proposer is chosen from the validator pool to propose ## a new block. 
Once the block as been proposed, it is transmitted to @@ -358,7 +358,7 @@ type state_root*: Eth2Digest body*: TrustedBeaconBlockBody - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#beaconblockbody BeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -446,7 +446,7 @@ type # Capella operations bls_to_execution_changes*: SignedBLSToExecutionChangeList # [New in Capella] - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#signedbeaconblock SignedBeaconBlock* = object message*: BeaconBlock signature*: ValidatorSig @@ -659,7 +659,7 @@ func get_lc_execution_root*( ZERO_HASH -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/sync-protocol.md#modified-is_valid_light_client_header +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-is_valid_light_client_header func is_valid_light_client_header*( header: LightClientHeader, cfg: RuntimeConfig): bool = let epoch = header.beacon.slot.epoch @@ -676,13 +676,13 @@ func is_valid_light_client_header*( get_subtree_index(EXECUTION_PAYLOAD_GINDEX), header.beacon.body_root) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data func upgrade_lc_header_to_capella*( pre: altair.LightClientHeader): LightClientHeader = LightClientHeader( beacon: pre.beacon) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data func upgrade_lc_bootstrap_to_capella*( pre: altair.LightClientBootstrap): LightClientBootstrap = LightClientBootstrap( @@ -690,7 +690,7 @@ func upgrade_lc_bootstrap_to_capella*( current_sync_committee: pre.current_sync_committee, current_sync_committee_branch: pre.current_sync_committee_branch) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data func upgrade_lc_update_to_capella*( pre: altair.LightClientUpdate): LightClientUpdate = LightClientUpdate( @@ -702,7 +702,7 @@ func upgrade_lc_update_to_capella*( sync_aggregate: pre.sync_aggregate, signature_slot: pre.signature_slot) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data func upgrade_lc_finality_update_to_capella*( pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate = LightClientFinalityUpdate( @@ -712,7 +712,7 @@ func upgrade_lc_finality_update_to_capella*( sync_aggregate: pre.sync_aggregate, signature_slot: pre.signature_slot) -# 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data func upgrade_lc_optimistic_update_to_capella*( pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate = LightClientOptimisticUpdate( diff --git a/beacon_chain/spec/datatypes/constants.nim b/beacon_chain/spec/datatypes/constants.nim index 09a62b4265..442d67c6ff 100644 --- a/beacon_chain/spec/datatypes/constants.nim +++ b/beacon_chain/spec/datatypes/constants.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -14,7 +14,7 @@ type Epoch* = distinct uint64 SyncCommitteePeriod* = distinct uint64 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#custom-types WithdrawalIndex* = uint64 DomainType* = distinct array[4, byte] @@ -55,7 +55,7 @@ const DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00]) DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00]) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#domain-types + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#domain-types DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00]) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/fork-choice.md#configuration @@ -64,18 +64,14 @@ const REORG_PARENT_WEIGHT_THRESHOLD*: uint64 = 160 REORG_MAX_EPOCHS_SINCE_FINALIZATION* = Epoch(2) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/deneb/p2p-interface.md#configuration - BLOB_SIDECAR_SUBNET_COUNT*: uint64 = 6 - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#configuration MAX_REQUEST_BLOCKS* = 1024'u64 RESP_TIMEOUT* = 10'u64 ATTESTATION_PROPAGATION_SLOT_RANGE*: uint64 = 32 MAXIMUM_GOSSIP_CLOCK_DISPARITY* = 500.millis - # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#configuration - GOSSIP_MAX_SIZE* = 10'u64 * 1024 * 1024 # bytes - MAX_CHUNK_SIZE* = 10'u64 * 1024 * 1024 # bytes + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/p2p-interface.md#configuration + MAX_PAYLOAD_SIZE* = 10'u64 * 1024 * 1024 # bytes # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/p2p-interface.md#configuration MAX_REQUEST_BLOCKS_DENEB*: uint64 = 128 # TODO Make use of in request code @@ -85,4 +81,9 @@ const FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#withdrawal-prefixes - COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02 + COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02'u8 + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/electra/beacon-chain.md#execution-layer-triggered-requests + DEPOSIT_REQUEST_TYPE* = 0x00'u8 + WITHDRAWAL_REQUEST_TYPE* = 0x01'u8 + 
CONSOLIDATION_REQUEST_TYPE* = 0x02'u8 diff --git a/beacon_chain/spec/datatypes/deneb.nim b/beacon_chain/spec/datatypes/deneb.nim index 15e4b7a014..3298c5d719 100644 --- a/beacon_chain/spec/datatypes/deneb.nim +++ b/beacon_chain/spec/datatypes/deneb.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -36,7 +36,7 @@ const BLS_MODULUS* = "52435875175126190479447740508185965837690552500527637822603658699938581184513".u256 type - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#beaconblockbody KzgCommitments* = List[KzgCommitment, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK] # TODO this apparently is suppposed to be SSZ-equivalent to Bytes32, but @@ -50,10 +50,10 @@ type # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#custom-types BlobIndex* = uint64 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/polynomial-commitments.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/polynomial-commitments.md#custom-types Blob* = array[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB, byte] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/p2p-interface.md#blobsidecar + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/p2p-interface.md#blobsidecar BlobSidecar* = object index*: BlobIndex ## Index of blob in block @@ -186,7 +186,7 @@ type signature_slot*: Slot ## Slot at which the aggregate signature was created (untrusted) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate LightClientFinalityUpdate* = object # Header attested to by the sync committee attested_header*: LightClientHeader @@ -382,7 +382,7 @@ type state_root*: Eth2Digest body*: TrustedBeaconBlockBody - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/deneb/beacon-chain.md#beaconblockbody BeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -626,7 +626,7 @@ func kzg_commitment_inclusion_proof_gindex*( BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root func get_lc_execution_root*( header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest = let epoch = header.beacon.slot.epoch @@ -657,7 +657,7 @@ func get_lc_execution_root*( ZERO_HASH -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header +# 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header func is_valid_light_client_header*( header: LightClientHeader, cfg: RuntimeConfig): bool = let epoch = header.beacon.slot.epoch @@ -725,7 +725,7 @@ func upgrade_lc_update_to_deneb*( sync_aggregate: pre.sync_aggregate, signature_slot: pre.signature_slot) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/deneb/light-client/fork.md#upgrading-light-client-data func upgrade_lc_finality_update_to_deneb*( pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate = LightClientFinalityUpdate( diff --git a/beacon_chain/spec/datatypes/electra.nim b/beacon_chain/spec/datatypes/electra.nim index e3a4fac849..0aff68a265 100644 --- a/beacon_chain/spec/datatypes/electra.nim +++ b/beacon_chain/spec/datatypes/electra.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -44,7 +44,7 @@ const # The first member (`genesis_time`) is 64, subsequent members +1 each. # If there are ever more than 64 members in `BeaconState`, indices change! # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `84 * 2 + 1`. - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/ssz/merkle-proofs.md # finalized_checkpoint > root FINALIZED_ROOT_GINDEX_ELECTRA* = 169.GeneralizedIndex # current_sync_committee @@ -119,9 +119,9 @@ type executionPayload*: ExecutionPayload blockValue*: Wei blobsBundle*: BlobsBundle - executionRequests*: array[3, seq[byte]] + executionRequests*: seq[seq[byte]] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#executionpayloadheader + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayloadheader ExecutionPayloadHeader* = object # Execution block header fields parent_hash*: Eth2Digest @@ -148,7 +148,7 @@ type ExecutePayload* = proc( execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#pendingdeposit + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#pendingdeposit PendingDeposit* = object pubkey*: ValidatorPubKey withdrawal_credentials*: Eth2Digest @@ -158,11 +158,11 @@ type # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#pendingpartialwithdrawal PendingPartialWithdrawal* = object - index*: uint64 + validator_index*: uint64 amount*: Gwei withdrawable_epoch*: Epoch - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#executionlayerwithdrawalrequest + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#withdrawalrequest WithdrawalRequest* = object source_address*: ExecutionAddress validator_pubkey*: ValidatorPubKey @@ -179,13 +179,20 @@ type source_pubkey*: ValidatorPubKey 
target_pubkey*: ValidatorPubKey - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#aggregateandproof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#singleattestation + SingleAttestation* = object + committee_index*: uint64 + attester_index*: uint64 + data*: AttestationData + signature*: ValidatorSig + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#aggregateandproof AggregateAndProof* = object aggregator_index*: uint64 # `ValidatorIndex` after validation aggregate*: Attestation selection_proof*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#signedaggregateandproof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#signedaggregateandproof SignedAggregateAndProof* = object message*: AggregateAndProof signature*: ValidatorSig @@ -250,7 +257,7 @@ type # Slot at which the aggregate signature was created (untrusted) signature_slot*: Slot - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate LightClientOptimisticUpdate* = object # Header attested to by the sync committee attested_header*: LightClientHeader @@ -598,7 +605,7 @@ type AttestationCommitteeBits* = BitArray[MAX_COMMITTEES_PER_SLOT.int] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#attestation + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#attestation Attestation* = object aggregation_bits*: ElectraCommitteeValidatorsBits data*: AttestationData @@ -786,7 +793,7 @@ func is_valid_light_client_header*( get_subtree_index(EXECUTION_PAYLOAD_GINDEX), header.beacon.body_root) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#normalize_merkle_branch +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/light-client/fork.md#normalize_merkle_branch func normalize_merkle_branch*[N]( branch: array[N, Eth2Digest], gindex: static GeneralizedIndex): auto = @@ -802,7 +809,7 @@ func normalize_merkle_branch*[N]( res[0 ..< depth] = branch[num_extra ..< branch.len] res -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/electra/light-client/fork.md#upgrading-light-client-data +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/electra/light-client/fork.md#upgrading-light-client-data func upgrade_lc_header_to_electra*( pre: deneb.LightClientHeader): LightClientHeader = LightClientHeader( @@ -957,11 +964,10 @@ template asTrusted*( MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) -debugComment "this whole section with getValidatorIndices/shortLog needs refactoring and probably can be combined with identical implementations elsewhere" - from std/sets import toHashSet -iterator getValidatorIndices*(attester_slashing: AttesterSlashing | TrustedAttesterSlashing): uint64 = +iterator getValidatorIndices*( + attester_slashing: AttesterSlashing | TrustedAttesterSlashing): uint64 = template attestation_1(): auto = attester_slashing.attestation_1 template attestation_2(): auto = attester_slashing.attestation_2 @@ -971,14 +977,25 @@ iterator 
getValidatorIndices*(attester_slashing: AttesterSlashing | TrustedAttes continue yield validator_index +func shortLog*(v: ElectraCommitteeValidatorsBits): auto = + $v.countOnes() & "/" & $v.len() + func shortLog*(v: electra.Attestation | electra.TrustedAttestation): auto = ( - aggregation_bits: v.aggregation_bits, + aggregation_bits: shortLog(v.aggregation_bits), committee_bits: v.committee_bits, data: shortLog(v.data), signature: shortLog(v.signature) ) +func shortLog*(v: SingleAttestation): auto = + ( + committee_index: v.committee_index, + attester_index: v.attester_index, + data: shortLog(v.data), + signature: shortLog(v.signature) + ) + func init*( T: type Attestation, committee_index: CommitteeIndex, diff --git a/beacon_chain/spec/datatypes/fulu.nim b/beacon_chain/spec/datatypes/fulu.nim index 260dbeea2c..05a4019d0e 100644 --- a/beacon_chain/spec/datatypes/fulu.nim +++ b/beacon_chain/spec/datatypes/fulu.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -40,7 +40,7 @@ from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs export json_serialization, base const - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/polynomial-commitments-sampling.md#cells + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/fulu/polynomial-commitments-sampling.md#cells FIELD_ELEMENTS_PER_EXT_BLOB* = 2 * kzg_abi.FIELD_ELEMENTS_PER_BLOB # Number of field elements in a Reed-Solomon extended blob | FIELD_ELEMENTS_PER_CELL* = 64 # Number of field elements in a cell | @@ -49,22 +49,34 @@ const CELLS_PER_EXT_BLOB* = FIELD_ELEMENTS_PER_EXT_BLOB div FIELD_ELEMENTS_PER_CELL # The number of cells in an extended blob | - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/p2p-interface.md#preset + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#preset KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH* = 4 KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GINDEX* = 27 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#data-size + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#data-size NUMBER_OF_COLUMNS* = 128 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#networking + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#configuration DATA_COLUMN_SIDECAR_SUBNET_COUNT* = 128 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#custody-setting + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#custody-setting SAMPLES_PER_SLOT* = 8 CUSTODY_REQUIREMENT* = 4 + NUMBER_OF_CUSTODY_GROUPS* = 128 + + # Minimum number of custody groups an honest node with + # validators attached custodies and serves samples from + VALIDATOR_CUSTODY_REQUIREMENT* = 8 + + # Balance increment corresponding to one additional group to custody + # 2**5 * 10**9 (= 32,000,000,000) Gwei + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP*: uint64 = 32000000000'u64 + + # Number of columns in the network 
per custody group + COLUMNS_PER_GROUP* = NUMBER_OF_COLUMNS div NUMBER_OF_CUSTODY_GROUPS type - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/_features/eip7594/polynomial-commitments-sampling.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/fulu/polynomial-commitments-sampling.md#custom-types BLSFieldElement* = KzgBytes32 G2Point* = array[96, byte] PolynomialCoeff* = List[BLSFieldElement, FIELD_ELEMENTS_PER_EXT_BLOB] @@ -74,16 +86,16 @@ type Cells* = KzgCells CellsAndProofs* = KzgCellsAndKzgProofs - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#custom-types + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custom-types RowIndex* = uint64 ColumnIndex* = uint64 CellIndex* = uint64 - + CustodyIndex* = uint64 type DataColumn* = List[KzgCell, Limit(MAX_BLOB_COMMITMENTS_PER_BLOCK)] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/_features/eip7594/das-core.md#datacolumnsidecar + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#datacolumnsidecar DataColumnSidecar* = object index*: ColumnIndex # Index of column in extended matrix column*: DataColumn @@ -95,12 +107,12 @@ type DataColumnSidecars* = seq[ref DataColumnSidecar] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/p2p-interface.md#datacolumnidentifier + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#datacolumnidentifier DataColumnIdentifier* = object block_root*: Eth2Digest index*: ColumnIndex - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/_features/eip7594/das-core.md#matrixentry + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#matrixentry MatrixEntry* = object cell*: Cell kzg_proof*: KzgProof @@ -108,18 +120,18 @@ type row_index*: RowIndex # Not in spec, defined in order to compute custody subnets - CscBits* = BitArray[DATA_COLUMN_SIDECAR_SUBNET_COUNT] + CgcBits* = BitArray[DATA_COLUMN_SIDECAR_SUBNET_COUNT] - CscCount* = uint8 + CgcCount* = uint8 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/p2p-interface.md#metadata + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#metadata MetaData* = object seq_number*: uint64 attnets*: AttnetBits syncnets*: SyncnetBits - custody_subnet_count*: CscCount + custody_group_count*: uint64 - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#executionpayload + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayload ExecutionPayload* = object # Execution block header fields parent_hash*: Eth2Digest @@ -149,9 +161,9 @@ type executionPayload*: ExecutionPayload blockValue*: Wei blobsBundle*: BlobsBundle - executionRequests*: array[3, seq[byte]] + executionRequests*: seq[seq[byte]] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#executionpayloadheader + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/deneb/beacon-chain.md#executionpayloadheader ExecutionPayloadHeader* = object # Execution block header fields parent_hash*: Eth2Digest @@ -264,7 +276,7 @@ type LightClientBootstrap | SomeLightClientUpdate - # 
https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientstore + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientstore LightClientStore* = object finalized_header*: LightClientHeader ## Header that is finalized @@ -379,7 +391,7 @@ type data*: BeaconState root*: Eth2Digest # hash_tree_root(data) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#beaconblock BeaconBlock* = object ## For each slot, a proposer is chosen from the validator pool to propose ## a new block. Once the block as been proposed, it is transmitted to @@ -436,7 +448,7 @@ type state_root*: Eth2Digest body*: TrustedBeaconBlockBody - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#beaconblockbody BeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -531,7 +543,7 @@ type blob_kzg_commitments*: KzgCommitments execution_requests*: ExecutionRequests # [New in Electra] - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock SignedBeaconBlock* = object message*: BeaconBlock signature*: ValidatorSig diff --git a/beacon_chain/spec/datatypes/phase0.nim b/beacon_chain/spec/datatypes/phase0.nim index c60bc0f404..a99186ab99 100644 --- a/beacon_chain/spec/datatypes/phase0.nim +++ b/beacon_chain/spec/datatypes/phase0.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -113,7 +113,7 @@ type data*: BeaconState root*: Eth2Digest # hash_tree_root(data) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#beaconblock BeaconBlock* = object ## For each slot, a proposer is chosen from the validator pool to propose ## a new block. 
Once the block as been proposed, it is transmitted to @@ -221,7 +221,7 @@ type deposits*: List[Deposit, Limit MAX_DEPOSITS] voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#signedbeaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock SignedBeaconBlock* = object message*: BeaconBlock signature*: ValidatorSig @@ -259,7 +259,7 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#attesterslashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#attesterslashing AttesterSlashing* = object attestation_1*: IndexedAttestation attestation_2*: IndexedAttestation @@ -305,7 +305,7 @@ type aggregate*: Attestation selection_proof*: ValidatorSig - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#signedaggregateandproof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/validator.md#signedaggregateandproof SignedAggregateAndProof* = object message*: AggregateAndProof signature*: ValidatorSig diff --git a/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim b/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim index 3c02d54b70..df939c1c83 100644 --- a/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim +++ b/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
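# Note on the Fulu custody constants added in fulu.nim above (sketch only,
# not part of this patch): per the fulu das-core validator-custody rules
# these values are combined, roughly, by dividing a node's total attached
# validator balance by BALANCE_PER_ADDITIONAL_CUSTODY_GROUP and clamping to
# [VALIDATOR_CUSTODY_REQUIREMENT, NUMBER_OF_CUSTODY_GROUPS]; the helper name
# and signature below are illustrative rather than the spec function.
func exampleCustodyGroupCount(totalNodeBalanceGwei: uint64): uint64 =
  const
    balancePerGroup = 32_000_000_000'u64  # BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
    minGroups = 8'u64                     # VALIDATOR_CUSTODY_REQUIREMENT
    maxGroups = 128'u64                   # NUMBER_OF_CUSTODY_GROUPS
  # e.g. ten 32 ETH validators: 320_000_000_000 div 32_000_000_000 = 10 groups
  min(max(totalNodeBalanceGwei div balancePerGroup, minGroups), maxGroups)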
@@ -26,6 +26,7 @@ export jsonSerializationResults, rest_keymanager_types from web3/primitives import Hash32, Quantity +from json import getStr, newJString export primitives.Hash32, primitives.Quantity func decodeMediaType*( @@ -68,6 +69,7 @@ RestJson.useDefaultSerializationFor( EmptyBody, Eth1Data, EventBeaconBlockObject, + EventBeaconBlockGossipObject, ExecutionRequests, Fork, FuluSignedBlockContents, @@ -82,8 +84,6 @@ RestJson.useDefaultSerializationFor( GetForkChoiceResponse, GetForkScheduleResponse, GetGenesisResponse, - GetHeaderResponseDeneb, - GetHeaderResponseElectra, GetKeystoresResponse, GetNextWithdrawalsResponse, GetPoolAttesterSlashingsResponse, @@ -168,8 +168,6 @@ RestJson.useDefaultSerializationFor( SignedContributionAndProof, SignedValidatorRegistrationV1, SignedVoluntaryExit, - SubmitBlindedBlockResponseDeneb, - SubmitBlindedBlockResponseElectra, SyncAggregate, SyncAggregatorSelectionData, SyncCommittee, @@ -257,6 +255,7 @@ RestJson.useDefaultSerializationFor( electra.LightClientUpdate, electra.SignedAggregateAndProof, electra.SignedBeaconBlock, + electra.SingleAttestation, electra.TrustedAttestation, electra_mev.BlindedBeaconBlock, electra_mev.BlindedBeaconBlockBody, @@ -340,6 +339,8 @@ const UnableDecodeVersionError = "Unable to decode version" UnableDecodeError = "Unable to decode data" UnexpectedDecodeError = "Unexpected decoding error" + InvalidContentTypeError* = "Invalid content type" + UnexpectedForkVersionError* = "Unexpected fork version received" type EncodeTypes* = @@ -355,9 +356,6 @@ type SetGasLimitRequest | bellatrix_mev.SignedBlindedBeaconBlock | capella_mev.SignedBlindedBeaconBlock | - deneb_mev.SignedBlindedBeaconBlock | - electra_mev.SignedBlindedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock | phase0.AttesterSlashing | SignedValidatorRegistrationV1 | SignedVoluntaryExit | @@ -373,11 +371,14 @@ type DenebSignedBlockContents | ElectraSignedBlockContents | FuluSignedBlockContents | - ForkedMaybeBlindedBeaconBlock + ForkedMaybeBlindedBeaconBlock | + deneb_mev.SignedBlindedBeaconBlock | + electra_mev.SignedBlindedBeaconBlock | + fulu_mev.SignedBlindedBeaconBlock EncodeArrays* = seq[phase0.Attestation] | - seq[electra.Attestation] | + seq[electra.SingleAttestation] | seq[PrepareBeaconProposer] | seq[RemoteKeystoreInfo] | seq[RestCommitteeSubscription] | @@ -391,6 +392,14 @@ type seq[RestBeaconCommitteeSelection] | seq[RestSyncCommitteeSelection] + MevDecodeTypes* = + GetHeaderResponseDeneb | + GetHeaderResponseElectra | + GetHeaderResponseFulu | + SubmitBlindedBlockResponseDeneb | + SubmitBlindedBlockResponseElectra | + SubmitBlindedBlockResponseFulu + DecodeTypes* = DataEnclosedObject | DataMetaEnclosedObject | @@ -1988,7 +1997,7 @@ proc readValue*(reader: var JsonReader[RestJson], proc writeValue*(writer: var JsonWriter[RestJson], proof: ForkedAggregateAndProof) {.raises: [IOError].} = writer.beginRecord() - writer.writeField("version", proof.kind) + writer.writeField("version", proof.kind.toString()) withAggregateAndProof(proof): writer.writeField("data", forkyProof) writer.endRecord() @@ -3035,7 +3044,7 @@ proc decodeBody*( [version, $exc.msg])) ok(RestPublishedSignedBeaconBlock(ForkedSignedBeaconBlock.init(blck))) else: - err(RestErrorMessage.init(Http415, "Invalid content type", + err(RestErrorMessage.init(Http415, InvalidContentTypeError, [version, $body.contentType])) proc decodeBody*( @@ -3231,9 +3240,102 @@ proc decodeBody*( ok(RestPublishedSignedBlockContents( kind: ConsensusFork.Fulu, fuluData: blckContents)) else: - 
err(RestErrorMessage.init(Http415, "Invalid content type", + err(RestErrorMessage.init(Http415, InvalidContentTypeError, [version, $body.contentType])) +proc decodeBodyJsonOrSsz*( + t: typedesc[seq[SignedValidatorRegistrationV1]], + body: ContentBody +): Result[seq[SignedValidatorRegistrationV1], RestErrorMessage] = + if body.contentType == ApplicationJsonMediaType: + let data = + try: + RestJson.decode( + body.data, + seq[SignedValidatorRegistrationV1], + requireAllFields = true, + allowUnknownFields = true) + except SerializationError as exc: + debug "Failed to deserialize REST JSON data", + err = exc.formatMsg("") + return err( + RestErrorMessage.init(Http400, UnableDecodeError, + [exc.formatMsg("")])) + ok(data) + elif body.contentType == OctetStreamMediaType: + let data = + try: + SSZ.decode( + body.data, + List[SignedValidatorRegistrationV1, Limit VALIDATOR_REGISTRY_LIMIT]) + except SerializationError as exc: + debug "Failed to deserialize REST SSZ data", + err = exc.formatMsg("") + return err( + RestErrorMessage.init(Http400, UnableDecodeError, + [exc.formatMsg("")])) + ok(data.asSeq) + else: + err(RestErrorMessage.init(Http415, InvalidContentTypeError, + [$body.contentType])) + +proc decodeBytesJsonOrSsz*( + T: typedesc[MevDecodeTypes], + data: openArray[byte], + contentType: Opt[ContentTypeData], + version: string +): Result[T, RestErrorMessage] = + var res {.noinit.}: T + const typeFork = kind(typeof(res.data)) + + if contentType == ApplicationJsonMediaType: + res = + try: + RestJson.decode( + data, + T, + requireAllFields = true, + allowUnknownFields = true) + except SerializationError as exc: + debug "Failed to deserialize REST JSON data", + err = exc.formatMsg("") + return err( + RestErrorMessage.init(Http400, UnableDecodeError, + [exc.formatMsg("")])) + let jsonFork = ConsensusFork.decodeString(res.version.getStr()).valueOr: + return err(RestErrorMessage.init(Http400, UnableDecodeVersionError, + [res.version.getStr(), $error])) + if typeFork != jsonFork: + return err( + RestErrorMessage.init(Http400, UnexpectedForkVersionError, + ["json-version", res.version.getStr(), + typeFork.toString()])) + ok(res) + elif contentType == OctetStreamMediaType: + let consensusFork = + ConsensusFork.decodeString(version).valueOr: + return err(RestErrorMessage.init(Http400, UnableDecodeVersionError, + [version, $error])) + if typeFork != consensusFork: + return err( + RestErrorMessage.init( + Http400, UnexpectedForkVersionError, + ["eth-consensus-version", consensusFork.toString(), + typeFork.toString()])) + + ok(T( + version: newJString(typeFork.toString()), + data: + try: + SSZ.decode(data, typeof(res.data)) + except SerializationError as exc: + return err( + RestErrorMessage.init(Http400, UnableDecodeError, + [exc.formatMsg("")])))) + else: + err(RestErrorMessage.init(Http415, InvalidContentTypeError, + [$contentType])) + proc decodeBody*[T](t: typedesc[T], body: ContentBody): Result[T, cstring] = if body.contentType != ApplicationJsonMediaType: @@ -3284,9 +3386,34 @@ proc decodeBodyJsonOrSsz*[T](t: typedesc[T], RestErrorMessage.init(Http400, UnexpectedDecodeError, [$exc.msg])) ok(blck) else: - err(RestErrorMessage.init(Http415, "Invalid content type", + err(RestErrorMessage.init(Http415, InvalidContentTypeError, [$body.contentType])) +proc encodeBytes*(value: seq[SignedValidatorRegistrationV1], + contentType: string): RestResult[seq[byte]] = + case contentType + of "application/json": + try: + var + stream = memoryOutput() + writer = JsonWriter[RestJson].init(stream) + 
writer.writeArray(value) + ok(stream.getOutput(seq[byte])) + except IOError: + return err("Input/output error") + except SerializationError: + return err("Serialization error") + of "application/octet-stream": + try: + ok(SSZ.encode( + init( + List[SignedValidatorRegistrationV1, Limit VALIDATOR_REGISTRY_LIMIT], + value))) + except SerializationError: + return err("Serialization error") + else: + err("Content-Type not supported") + proc encodeBytes*[T: EncodeTypes](value: T, contentType: string): RestResult[seq[byte]] = case contentType @@ -3326,29 +3453,26 @@ proc encodeBytes*[T: EncodeArrays](value: T, err("Content-Type not supported") proc encodeBytes*[T: EncodeOctetTypes]( - value: T, - contentType: string - ): RestResult[seq[byte]] = + value: T, + contentType: string +): RestResult[seq[byte]] = case contentType of "application/json": - let data = - try: - var stream = memoryOutput() - var writer = JsonWriter[RestJson].init(stream) - writer.writeValue(value) - stream.getOutput(seq[byte]) - except IOError: - return err("Input/output error") - except SerializationError: - return err("Serialization error") - ok(data) + try: + var + stream = memoryOutput() + writer = JsonWriter[RestJson].init(stream) + writer.writeValue(value) + ok(stream.getOutput(seq[byte])) + except IOError: + err("Input/output error") + except SerializationError: + err("Serialization error") of "application/octet-stream": - let data = - try: - SSZ.encode(value) - except CatchableError: - return err("Serialization error") - ok(data) + try: + ok(SSZ.encode(value)) + except CatchableError: + err("Serialization error") else: err("Content-Type not supported") @@ -3599,8 +3723,12 @@ func decodeString*(t: typedesc[EventTopic], ok(EventTopic.Head) of "block": ok(EventTopic.Block) + of "block_gossip": + ok(EventTopic.BlockGossip) of "attestation": ok(EventTopic.Attestation) + of "single_attestation": + ok(EventTopic.SingleAttestation) of "voluntary_exit": ok(EventTopic.VoluntaryExit) of "bls_to_execution_change": @@ -3630,8 +3758,12 @@ func encodeString*(value: set[EventTopic]): Result[string, cstring] = res.add("head,") if EventTopic.Block in value: res.add("block,") + if EventTopic.BlockGossip in value: + res.add("block_gossip,") if EventTopic.Attestation in value: res.add("attestation,") + if EventTopic.SingleAttestation in value: + res.add("single_attestation,") if EventTopic.VoluntaryExit in value: res.add("voluntary_exit,") if EventTopic.BLSToExecutionChange in value: @@ -4067,7 +4199,7 @@ proc readValue*(reader: var JsonReader[RestJson], proc writeValue*(writer: var JsonWriter[RestJson], attestation: ForkedAttestation) {.raises: [IOError].} = writer.beginRecord() - writer.writeField("version", attestation.kind) + writer.writeField("version", attestation.kind.toString()) withAttestation(attestation): writer.writeField("data", forkyAttestation) - writer.endRecord() + writer.endRecord() \ No newline at end of file diff --git a/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim b/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim index 2d4865d32f..3f5a26e456 100644 --- a/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_beacon_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,7 +10,6 @@ import chronos, presto/client, chronicles, ".."/".."/validators/slashing_protection_common, - ".."/datatypes/[phase0, altair, bellatrix], ".."/mev/[bellatrix_mev, capella_mev], ".."/[helpers, forks, keystore, eth2_ssz_serialization], "."/[rest_types, rest_common, eth2_rest_serialization] diff --git a/beacon_chain/spec/eth2_apis/rest_debug_calls.nim b/beacon_chain/spec/eth2_apis/rest_debug_calls.nim index c5c517d623..c11da462d1 100644 --- a/beacon_chain/spec/eth2_apis/rest_debug_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_debug_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,7 +9,7 @@ import chronos, presto/client, - ".."/[helpers, forks], ".."/datatypes/[phase0, altair], + ".."/[helpers, forks], "."/[rest_types, eth2_rest_serialization] export chronos, client, rest_types, eth2_rest_serialization @@ -19,7 +19,6 @@ proc getDebugChainHeadsV2*(): RestResponse[GetDebugChainHeadsV2Response] {. meth: MethodGet.} ## https://ethereum.github.io/beacon-APIs/#/Beacon/getDebugChainHeadsV2 - proc getStateV2Plain*(state_id: StateIdent): RestPlainResponse {. rest, endpoint: "/eth/v2/debug/beacon/states/{state_id}", accept: preferSSZ, diff --git a/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim b/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim index d25144e233..a1fb047361 100644 --- a/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_keymanager_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,7 +10,6 @@ import presto/client, chronicles, ".."/".."/validators/slashing_protection_common, - ".."/datatypes/[phase0, altair], ".."/[helpers, forks, keystore, eth2_ssz_serialization], "."/[rest_types, rest_common, rest_keymanager_types, eth2_rest_serialization] diff --git a/beacon_chain/spec/eth2_apis/rest_types.nim b/beacon_chain/spec/eth2_apis/rest_types.nim index 06182eb2b1..beba6a19d0 100644 --- a/beacon_chain/spec/eth2_apis/rest_types.nim +++ b/beacon_chain/spec/eth2_apis/rest_types.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
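# Reviewer note on the JSON/SSZ handling added above for
# `SignedValidatorRegistrationV1` (illustrative sketch, not part of the
# patch): both `decodeBodyJsonOrSsz` and the new `encodeBytes` overload
# branch purely on the media type, JSON for "application/json" and SSZ for
# "application/octet-stream"; the decoder rejects anything else with Http415
# (InvalidContentTypeError), the encoder with a plain error result. A
# caller-side selection would look roughly like the toy dispatcher below
# (the enum and helper are hypothetical).
import std/options

type ExampleCodec = enum ckJson, ckSsz

func exampleSelectCodec(contentType: string): Option[ExampleCodec] =
  case contentType
  of "application/json":
    some ckJson
  of "application/octet-stream":
    some ckSsz
  else:
    none ExampleCodec  # would surface as Http415 / "Content-Type not supported"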
@@ -56,10 +56,10 @@ static: type # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/eventstream/index.yaml EventTopic* {.pure.} = enum - Head, Block, Attestation, VoluntaryExit, BLSToExecutionChange, - ProposerSlashing, AttesterSlashing, BlobSidecar, FinalizedCheckpoint, - ChainReorg, ContributionAndProof, LightClientFinalityUpdate, - LightClientOptimisticUpdate + Head, Block, Attestation, BlockGossip, VoluntaryExit, BLSToExecutionChange, + ProposerSlashing, AttesterSlashing, BlobSidecar, SingleAttestation, + FinalizedCheckpoint, ChainReorg, ContributionAndProof, + LightClientFinalityUpdate, LightClientOptimisticUpdate EventTopics* = set[EventTopic] @@ -274,6 +274,7 @@ type seq_number*: string syncnets*: string attnets*: string + custody_group_count*: string RestNetworkIdentity* = object peer_id*: string @@ -518,9 +519,6 @@ type GetEpochCommitteesResponse* = DataEnclosedObject[seq[RestBeaconStatesCommittees]] GetForkScheduleResponse* = DataEnclosedObject[seq[Fork]] GetGenesisResponse* = DataEnclosedObject[RestGenesis] - GetHeaderResponseDeneb* = DataVersionEnclosedObject[deneb_mev.SignedBuilderBid] - GetHeaderResponseElectra* = DataVersionEnclosedObject[electra_mev.SignedBuilderBid] - GetHeaderResponseFulu* = DataVersionEnclosedObject[fulu_mev.SignedBuilderBid] GetNetworkIdentityResponse* = DataEnclosedObject[RestNetworkIdentity] GetPeerCountResponse* = DataMetaEnclosedObject[RestPeerCount] GetPeerResponse* = DataMetaEnclosedObject[RestNodePeer] @@ -546,14 +544,18 @@ type GetEpochSyncCommitteesResponse* = DataEnclosedObject[RestEpochSyncCommittee] ProduceAttestationDataResponse* = DataEnclosedObject[AttestationData] ProduceSyncCommitteeContributionResponse* = DataEnclosedObject[SyncCommitteeContribution] - SubmitBlindedBlockResponseDeneb* = DataEnclosedObject[deneb_mev.ExecutionPayloadAndBlobsBundle] - SubmitBlindedBlockResponseElectra* = DataEnclosedObject[electra_mev.ExecutionPayloadAndBlobsBundle] - SubmitBlindedBlockResponseFulu* = DataEnclosedObject[fulu_mev.ExecutionPayloadAndBlobsBundle] GetValidatorsActivityResponse* = DataEnclosedObject[seq[RestActivityItem]] GetValidatorsLivenessResponse* = DataEnclosedObject[seq[RestLivenessItem]] SubmitBeaconCommitteeSelectionsResponse* = DataEnclosedObject[seq[RestBeaconCommitteeSelection]] SubmitSyncCommitteeSelectionsResponse* = DataEnclosedObject[seq[RestSyncCommitteeSelection]] + GetHeaderResponseDeneb* = DataVersionEnclosedObject[deneb_mev.SignedBuilderBid] + GetHeaderResponseElectra* = DataVersionEnclosedObject[electra_mev.SignedBuilderBid] + GetHeaderResponseFulu* = DataVersionEnclosedObject[fulu_mev.SignedBuilderBid] + SubmitBlindedBlockResponseDeneb* = DataVersionEnclosedObject[deneb_mev.ExecutionPayloadAndBlobsBundle] + SubmitBlindedBlockResponseElectra* = DataVersionEnclosedObject[electra_mev.ExecutionPayloadAndBlobsBundle] + SubmitBlindedBlockResponseFulu* = DataVersionEnclosedObject[fulu_mev.ExecutionPayloadAndBlobsBundle] + RestNodeValidity* {.pure.} = enum valid = "VALID", invalid = "INVALID", diff --git a/beacon_chain/spec/eth2_merkleization.nim b/beacon_chain/spec/eth2_merkleization.nim index f7cee32d66..71618fce93 100644 --- a/beacon_chain/spec/eth2_merkleization.nim +++ b/beacon_chain/spec/eth2_merkleization.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -34,8 +34,8 @@ type func hash_tree_root*( x: phase0.HashedBeaconState | altair.HashedBeaconState | bellatrix.HashedBeaconState | capella.HashedBeaconState | - deneb.HashedBeaconState | electra.SignedBeaconBlock | - fulu.SignedBeaconBlock) {. + deneb.HashedBeaconState | electra.HashedBeaconState | + fulu.HashedBeaconState) {. error: "HashedBeaconState should not be hashed".} func hash_tree_root*( @@ -108,4 +108,4 @@ func init*(T: type HashedValidatorPubKey, key: ValidatorPubKey): HashedValidator else: addr tmp[] - HashedValidatorPubKey(value: cached) # https://github.com/nim-lang/Nim/issues/23505 + HashedValidatorPubKey(value: cached) # https://github.com/nim-lang/Nim/issues/23505 \ No newline at end of file diff --git a/beacon_chain/spec/forks.nim b/beacon_chain/spec/forks.nim index 1b5a97b0f7..fe272d1f44 100644 --- a/beacon_chain/spec/forks.nim +++ b/beacon_chain/spec/forks.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -178,7 +178,7 @@ type ForkyAttestation* = phase0.Attestation | - electra.Attestation + electra.SingleAttestation ForkedAttestation* = object case kind*: ConsensusFork @@ -441,7 +441,9 @@ template kind*( deneb.SigVerifiedSignedBeaconBlock | deneb.MsgTrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock | - deneb_mev.SignedBlindedBeaconBlock]): ConsensusFork = + deneb_mev.SignedBlindedBeaconBlock | + deneb_mev.SignedBuilderBid | + deneb_mev.ExecutionPayloadAndBlobsBundle]): ConsensusFork = ConsensusFork.Deneb template kind*( @@ -461,9 +463,12 @@ template kind*( electra.MsgTrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock | electra.Attestation | + electra.SingleAttestation | electra.AggregateAndProof | electra.SignedAggregateAndProof | - electra_mev.SignedBlindedBeaconBlock]): ConsensusFork = + electra_mev.SignedBlindedBeaconBlock | + electra_mev.SignedBuilderBid | + electra_mev.ExecutionPayloadAndBlobsBundle]): ConsensusFork = ConsensusFork.Electra template kind*( @@ -482,7 +487,9 @@ template kind*( fulu.SigVerifiedSignedBeaconBlock | fulu.MsgTrustedSignedBeaconBlock | fulu.TrustedSignedBeaconBlock | - fulu_mev.SignedBlindedBeaconBlock]): ConsensusFork = + fulu_mev.SignedBlindedBeaconBlock | + fulu_mev.SignedBuilderBid | + fulu_mev.ExecutionPayloadAndBlobsBundle]): ConsensusFork = ConsensusFork.Fulu template BeaconState*(kind: static ConsensusFork): auto = @@ -1697,7 +1704,7 @@ func compute_fork_data_root*(current_version: Version, genesis_validators_root: genesis_validators_root )) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_fork_digest +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#compute_fork_digest func compute_fork_digest*(current_version: Version, genesis_validators_root: Eth2Digest): ForkDigest = ## Return the 4-byte fork digest for the ``current_version`` and @@ -1854,6 +1861,10 @@ func committee_index*(v: electra.Attestation, on_chain: static bool): uint64 = else: uint64 v.committee_bits.get_committee_index_one().expect("network attestation") +func committee_index*( + v: 
SingleAttestation, on_chain: static bool = false): uint64 = + v.committee_index + template init*(T: type ForkedAttestation, attestation: phase0.Attestation, fork: ConsensusFork): T = diff --git a/beacon_chain/spec/forks_light_client.nim b/beacon_chain/spec/forks_light_client.nim index 21b590bbb3..5b3a2c3062 100644 --- a/beacon_chain/spec/forks_light_client.nim +++ b/beacon_chain/spec/forks_light_client.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -958,13 +958,13 @@ func migrateToDataFork*( discard elif newKind < x.kind: # Downgrade not supported, re-initialize - x = ForkedLightClientStore(kind: newKind) + x = static(ForkedLightClientStore(kind: newKind)) else: # Upgrade to Altair when newKind >= LightClientDataFork.Altair: if x.kind == LightClientDataFork.None: - x = ForkedLightClientStore( - kind: LightClientDataFork.Altair) + x = static(ForkedLightClientStore( + kind: LightClientDataFork.Altair)) # Upgrade to Capella when newKind >= LightClientDataFork.Capella: diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index d77005b61f..2340844420 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -26,7 +26,7 @@ import export eth2_merkleization, forks, ssz_codec, rlp, eth_types_rlp.append -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md#constants +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md#constants const ETH_TO_GWEI = 1_000_000_000.Gwei func toEther*(gwei: Gwei): Ether = @@ -76,7 +76,7 @@ func is_exited_validator*(validator: Validator, epoch: Epoch): bool = func is_withdrawable_validator*(validator: Validator, epoch: Epoch): bool = epoch >= validator.withdrawable_epoch -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_active_validator_indices +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#get_active_validator_indices iterator get_active_validator_indices*(state: ForkyBeaconState, epoch: Epoch): ValidatorIndex = for vidx in state.validators.vindices: @@ -102,7 +102,7 @@ func get_active_validator_indices_len*( withState(state): get_active_validator_indices_len(forkyState.data, epoch) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_current_epoch +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#get_current_epoch func get_current_epoch*(state: ForkyBeaconState): Epoch = ## Return the current epoch. 
state.slot.epoch @@ -184,7 +184,7 @@ func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest = ) hash_tree_root(domain_wrapped_object) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#get_seed +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#get_seed func get_seed*( state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType, mix: Eth2Digest): Eth2Digest = @@ -203,7 +203,7 @@ func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType): epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1) state.get_seed(epoch, domain_type, mix) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#add_flag +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#add_flag func add_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): ParticipationFlags = let flag = ParticipationFlags(1'u8 shl ord(flag_index)) flags or flag @@ -251,7 +251,7 @@ func create_blob_sidecars*( res.add(sidecar) res -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#is_sync_committee_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#is_sync_committee_update template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool = when update is SomeForkyLightClientUpdateWithSyncCommittee: update.next_sync_committee_branch != @@ -259,7 +259,7 @@ template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool = else: false -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#is_finality_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#is_finality_update template is_finality_update*(update: SomeForkyLightClientUpdate): bool = when update is SomeForkyLightClientUpdateWithFinality: update.finality_branch != @@ -267,7 +267,7 @@ template is_finality_update*(update: SomeForkyLightClientUpdate): bool = else: false -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known template is_next_sync_committee_known*(store: ForkyLightClientStore): bool = store.next_sync_committee != static(default(typeof(store.next_sync_committee))) @@ -279,7 +279,7 @@ func get_safety_threshold*(store: ForkyLightClientStore): uint64 = store.current_max_active_participants ) div 2 -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#is_better_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#is_better_update type LightClientUpdateMetadata* = object attested_slot*, finalized_slot*, signature_slot*: Slot has_sync_committee*, has_finality*: bool @@ -326,10 +326,10 @@ func is_better_data*(new_meta, old_meta: LightClientUpdateMetadata): bool = old_has_supermajority = hasSupermajoritySyncParticipation(old_meta.num_active_participants) if new_has_supermajority != old_has_supermajority: - return new_has_supermajority > old_has_supermajority - if not new_has_supermajority: - if new_meta.num_active_participants != old_meta.num_active_participants: - return 
new_meta.num_active_participants > old_meta.num_active_participants + return new_has_supermajority + if not new_has_supermajority and + new_meta.num_active_participants != old_meta.num_active_participants: + return new_meta.num_active_participants > old_meta.num_active_participants # Compare presence of relevant sync committee let @@ -340,11 +340,11 @@ func is_better_data*(new_meta, old_meta: LightClientUpdateMetadata): bool = old_meta.attested_slot.sync_committee_period == old_meta.signature_slot.sync_committee_period if new_has_relevant_sync_committee != old_has_relevant_sync_committee: - return new_has_relevant_sync_committee > old_has_relevant_sync_committee + return new_has_relevant_sync_committee # Compare indication of any finality if new_meta.has_finality != old_meta.has_finality: - return new_meta.has_finality > old_meta.has_finality + return new_meta.has_finality # Compare sync committee finality if new_meta.has_finality: @@ -356,14 +356,18 @@ func is_better_data*(new_meta, old_meta: LightClientUpdateMetadata): bool = old_meta.finalized_slot.sync_committee_period == old_meta.attested_slot.sync_committee_period if new_has_sync_committee_finality != old_has_sync_committee_finality: - return new_has_sync_committee_finality > old_has_sync_committee_finality + return new_has_sync_committee_finality # Tiebreaker 1: Sync committee participation beyond supermajority if new_meta.num_active_participants != old_meta.num_active_participants: return new_meta.num_active_participants > old_meta.num_active_participants - # Tiebreaker 2: Prefer older data (fewer changes to best data) - new_meta.attested_slot < old_meta.attested_slot + # Tiebreaker 2: Prefer older data (fewer changes to best) + if new_meta.attested_slot != old_meta.attested_slot: + return new_meta.attested_slot < old_meta.attested_slot + + # Tiebreaker 3: Prefer updates with earlier signature slots + new_meta.signature_slot < old_meta.signature_slot template is_better_update*[ A, B: SomeForkyLightClientUpdate | ForkedLightClientUpdate]( @@ -380,7 +384,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch = func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch = update.attested_header.beacon.slot.epoch -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#is_merge_transition_complete +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete func is_merge_transition_complete*( state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState): bool = @@ -388,15 +392,18 @@ func is_merge_transition_complete*( default(typeof(state.latest_execution_payload_header)) state.latest_execution_payload_header != defaultExecutionPayloadHeader -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/sync/optimistic.md#helpers -func is_execution_block*(blck: SomeForkyBeaconBlock): bool = - when typeof(blck).kind >= ConsensusFork.Bellatrix: +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/sync/optimistic.md#helpers +func is_execution_block*(body: SomeForkyBeaconBlockBody): bool = + when typeof(body).kind >= ConsensusFork.Bellatrix: const defaultExecutionPayload = - default(typeof(blck.body.execution_payload)) - blck.body.execution_payload != defaultExecutionPayload + default(typeof(body.execution_payload)) + body.execution_payload != defaultExecutionPayload else: false +func is_execution_block*(blck: SomeForkyBeaconBlock): bool = + 
blck.body.is_execution_block + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#is_merge_transition_block func is_merge_transition_block( state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | @@ -415,7 +422,7 @@ func is_merge_transition_block( not is_merge_transition_complete(state) and body.execution_payload != defaultExecutionPayload -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#is_execution_enabled +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/bellatrix/beacon-chain.md#is_execution_enabled func is_execution_enabled*( state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState, @@ -455,20 +462,16 @@ proc computeTransactionsTrieRoot( func computeRequestsHash( requests: electra.ExecutionRequests): EthHash32 = - const - DEPOSIT_REQUEST_TYPE = 0x00'u8 # EIP-6110 - WITHDRAWAL_REQUEST_TYPE = 0x01'u8 # EIP-7002 - CONSOLIDATION_REQUEST_TYPE = 0x02'u8 # EIP-7251 + template individualHash(requestType, requestList): Digest = + computeDigest: + h.update([requestType.byte]) + for request in requestList: + h.update SSZ.encode(request) let requestsHash = computeDigest: template mixInRequests(requestType, requestList): untyped = - block: - let hash = computeDigest: - bind h - h.update([requestType.byte]) - for request in requestList: - h.update SSZ.encode(request) - h.update(hash.data) + if requestList.len > 0: + h.update(individualHash(requestType, requestList).data) static: doAssert DEPOSIT_REQUEST_TYPE < WITHDRAWAL_REQUEST_TYPE @@ -479,9 +482,10 @@ func computeRequestsHash( requestsHash.to(EthHash32) -proc blockToBlockHeader*(blck: ForkyBeaconBlock): EthHeader = - template payload: auto = blck.body.execution_payload - +proc toExecutionBlockHeader( + payload: ForkyExecutionPayload, + parentRoot: Eth2Digest, + requestsHash = Opt.none(EthHash32)): EthHeader = static: # `GasInt` is signed. We only use it for hashing. 
doAssert sizeof(GasInt) == sizeof(payload.gas_limit) doAssert sizeof(GasInt) == sizeof(payload.gas_used) @@ -505,12 +509,7 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): EthHeader = Opt.none(uint64) parentBeaconBlockRoot = when typeof(payload).kind >= ConsensusFork.Deneb: - Opt.some EthHash32(blck.parent_root.data) - else: - Opt.none(EthHash32) - requestsHash = - when typeof(payload).kind >= ConsensusFork.Electra: - Opt.some blck.body.execution_requests.computeRequestsHash() + Opt.some EthHash32(parentRoot.data) else: Opt.none(EthHash32) @@ -537,8 +536,19 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): EthHeader = parentBeaconBlockRoot : parentBeaconBlockRoot, # EIP-4788 requestsHash : requestsHash) # EIP-7685 +proc compute_execution_block_hash*( + body: ForkyBeaconBlockBody, + parentRoot: Eth2Digest): Eth2Digest = + when typeof(body).kind >= ConsensusFork.Electra: + body.execution_payload.toExecutionBlockHeader( + parentRoot, Opt.some body.execution_requests.computeRequestsHash()) + .rlpHash().to(Eth2Digest) + else: + body.execution_payload.toExecutionBlockHeader(parentRoot) + .rlpHash().to(Eth2Digest) + proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest = - rlpHash(blockToBlockHeader(blck)).to(Eth2Digest) + blck.body.compute_execution_block_hash(blck.parent_root) from std/math import exp, ln from std/sequtils import foldl diff --git a/beacon_chain/spec/keystore.nim b/beacon_chain/spec/keystore.nim index 80dd1c7868..5b422a5835 100644 --- a/beacon_chain/spec/keystore.nim +++ b/beacon_chain/spec/keystore.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
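The `computeRequestsHash` rewrite earlier in this hunk builds one digest per non-empty request list, prefixed by its EIP-6110/7002/7251 type byte, then hashes the concatenation of those per-type digests; `compute_execution_block_hash` mixes the result into the RLP header for Electra and later. A standalone sketch of that two-level layering, not part of the patch; `hashFn` is a deliberately fake stand-in for the SHA-256 digest the real code uses:

import std/hashes

# Sketch of the EIP-7685 requests-hash layering in `computeRequestsHash`.
const
  DEPOSIT_REQUEST_TYPE = 0x00'u8        # EIP-6110
  WITHDRAWAL_REQUEST_TYPE = 0x01'u8     # EIP-7002
  CONSOLIDATION_REQUEST_TYPE = 0x02'u8  # EIP-7251

proc hashFn(data: seq[byte]): seq[byte] =
  ## Placeholder digest, NOT SHA-256; keeps the sketch dependency-free.
  @(cast[array[8, byte]](cast[uint64](hash(data))))

proc requestsHash(deposits, withdrawals, consolidations: seq[seq[byte]]): seq[byte] =
  ## Outer hash over per-type digests; empty request lists are skipped,
  ## matching the `if requestList.len > 0` guard in the patch.
  var outer: seq[byte]
  for typ, reqs in [(DEPOSIT_REQUEST_TYPE, deposits),
                    (WITHDRAWAL_REQUEST_TYPE, withdrawals),
                    (CONSOLIDATION_REQUEST_TYPE, consolidations)]:
    if reqs.len > 0:
      var buf = @[typ]
      for r in reqs:
        buf.add r            # the real code appends SSZ-encoded requests here
      outer.add hashFn(buf)
  hashFn(outer)

when isMainModule:
  echo requestsHash(@[@[1'u8, 2, 3]], @[], @[@[9'u8]])
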
@@ -854,8 +854,8 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore) elif prop.path == ".graffiti": prop.capellaIndex = some GeneralizedIndex(18) prop.denebIndex = some GeneralizedIndex(18) - prop.electraIndex = some GeneralizedIndex(801) - prop.fuluIndex = some GeneralizedIndex(801) + prop.electraIndex = some GeneralizedIndex(18) + prop.fuluIndex = some GeneralizedIndex(18) else: reader.raiseUnexpectedValue("Keystores with proven properties different than " & "`.execution_payload.fee_recipient` and `.graffiti` " & @@ -1386,13 +1386,13 @@ proc createWallet*(kdfKind: KdfKind, crypto: crypto, nextAccount: nextAccount.get(0)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#bls_withdrawal_prefix +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#bls_withdrawal_prefix func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest = var bytes = eth2digest(k.toRaw()) bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8 bytes -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/deposit-contract.md#withdrawal-credentials +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/deposit-contract.md#withdrawal-credentials func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest = makeWithdrawalCredentials(k.toPubKey()) @@ -1406,4 +1406,4 @@ func prepareDeposit*(cfg: RuntimeConfig, withdrawal_credentials: makeWithdrawalCredentials(withdrawalPubKey)) res.signature = get_deposit_signature(cfg, res, signingKey).toValidatorSig() - return res + return res \ No newline at end of file diff --git a/beacon_chain/spec/light_client_sync.nim b/beacon_chain/spec/light_client_sync.nim index ea4fe06e8b..00f17b101f 100644 --- a/beacon_chain/spec/light_client_sync.nim +++ b/beacon_chain/spec/light_client_sync.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
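`makeWithdrawalCredentials`, touched above only for its spec links, derives the credential by hashing the BLS pubkey and overwriting byte 0 with BLS_WITHDRAWAL_PREFIX (0x00). A small standalone sketch of that layout; `digest32` is a hypothetical stand-in for eth2digest (SHA-256):

# Sketch of the BLS_WITHDRAWAL_PREFIX credential layout used by
# `makeWithdrawalCredentials`: credentials = SHA-256(pubkey) with byte 0
# replaced by the prefix. `digest32` below is a placeholder, not SHA-256.
const BLS_WITHDRAWAL_PREFIX = 0x00'u8

func digest32(data: openArray[byte]): array[32, byte] =
  ## Placeholder 32-byte digest so the sketch runs without crypto deps.
  for i, b in data:
    result[i mod 32] = result[i mod 32] xor b

func withdrawalCredentials(pubkey: openArray[byte]): array[32, byte] =
  result = digest32(pubkey)
  result[0] = BLS_WITHDRAWAL_PREFIX  # tag the credential type in byte 0

when isMainModule:
  var pk: array[48, byte]            # BLS pubkeys are 48 bytes
  for i in 0 ..< pk.len: pk[i] = byte(i)
  doAssert withdrawalCredentials(pk)[0] == 0x00'u8
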
@@ -59,7 +59,7 @@ func initialize_light_client_store*( current_sync_committee: bootstrap.current_sync_committee, optimistic_header: bootstrap.header)) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#validate_light_client_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/light-client/sync-protocol.md#validate_light_client_update proc validate_light_client_update*( store: ForkyLightClientStore, update: SomeForkyLightClientUpdate, @@ -180,7 +180,7 @@ proc validate_light_client_update*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#apply_light_client_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/light-client/sync-protocol.md#apply_light_client_update func apply_light_client_update( store: var ForkyLightClientStore, update: SomeForkyLightClientUpdate): bool = @@ -211,7 +211,7 @@ func apply_light_client_update( didProgress = true didProgress -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/sync-protocol.md#process_light_client_store_force_update +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_store_force_update type ForceUpdateResult* = enum NoUpdate, diff --git a/beacon_chain/spec/mev/electra_mev.nim b/beacon_chain/spec/mev/electra_mev.nim index bb0c96e462..ca7650075e 100644 --- a/beacon_chain/spec/mev/electra_mev.nim +++ b/beacon_chain/spec/mev/electra_mev.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -20,6 +20,7 @@ type BuilderBid* = object header*: electra.ExecutionPayloadHeader blob_kzg_commitments*: KzgCommitments + execution_requests*: ExecutionRequests # [New in Electra] value*: UInt256 pubkey*: ValidatorPubKey diff --git a/beacon_chain/spec/mev/fulu_mev.nim b/beacon_chain/spec/mev/fulu_mev.nim index c6dd0cdd16..eee40a391c 100644 --- a/beacon_chain/spec/mev/fulu_mev.nim +++ b/beacon_chain/spec/mev/fulu_mev.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
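These light-client sync hunks sit on top of the reworked `is_better_data` ordering from the helpers.nim changes earlier in the patch: candidate updates are ranked by supermajority participation, relevance of the sync committee, finality, sync-committee finality, raw participation, older attested slot, and now, as a final tiebreaker, older signature slot. A simplified standalone model of that comparison chain; the record type, field names, and plain int/bool types are illustrative, not the real LightClientUpdateMetadata:

# Simplified model of the `is_better_data` ranking of LightClientUpdate
# candidates (sketch only; supermajority and committee relevance are
# precomputed booleans here rather than derived from slots and bit counts).
type UpdateMeta = object
  hasSupermajority: bool
  hasRelevantSyncCommittee: bool
  hasFinality: bool
  hasSyncCommitteeFinality: bool
  numActiveParticipants: int
  attestedSlot: int
  signatureSlot: int

func isBetterData(newMeta, oldMeta: UpdateMeta): bool =
  if newMeta.hasSupermajority != oldMeta.hasSupermajority:
    return newMeta.hasSupermajority
  if not newMeta.hasSupermajority and
      newMeta.numActiveParticipants != oldMeta.numActiveParticipants:
    return newMeta.numActiveParticipants > oldMeta.numActiveParticipants
  if newMeta.hasRelevantSyncCommittee != oldMeta.hasRelevantSyncCommittee:
    return newMeta.hasRelevantSyncCommittee
  if newMeta.hasFinality != oldMeta.hasFinality:
    return newMeta.hasFinality
  if newMeta.hasFinality and
      newMeta.hasSyncCommitteeFinality != oldMeta.hasSyncCommitteeFinality:
    return newMeta.hasSyncCommitteeFinality
  # Tiebreakers: more participation, then older attested slot, then
  # older signature slot (the final tiebreaker added in this patch).
  if newMeta.numActiveParticipants != oldMeta.numActiveParticipants:
    return newMeta.numActiveParticipants > oldMeta.numActiveParticipants
  if newMeta.attestedSlot != oldMeta.attestedSlot:
    return newMeta.attestedSlot < oldMeta.attestedSlot
  newMeta.signatureSlot < oldMeta.signatureSlot
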
@@ -22,6 +22,7 @@ type BuilderBid* = object header*: ExecutionPayloadHeader blob_kzg_commitments*: KzgCommitments + execution_requests*: ExecutionRequests # [New in Electra] value*: UInt256 pubkey*: ValidatorPubKey @@ -149,4 +150,4 @@ func toSignedBlindedBeaconBlock*(blck: fulu.SignedBeaconBlock): bls_to_execution_changes: blck.message.body.bls_to_execution_changes, blob_kzg_commitments: blck.message.body.blob_kzg_commitments, execution_requests: blck.message.body.execution_requests)), - signature: blck.signature) + signature: blck.signature) \ No newline at end of file diff --git a/beacon_chain/spec/mev/rest_deneb_mev_calls.nim b/beacon_chain/spec/mev/rest_deneb_mev_calls.nim index 327144d220..8074bef942 100644 --- a/beacon_chain/spec/mev/rest_deneb_mev_calls.nim +++ b/beacon_chain/spec/mev/rest_deneb_mev_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -13,6 +13,11 @@ import export chronos, client, rest_types, eth2_rest_serialization +proc getStatus*(): RestPlainResponse {. + rest, endpoint: "/eth/v1/builder/status", + meth: MethodGet.} + ## https://ethereum.github.io/builder-specs/#/Builder/status + proc registerValidator*(body: seq[SignedValidatorRegistrationV1] ): RestPlainResponse {. rest, endpoint: "/eth/v1/builder/validators", @@ -20,19 +25,33 @@ proc registerValidator*(body: seq[SignedValidatorRegistrationV1] ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/validators.yaml ## https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml -proc getHeaderDeneb*(slot: Slot, - parent_hash: Eth2Digest, - pubkey: ValidatorPubKey - ): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", - meth: MethodGet, connection: {Dedicated, Close}.} +proc getHeaderDenebPlain*( + slot: Slot, + parent_hash: Eth2Digest, + pubkey: ValidatorPubKey +): RestPlainResponse {. + rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", + meth: MethodGet, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml +proc getHeaderDeneb*( + client: RestClientRef, + slot: Slot, + parent_hash: Eth2Digest, + pubkey: ValidatorPubKey +): Future[RestPlainResponse] {. + async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, + RestCommunicationError], raw: true).} = + client.getHeaderDenebPlain( + slot, parent_hash, pubkey, + restAcceptType = "application/octet-stream,application/json;q=0.5", + ) + proc submitBlindedBlockPlain*( body: deneb_mev.SignedBlindedBeaconBlock ): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/blinded_blocks", - meth: MethodPost, connection: {Dedicated, Close}.} + rest, endpoint: "/eth/v1/builder/blinded_blocks", + meth: MethodPost, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml proc submitBlindedBlock*( @@ -40,9 +59,10 @@ proc submitBlindedBlock*( body: deneb_mev.SignedBlindedBeaconBlock ): Future[RestPlainResponse] {. 
async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, - RestCommunicationError]).} = + RestCommunicationError], raw: true).} = ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml - await client.submitBlindedBlockPlain( + client.submitBlindedBlockPlain( body, + restAcceptType = "application/octet-stream,application/json;q=0.5", extraHeaders = @[("eth-consensus-version", toString(ConsensusFork.Deneb))] ) diff --git a/beacon_chain/spec/mev/rest_electra_mev_calls.nim b/beacon_chain/spec/mev/rest_electra_mev_calls.nim index 2b92d8a55d..6984c5571c 100644 --- a/beacon_chain/spec/mev/rest_electra_mev_calls.nim +++ b/beacon_chain/spec/mev/rest_electra_mev_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -13,29 +13,44 @@ import export chronos, client, rest_types, eth2_rest_serialization -proc getHeaderElectra*(slot: Slot, - parent_hash: Eth2Digest, - pubkey: ValidatorPubKey - ): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", - meth: MethodGet, connection: {Dedicated, Close}.} +proc getHeaderElectraPlain*( + slot: Slot, + parent_hash: Eth2Digest, + pubkey: ValidatorPubKey +): RestPlainResponse {. + rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", + meth: MethodGet, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml +proc getHeaderElectra*( + client: RestClientRef, + slot: Slot, + parent_hash: Eth2Digest, + pubkey: ValidatorPubKey +): Future[RestPlainResponse] {. + async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, + RestCommunicationError], raw: true).} = + client.getHeaderElectraPlain( + slot, parent_hash, pubkey, + restAcceptType = "application/octet-stream,application/json;q=0.5", + ) + proc submitBlindedBlockPlain*( body: electra_mev.SignedBlindedBeaconBlock ): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/blinded_blocks", - meth: MethodPost, connection: {Dedicated, Close}.} + rest, endpoint: "/eth/v1/builder/blinded_blocks", + meth: MethodPost, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml proc submitBlindedBlock*( - client: RestClientRef, - body: electra_mev.SignedBlindedBeaconBlock + client: RestClientRef, + body: electra_mev.SignedBlindedBeaconBlock ): Future[RestPlainResponse] {. 
async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, - RestCommunicationError]).} = + RestCommunicationError], raw: true).} = ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml - await client.submitBlindedBlockPlain( + client.submitBlindedBlockPlain( body, + restAcceptType = "application/octet-stream,application/json;q=0.5", extraHeaders = @[("eth-consensus-version", toString(ConsensusFork.Electra))] ) diff --git a/beacon_chain/spec/mev/rest_fulu_mev_calls.nim b/beacon_chain/spec/mev/rest_fulu_mev_calls.nim index 8be08ae3c5..61bd649bcd 100644 --- a/beacon_chain/spec/mev/rest_fulu_mev_calls.nim +++ b/beacon_chain/spec/mev/rest_fulu_mev_calls.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -13,19 +13,33 @@ import export chronos, client, rest_types, eth2_rest_serialization -proc getHeaderFulu*(slot: Slot, - parent_hash: Eth2Digest, - pubkey: ValidatorPubKey - ): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", - meth: MethodGet, connection: {Dedicated, Close}.} +proc getHeaderFuluPlain*( + slot: Slot, + parent_hash: Eth2Digest, + pubkey: ValidatorPubKey +): RestPlainResponse {. + rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}", + meth: MethodGet, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml +proc getHeaderFulu*( + client: RestClientRef, + slot: Slot, + parent_hash: Eth2Digest, + pubkey: ValidatorPubKey +): Future[RestPlainResponse] {. + async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, + RestCommunicationError], raw: true).} = + client.getHeaderFuluPlain( + slot, parent_hash, pubkey, + restAcceptType = "application/octet-stream,application/json;q=0.5", + ) + proc submitBlindedBlockPlain*( body: fulu_mev.SignedBlindedBeaconBlock ): RestPlainResponse {. - rest, endpoint: "/eth/v1/builder/blinded_blocks", - meth: MethodPost, connection: {Dedicated, Close}.} + rest, endpoint: "/eth/v1/builder/blinded_blocks", + meth: MethodPost, connection: {Dedicated, Close}.} ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml proc submitBlindedBlock*( @@ -33,9 +47,10 @@ proc submitBlindedBlock*( body: fulu_mev.SignedBlindedBeaconBlock ): Future[RestPlainResponse] {. 
async: (raises: [CancelledError, RestEncodingError, RestDnsResolveError, - RestCommunicationError]).} = + RestCommunicationError], raw: true).} = ## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml - await client.submitBlindedBlockPlain( + client.submitBlindedBlockPlain( body, + restAcceptType = "application/octet-stream,application/json;q=0.5", extraHeaders = @[("eth-consensus-version", toString(ConsensusFork.Fulu))] ) diff --git a/beacon_chain/spec/network.nim b/beacon_chain/spec/network.nim index e5795ec303..3a89bfda5d 100644 --- a/beacon_chain/spec/network.nim +++ b/beacon_chain/spec/network.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -14,25 +14,25 @@ import export base const - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#topics-and-messages - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#topics-and-messages - topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy" - topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy" - topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy" - topicAttesterSlashingsSuffix* = "attester_slashing/ssz_snappy" - topicAggregateAndProofsSuffix* = "beacon_aggregate_and_proof/ssz_snappy" - topicBlsToExecutionChangeSuffix* = "bls_to_execution_change/ssz_snappy" + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#topics-and-messages + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/p2p-interface.md#topics-and-messages + topicBeaconBlocksSuffix = "beacon_block/ssz_snappy" + topicVoluntaryExitsSuffix = "voluntary_exit/ssz_snappy" + topicProposerSlashingsSuffix = "proposer_slashing/ssz_snappy" + topicAttesterSlashingsSuffix = "attester_slashing/ssz_snappy" + topicAggregateAndProofsSuffix = "beacon_aggregate_and_proof/ssz_snappy" + topicBlsToExecutionChangeSuffix = "bls_to_execution_change/ssz_snappy" const # The spec now includes this as a bare uint64 as `RESP_TIMEOUT` RESP_TIMEOUT_DUR* = RESP_TIMEOUT.int64.seconds - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/p2p-interface.md#configuration + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#configuration MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128 - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#configuration - MAX_REQUEST_BLOB_SIDECARS*: uint64 = - MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#configuration + MAX_REQUEST_DATA_COLUMN_SIDECARS*: uint64 = + MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS defaultEth2TcpPort* = 9000 defaultEth2TcpPortDesc* = $defaultEth2TcpPort @@ -43,6 +43,7 @@ const enrAttestationSubnetsField* = "attnets" enrSyncSubnetsField* = "syncnets" + enrCustodySubnetCountField* = "cgc" enrForkIdField* = "eth2" template eth2Prefix(forkDigest: ForkDigest): string = @@ -63,11 +64,11 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string = func 
getAggregateAndProofsTopic*(forkDigest: ForkDigest): string = eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#topics-and-messages +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/p2p-interface.md#topics-and-messages func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string = eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#broadcast-attestation +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#broadcast-attestation func compute_subnet_for_attestation*( committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex): SubnetId = @@ -83,33 +84,40 @@ func compute_subnet_for_attestation*( (committees_since_epoch_start + committee_index.asUInt64) mod ATTESTATION_SUBNET_COUNT) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-attestation +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#broadcast-attestation func getAttestationTopic*(forkDigest: ForkDigest, subnetId: SubnetId): string = ## For subscribing and unsubscribing to/from a subnet. eth2Prefix(forkDigest) & "beacon_attestation_" & $(subnetId) & "/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/p2p-interface.md#topics-and-messages +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#topics-and-messages func getSyncCommitteeTopic*(forkDigest: ForkDigest, subcommitteeIdx: SyncSubcommitteeIndex): string = ## For subscribing and unsubscribing to/from a subnet. eth2Prefix(forkDigest) & "sync_committee_" & $subcommitteeIdx & "/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#topics-and-messages +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#topics-and-messages func getSyncCommitteeContributionAndProofTopic*(forkDigest: ForkDigest): string = ## For subscribing and unsubscribing to/from a subnet. 
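`compute_subnet_for_attestation`, whose spec link is refreshed above, maps a (slot, committee) pair onto one of ATTESTATION_SUBNET_COUNT gossip subnets by counting committees since the start of the epoch. A standalone sketch with mainnet-style constants (64 subnets, 32 slots per epoch) and plain uint64 in place of the Slot/CommitteeIndex types:

const
  ATTESTATION_SUBNET_COUNT = 64'u64   # mainnet value (assumption for this sketch)
  SLOTS_PER_EPOCH = 32'u64

func computeSubnetForAttestation(
    committeesPerSlot, slot, committeeIndex: uint64): uint64 =
  ## Subnet = (committees since the start of the epoch + committee index)
  ## modulo the subnet count, the same formula as the spec function above.
  let
    slotsSinceEpochStart = slot mod SLOTS_PER_EPOCH
    committeesSinceEpochStart = committeesPerSlot * slotsSinceEpochStart
  (committeesSinceEpochStart + committeeIndex) mod ATTESTATION_SUBNET_COUNT

when isMainModule:
  # 4 committees per slot, slot 35 (3rd slot of epoch 1), committee 1 -> subnet 13.
  doAssert computeSubnetForAttestation(4, 35, 1) == 13
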
eth2Prefix(forkDigest) & "sync_committee_contribution_and_proof/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id func getBlobSidecarTopic*(forkDigest: ForkDigest, subnet_id: BlobId): string = eth2Prefix(forkDigest) & "blob_sidecar_" & $subnet_id & "/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/validator.md#sidecar -func compute_subnet_for_blob_sidecar*(blob_index: BlobIndex): BlobId = - BlobId(blob_index mod BLOB_SIDECAR_SUBNET_COUNT) +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/deneb/validator.md#sidecar +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/electra/validator.md#sidecar +func compute_subnet_for_blob_sidecar*( + cfg: RuntimeConfig, slot: Slot, blob_index: BlobIndex): BlobId = + let subnetCount = + if slot >= cfg.ELECTRA_FORK_EPOCH.start_slot: + cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA + else: + cfg.BLOB_SIDECAR_SUBNET_COUNT + BlobId(blob_index mod subnetCount) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/_features/eip7594/p2p-interface.md#compute_subnet_for_data_column_sidecar +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar func compute_subnet_for_data_column_sidecar*(column_index: ColumnIndex): uint64 = uint64(column_index mod DATA_COLUMN_SIDECAR_SUBNET_COUNT) @@ -159,7 +167,8 @@ func getDiscoveryForkID*(cfg: RuntimeConfig, type GossipState* = set[ConsensusFork] func getTargetGossipState*( epoch, ALTAIR_FORK_EPOCH, BELLATRIX_FORK_EPOCH, CAPELLA_FORK_EPOCH, - DENEB_FORK_EPOCH: Epoch, ELECTRA_FORK_EPOCH: Epoch, isBehind: bool): + DENEB_FORK_EPOCH, ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH: Epoch, + isBehind: bool): GossipState = if isBehind: return {} @@ -168,6 +177,7 @@ func getTargetGossipState*( doAssert CAPELLA_FORK_EPOCH >= BELLATRIX_FORK_EPOCH doAssert DENEB_FORK_EPOCH >= CAPELLA_FORK_EPOCH doAssert ELECTRA_FORK_EPOCH >= DENEB_FORK_EPOCH + doAssert FULU_FORK_EPOCH >= ELECTRA_FORK_EPOCH # https://github.com/ethereum/consensus-specs/issues/2902 # Don't care whether ALTAIR_FORK_EPOCH == BELLATRIX_FORK_EPOCH or @@ -195,13 +205,15 @@ func getTargetGossipState*( maybeIncludeFork( ConsensusFork.Deneb, DENEB_FORK_EPOCH, ELECTRA_FORK_EPOCH) maybeIncludeFork( - ConsensusFork.Electra, ELECTRA_FORK_EPOCH, FAR_FUTURE_EPOCH) + ConsensusFork.Electra, ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH) + maybeIncludeFork( + ConsensusFork.Fulu, FULU_FORK_EPOCH, FAR_FUTURE_EPOCH) doAssert len(targetForks) <= 2 targetForks func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] = - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#sync-committee-subnet-stability + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#sync-committee-subnet-stability if epoch.is_sync_committee_period(): return Opt.some 0'u64 let epochsBefore = @@ -220,13 +232,24 @@ func getSyncSubnets*( if not nodeHasPubkey(pubkey): continue - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#broadcast-sync-committee-message + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#broadcast-sync-committee-message # The first quarter of the pubkeys map to subnet 0, the second quarter to # subnet 1, 
the third quarter to subnet 2 and the final quarter to subnet # 3. res.setBit(i div (SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT)) res -iterator blobSidecarTopics*(forkDigest: ForkDigest): string = - for subnet_id in BlobId: +iterator blobSidecarTopics*( + forkDigest: ForkDigest, subnetCount: uint64): string = + for subnet_id in 0.BlobId ..< subnetCount.BlobId: yield getBlobSidecarTopic(forkDigest, subnet_id) + +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id +func getDataColumnSidecarTopic*(forkDigest: ForkDigest, + subnet_id: uint64): string = + eth2Prefix(forkDigest) & "data_column_sidecar_" & $subnet_id & "/ssz_snappy" + +iterator dataColumnSidecarTopics*(forkDigest: ForkDigest, + targetSubnetCount: uint64): string = + for subnet_id in 0'u64..9 + MAX_SUPPORTED_REQUEST_BLOB_SIDECARS*: uint64 = 1152 + type Version* = distinct array[4, byte] Eth1Address* = web3types.Address RuntimeConfig* = object - ## https://github.com/ethereum/consensus-specs/tree/v1.4.0-beta.4/configs + ## https://github.com/ethereum/consensus-specs/tree/v1.5.0-beta.2/configs PRESET_BASE*: string CONFIG_NAME*: string @@ -79,8 +83,6 @@ type MIN_PER_EPOCH_CHURN_LIMIT*: uint64 CHURN_LIMIT_QUOTIENT*: uint64 MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT*: uint64 - MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA*: uint64 - MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT*: uint64 # Fork choice # TODO PROPOSER_SCORE_BOOST*: uint64 @@ -94,11 +96,10 @@ type DEPOSIT_CONTRACT_ADDRESS*: Eth1Address # Networking - # TODO GOSSIP_MAX_SIZE*: uint64 + # TODO MAX_PAYLOAD_SIZE*: uint64 # TODO MAX_REQUEST_BLOCKS*: uint64 # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION*: uint64 MIN_EPOCHS_FOR_BLOCK_REQUESTS*: uint64 - # TODO MAX_CHUNK_SIZE*: uint64 # TODO TTFB_TIMEOUT*: uint64 # TODO RESP_TIMEOUT*: uint64 # TODO ATTESTATION_PROPAGATION_SLOT_RANGE*: uint64 @@ -112,9 +113,29 @@ type # Deneb # TODO MAX_REQUEST_BLOCKS_DENEB*: uint64 - # TODO MAX_REQUEST_BLOB_SIDECARS*: uint64 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS*: uint64 - # TODO BLOB_SIDECAR_SUBNET_COUNT*: uint64 + BLOB_SIDECAR_SUBNET_COUNT*: uint64 + MAX_BLOBS_PER_BLOCK*: uint64 + MAX_REQUEST_BLOB_SIDECARS*: uint64 + + # Electra + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA*: uint64 + MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT*: uint64 + BLOB_SIDECAR_SUBNET_COUNT_ELECTRA*: uint64 + MAX_BLOBS_PER_BLOCK_ELECTRA*: uint64 + MAX_REQUEST_BLOB_SIDECARS_ELECTRA*: uint64 + + # Fulu + # TODO NUMBER_OF_COLUMNS*: uint64 + # TODO NUMBER_OF_CUSTODY_GROUPS*: uint64 + # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT*: uint64 + # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS*: uint64 + # TODO SAMPLES_PER_SLOT*: uint64 + # TODO CUSTODY_REQUIREMENT*: uint64 + # TODO VALIDATOR_CUSTODY_REQUIREMENT*: uint64 + # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP*: uint64 + # TODO MAX_BLOBS_PER_BLOCK_FULU*: uint64 + # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS*: uint64 PresetFile* = object values*: Table[string, string] @@ -157,6 +178,7 @@ when const_preset == "mainnet": # * 'mainnet' - there can be only one # * 'sepolia' - testnet # * 'holesky' - testnet + # * 'hoodi' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: "", @@ -233,10 +255,6 @@ when const_preset == "mainnet": CHURN_LIMIT_QUOTIENT: 65536, # [New in Deneb:EIP7514] 2**3 (= 8) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8, - # [New in Electra:EIP7251] 2**7 * 10**9 (= 128,000,000,000) - MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000'u64, - # [New in Electra:EIP7251] 2**8 * 10**9 (= 256,000,000,000) - 
MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000'u64, # Deposit contract # --------------------------------------------------------------- @@ -248,15 +266,13 @@ when const_preset == "mainnet": # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) - # TODO GOSSIP_MAX_SIZE: 10485760, + # TODO MAX_PAYLOAD_SIZE: 10485760, # `2**10` (= 1024) # TODO MAX_REQUEST_BLOCKS: 1024, # `2**8` (= 256) # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256, # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024, - # `10 * 2**20` (=10485760, 10 MiB) - # TODO MAX_CHUNK_SIZE: 10485760, # 5s # TODO TTFB_TIMEOUT: 5, # 10s @@ -277,12 +293,38 @@ when const_preset == "mainnet": # Deneb # `2**7` (=128) # TODO MAX_REQUEST_BLOCKS_DENEB: 128, - # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK - # TODO MAX_REQUEST_BLOB_SIDECARS: 768, # `2**12` (= 4096 epochs, ~18 days) MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096, # `6` - # TODO BLOB_SIDECAR_SUBNET_COUNT: 6, + BLOB_SIDECAR_SUBNET_COUNT: 6, + # `uint64(6)` + MAX_BLOBS_PER_BLOCK: 6, + # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK + MAX_REQUEST_BLOB_SIDECARS: 768, + + # Electra + # 2**7 * 10**9 (= 128,000,000,000) + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000'u64, + # 2**8 * 10**9 (= 256,000,000,000) + MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000'u64, + # `9` + BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9, + # `uint64(9)` + MAX_BLOBS_PER_BLOCK_ELECTRA: 9, + # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA + MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152, + + # Fulu + # TODO NUMBER_OF_COLUMNS: 128, + # TODO NUMBER_OF_CUSTODY_GROUPS: 128, + # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, + # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, + # TODO SAMPLES_PER_SLOT: 8, + # TODO CUSTODY_REQUIREMENT: 4, + # TODO VALIDATOR_CUSTODY_REQUIREMENT: 8, + # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000, + # TODO MAX_BLOBS_PER_BLOCK_FULU: 12, + # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 ) elif const_preset == "gnosis": @@ -299,16 +341,12 @@ elif const_preset == "gnosis": # such as `CONFIG_NAME`, `TERMINAL_TOTAL_DIFFICULTY`, `*_FORK_EPOCH`, etc # which must be effectively overriden in all network (including mainnet). 
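Several of the new entries above are documented as products of existing constants: MAX_REQUEST_BLOB_SIDECARS = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK, and likewise for the Electra variant. For the mainnet values shown here that works out to 128 * 6 = 768 and 128 * 9 = 1152, which is also where the MAX_SUPPORTED_REQUEST_BLOB_SIDECARS ceiling of 1152 introduced earlier comes from. A compile-time consistency check of those mainnet numbers:

const
  MAX_REQUEST_BLOCKS_DENEB = 128'u64
  MAX_BLOBS_PER_BLOCK = 6'u64            # mainnet Deneb
  MAX_BLOBS_PER_BLOCK_ELECTRA = 9'u64    # mainnet Electra
  MAX_SUPPORTED_REQUEST_BLOB_SIDECARS = 1152'u64

static:
  doAssert MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK == 768'u64
  doAssert MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA == 1152'u64
  doAssert MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA <=
    MAX_SUPPORTED_REQUEST_BLOB_SIDECARS
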
const defaultRuntimeConfig* = RuntimeConfig( - # Mainnet config - - # Extends the mainnet preset PRESET_BASE: "gnosis", # Free-form short name of the network that this configuration applies to - known # canonical network names include: - # * 'mainnet' - there can be only one - # * 'sepolia' - testnet - # * 'holesky' - testnet + # * 'gnosis' - there can be only one + # * 'chiado' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: "", @@ -359,8 +397,8 @@ elif const_preset == "gnosis": # Time parameters # --------------------------------------------------------------- - # 12 seconds - # TODO SECONDS_PER_SLOT: 12, + # 5 seconds + # TODO SECONDS_PER_SLOT: 5, # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 5, # 2**8 (= 256) epochs ~27 hours @@ -385,10 +423,6 @@ elif const_preset == "gnosis": CHURN_LIMIT_QUOTIENT: 4096, # [New in Deneb:EIP7514] 2**3 (= 8) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8, - # [New in Electra:EIP7251] 2**7 * 10**9 (= 128,000,000,000) (copied from EF mainnet) - MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000'u64, - # [New in Electra:EIP7251] 2**8 * 10**9 (= 256,000,000,000) (copied from EF mainnet) - MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000'u64, # Deposit contract # --------------------------------------------------------------- @@ -400,15 +434,13 @@ elif const_preset == "gnosis": # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) - # TODO GOSSIP_MAX_SIZE: 10485760, + # TODO MAX_PAYLOAD_SIZE: 10485760, # `2**10` (= 1024) # TODO MAX_REQUEST_BLOCKS: 1024, # `2**8` (= 256) # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256, # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024, - # `10 * 2**20` (=10485760, 10 MiB) - # TODO MAX_CHUNK_SIZE: 10485760, # 5s # TODO TTFB_TIMEOUT: 5, # 10s @@ -429,12 +461,38 @@ elif const_preset == "gnosis": # Deneb # `2**7` (=128) # TODO MAX_REQUEST_BLOCKS_DENEB: 128, - # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK - # TODO MAX_REQUEST_BLOB_SIDECARS: 768, # `2**12` (= 4096 epochs, ~18 days) MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384, # `6` - # TODO BLOB_SIDECAR_SUBNET_COUNT: 6, + BLOB_SIDECAR_SUBNET_COUNT: 6, + # `uint64(2)` + MAX_BLOBS_PER_BLOCK: 2, + # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK + MAX_REQUEST_BLOB_SIDECARS: 768, + + # Electra + # 2**7 * 10**9 (= 128,000,000,000) + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000'u64, + # 2**8 * 10**9 (= 256,000,000,000) + MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000'u64, + # `2` + BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 2, + # `uint64(2)` + MAX_BLOBS_PER_BLOCK_ELECTRA: 2, + # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA + MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256, + + # Fulu + # TODO NUMBER_OF_COLUMNS: 128, + # TODO NUMBER_OF_CUSTODY_GROUPS: 128, + # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, + # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, + # TODO SAMPLES_PER_SLOT: 8, + # TODO CUSTODY_REQUIREMENT: 4, + # TODO VALIDATOR_CUSTODY_REQUIREMENT: 8, + # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000, + # TODO MAX_BLOBS_PER_BLOCK_FULU: 12, + # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 ) elif const_preset == "minimal": @@ -452,9 +510,7 @@ elif const_preset == "minimal": # Free-form short name of the network that this configuration applies to - known # canonical network names include: - # * 'mainnet' - there can be only one - # * 'sepolia' - testnet - # * 'holesky' - testnet + # * 'minimal' - spec-testing 
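The BLOB_SIDECAR_SUBNET_COUNT and BLOB_SIDECAR_SUBNET_COUNT_ELECTRA values set here for gnosis (6 and 2) feed the fork-aware `compute_subnet_for_blob_sidecar` from the network.nim hunk earlier: the divisor switches once the slot reaches the Electra fork. A standalone sketch with a cut-down config record; the fork epoch of 100 is hypothetical, not a real gnosis parameter:

const SLOTS_PER_EPOCH = 32'u64   # assumption for this sketch

type MiniCfg = object
  BLOB_SIDECAR_SUBNET_COUNT: uint64
  BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: uint64
  ELECTRA_FORK_EPOCH: uint64

func computeSubnetForBlobSidecar(
    cfg: MiniCfg, slot, blobIndex: uint64): uint64 =
  ## Same shape as the patched helper: the subnet count switches at the
  ## first Electra slot.
  let subnetCount =
    if slot >= cfg.ELECTRA_FORK_EPOCH * SLOTS_PER_EPOCH:
      cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA
    else:
      cfg.BLOB_SIDECAR_SUBNET_COUNT
  blobIndex mod subnetCount

when isMainModule:
  let cfg = MiniCfg(
    BLOB_SIDECAR_SUBNET_COUNT: 6,
    BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 2,
    ELECTRA_FORK_EPOCH: 100)
  doAssert computeSubnetForBlobSidecar(cfg, 3168, 5) == 5  # epoch 99: 5 mod 6
  doAssert computeSubnetForBlobSidecar(cfg, 3200, 5) == 1  # epoch 100: 5 mod 2
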
# Must match the regex: [a-z0-9\-] CONFIG_NAME: "minimal", @@ -532,10 +588,6 @@ elif const_preset == "minimal": CHURN_LIMIT_QUOTIENT: 32, # [New in Deneb:EIP7514] [customized] MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4, - # [New in Electra:EIP7251] 2**6 * 10**9 (= 64,000,000,000) - MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000'u64, - # [New in Electra:EIP7251] 2**7 * 10**9 (= 128,000,000,000) - MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 128000000000'u64, # Deposit contract @@ -549,15 +601,13 @@ elif const_preset == "minimal": # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) - # TODO GOSSIP_MAX_SIZE: 10485760, + # TODO MAX_PAYLOAD_SIZE: 10485760, # `2**10` (= 1024) # TODO MAX_REQUEST_BLOCKS: 1024, # `2**8` (= 256) # TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256, # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272, - # `10 * 2**20` (=10485760, 10 MiB) - # TODO MAX_CHUNK_SIZE: 10485760, # 5s # TODO TTFB_TIMEOUT: 5, # 10s @@ -578,12 +628,38 @@ elif const_preset == "minimal": # Deneb # `2**7` (=128) # TODO MAX_REQUEST_BLOCKS_DENEB: 128, - # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK - # TODO MAX_REQUEST_BLOB_SIDECARS: 768, # `2**12` (= 4096 epochs, ~18 days) MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096, # `6` - # TODO BLOB_SIDECAR_SUBNET_COUNT: 6, + BLOB_SIDECAR_SUBNET_COUNT: 6, + # `uint64(6)` + MAX_BLOBS_PER_BLOCK: 6, + # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK + MAX_REQUEST_BLOB_SIDECARS: 768, + + # Electra + # [customized] 2**6 * 10**9 (= 64,000,000,000) + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000'u64, + # [customized] 2**7 * 10**9 (= 128,000,000,000) + MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 128000000000'u64, + # `9` + BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9, + # `uint64(9)` + MAX_BLOBS_PER_BLOCK_ELECTRA: 9, + # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA + MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + + # Fulu + # TODO NUMBER_OF_COLUMNS: 128, + # TODO NUMBER_OF_CUSTODY_GROUPS: 128, + # TODO DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128, + # TODO MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384, + # TODO SAMPLES_PER_SLOT: 8, + # TODO CUSTODY_REQUIREMENT: 4, + # TODO VALIDATOR_CUSTODY_REQUIREMENT: 8, + # TODO BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000, + # TODO MAX_BLOBS_PER_BLOCK_FULU: 12, + # TODO MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 ) else: @@ -775,10 +851,11 @@ proc readRuntimeConfig*( checkCompatibility DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF checkCompatibility DOMAIN_CONTRIBUTION_AND_PROOF - checkCompatibility GOSSIP_MAX_SIZE + checkCompatibility MAX_PAYLOAD_SIZE + checkCompatibility MAX_PAYLOAD_SIZE, "GOSSIP_MAX_SIZE" + checkCompatibility MAX_PAYLOAD_SIZE, "MAX_CHUNK_SIZE" checkCompatibility MAX_REQUEST_BLOCKS checkCompatibility EPOCHS_PER_SUBNET_SUBSCRIPTION - checkCompatibility MAX_CHUNK_SIZE checkCompatibility TTFB_TIMEOUT checkCompatibility RESP_TIMEOUT checkCompatibility ATTESTATION_PROPAGATION_SLOT_RANGE @@ -792,11 +869,16 @@ proc readRuntimeConfig*( checkCompatibility ATTESTATION_SUBNET_PREFIX_BITS checkCompatibility MAX_REQUEST_BLOCKS_DENEB - checkCompatibility MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK, - "MAX_REQUEST_BLOB_SIDECARS" - checkCompatibility BLOB_SIDECAR_SUBNET_COUNT - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/fork-choice.md#configuration + for suffix in ["", "_ELECTRA"]: + checkCompatibility MAX_SUPPORTED_BLOB_SIDECAR_SUBNET_COUNT, + "BLOB_SIDECAR_SUBNET_COUNT" & 
suffix, `<=` + checkCompatibility MAX_SUPPORTED_BLOBS_PER_BLOCK, + "MAX_BLOBS_PER_BLOCK" & suffix, `<=` + checkCompatibility MAX_SUPPORTED_REQUEST_BLOB_SIDECARS, + "MAX_REQUEST_BLOB_SIDECARS" & suffix, `<=` + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/fork-choice.md#configuration # Isn't being used as a preset in the usual way: at any time, there's one correct value checkCompatibility PROPOSER_SCORE_BOOST checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD @@ -818,6 +900,12 @@ proc readRuntimeConfig*( # Requires initialized `cfg` checkCompatibility cfg.safeMinEpochsForBlockRequests(), "MIN_EPOCHS_FOR_BLOCK_REQUESTS", `>=` + checkCompatibility MAX_REQUEST_BLOCKS_DENEB * cfg.MAX_BLOBS_PER_BLOCK, + "MAX_REQUEST_BLOB_SIDECARS" + checkCompatibility cfg.MAX_BLOBS_PER_BLOCK, + "MAX_BLOBS_PER_BLOCK_ELECTRA", `>=` + checkCompatibility MAX_REQUEST_BLOCKS_DENEB * cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, + "MAX_REQUEST_BLOB_SIDECARS_ELECTRA" var unknowns: seq[string] for name in values.keys: diff --git a/beacon_chain/spec/presets/gnosis/altair_preset.nim b/beacon_chain/spec/presets/gnosis/altair_preset.nim index 7190e7d5c6..41cc0ab643 100644 --- a/beacon_chain/spec/presets/gnosis/altair_preset.nim +++ b/beacon_chain/spec/presets/gnosis/altair_preset.nim @@ -1,10 +1,12 @@ # beacon_chain -# Copyright (c) 2023 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +{.push raises: [].} + # Gnosis preset - Altair # https://github.com/gnosischain/specs/blob/1648fc86cef7bc148d74cb21921d2d12ca9442ac/consensus/preset/gnosis/altair.yaml const @@ -22,7 +24,7 @@ const # --------------------------------------------------------------- # 2**9 (= 512) SYNC_COMMITTEE_SIZE* = 512 - # 2**8 (= 256) + # 2**9 (= 512) EPOCHS_PER_SYNC_COMMITTEE_PERIOD* {.intdefine.}: uint64 = 512 diff --git a/beacon_chain/spec/presets/gnosis/deneb_preset.nim b/beacon_chain/spec/presets/gnosis/deneb_preset.nim index d32668081a..34f0c93128 100644 --- a/beacon_chain/spec/presets/gnosis/deneb_preset.nim +++ b/beacon_chain/spec/presets/gnosis/deneb_preset.nim @@ -1,18 +1,18 @@ # beacon_chain -# Copyright (c) 2023 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
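The readRuntimeConfig changes above turn several exact-match checks into `<=` comparisons against compile-time MAX_SUPPORTED_* ceilings, so a network config may use smaller blob limits than the binary was built for, but never larger ones. A standalone sketch of that validation pattern; the proc name and error wording are invented for the sketch and are not the node's actual messages:

# Sketch of the "runtime value must not exceed compile-time support" check
# applied to BLOB_SIDECAR_SUBNET_COUNT*, MAX_BLOBS_PER_BLOCK* and
# MAX_REQUEST_BLOB_SIDECARS*.
const
  MAX_SUPPORTED_BLOBS_PER_BLOCK = 9'u64
  MAX_SUPPORTED_REQUEST_BLOB_SIDECARS = 1152'u64

proc checkUpperBound(name: string, configured, supported: uint64) =
  if configured > supported:
    raise newException(ValueError,
      name & " = " & $configured & " exceeds supported maximum " & $supported)

when isMainModule:
  checkUpperBound("MAX_BLOBS_PER_BLOCK", 6, MAX_SUPPORTED_BLOBS_PER_BLOCK)
  checkUpperBound("MAX_REQUEST_BLOB_SIDECARS", 768, MAX_SUPPORTED_REQUEST_BLOB_SIDECARS)
  try:
    checkUpperBound("MAX_BLOBS_PER_BLOCK_ELECTRA", 12, MAX_SUPPORTED_BLOBS_PER_BLOCK)
  except ValueError as exc:
    echo exc.msg
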
+{.push raises: [].} + # Gnosis preset - Deneb -# https://github.com/gnosischain/specs/blob/1648fc86cef7bc148d74cb21921d2d12ca9442ac/consensus/preset/gnosis/deneb.yaml +# https://github.com/gnosischain/specs/blob/31f87ac73d271762ac35b3649e7639d00c73c66d/consensus/preset/gnosis/deneb.yaml const # `uint64(4096)` FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 # `uint64(2**12)` (= 4096) MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 4096 - # `uint64(6)` - MAX_BLOBS_PER_BLOCK*: uint64 = 6 # `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 17 diff --git a/beacon_chain/spec/presets/gnosis/electra_preset.nim b/beacon_chain/spec/presets/gnosis/electra_preset.nim index d8bb93de55..a7b6488d46 100644 --- a/beacon_chain/spec/presets/gnosis/electra_preset.nim +++ b/beacon_chain/spec/presets/gnosis/electra_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} # Gnosis preset - Electra (Gnosis version not avilable yet; EF mainnet for now) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/electra.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/presets/mainnet/electra.yaml const # Gwei values # --------------------------------------------------------------- @@ -39,8 +39,8 @@ const MAX_ATTESTER_SLASHINGS_ELECTRA*: uint64 = 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 - # `uint64(2**0)` (= 1) - MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1 + # `uint64(2**1)` (= 2) + MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 2 # Execution # --------------------------------------------------------------- @@ -57,4 +57,4 @@ const # Pending deposits processing # --------------------------------------------------------------- # 2**4 ( = 4) pending deposits - MAX_PENDING_DEPOSITS_PER_EPOCH* = 16 + MAX_PENDING_DEPOSITS_PER_EPOCH* = 16 \ No newline at end of file diff --git a/beacon_chain/spec/presets/mainnet/altair_preset.nim b/beacon_chain/spec/presets/mainnet/altair_preset.nim index 1863501551..0cea68e743 100644 --- a/beacon_chain/spec/presets/mainnet/altair_preset.nim +++ b/beacon_chain/spec/presets/mainnet/altair_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
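The gnosis Altair hunk earlier in this patch corrects the comment on EPOCHS_PER_SYNC_COMMITTEE_PERIOD: the value is 512, which is 2**9, not 2**8. That constant matters wherever sync-committee periods are compared, for example in the "relevant sync committee" test inside `is_better_data`. A quick standalone sketch of the period arithmetic using the gnosis value from that hunk:

const EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 512'u64  # gnosis preset value

func syncCommitteePeriod(epoch: uint64): uint64 =
  ## Sync committee period containing `epoch`; an update's attested and
  ## signature epochs are served by the same committee iff these match.
  epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD

when isMainModule:
  doAssert syncCommitteePeriod(511) == 0
  doAssert syncCommitteePeriod(512) == 1
  doAssert syncCommitteePeriod(1000) == syncCommitteePeriod(1023)
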
@@ -8,7 +8,7 @@ {.push raises: [].} # Mainnet preset - Altair -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/altair.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/altair.yaml const # Updated penalty values # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/mainnet/bellatrix_preset.nim b/beacon_chain/spec/presets/mainnet/bellatrix_preset.nim index a4403d39b0..a5706a0301 100644 --- a/beacon_chain/spec/presets/mainnet/bellatrix_preset.nim +++ b/beacon_chain/spec/presets/mainnet/bellatrix_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} # Mainnet preset - Bellatrix -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/bellatrix.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/bellatrix.yaml const # Updated penalty values # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/mainnet/capella_preset.nim b/beacon_chain/spec/presets/mainnet/capella_preset.nim index b72c5dcf82..bcba90124a 100644 --- a/beacon_chain/spec/presets/mainnet/capella_preset.nim +++ b/beacon_chain/spec/presets/mainnet/capella_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} # Mainnet preset - Capella -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/capella.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/capella.yaml const # Max operations per block # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/mainnet/deneb_preset.nim b/beacon_chain/spec/presets/mainnet/deneb_preset.nim index cd3f91edba..9e0bb1089e 100644 --- a/beacon_chain/spec/presets/mainnet/deneb_preset.nim +++ b/beacon_chain/spec/presets/mainnet/deneb_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -8,13 +8,11 @@ {.push raises: [].} # Mainnet preset - Deneb -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/deneb.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/mainnet/deneb.yaml const # `uint64(4096)` FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 # `uint64(2**12)` (= 4096) MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 4096 - # `uint64(6)` - MAX_BLOBS_PER_BLOCK*: uint64 = 6 # `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 17 diff --git a/beacon_chain/spec/presets/mainnet/electra_preset.nim b/beacon_chain/spec/presets/mainnet/electra_preset.nim index d0c8d35770..9194784ab0 100644 --- a/beacon_chain/spec/presets/mainnet/electra_preset.nim +++ b/beacon_chain/spec/presets/mainnet/electra_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} # Electra preset - Electra -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/mainnet/electra.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/presets/mainnet/electra.yaml const # Gwei values # --------------------------------------------------------------- @@ -39,8 +39,8 @@ const MAX_ATTESTER_SLASHINGS_ELECTRA*: uint64 = 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 - # `uint64(2**0)` (= 1) - MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1 + # `uint64(2**1)` (= 2) + MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 2 # Execution # --------------------------------------------------------------- @@ -57,4 +57,4 @@ const # Pending deposits processing # --------------------------------------------------------------- # 2**4 ( = 4) pending deposits - MAX_PENDING_DEPOSITS_PER_EPOCH* = 16 + MAX_PENDING_DEPOSITS_PER_EPOCH* = 16 \ No newline at end of file diff --git a/beacon_chain/spec/presets/minimal/altair_preset.nim b/beacon_chain/spec/presets/minimal/altair_preset.nim index 05857cdb74..eaff35ed93 100644 --- a/beacon_chain/spec/presets/minimal/altair_preset.nim +++ b/beacon_chain/spec/presets/minimal/altair_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
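Both Deneb preset hunks carry the proof-depth formula in a comment: KZG_COMMITMENT_INCLUSION_PROOF_DEPTH = floorlog2(generalized index of blob_kzg_commitments) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK), giving 4 + 1 + 12 = 17 on mainnet and, after the minimal-preset bump to 32 commitments further below, 4 + 1 + 5 = 10. A standalone check of that arithmetic; the leading 4 is taken from the comment rather than recomputed from the SSZ layout:

# Verifies the inclusion-proof depth arithmetic quoted in the Deneb preset
# comments; ceilLog2 is a local helper, not the project's bit-twiddling code.
func ceilLog2(x: uint64): int =
  doAssert x > 0
  var v = x - 1
  while v > 0:
    inc result
    v = v shr 1

when isMainModule:
  doAssert 4 + 1 + ceilLog2(4096) == 17   # mainnet: 4096 commitments
  doAssert 4 + 1 + ceilLog2(32) == 10     # minimal, after this patch
  doAssert 4 + 1 + ceilLog2(16) == 9      # minimal, before this patch
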
@@ -8,7 +8,7 @@ {.push raises: [].} # Minimal preset - Altair -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/minimal/altair.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/minimal/altair.yaml const # Updated penalty values # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/minimal/bellatrix_preset.nim b/beacon_chain/spec/presets/minimal/bellatrix_preset.nim index 1956e1fabf..c64d61be0a 100644 --- a/beacon_chain/spec/presets/minimal/bellatrix_preset.nim +++ b/beacon_chain/spec/presets/minimal/bellatrix_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} # Minimal preset - Bellatrix -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/minimal/bellatrix.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/minimal/bellatrix.yaml const # Updated penalty values # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/minimal/capella_preset.nim b/beacon_chain/spec/presets/minimal/capella_preset.nim index 71a8ba7cfe..d6e567b3cd 100644 --- a/beacon_chain/spec/presets/minimal/capella_preset.nim +++ b/beacon_chain/spec/presets/minimal/capella_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} # Minimal preset - Capella -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/presets/minimal/capella.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/minimal/capella.yaml const # Max operations per block # --------------------------------------------------------------- diff --git a/beacon_chain/spec/presets/minimal/deneb_preset.nim b/beacon_chain/spec/presets/minimal/deneb_preset.nim index dfc17a87b3..642eec610e 100644 --- a/beacon_chain/spec/presets/minimal/deneb_preset.nim +++ b/beacon_chain/spec/presets/minimal/deneb_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -8,13 +8,11 @@ {.push raises: [].} # Minimal preset - Deneb -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/deneb.yaml +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/presets/minimal/deneb.yaml const # `uint64(4096)` FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 # [customized] - MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 16 - # `uint64(6)` - MAX_BLOBS_PER_BLOCK*: uint64 = 6 - # [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9 - KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 9 + MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 32 + # [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 5 = 10 + KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 10 diff --git a/beacon_chain/spec/presets/minimal/electra_preset.nim b/beacon_chain/spec/presets/minimal/electra_preset.nim index 0278bade98..9d87342965 100644 --- a/beacon_chain/spec/presets/minimal/electra_preset.nim +++ b/beacon_chain/spec/presets/minimal/electra_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -39,8 +39,8 @@ const MAX_ATTESTER_SLASHINGS_ELECTRA*: uint64 = 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 - # `uint64(2**0)` (= 1) - MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1 + # `uint64(2**1)` (= 2) + MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 2 # Execution # --------------------------------------------------------------- @@ -57,4 +57,4 @@ const # Pending deposits processing # --------------------------------------------------------------- # 2**4 ( = 4) pending deposits - MAX_PENDING_DEPOSITS_PER_EPOCH* = 16 + MAX_PENDING_DEPOSITS_PER_EPOCH* = 16 \ No newline at end of file diff --git a/beacon_chain/spec/signatures.nim b/beacon_chain/spec/signatures.nim index 9197912b14..726c1b42ee 100644 --- a/beacon_chain/spec/signatures.nim +++ b/beacon_chain/spec/signatures.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
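The minimal-preset deneb hunk above doubles MAX_BLOB_COMMITMENTS_PER_BLOCK from 16 to 32, and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH follows from the formula quoted in its comment: floorlog2(gindex) + 1 + ceillog2(cap) = 4 + 1 + 5 = 10. A small self-contained Nim check of that arithmetic; the hard-coded 4 is the floorlog2 term quoted in both presets, and the helper names are illustrative:

from std/bitops import fastLog2

func ceilLog2(x: uint64): int =
  ## ceil(log2(x)) for x >= 1; equals fastLog2(x) when x is a power of two.
  let f = fastLog2(x)
  if (1'u64 shl f) == x: f else: f + 1

func kzgCommitmentInclusionProofDepth(maxBlobCommitments: uint64): int =
  ## floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments'))
  ## is 4 in both presets, per the comments above.
  4 + 1 + ceilLog2(maxBlobCommitments)

when isMainModule:
  doAssert kzgCommitmentInclusionProofDepth(4096) == 17  # mainnet preset
  doAssert kzgCommitmentInclusionProofDepth(16) == 9     # previous minimal value
  doAssert kzgCommitmentInclusionProofDepth(32) == 10    # new minimal value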
@@ -59,7 +59,7 @@ func compute_epoch_signing_root*( let domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root) compute_signing_root(epoch, domain) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#randao-reveal +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#randao-reveal func get_epoch_signature*( fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch, privkey: ValidatorPrivKey): CookedSig = @@ -145,7 +145,7 @@ func compute_attestation_signing_root*( fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root) compute_signing_root(attestation_data, domain) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#aggregate-signature +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#aggregate-signature func get_attestation_signature*( fork: Fork, genesis_validators_root: Eth2Digest, attestation_data: AttestationData, @@ -271,7 +271,7 @@ proc verify_voluntary_exit_signature*( blsVerify(pubkey, signing_root.data, signature) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#prepare-sync-committee-message +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#prepare-sync-committee-message func compute_sync_committee_message_signing_root*( fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest = @@ -306,7 +306,7 @@ proc verify_sync_committee_signature*( blsFastAggregateVerify(pubkeys, signing_root.data, signature) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#aggregation-selection +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#aggregation-selection func compute_sync_committee_selection_proof_signing_root*( fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest = @@ -337,7 +337,7 @@ proc verify_sync_committee_selection_proof*( blsVerify(pubkey, signing_root.data, signature) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#signature +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#signature func compute_contribution_and_proof_signing_root*( fork: Fork, genesis_validators_root: Eth2Digest, msg: ContributionAndProof): Eth2Digest = @@ -355,7 +355,7 @@ proc get_contribution_and_proof_signature*( blsSign(privkey, signing_root.data) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#aggregation-selection +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/validator.md#aggregation-selection func is_sync_committee_aggregator*(signature: ValidatorSig): bool = let signatureDigest = eth2digest(signature.blob) @@ -391,7 +391,7 @@ proc get_builder_signature*( proc verify_builder_signature*( fork: Fork, msg: deneb_mev.BuilderBid | electra_mev.BuilderBid | - fulu_mev.BuilderBid, + fulu_mev.BuilderBid | ValidatorRegistrationV1, pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool = let signing_root = compute_builder_signing_root(fork, msg) blsVerify(pubkey, signing_root.data, signature) diff --git a/beacon_chain/spec/signatures_batch.nim b/beacon_chain/spec/signatures_batch.nim index be31fb29d1..f62b9c0180 100644 --- a/beacon_chain/spec/signatures_batch.nim +++ 
b/beacon_chain/spec/signatures_batch.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -26,18 +26,16 @@ import export results, rand, altair, phase0, taskpools, signatures type - TaskPoolPtr* = Taskpool - BatchVerifier* = object sigVerifCache*: BatchedBLSVerifierCache ## A cache for batch BLS signature verification contexts rng*: ref HmacDrbgContext ## A reference to the Nimbus application-wide RNG - taskpool*: TaskPoolPtr + taskpool*: Taskpool proc init*( T: type BatchVerifier, rng: ref HmacDrbgContext, - taskpool: TaskPoolPtr): BatchVerifier = + taskpool: Taskpool): BatchVerifier = BatchVerifier( sigVerifCache: BatchedBLSVerifierCache.init(taskpool), rng: rng, @@ -46,7 +44,7 @@ proc init*( proc new*( T: type BatchVerifier, rng: ref HmacDrbgContext, - taskpool: TaskPoolPtr): ref BatchVerifier = + taskpool: Taskpool): ref BatchVerifier = (ref BatchVerifier)( sigVerifCache: BatchedBLSVerifierCache.init(taskpool), rng: rng, @@ -109,7 +107,7 @@ func aggregateAttesters( # Aggregation spec requires non-empty collection # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 # Consensus specs require at least one attesting index in attestation - # - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_valid_indexed_attestation + # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#is_valid_indexed_attestation return err("aggregateAttesters: no attesting indices") var attestersAgg{.noinit.}: AggregatePublicKey @@ -164,7 +162,7 @@ func block_signature_set*( # See also: verify_aggregate_and_proof_signature func aggregate_and_proof_signature_set*( fork: Fork, genesis_validators_root: Eth2Digest, - aggregate_and_proof: phase0.AggregateAndProof, + aggregate_and_proof: phase0.AggregateAndProof | electra.AggregateAndProof, pubkey: CookedPubKey, signature: CookedSig): SignatureSet = let signing_root = compute_aggregate_and_proof_signing_root( fork, genesis_validators_root, aggregate_and_proof) diff --git a/beacon_chain/spec/state_transition.nim b/beacon_chain/spec/state_transition.nim index 876379bbd4..9b8e2e14e0 100644 --- a/beacon_chain/spec/state_transition.nim +++ b/beacon_chain/spec/state_transition.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
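On the signatures_batch.nim hunks above: TaskPoolPtr was a plain alias for Taskpool, so removing it and taking Taskpool directly is a rename with no behavioural change for callers. A stand-alone sketch with stand-in types (not the real taskpools or BLS APIs) showing that call sites written against the underlying type keep compiling:

type
  Taskpool = ref object      # stand-in for taskpools.Taskpool
    numThreads: int
  BatchVerifier = object     # stand-in carrying only the field relevant here
    taskpool: Taskpool

proc init(T: type BatchVerifier, taskpool: Taskpool): BatchVerifier =
  ## After the change the verifier takes `Taskpool` directly; a caller that
  ## already constructed a `Taskpool` needs no source change.
  BatchVerifier(taskpool: taskpool)

when isMainModule:
  let tp = Taskpool(numThreads: 4)
  let verifier = BatchVerifier.init(tp)
  doAssert verifier.taskpool.numThreads == 4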
@@ -70,7 +70,7 @@ proc verify_block_signature( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function func verifyStateRoot( state: ForkyBeaconState, blck: ForkyBeaconBlock | ForkySigVerifiedBeaconBlock): @@ -382,7 +382,7 @@ func partialBeaconBlock*( _: ExecutionRequests): auto = const consensusFork = typeof(state).kind - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#preparing-for-a-beaconblock + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#preparing-for-a-beaconblock var res = consensusFork.BeaconBlock( slot: state.data.slot, proposer_index: proposer_index.uint64, @@ -512,7 +512,7 @@ proc makeBeaconBlockWithRewards*( transactions_root.get when executionPayload is deneb.ExecutionPayloadForSigning: - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/deneb/beacon-chain.md#beaconblockbody forkyState.data.latest_block_header.body_root = hash_tree_root( [hash_tree_root(randao_reveal), hash_tree_root(eth1_data), @@ -535,7 +535,6 @@ proc makeBeaconBlockWithRewards*( forkyState.data.latest_execution_payload_header.transactions_root = transactions_root.get - debugComment "verify (again) that this is what builder API needs" when executionPayload is electra.ExecutionPayloadForSigning: # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody forkyState.data.latest_block_header.body_root = hash_tree_root( @@ -552,7 +551,8 @@ proc makeBeaconBlockWithRewards*( hash_tree_root(sync_aggregate), execution_payload_root.get, hash_tree_root(validator_changes.bls_to_execution_changes), - hash_tree_root(kzg_commitments.get) + hash_tree_root(kzg_commitments.get), + hash_tree_root(execution_requests) ]) else: raiseAssert "Attempt to use non-Electra payload with post-Deneb state" @@ -577,7 +577,8 @@ proc makeBeaconBlockWithRewards*( hash_tree_root(sync_aggregate), execution_payload_root.get, hash_tree_root(validator_changes.bls_to_execution_changes), - hash_tree_root(kzg_commitments.get) + hash_tree_root(kzg_commitments.get), + hash_tree_root(execution_requests) ]) else: raiseAssert "Attempt to use non-Fulu payload with post-Electra state" diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim index b8e42ceb0b..5299179c4e 100644 --- a/beacon_chain/spec/state_transition_block.nim +++ b/beacon_chain/spec/state_transition_block.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
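On the makeBeaconBlockWithRewards hunks above: the cached latest_block_header.body_root is rebuilt by merkleizing the per-field roots of the block body in declaration order, so the Electra and Fulu bodies now contribute hash_tree_root(execution_requests) as their last leaf. The Deneb body has 12 fields and the Electra body 13, so both pad to 16 leaves and, assuming the earlier field order stays untouched as in this diff, the generalized indices of existing fields (including blob_kzg_commitments) do not move. A tiny sketch of the padding arithmetic only, with no attempt at real SSZ hashing:

func paddedLeafCount(fieldCount: int): int =
  ## Number of leaves after padding the per-field roots to a power of two,
  ## as SSZ merkleization of a container does.
  result = 1
  while result < fieldCount:
    result *= 2

when isMainModule:
  doAssert paddedLeafCount(12) == 16  # Deneb BeaconBlockBody
  doAssert paddedLeafCount(13) == 16  # Electra/Fulu body with execution_requests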
@@ -9,11 +9,11 @@ # State transition - block processing as described in # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#block-processing -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#block-processing -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#block-processing -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#block-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#block-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/bellatrix/beacon-chain.md#block-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/capella/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#block-processing -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#block-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#block-processing # # The entry point is `process_block` which is at the bottom of this file. # @@ -82,7 +82,7 @@ func `xor`[T: array](a, b: T): T = for i in 0..= ConsensusFork.Electra: + if voluntary_exit.validator_index >= state.validators.lenu64: + return err("Exit: validator index out of range") + # Only exit validator if it has no pending withdrawals in the queue - debugComment "truncating" if not (get_pending_balance_to_withdraw( state, voluntary_exit.validator_index.ValidatorIndex) == 0.Gwei): return err("Exit: still has pending withdrawals") @@ -556,14 +558,14 @@ func process_withdrawal_request*( # In theory can fail, but failing/early returning here is indistinguishable discard state.pending_partial_withdrawals.add(PendingPartialWithdrawal( - index: index.uint64, + validator_index: index.uint64, amount: to_withdraw, withdrawable_epoch: withdrawable_epoch, )) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-is_valid_switch_to_compounding_request +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#new-is_valid_switch_to_compounding_request func is_valid_switch_to_compounding_request( - state: electra.BeaconState | fulu.BeaconState, + state: electra.BeaconState | fulu.BeaconState, consolidation_request: ConsolidationRequest, source_validator: Validator): bool = # Switch to compounding requires source and target be equal @@ -592,7 +594,7 @@ func is_valid_switch_to_compounding_request( true -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-process_consolidation_request +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#new-process_consolidation_request func process_consolidation_request*( cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), bucketSortedValidators: BucketSortedValidators, @@ -647,8 +649,8 @@ func process_consolidation_request*( if not (has_correct_credential and is_correct_source_address): return - # Verify that target has execution withdrawal credentials - if not has_execution_withdrawal_credential(target_validator): + # Verify that target has compounding withdrawal credentials + if not has_compounding_withdrawal_credential(target_validator): return # Verify the source and the target are active @@ 
-665,6 +667,15 @@ func process_consolidation_request*( if target_validator.exit_epoch != FAR_FUTURE_EPOCH: return + # Verify the source has been active long enough + if current_epoch < + source_validator.activation_epoch + cfg.SHARD_COMMITTEE_PERIOD: + return + + # Verify the source has no pending withdrawals in the queue + if get_pending_balance_to_withdraw(state, source_index) > 0.Gwei: + return + # Initiate source validator exit and append pending consolidation source_validator[].exit_epoch = compute_consolidation_epoch_and_update_churn( cfg, state, source_validator[].effective_balance, cache) @@ -673,10 +684,6 @@ func process_consolidation_request*( discard state.pending_consolidations.add(PendingConsolidation( source_index: source_index.uint64, target_index: target_index.uint64)) - # Churn any target excess active balance of target and raise its max - if has_eth1_withdrawal_credential(target_validator): - switch_to_compounding_validator(state, target_index) - type # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.5.0#/Rewards/getBlockRewards BlockRewards* = object @@ -792,11 +799,11 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei = WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH max_participant_rewards div SYNC_COMMITTEE_SIZE -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#sync-aggregate-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#sync-aggregate-processing func get_proposer_reward*(participant_reward: Gwei): Gwei = participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#sync-aggregate-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#sync-aggregate-processing proc process_sync_aggregate*( state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | @@ -845,6 +852,7 @@ proc process_sync_aggregate*( # Apply participant and proposer rewards let indices = get_sync_committee_cache(state, cache).current_sync_committee + var total_proposer_reward: Gwei for i in 0 ..< min( state.current_sync_committee.pubkeys.len, @@ -853,10 +861,11 @@ proc process_sync_aggregate*( if sync_aggregate.sync_committee_bits[i]: increase_balance(state, participant_index, participant_reward) increase_balance(state, proposer_index, proposer_reward) + increase_balance(total_proposer_reward, proposer_reward) else: decrease_balance(state, participant_index, participant_reward) - ok(proposer_reward) + ok(total_proposer_reward) # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#process_execution_payload proc process_execution_payload*( @@ -950,7 +959,8 @@ type SomeDenebBeaconBlockBody = # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#process_execution_payload proc process_execution_payload*( - state: var deneb.BeaconState, body: SomeDenebBeaconBlockBody, + cfg: RuntimeConfig, state: var deneb.BeaconState, + body: SomeDenebBeaconBlockBody, notify_new_payload: deneb.ExecutePayload): Result[void, cstring] = template payload: auto = body.execution_payload @@ -969,7 +979,7 @@ proc process_execution_payload*( return err("process_execution_payload: invalid timestamp") # [New in Deneb] Verify commitments are under limit - if not (lenu64(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK): + if 
not (lenu64(body.blob_kzg_commitments) <= cfg.MAX_BLOBS_PER_BLOCK): return err("process_execution_payload: too many KZG commitments") # Verify the execution payload is valid @@ -1006,7 +1016,8 @@ type SomeElectraBeaconBlockBody = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-process_execution_payload proc process_execution_payload*( - state: var electra.BeaconState, body: SomeElectraBeaconBlockBody, + cfg: RuntimeConfig, state: var electra.BeaconState, + body: SomeElectraBeaconBlockBody, notify_new_payload: electra.ExecutePayload): Result[void, cstring] = template payload: auto = body.execution_payload @@ -1025,7 +1036,7 @@ proc process_execution_payload*( return err("process_execution_payload: invalid timestamp") # [New in Deneb] Verify commitments are under limit - if not (lenu64(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK): + if not (lenu64(body.blob_kzg_commitments) <= cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): return err("process_execution_payload: too many KZG commitments") # Verify the execution payload is valid @@ -1061,7 +1072,8 @@ type SomeFuluBeaconBlockBody = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-process_execution_payload proc process_execution_payload*( - state: var fulu.BeaconState, body: SomeFuluBeaconBlockBody, + cfg: RuntimeConfig, state: var fulu.BeaconState, + body: SomeFuluBeaconBlockBody, notify_new_payload: fulu.ExecutePayload): Result[void, cstring] = template payload: auto = body.execution_payload @@ -1080,7 +1092,7 @@ proc process_execution_payload*( return err("process_execution_payload: invalid timestamp") # [New in Deneb] Verify commitments are under limit - if not (lenu64(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK): + if not (lenu64(body.blob_kzg_commitments) <= cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): return err("process_execution_payload: too many KZG commitments") # Verify the execution payload is valid @@ -1204,7 +1216,7 @@ proc process_block*( ok(? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache)) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#block-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#block-processing # TODO workaround for https://github.com/nim-lang/Nim/issues/18095 # copy of datatypes/altair.nim type SomeAltairBlock = @@ -1320,7 +1332,7 @@ proc process_block*( if is_execution_enabled(state, blck.body): ? process_withdrawals(state, blck.body.execution_payload) ? process_execution_payload( - state, blck.body, + cfg, state, blck.body, func(_: deneb.ExecutionPayload): bool = true) # [Modified in Deneb] ? process_randao(state, blck.body, flags, cache) ? process_eth1_data(state, blck.body) @@ -1355,7 +1367,7 @@ proc process_block*( if is_execution_enabled(state, blck.body): ? process_withdrawals(state, blck.body.execution_payload) ? process_execution_payload( - state, blck.body, + cfg, state, blck.body, func(_: electra.ExecutionPayload): bool = true) ? process_randao(state, blck.body, flags, cache) ? process_eth1_data(state, blck.body) @@ -1388,7 +1400,7 @@ proc process_block*( if is_execution_enabled(state, blck.body): ? process_withdrawals(state, blck.body.execution_payload) ? process_execution_payload( - state, blck.body, + cfg, state, blck.body, func(_: fulu.ExecutionPayload): bool = true) ? process_randao(state, blck.body, flags, cache) ? 
process_eth1_data(state, blck.body) @@ -1402,4 +1414,4 @@ proc process_block*( operations_rewards.sync_aggregate = ? process_sync_aggregate( state, blck.body.sync_aggregate, total_active_balance, flags, cache) - ok(operations_rewards) + ok(operations_rewards) \ No newline at end of file diff --git a/beacon_chain/spec/state_transition_epoch.nim b/beacon_chain/spec/state_transition_epoch.nim index 20e4ff78b1..a8da3ab30a 100644 --- a/beacon_chain/spec/state_transition_epoch.nim +++ b/beacon_chain/spec/state_transition_epoch.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,7 +10,7 @@ # State transition - epoch processing, as described in # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#epoch-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing # # The entry point is `process_epoch`, which is at the bottom of this file. @@ -155,7 +155,7 @@ func process_attestations*( if v.flags.contains RewardFlags.isPreviousEpochHeadAttester: info.balances.previous_epoch_head_attesters_raw += validator_balance -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#helpers +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#helpers # get_eligible_validator_indices func is_eligible_validator*(validator: RewardStatus): bool = validator.flags.contains(RewardFlags.isActiveInPreviousEpoch) or @@ -174,7 +174,7 @@ func is_eligible_validator*(validator: ParticipationInfo): bool = from ./datatypes/deneb import BeaconState -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_unslashed_participating_indices +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_unslashed_participating_indices func get_unslashed_participating_balances*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState): @@ -243,7 +243,7 @@ func is_unslashed_participating_index( has_flag(epoch_participation[].item(validator_index), flag_index) and not state.validators[validator_index].slashed -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#justification-and-finalization +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#justification-and-finalization type FinalityState = object slot: Slot current_epoch_ancestor_root: Eth2Digest @@ -462,7 +462,7 @@ proc compute_unrealized_finality*( justified: jfRes.current_justified_checkpoint, finalized: jfRes.finalized_checkpoint) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#helpers +# 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#helpers func get_base_reward_sqrt*(state: phase0.BeaconState, index: ValidatorIndex, total_balance_sqrt: auto): Gwei = # Spec function recalculates total_balance every time, which creates an @@ -594,7 +594,7 @@ func get_inactivity_penalty_delta*( delta -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_attestation_deltas +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#get_attestation_deltas func get_attestation_deltas( state: phase0.BeaconState, info: var phase0.EpochInfo) = ## Update rewards with attestation reward/penalty deltas for each validator. @@ -638,7 +638,7 @@ func get_attestation_deltas( info.validators[proposer_index].delta.add( proposer_delta.get()[1]) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#get_base_reward +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_base_reward func get_base_reward_increment*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState, @@ -650,7 +650,7 @@ func get_base_reward_increment*( EFFECTIVE_BALANCE_INCREMENT.Gwei increments * base_reward_per_increment -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#get_flag_index_deltas +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas func get_flag_index_reward*( state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | electra.BeaconState | fulu.BeaconState, @@ -665,19 +665,19 @@ func get_flag_index_reward*( else: 0.Gwei -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas func get_unslashed_participating_increment*( info: altair.EpochInfo | bellatrix.BeaconState, flag_index: TimelyFlag): uint64 = info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#get_flag_index_deltas +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#get_flag_index_deltas func get_active_increments*( info: altair.EpochInfo | bellatrix.BeaconState): uint64 = info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas # Combines get_flag_index_deltas() and get_inactivity_penalty_deltas() template get_flag_and_inactivity_delta( @@ -843,7 +843,7 @@ func process_rewards_and_penalties*( decrease_balance(balance, v.delta.penalties) state.balances.asSeq()[idx] = balance -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#rewards-and-penalties +# 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#rewards-and-penalties func process_rewards_and_penalties*( cfg: RuntimeConfig, state: var (altair.BeaconState | bellatrix.BeaconState | @@ -951,7 +951,7 @@ func process_registry_updates*( # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-process_registry_updates func process_registry_updates*( - cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), + cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), cache: var StateCache): Result[void, cstring] = # Process activation eligibility and ejections for index in 0 ..< state.validators.len: @@ -976,8 +976,8 @@ func process_registry_updates*( ok() # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#slashings func get_adjusted_total_slashing_balance*( state: ForkyBeaconState, total_balance: Gwei): Gwei = const multiplier = @@ -995,17 +995,17 @@ func get_adjusted_total_slashing_balance*( {.fatal: "process_slashings: incorrect BeaconState type".} min(sum(state.slashings.data) * multiplier, total_balance) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#modified-process_slashings func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool = validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#modified-process_slashings func get_slashing_penalty*( consensusFork: static ConsensusFork, validator: Validator, @@ -1037,8 +1037,8 @@ func get_slashing_penalty*( static: doAssert false # 
https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#slashings -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#slashings +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#modified-process_slashings func get_slashing( state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei = @@ -1095,14 +1095,14 @@ func process_effective_balance_updates*(state: var ForkyBeaconState) = if new_effective_balance != effective_balance: state.validators.mitem(vidx).effective_balance = new_effective_balance -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#slashings-balances-updates +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#slashings-balances-updates func process_slashings_reset*(state: var ForkyBeaconState) = let next_epoch = get_current_epoch(state) + 1 # Reset slashings state.slashings[int(next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] = 0.Gwei -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#randao-mixes-updates +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#randao-mixes-updates func process_randao_mixes_reset*(state: var ForkyBeaconState) = let current_epoch = get_current_epoch(state) @@ -1120,7 +1120,7 @@ func compute_historical_root*(state: var ForkyBeaconState): Eth2Digest = hash_tree_root([ hash_tree_root(state.block_roots), hash_tree_root(state.state_roots)]) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#historical-roots-updates +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#historical-roots-updates func process_historical_roots_update*(state: var ForkyBeaconState) = ## Set historical root accumulator let next_epoch = get_current_epoch(state) + 1 @@ -1128,19 +1128,19 @@ func process_historical_roots_update*(state: var ForkyBeaconState) = if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0: # Equivalent to hash_tree_root(foo: HistoricalBatch), but without using # significant additional stack or heap. - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#historicalbatch + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#historicalbatch # In response to https://github.com/status-im/nimbus-eth2/issues/921 if not state.historical_roots.add state.compute_historical_root(): raiseAssert "no more room for historical roots, so long and thanks for the fish!" 
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#participation-records-rotation +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#participation-records-rotation func process_participation_record_updates*(state: var phase0.BeaconState) = # Rotate current/previous epoch attestations - using swap avoids copying all # elements using a slow genericSeqAssign state.previous_epoch_attestations.clear() swap(state.previous_epoch_attestations, state.current_epoch_attestations) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#participation-flags-updates +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#participation-flags-updates func process_participation_flag_updates*( state: var (altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | @@ -1190,7 +1190,7 @@ template compute_inactivity_update( min(cfg.INACTIVITY_SCORE_RECOVERY_RATE, inactivity_score) inactivity_score -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#inactivity-scores +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#inactivity-scores func process_inactivity_updates*( cfg: RuntimeConfig, state: var (altair.BeaconState | bellatrix.BeaconState | @@ -1243,7 +1243,7 @@ from ".."/validator_bucket_sort import # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#new-apply_pending_deposit func apply_pending_deposit( cfg: RuntimeConfig, state: var (electra.BeaconState | fulu.BeaconState), - deposit: PendingDeposit, validator_index: Opt[ValidatorIndex]): + deposit: PendingDeposit, validator_index: Opt[ValidatorIndex]): Result[void, cstring] = ## Applies ``deposit`` to the ``state``. if validator_index.isNone: @@ -1378,11 +1378,9 @@ func process_pending_consolidations*( return err("process_pending_consolidations: target index out of range") # Calculate the consolidated balance - let - max_effective_balance = get_max_effective_balance(source_validator) - source_effective_balance = min( - state.balances.item(pending_consolidation.source_index), - max_effective_balance) + let source_effective_balance = min( + state.balances.item(pending_consolidation.source_index), + source_validator.effective_balance) # Move active balance to target. Excess balance is withdrawable. 
decrease_balance(state, source_validator_index, source_effective_balance) @@ -1451,7 +1449,7 @@ func init*( deneb.BeaconState | electra.BeaconState | fulu.BeaconState): T = init(result, state) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#epoch-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#epoch-processing proc process_epoch*( cfg: RuntimeConfig, state: var (altair.BeaconState | bellatrix.BeaconState), @@ -1460,7 +1458,7 @@ proc process_epoch*( let epoch = get_current_epoch(state) info.init(state) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#justification-and-finalization + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#justification-and-finalization # [Modified in Altair] process_justification_and_finalization(state, info.balances, flags) @@ -1476,13 +1474,13 @@ proc process_epoch*( process_inactivity_updates(cfg, state, info) # [New in Altair] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#rewards-and-penalties + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#rewards-and-penalties process_rewards_and_penalties(cfg, state, info) # [Modified in Altair] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#registry-updates + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#registry-updates ? process_registry_updates(cfg, state, cache) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#slashings + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#slashings process_slashings(state, info.balances.current_epoch) # [Modified in Altair] process_eth1_data_reset(state) @@ -1495,7 +1493,7 @@ proc process_epoch*( ok() -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/beacon-chain.md#epoch-processing +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/beacon-chain.md#epoch-processing proc process_epoch*( cfg: RuntimeConfig, state: var (capella.BeaconState | deneb.BeaconState), @@ -1504,7 +1502,7 @@ proc process_epoch*( let epoch = get_current_epoch(state) info.init(state) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#justification-and-finalization + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization process_justification_and_finalization(state, info.balances, flags) # state.slot hasn't been incremented yet. @@ -1520,13 +1518,13 @@ proc process_epoch*( process_inactivity_updates(cfg, state, info) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#rewards-and-penalties + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#rewards-and-penalties process_rewards_and_penalties(cfg, state, info) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates ? 
process_registry_updates(cfg, state, cache) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#slashings + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#slashings process_slashings(state, info.balances.current_epoch) process_eth1_data_reset(state) @@ -1547,7 +1545,7 @@ proc process_epoch*( let epoch = get_current_epoch(state) info.init(state) - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/beacon-chain.md#justification-and-finalization + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization process_justification_and_finalization(state, info.balances, flags) # state.slot hasn't been incremented yet. @@ -1687,4 +1685,4 @@ proc get_next_slot_expected_withdrawals*( # validator_index is defined by an injected symbol within the template get_validator_balance_after_epoch( cfg, state, cache, info, validator_index.ValidatorIndex) - res + res \ No newline at end of file diff --git a/beacon_chain/spec/validator.nim b/beacon_chain/spec/validator.nim index f9f5554a93..1039bcc19a 100644 --- a/beacon_chain/spec/validator.nim +++ b/beacon_chain/spec/validator.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -158,7 +158,7 @@ func get_shuffled_active_validator_indices*( withState(state): cache.get_shuffled_active_validator_indices(forkyState.data, epoch) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#get_active_validator_indices +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#get_active_validator_indices func count_active_validators*(state: ForkyBeaconState, epoch: Epoch, cache: var StateCache): uint64 = @@ -189,7 +189,7 @@ iterator get_committee_indices*(committee_count_per_slot: uint64): CommitteeInde let committee_index = CommitteeIndex.init(idx).expect("value clamped") yield committee_index -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_committee +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/beacon-chain.md#compute_committee func compute_committee_slice*( active_validators, index, count: uint64): Slice[int] = doAssert active_validators <= ValidatorIndex.high.uint64 @@ -349,8 +349,9 @@ func compute_inverted_shuffled_index*( countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_proposer_index -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-compute_proposer_index -template compute_proposer_index(state: ForkyBeaconState, +template compute_proposer_index( + state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | + capella.BeaconState | deneb.BeaconState, indices: openArray[ValidatorIndex], seed: Eth2Digest, unshuffleTransform: untyped): Opt[ValidatorIndex] = ## Return from ``indices`` a random index sampled by effective balance. 
@@ -388,6 +389,50 @@ template compute_proposer_index(state: ForkyBeaconState, doAssert res.isSome res +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/electra/beacon-chain.md#modified-compute_proposer_index +template compute_proposer_index( + state: electra.BeaconState | fulu.BeaconState, + indices: openArray[ValidatorIndex], seed: Eth2Digest, + unshuffleTransform: untyped): Opt[ValidatorIndex] = + ## Return from ``indices`` a random index sampled by effective balance. + const MAX_RANDOM_VALUE = 65536 - 1 # [Modified in Electra] + + if len(indices) == 0: + Opt.none(ValidatorIndex) + else: + let seq_len {.inject.} = indices.lenu64 + + var + i = 0'u64 + buffer: array[32+8, byte] + rv_buf: array[8, byte] + res: Opt[ValidatorIndex] + buffer[0..31] = seed.data + while true: + buffer[32..39] = uint_to_bytes(i div 16) # [Modified in Electra] + let + shuffled_index {.inject.} = + compute_shuffled_index(i mod seq_len, seq_len, seed) + candidate_index = indices[unshuffleTransform] + random_bytes = eth2digest(buffer).data + offset = (i mod 16) * 2 + effective_balance = state.validators[candidate_index].effective_balance + rv_buf[0 .. 1] = random_bytes.toOpenArray(offset, offset + 1) + let random_value = bytes_to_uint64(rv_buf) + const max_effective_balance = + when typeof(state).kind >= ConsensusFork.Electra: + MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei # [Modified in Electra:EIP7251] + else: + MAX_EFFECTIVE_BALANCE.Gwei + if effective_balance * MAX_RANDOM_VALUE >= + max_effective_balance * random_value: + res = Opt.some(candidate_index) + break + i += 1 + + doAssert res.isSome + res + func compute_proposer_index(state: ForkyBeaconState, indices: openArray[ValidatorIndex], seed: Eth2Digest): Opt[ValidatorIndex] = @@ -521,7 +566,7 @@ func livenessFailsafeInEffect*( false -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-subscription func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64): SubnetId = # Ensure neither `truncate` loses information @@ -545,7 +590,7 @@ func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64): ) SubnetId((permutated_prefix + index) mod ATTESTATION_SUBNET_COUNT) -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-subscription +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#attestation-subnet-subscription iterator compute_subscribed_subnets*(node_id: UInt256, epoch: Epoch): SubnetId = for index in 0'u64 ..< SUBNETS_PER_NODE: yield compute_subscribed_subnet(node_id, epoch, index) diff --git a/beacon_chain/spec/weak_subjectivity.nim b/beacon_chain/spec/weak_subjectivity.nim index 95a624eaca..3834ab1d23 100644 --- a/beacon_chain/spec/weak_subjectivity.nim +++ b/beacon_chain/spec/weak_subjectivity.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
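The Electra/Fulu compute_proposer_index template added above samples 2-byte random values, sixteen per 32-byte eth2digest output (the buffer is keyed by i div 16 and the offset is (i mod 16) * 2), rather than the single bytes used pre-Electra, and accepts a candidate when effective_balance * MAX_RANDOM_VALUE >= max_effective_balance * random_value, i.e. with probability roughly effective_balance / max_effective_balance against the 2048 ETH Electra ceiling. A stand-alone sketch of just that acceptance test, with illustrative balances:

const
  MAX_RANDOM_VALUE = 65536'u64 - 1                              # 2-byte random values post-Electra
  MAX_EFFECTIVE_BALANCE_ELECTRA = 2048'u64 * 1_000_000_000'u64  # Gwei

func accepted(effectiveBalance, randomValue: uint64): bool =
  ## The per-candidate test from the template above; randomValue is the
  ## 16-bit value drawn from the hash output.
  effectiveBalance * MAX_RANDOM_VALUE >=
    MAX_EFFECTIVE_BALANCE_ELECTRA * randomValue

when isMainModule:
  # A 32 ETH validator passes for randomValue 0..1023 of the 65536 possible
  # values, i.e. with probability 32/2048.
  let balance32eth = 32'u64 * 1_000_000_000'u64
  doAssert accepted(balance32eth, 1023)
  doAssert not accepted(balance32eth, 1024)
  # A 2048 ETH validator is accepted for every possible random value.
  doAssert accepted(MAX_EFFECTIVE_BALANCE_ELECTRA, MAX_RANDOM_VALUE)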
@@ -10,10 +10,10 @@ import ./datatypes/base, ./beaconstate, ./forks, ./helpers -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md#configuration +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/weak-subjectivity.md#configuration const SAFETY_DECAY* = 10'u64 -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period func compute_weak_subjectivity_period( cfg: RuntimeConfig, state: ForkyBeaconState): uint64 = ## Returns the weak subjectivity period for the current ``state``. @@ -49,7 +49,7 @@ func compute_weak_subjectivity_period( ws_period -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot, ws_state: ForkedHashedBeaconState, ws_checkpoint: Checkpoint): bool = diff --git a/beacon_chain/sync/light_client_manager.nim b/beacon_chain/sync/light_client_manager.nim index 22b687a3a1..a74f85e3ea 100644 --- a/beacon_chain/sync/light_client_manager.nim +++ b/beacon_chain/sync/light_client_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -330,9 +330,12 @@ template query[E]( ): Future[bool].Raising([CancelledError]) = self.query(e, Nothing()) -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/light-client.md#light-client-sync-process +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md#light-client-sync-process proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} = - var nextSyncTaskTime = self.getBeaconTime() + var + nextSyncTaskTime = self.getBeaconTime() + wasGossipSupported = false + haveFinalityUpdate = false while true: # Periodically wake and check for changes let wallTime = self.getBeaconTime() @@ -373,16 +376,32 @@ proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} = await self.query(UpdatesByRange, (startPeriod: syncTask.startPeriod, count: syncTask.count)) of LcSyncKind.FinalityUpdate: + haveFinalityUpdate = true await self.query(FinalityUpdate) of LcSyncKind.OptimisticUpdate: - await self.query(OptimisticUpdate) - - nextSyncTaskTime = wallTime + self.rng.nextLcSyncTaskDelay( - wallTime, - finalized = self.getFinalizedPeriod(), - optimistic = self.getOptimisticPeriod(), - isNextSyncCommitteeKnown = self.isNextSyncCommitteeKnown(), - didLatestSyncTaskProgress = didProgress) + if not haveFinalityUpdate: + haveFinalityUpdate = true + await self.query(FinalityUpdate) + else: + await self.query(OptimisticUpdate) + + let + finalized = self.getFinalizedPeriod() + optimistic = self.getOptimisticPeriod() + isNextSyncCommitteeKnown = self.isNextSyncCommitteeKnown() + isGossipSupported = + current.isGossipSupported(finalized, isNextSyncCommitteeKnown) + nextSyncTaskTime = + if not wasGossipSupported and isGossipSupported: + # Obtain an extra finality update after finishing sync + # to avoid having to wait several minutes for finality gossip + haveFinalityUpdate = false + wallTime + else: + wallTime + self.rng.nextLcSyncTaskDelay( + wallTime, finalized, optimistic, isNextSyncCommitteeKnown, + didLatestSyncTaskProgress = didProgress) + wasGossipSupported = isGossipSupported proc start*(self: var LightClientManager) = ## Start light client manager's loop. diff --git a/beacon_chain/sync/light_client_protocol.nim b/beacon_chain/sync/light_client_protocol.nim index 15e0389bbb..268f27d3f6 100644 --- a/beacon_chain/sync/light_client_protocol.nim +++ b/beacon_chain/sync/light_client_protocol.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
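On the light_client_manager.nim loop hunk above: the loop now remembers whether light-client gossip was already supported and, on the transition into the gossip-supported state, schedules one immediate extra FinalityUpdate query instead of waiting out the usual randomized delay, so a freshly synced node does not sit for minutes without a finality update. A stripped-down sketch of that scheduling decision only; booleans and a plain delay stand in for the real BeaconTime and rng machinery:

type SyncSchedule = object
  runImmediately: bool    # fetch an extra finality update right away
  delaySeconds: int       # otherwise wait out the usual randomized delay

func nextSchedule(wasGossipSupported, isGossipSupported: bool,
                  usualDelaySeconds: int): SyncSchedule =
  ## Mirrors the branch added above: only the false -> true transition
  ## triggers the immediate extra query.
  if not wasGossipSupported and isGossipSupported:
    SyncSchedule(runImmediately: true, delaySeconds: 0)
  else:
    SyncSchedule(runImmediately: false, delaySeconds: usualDelaySeconds)

when isMainModule:
  doAssert nextSchedule(false, true, 30).runImmediately
  doAssert not nextSchedule(true, true, 30).runImmediately
  doAssert not nextSchedule(false, false, 30).runImmediately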
@@ -90,7 +90,7 @@ p2pProtocol LightClientSync(version = 1, debug "LC bootstrap request done", peer, blockRoot - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange proc lightClientUpdatesByRange( peer: Peer, startPeriod: SyncCommitteePeriod, @@ -134,7 +134,7 @@ p2pProtocol LightClientSync(version = 1, debug "LC updates by range request done", peer, startPeriod, count, found - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate proc lightClientFinalityUpdate( peer: Peer, response: SingleChunkResponse[ForkedLightClientFinalityUpdate]) @@ -160,7 +160,7 @@ p2pProtocol LightClientSync(version = 1, debug "LC finality update request done", peer - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate proc lightClientOptimisticUpdate( peer: Peer, response: SingleChunkResponse[ForkedLightClientOptimisticUpdate]) diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim index 79767e553c..657ccfd7ff 100644 --- a/beacon_chain/sync/request_manager.nim +++ b/beacon_chain/sync/request_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -7,17 +7,19 @@ {.push raises: [].} -import std/[sequtils, strutils] import chronos, chronicles import - ../spec/datatypes/[phase0, deneb], - ../spec/[forks, network], + ../spec/[forks, network, peerdas_helpers], ../networking/eth2_network, ../consensus_object_pools/block_quarantine, ../consensus_object_pools/blob_quarantine, + ../consensus_object_pools/data_column_quarantine, "."/sync_protocol, "."/sync_manager, ../gossip_processing/block_processor +from std/algorithm import binarySearch, sort +from std/sequtils import mapIt +from std/strutils import join from ../beacon_clock import GetBeaconTimeFn export block_quarantine, sync_manager @@ -25,43 +27,57 @@ logScope: topics = "requman" const - SYNC_MAX_REQUESTED_BLOCKS* = 32 # Spec allows up to MAX_REQUEST_BLOCKS. + SYNC_MAX_REQUESTED_BLOCKS = 32 # Spec allows up to MAX_REQUEST_BLOCKS. ## Maximum number of blocks which will be requested in each ## `beaconBlocksByRoot` invocation. - PARALLEL_REQUESTS* = 2 - ## Number of peers we using to resolve our request. + PARALLEL_REQUESTS = 2 - ## Number of peers we're using to resolve our request. - BLOB_GOSSIP_WAIT_TIME_NS* = 2 * 1_000_000_000 - ## How long to wait for blobs to arrive over gossip before fetching. + PARALLEL_REQUESTS_DATA_COLUMNS = 32 + + BLOB_GOSSIP_WAIT_TIME_NS = 2 * 1_000_000_000 + ## How long to wait for blobs to arrive over gossip before fetching.
+ + DATA_COLUMN_GOSSIP_WAIT_TIME_NS = 2 * 1_000_000_000 + ## How long to wait for data columns to arrive over gossip before fetching. POLL_INTERVAL = 1.seconds type - BlockVerifierFn* = proc( + BlockVerifierFn = proc( signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} - BlockLoaderFn* = proc( + BlockLoaderFn = proc( blockRoot: Eth2Digest ): Opt[ForkedTrustedSignedBeaconBlock] {.gcsafe, raises: [].} - BlobLoaderFn* = proc( + BlobLoaderFn = proc( blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].} - InhibitFn* = proc: bool {.gcsafe, raises: [].} + DataColumnLoaderFn = proc( + columnId: DataColumnIdentifier): + Opt[ref DataColumnSidecar] {.gcsafe, raises: [].} + + InhibitFn = proc: bool {.gcsafe, raises: [].} RequestManager* = object network*: Eth2Node + supernode*: bool + custody_columns_set: HashSet[ColumnIndex] getBeaconTime: GetBeaconTimeFn inhibit: InhibitFn quarantine: ref Quarantine blobQuarantine: ref BlobQuarantine + dataColumnQuarantine: ref DataColumnQuarantine blockVerifier: BlockVerifierFn blockLoader: BlockLoaderFn blobLoader: BlobLoaderFn + dataColumnLoader: DataColumnLoaderFn blockLoopFuture: Future[void].Raising([CancelledError]) blobLoopFuture: Future[void].Raising([CancelledError]) + dataColumnLoopFuture: Future[void].Raising([CancelledError]) func shortLog*(x: seq[Eth2Digest]): string = "[" & x.mapIt(shortLog(it)).join(", ") & "]" @@ -70,25 +86,33 @@ func shortLog*(x: seq[FetchRecord]): string = "[" & x.mapIt(shortLog(it.root)).join(", ") & "]" proc init*(T: type RequestManager, network: Eth2Node, + supernode: bool, + custody_columns_set: HashSet[ColumnIndex], denebEpoch: Epoch, getBeaconTime: GetBeaconTimeFn, inhibit: InhibitFn, quarantine: ref Quarantine, blobQuarantine: ref BlobQuarantine, + dataColumnQuarantine: ref DataColumnQuarantine, blockVerifier: BlockVerifierFn, blockLoader: BlockLoaderFn = nil, - blobLoader: BlobLoaderFn = nil): RequestManager = + blobLoader: BlobLoaderFn = nil, + dataColumnLoader: DataColumnLoaderFn = nil): RequestManager = RequestManager( network: network, + supernode: supernode, + custody_columns_set: custody_columns_set, getBeaconTime: getBeaconTime, inhibit: inhibit, quarantine: quarantine, blobQuarantine: blobQuarantine, + dataColumnQuarantine: dataColumnQuarantine, blockVerifier: blockVerifier, blockLoader: blockLoader, - blobLoader: blobLoader) + blobLoader: blobLoader, + dataColumnLoader: dataColumnLoader) -proc checkResponse(roots: openArray[Eth2Digest], +func checkResponse(roots: openArray[Eth2Digest], blocks: openArray[ref ForkedSignedBeaconBlock]): bool = ## This procedure checks peer's response.
var checks = @roots @@ -102,20 +126,71 @@ proc checkResponse(roots: openArray[Eth2Digest], checks.del(res) true -proc checkResponse(idList: seq[BlobIdentifier], - blobs: openArray[ref BlobSidecar]): bool = - if len(blobs) > len(idList): +func cmpSidecarIdentifier(x: BlobIdentifier | DataColumnIdentifier, + y: ref BlobSidecar | ref DataColumnSidecar): int = + cmp(x.index, y[].index) + +func checkResponseSanity(idList: seq[BlobIdentifier], + blobs: openArray[ref BlobSidecar]): bool = + # Cannot respond more than what I have asked + if blobs.len > idList.len: return false - for blob in blobs: - let block_root = hash_tree_root(blob.signed_block_header.message) - var found = false - for id in idList: - if id.block_root == block_root and id.index == blob.index: - found = true - break - if not found: + var i = 0 + while i < blobs.len: + let + block_root = + hash_tree_root(blobs[i][].signed_block_header.message) + idListKey = binarySearch(idList, blobs[i], cmpSidecarIdentifier) + + # Verify the block root + if idList[idListKey].block_root != block_root: + return false + + # Verify inclusion proof + blobs[i][].verify_blob_sidecar_inclusion_proof().isOkOr: return false - blob[].verify_blob_sidecar_inclusion_proof().isOkOr: + inc i + true + +func checkResponseSubset(idList: seq[BlobIdentifier], + blobs: openArray[ref BlobSidecar]): bool = + ## Clients MUST respond with at least one sidecar, if they have it. + ## Clients MAY limit the number of blocks and sidecars in the response. + ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1 + for blb in blobs: + if binarySearch(idList, blb, cmpSidecarIdentifier) == -1: + return false + true + +func checkResponseSanity(idList: seq[DataColumnIdentifier], + columns: openArray[ref DataColumnSidecar]): bool = + # Cannot respond more than what I have asked + if columns.len > idList.len: + return false + var i = 0 + while i < columns.len: + let + block_root = + hash_tree_root(columns[i][].signed_block_header.message) + idListKey = binarySearch(idList, columns[i], cmpSidecarIdentifier) + + # Verify the block root + if idList[idListKey].block_root != block_root: + return false + + # Verify inclusion proof + columns[i][].verify_data_column_sidecar_inclusion_proof().isOkOr: + return false + inc i + true + +func checkResponseSubset(idList: seq[DataColumnIdentifier], + columns: openArray[ref DataColumnSidecar]): bool = + ## Clients MUST respond with at least one sidecar, if they have it. + ## Clients MAY limit the number of blocks and sidecars in the response. 
+ ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1 + for col in columns: + if binarySearch(idList, col, cmpSidecarIdentifier) == -1: return false true @@ -190,6 +265,9 @@ proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: if not(isNil(peer)): rman.network.peerPool.release(peer) +func cmpSidecarIndexes(x, y: ref BlobSidecar | ref DataColumnSidecar): int = + cmp(x[].index, y[].index) + proc fetchBlobsFromNetwork(self: RequestManager, idList: seq[BlobIdentifier]) {.async: (raises: [CancelledError]).} = @@ -200,12 +278,20 @@ proc fetchBlobsFromNetwork, debug "Requesting blobs by root", peer = peer, blobs = shortLog(idList), peer_score = peer.getScore() - let blobs = await blobSidecarsByRoot(peer, BlobIdentifierList idList) + let blobs = await blobSidecarsByRoot( + peer, BlobIdentifierList idList, maxResponseItems = idList.len) if blobs.isOk: - let ublobs = blobs.get() - if not checkResponse(idList, ublobs.asSeq()): - debug "Mismatched response to blobs by root", + var ublobs = blobs.get().asSeq() + ublobs.sort(cmpSidecarIndexes) + if not checkResponseSanity(idList, ublobs): + debug "Response to blobs by root have erroneous block root", + peer = peer, blobs = shortLog(idList), ublobs = len(ublobs) + peer.updateScore(PeerScoreBadResponse) + return + + if not checkResponseSubset(idList, ublobs): + debug "Response to blobs by root is not a subset", peer = peer, blobs = shortLog(idList), ublobs = len(ublobs) peer.updateScore(PeerScoreBadResponse) return @@ -232,6 +318,95 @@ proc fetchBlobsFromNetwork, if not(isNil(peer)): self.network.peerPool.release(peer) +proc checkPeerCustody(rman: RequestManager, + peer: Peer): + bool = + # Returns true if the peer custodies at least + # ONE of the common custody columns, and returns + # true immediately if the peer is a supernode.
+ if rman.supernode: + # For a supernode, it is always best/optimistic + # to filter other supernodes, rather than filter + # too many full nodes that have a subset of the custody + # columns + if peer.lookupCgcFromPeer() == + NUMBER_OF_CUSTODY_GROUPS.uint64: + return true + + else: + if peer.lookupCgcFromPeer() == + NUMBER_OF_CUSTODY_GROUPS.uint64: + return true + + elif peer.lookupCgcFromPeer() == + CUSTODY_REQUIREMENT.uint64: + + # Fetch the remote custody count + let remoteCustodyGroupCount = + peer.lookupCgcFromPeer() + + # Extract remote peer's nodeID from peerID + # Fetch custody columns from remote peer + let + remoteNodeId = fetchNodeIdFromPeerId(peer) + remoteCustodyColumns = + remoteNodeId.resolve_column_sets_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, + remoteCustodyGroupCount)) + + for local_column in rman.custody_columns_set: + if local_column notin remoteCustodyColumns: + return false + + return true + + else: + return false + +proc fetchDataColumnsFromNetwork(rman: RequestManager, + colIdList: seq[DataColumnIdentifier]) + {.async: (raises: [CancelledError]).} = + var peer = await rman.network.peerPool.acquire() + try: + if rman.checkPeerCustody(peer): + debug "Requesting data columns by root", peer = peer, columns = shortLog(colIdList), + peer_score = peer.getScore() + let columns = await dataColumnSidecarsByRoot(peer, DataColumnIdentifierList colIdList) + + if columns.isOk: + var ucolumns = columns.get().asSeq() + ucolumns.sort(cmpSidecarIndexes) + if not checkResponseSanity(colIdList, ucolumns): + debug "Response to columns by root have erroneous block root", + peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns) + peer.updateScore(PeerScoreBadResponse) + return + + if not checkResponseSubset(colIdList, ucolumns): + debug "Response to columns by root is not a subset", + peer = peer, columns = shortLog(colIdList), ucolumns = len(ucolumns) + peer.updateScore(PeerScoreBadResponse) + return + + for col in ucolumns: + rman.dataColumnQuarantine[].put(col) + var curRoot: Eth2Digest + for col in ucolumns: + let block_root = hash_tree_root(col.signed_block_header.message) + if block_root != curRoot: + curRoot = block_root + if (let o = rman.quarantine[].popColumnless(curRoot); o.isSome): + let col = o.unsafeGet() + discard await rman.blockVerifier(col, false) + else: + debug "Data columns by root request not done, peer doesn't have custody column", + peer = peer, columns = shortLog(colIdList), err = columns.error() + peer.updateScore(PeerScoreNoValues) + + finally: + if not(isNil(peer)): + rman.network.peerPool.release(peer) + proc requestManagerBlockLoop( rman: RequestManager) {.async: (raises: [CancelledError]).} = while true: @@ -400,10 +575,117 @@ proc requestManagerBlobLoop( blobs_count = len(blobIds), sync_speed = speed(start, finish) +proc getMissingDataColumns(rman: RequestManager): HashSet[DataColumnIdentifier] = + let + wallTime = rman.getBeaconTime() + wallSlot = wallTime.slotOrZero() + delay = wallTime - wallSlot.start_beacon_time() + + const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS) + + var + fetches: HashSet[DataColumnIdentifier] + ready: seq[Eth2Digest] + + for columnless in rman.quarantine[].peekColumnless(): + withBlck(columnless): + when consensusFork >= ConsensusFork.Fulu: + # granting data columns a chance to arrive over gossip + if forkyBlck.message.slot == wallSlot and delay < waitDur: + debug "Not handling missing data columns early in slot" + continue + + if not 
rman.dataColumnQuarantine[].hasMissingDataColumns(forkyBlck): + let missing = rman.dataColumnQuarantine[].dataColumnFetchRecord(forkyBlck) + if len(missing.indices) == 0: + warn "quarantine is missing data columns, but missing indices are empty", + blk = columnless.root, + commitments = len(forkyBlck.message.body.blob_kzg_commitments) + for idx in missing.indices: + let id = DataColumnIdentifier(block_root: columnless.root, index: idx) + if id.index in rman.custody_columns_set and id notin fetches and + len(forkyBlck.message.body.blob_kzg_commitments) != 0: + fetches.incl(id) + else: + # this is a programming error and it not should occur + warn "missing column handler found columnless block with all data columns", + blk = columnless.root, + commitments = len(forkyBlck.message.body.blob_kzg_commitments) + ready.add(columnless.root) + + for root in ready: + let columnless = rman.quarantine[].popColumnless(root).valueOr: + continue + discard rman.blockVerifier(columnless, false) + fetches + +proc requestManagerDataColumnLoop( + rman: RequestManager) {.async: (raises: [CancelledError]).} = + while true: + + await sleepAsync(POLL_INTERVAL) + if rman.inhibit(): + continue + + let missingColumnIds = rman.getMissingDataColumns() + if missingColumnIds.len == 0: + continue + + var columnIds: seq[DataColumnIdentifier] + if rman.dataColumnLoader == nil: + for item in missingColumnIds: + columnIds.add item + else: + var + blockRoots: seq[Eth2Digest] + curRoot: Eth2Digest + for columnId in missingColumnIds: + if columnId.block_root != curRoot: + curRoot = columnId.block_root + blockRoots.add curRoot + let data_column_sidecar = rman.dataColumnLoader(columnId).valueOr: + columnIds.add columnId + if blockRoots.len > 0 and blockRoots[^1] == curRoot: + # A data column is missing, remove from list of fully available data columns + discard blockRoots.pop() + continue + debug "Loaded orphaned data columns from storage", columnId + rman.dataColumnQuarantine[].put(data_column_sidecar) + var verifiers = newSeqOfCap[ + Future[Result[void, VerifierError]] + .Raising([CancelledError])](blockRoots.len) + for blockRoot in blockRoots: + let blck = rman.quarantine[].popColumnless(blockRoot).valueOr: + continue + verifiers.add rman.blockVerifier(blck, maybeFinalized = false) + try: + await allFutures(verifiers) + except CancelledError as exc: + var futs = newSeqOfCap[Future[void].Raising([])](verifiers.len) + for verifier in verifiers: + futs.add verifier.cancelAndWait() + await noCancel allFutures(futs) + raise exc + if columnIds.len > 0: + debug "Requesting detected missing data columns", columns = shortLog(columnIds) + let start = SyncMoment.now(0) + var workers: + array[PARALLEL_REQUESTS_DATA_COLUMNS, Future[void].Raising([CancelledError])] + for i in 0..= man.DENEB_FORK_EPOCH) and + (epoch >= man.DENEB_FORK_EPOCH) and (epoch < man.FULU_FORK_EPOCH) and (wallEpoch < man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS or epoch >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) proc shouldGetBlobs[A, B](man: SyncManager[A, B], r: SyncRequest[A]): bool = - man.shouldGetBlobs(r.slot) or man.shouldGetBlobs(r.slot + (r.count - 1)) + man.shouldGetBlobs(r.data.slot) or + man.shouldGetBlobs(r.data.slot + (r.data.count - 1)) proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A, req: SyncRequest[A]): Future[BlobSidecarsRes] {.async: (raises: [CancelledError], raw: true).} = mixin getScore, `==` - logScope: - peer_score = peer.getScore() - peer_speed = peer.netKbps() - sync_ident = man.ident - direction = man.direction - 
topics = "syncman" - doAssert(not(req.isEmpty()), "Request must not be empty!") - debug "Requesting blobs sidecars from peer", request = req - blobSidecarsByRange(peer, req.slot, req.count) + debug "Requesting blob sidecars from peer", + request = req, + peer_score = req.item.getScore(), + peer_speed = req.item.netKbps(), + sync_ident = man.ident, + topics = "syncman" + + blobSidecarsByRange( + peer, req.data.slot, req.data.count, + maxResponseItems = (req.data.count * man.MAX_BLOBS_PER_BLOCK_ELECTRA).Limit) proc remainingSlots(man: SyncManager): uint64 = let @@ -238,8 +255,8 @@ proc remainingSlots(man: SyncManager): uint64 = 0'u64 func groupBlobs*( - blocks: seq[ref ForkedSignedBeaconBlock], - blobs: seq[ref BlobSidecar] + blocks: openArray[ref ForkedSignedBeaconBlock], + blobs: openArray[ref BlobSidecar] ): Result[seq[BlobSidecars], string] = var grouped = newSeq[BlobSidecars](len(blocks)) @@ -283,17 +300,17 @@ func checkBlobs(blobs: seq[BlobSidecars]): Result[void, string] = proc getSyncBlockData*[T]( peer: T, - slot: Slot + slot: Slot, + maxBlobsPerBlockElectra: uint64 ): Future[SyncBlockDataRes] {.async: (raises: [CancelledError]).} = mixin getScore - logScope: - slot = slot - peer_score = peer.getScore() - peer_speed = peer.netKbps() - topics = "syncman" - - debug "Requesting block from peer" + debug "Requesting block from peer", + slot = slot, + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + topics = "syncman" let blocksRange = block: @@ -312,7 +329,12 @@ proc getSyncBlockData*[T]( return err("Incorrect number of blocks was returned by peer, " & $len(blocksRange)) - debug "Received block on request" + debug "Received block on request", + slot = slot, + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + topics = "syncman" if blocksRange[0][].slot != slot: peer.updateScore(PeerScoreBadResponse) @@ -333,8 +355,14 @@ proc getSyncBlockData*[T]( if shouldGetBlob: let blobData = block: - debug "Requesting blobs sidecars from peer" - let res = await blobSidecarsByRange(peer, slot, 1'u64) + debug "Requesting blob sidecars from peer", + slot = slot, + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + topics = "syncman" + let res = await blobSidecarsByRange( + peer, slot, 1'u64, maxResponseItems = maxBlobsPerBlockElectra.Limit) if res.isErr(): peer.updateScore(PeerScoreNoValues) return err( @@ -349,7 +377,13 @@ proc getSyncBlockData*[T]( peer.updateScore(PeerScoreBadResponse) return err("Incorrect number of received blobs in the requested range") - debug "Received blobs on request", blobs_count = len(blobData) + debug "Received blobs on request", + slot = slot, + blobs_count = len(blobData), + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + topics = "syncman" let groupedBlobs = groupBlobs(blocksRange, blobData).valueOr: peer.updateScore(PeerScoreNoValues) @@ -365,84 +399,205 @@ proc getSyncBlockData*[T]( ok(SyncBlockData(blocks: blocksRange, blobs: blobsRange)) -proc syncStep[A, B]( - man: SyncManager[A, B], index: int, peer: A -) {.async: (raises: [CancelledError]).} = - logScope: - peer_score = peer.getScore() - peer_speed = peer.netKbps() - index = index - sync_ident = man.ident - topics = "syncman" +proc getSyncBlockData[A, B]( + man: SyncManager[A, B], + index: int, + sr: SyncRequest[A] +): Future[SyncBlockDataRes] {.async: (raises: [CancelledError]).} = + let + peer = sr.item + blocks = (await man.getBlocks(peer, sr)).valueOr: + peer.updateScore(PeerScoreNoValues) + return 
err("Failed to receive blocks on request, reason: " & $error) + blockSlots = mapIt(blocks, it[].slot) + + debug "Received blocks on request", + request = sr, + peer_score = sr.item.getScore(), + peer_speed = sr.item.netKbps(), + index = index, + blocks_count = len(blocks), + blocks_map = getShortMap(sr, blocks.toSeq()), + sync_ident = man.ident, + topics = "syncman" + + checkResponse(sr, blockSlots).isOkOr: + peer.updateScore(PeerScoreBadResponse) + return err("Incorrect blocks sequence received, reason: " & $error) - var + let + shouldGetBlobs = + if not(man.shouldGetBlobs(sr)): + false + else: + var hasBlobs = false + for blck in blocks: + withBlck(blck[]): + when consensusFork >= ConsensusFork.Deneb: + if len(forkyBlck.message.body.blob_kzg_commitments) > 0: + hasBlobs = true + break + hasBlobs + blobs = + if shouldGetBlobs: + let + res = (await man.getBlobSidecars(peer, sr)).valueOr: + peer.updateScore(PeerScoreNoValues) + return err("Failed to receive blobs on request, reason: " & $error) + blobData = res.asSeq() + + debug "Received blobs on request", + request = sr, + peer_score = sr.item.getScore(), + peer_speed = sr.item.netKbps(), + index = index, + blobs_count = len(blobData), + blobs_map = getShortMap(sr, blobData), + sync_ident = man.ident, + topics = "syncman" + + if len(blobData) > 0: + let blobSlots = mapIt(blobData, it[].signed_block_header.message.slot) + checkBlobsResponse( + sr, blobSlots, man.MAX_BLOBS_PER_BLOCK_ELECTRA).isOkOr: + peer.updateScore(PeerScoreBadResponse) + return err("Incorrect blobs sequence received, reason: " & $error) + + let groupedBlobs = groupBlobs(blocks.asSeq(), blobData).valueOr: + peer.updateScore(PeerScoreNoValues) + return err( + "Received blobs sequence is inconsistent, reason: " & error) + + groupedBlobs.checkBlobs().isOkOr: + peer.updateScore(PeerScoreBadResponse) + return err("Received blobs verification failed, reason: " & error) + Opt.some(groupedBlobs) + else: + Opt.none(seq[BlobSidecars]) + + ok(SyncBlockData(blocks: blocks.asSeq(), blobs: blobs)) + +proc getOrUpdatePeerStatus[A, B]( + man: SyncManager[A, B], index: int, peer: A +): Future[Result[Slot, string]] {.async: (raises: [CancelledError]).} = + let headSlot = man.getLocalHeadSlot() wallSlot = man.getLocalWallSlot() peerSlot = peer.getHeadSlot() - block: # Check that peer status is recent and relevant - logScope: - peer = peer - direction = man.direction - - debug "Peer's syncing status", wall_clock_slot = wallSlot, - remote_head_slot = peerSlot, local_head_slot = headSlot - - let - peerStatusAge = Moment.now() - peer.getStatusLastTime() - needsUpdate = - # Latest status we got is old - peerStatusAge >= StatusExpirationTime or - # The point we need to sync is close to where the peer is - man.getFirstSlot() >= peerSlot + debug "Peer's syncing status", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_head_slot = peerSlot, + local_head_slot = headSlot, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" - if needsUpdate: - man.workers[index].status = SyncWorkerStatus.UpdatingStatus - - # Avoid a stampede of requests, but make them more frequent in case the - # peer is "close" to the slot range of interest - if peerStatusAge < StatusExpirationTime div 2: - await sleepAsync(StatusExpirationTime div 2 - peerStatusAge) - - trace "Updating peer's status information", wall_clock_slot = wallSlot, - remote_head_slot = peerSlot, local_head_slot = headSlot - - if not(await 
peer.updateStatus()): - peer.updateScore(PeerScoreNoStatus) - debug "Failed to get remote peer's status, exiting", - peer_head_slot = peerSlot - - return + let + peerStatusAge = Moment.now() - peer.getStatusLastTime() + needsUpdate = + # Latest status we got is old + peerStatusAge >= StatusExpirationTime or + # The point we need to sync is close to where the peer is + man.getFirstSlot() >= peerSlot + + if not(needsUpdate): + return ok(peerSlot) + + man.workers[index].status = SyncWorkerStatus.UpdatingStatus + + # Avoid a stampede of requests, but make them more frequent in case the + # peer is "close" to the slot range of interest + if peerStatusAge < (StatusExpirationTime div 2): + await sleepAsync((StatusExpirationTime div 2) - peerStatusAge) + + trace "Updating peer's status information", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_head_slot = peerSlot, + local_head_slot = headSlot, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" + + if not(await peer.updateStatus()): + peer.updateScore(PeerScoreNoStatus) + return err("Failed to get remote peer status") + + let newPeerSlot = peer.getHeadSlot() + if peerSlot >= newPeerSlot: + peer.updateScore(PeerScoreStaleStatus) + debug "Peer's status information is stale", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_old_head_slot = peerSlot, + local_head_slot = headSlot, + remote_new_head_slot = newPeerSlot, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" + else: + debug "Peer's status information updated", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_old_head_slot = peerSlot, + local_head_slot = headSlot, + remote_new_head_slot = newPeerSlot, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" + peer.updateScore(PeerScoreGoodStatus) + ok(newPeerSlot) - let newPeerSlot = peer.getHeadSlot() - if peerSlot >= newPeerSlot: - peer.updateScore(PeerScoreStaleStatus) - debug "Peer's status information is stale", - wall_clock_slot = wallSlot, remote_old_head_slot = peerSlot, - local_head_slot = headSlot, remote_new_head_slot = newPeerSlot - else: - debug "Peer's status information updated", wall_clock_slot = wallSlot, - remote_old_head_slot = peerSlot, local_head_slot = headSlot, - remote_new_head_slot = newPeerSlot - peer.updateScore(PeerScoreGoodStatus) - peerSlot = newPeerSlot +proc syncStep[A, B]( + man: SyncManager[A, B], index: int, peer: A +) {.async: (raises: [CancelledError]).} = - # Time passed - enough to move slots, if sleep happened + let + peerSlot = (await man.getOrUpdatePeerStatus(index, peer)).valueOr: + return headSlot = man.getLocalHeadSlot() wallSlot = man.getLocalWallSlot() if man.remainingSlots() <= man.maxHeadAge: - logScope: - peer = peer - direction = man.direction - case man.direction of SyncQueueKind.Forward: - info "We are in sync with network", wall_clock_slot = wallSlot, - remote_head_slot = peerSlot, local_head_slot = headSlot + info "We are in sync with network", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_head_slot = peerSlot, + local_head_slot = headSlot, + direction = man.direction, + sync_ident = man.ident, + topics = "syncman" of SyncQueueKind.Backward: - info "Backfill complete", 
wall_clock_slot = wallSlot, - remote_head_slot = peerSlot, local_head_slot = headSlot + info "Backfill complete", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_head_slot = peerSlot, + local_head_slot = headSlot, + direction = man.direction, + sync_ident = man.ident, + topics = "syncman" # We clear SyncManager's `notInSyncEvent` so all the workers will become # sleeping soon. @@ -462,158 +617,103 @@ proc syncStep[A, B]( # Right now we decreasing peer's score a bit, so it will not be # disconnected due to low peer's score, but new fresh peers could replace # peers with low latest head. - debug "Peer's head slot is lower then local head slot", peer = peer, - wall_clock_slot = wallSlot, remote_head_slot = peerSlot, + debug "Peer's head slot is lower then local head slot", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + wall_clock_slot = wallSlot, + remote_head_slot = peerSlot, local_last_slot = man.getLastSlot(), local_first_slot = man.getFirstSlot(), - direction = man.direction + direction = man.direction, + sync_ident = man.ident, + topics = "syncman" peer.updateScore(PeerScoreUseless) return # Wall clock keeps ticking, so we need to update the queue man.queue.updateLastSlot(man.getLastSlot()) - man.workers[index].status = SyncWorkerStatus.Requesting - let req = man.queue.pop(peerSlot, peer) - if req.isEmpty(): - # SyncQueue could return empty request in 2 cases: - # 1. There no more slots in SyncQueue to download (we are synced, but - # our ``notInSyncEvent`` is not yet cleared). - # 2. Current peer's known head slot is too low to satisfy request. - # - # To avoid endless loop we going to wait for RESP_TIMEOUT time here. - # This time is enough for all pending requests to finish and it is also - # enough for main sync loop to clear ``notInSyncEvent``. 
- debug "Empty request received from queue, exiting", peer = peer, - local_head_slot = headSlot, remote_head_slot = peerSlot, - queue_input_slot = man.queue.inpSlot, - queue_output_slot = man.queue.outSlot, - queue_last_slot = man.queue.finalSlot, direction = man.direction - await sleepAsync(RESP_TIMEOUT_DUR) - return - - debug "Creating new request for peer", wall_clock_slot = wallSlot, - remote_head_slot = peerSlot, local_head_slot = headSlot, - request = req - - man.workers[index].status = SyncWorkerStatus.Downloading - - let blocks = await man.getBlocks(peer, req) - if blocks.isErr(): - peer.updateScore(PeerScoreNoValues) - man.queue.push(req) - debug "Failed to receive blocks on request", - request = req, err = blocks.error - return - let blockData = blocks.get().asSeq() - debug "Received blocks on request", blocks_count = len(blockData), - blocks_map = getShortMap(req, blockData), request = req - - let slots = mapIt(blockData, it[].slot) - if not(checkResponse(req, slots)): - peer.updateScore(PeerScoreBadResponse) - man.queue.push(req) - warn "Received blocks sequence is not in requested range", - blocks_count = len(blockData), - blocks_map = getShortMap(req, blockData), request = req - return - - let shouldGetBlobs = - if not man.shouldGetBlobs(req): - false - else: - var hasBlobs = false - for blck in blockData: - withBlck(blck[]): - when consensusFork >= ConsensusFork.Deneb: - if forkyBlck.message.body.blob_kzg_commitments.len > 0: - hasBlobs = true - break - hasBlobs - - let blobData = - if shouldGetBlobs: - let blobs = await man.getBlobSidecars(peer, req) - if blobs.isErr(): - peer.updateScore(PeerScoreNoValues) - man.queue.push(req) - debug "Failed to receive blobs on request", - request = req, err = blobs.error - return - let blobData = blobs.get().asSeq() - debug "Received blobs on request", - blobs_count = len(blobData), - blobs_map = getShortMap(req, blobData), request = req - - if len(blobData) > 0: - let slots = mapIt(blobData, it[].signed_block_header.message.slot) - if not(checkResponse(req, slots)): - peer.updateScore(PeerScoreBadResponse) - man.queue.push(req) - warn "Received blobs sequence is not in requested range", - blobs_count = len(blobData), - blobs_map = getShortMap(req, blobData), - request = req - return - let groupedBlobs = groupBlobs(blockData, blobData).valueOr: - peer.updateScore(PeerScoreNoValues) - man.queue.push(req) - info "Received blobs sequence is inconsistent", - blobs_map = getShortMap(req, blobData), - request = req, msg = error - return - if (let checkRes = groupedBlobs.checkBlobs(); checkRes.isErr): - peer.updateScore(PeerScoreBadResponse) - man.queue.push(req) - warn "Received blobs sequence is invalid", - blobs_count = len(blobData), - blobs_map = getShortMap(req, blobData), - request = req, - msg = checkRes.error - return - Opt.some(groupedBlobs) - else: - Opt.none(seq[BlobSidecars]) + proc processCallback() = + man.workers[index].status = SyncWorkerStatus.Processing - if len(blockData) == 0 and man.direction == SyncQueueKind.Backward and - req.contains(man.getSafeSlot()): - # The sync protocol does not distinguish between: - # - All requested slots are empty - # - Peer does not have data available about requested range - # - # However, we include the `backfill` slot in backward sync requests. - # If we receive an empty response to a request covering that slot, - # we know that the response is incomplete and can descore. 
- peer.updateScore(PeerScoreNoValues) - man.queue.push(req) - debug "Response does not include known-to-exist block", request = req - return - - # Scoring will happen in `syncUpdate`. - man.workers[index].status = SyncWorkerStatus.Queueing - let - peerFinalized = peer.getFinalizedEpoch().start_slot() - lastSlot = req.slot + req.count - # The peer claims the block is finalized - our own block processing will - # verify this point down the line - # TODO descore peers that lie - maybeFinalized = lastSlot < peerFinalized + var jobs: seq[Future[void].Raising([CancelledError])] - await man.queue.push(req, blockData, blobData, maybeFinalized, proc() = - man.workers[index].status = SyncWorkerStatus.Processing) + try: + for rindex in 0 ..< man.concurrentRequestsCount: + man.workers[index].status = SyncWorkerStatus.Requesting + let request = man.queue.pop(peerSlot, peer) + if request.isEmpty(): + # SyncQueue could return empty request in 2 cases: + # 1. There no more slots in SyncQueue to download (we are synced, but + # our ``notInSyncEvent`` is not yet cleared). + # 2. Current peer's known head slot is too low to satisfy request. + # + # To avoid endless loop we going to wait for RESP_TIMEOUT time here. + # This time is enough for all pending requests to finish and it is also + # enough for main sync loop to clear ``notInSyncEvent``. + debug "Empty request received from queue", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + local_head_slot = headSlot, + remote_head_slot = peerSlot, + queue_input_slot = man.queue.inpSlot, + queue_output_slot = man.queue.outSlot, + queue_last_slot = man.queue.finalSlot, + direction = man.direction, + sync_ident = man.ident, + topics = "syncman" + await sleepAsync(RESP_TIMEOUT_DUR) + break + + man.workers[index].status = SyncWorkerStatus.Downloading + let data = (await man.getSyncBlockData(index, request)).valueOr: + debug "Failed to get block data", + peer = peer, + peer_score = peer.getScore(), + peer_speed = peer.netKbps(), + index = index, + reason = error, + direction = man.direction, + sync_ident = man.ident, + topics = "syncman" + man.queue.push(request) + break + + # Scoring will happen in `syncUpdate`. 
+ man.workers[index].status = SyncWorkerStatus.Queueing + let + peerFinalized = peer.getFinalizedEpoch().start_slot() + lastSlot = request.data.slot + request.data.count - 1 + # The peer claims the block is finalized - our own block processing will + # verify this point down the line + # TODO descore peers that lie + maybeFinalized = lastSlot < peerFinalized + + jobs.add(man.queue.push(request, data.blocks, data.blobs, maybeFinalized, + processCallback)) + + if len(jobs) > 0: + await allFutures(jobs) + + except CancelledError as exc: + let pending = jobs.filterIt(not(it.finished)).mapIt(cancelAndWait(it)) + await noCancel allFutures(pending) + raise exc proc syncWorker[A, B]( man: SyncManager[A, B], index: int ) {.async: (raises: [CancelledError]).} = mixin getKey, getScore, getHeadSlot - logScope: - index = index - sync_ident = man.ident - direction = man.direction - topics = "syncman" - - debug "Starting syncing worker" + debug "Starting syncing worker", + index = index, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" var peer: A = nil @@ -631,7 +731,11 @@ proc syncWorker[A, B]( if not(isNil(peer)): man.pool.release(peer) - debug "Sync worker stopped" + debug "Sync worker stopped", + index = index, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" proc getWorkersStats[A, B](man: SyncManager[A, B]): tuple[map: string, sleeping: int, @@ -716,18 +820,20 @@ proc syncClose[A, B]( proc syncLoop[A, B]( man: SyncManager[A, B] ) {.async: (raises: [CancelledError]).} = - - logScope: - sync_ident = man.ident - direction = man.direction - topics = "syncman" - mixin getKey, getScore - var pauseTime = 0 + + # Update SyncQueue parameters, because callbacks used to calculate parameters + # could provide different values at moment when syncLoop() started. 
+ man.initQueue() man.startWorkers() - debug "Synchronization loop started" + debug "Synchronization loop started", + sync_ident = man.ident, + direction = man.direction, + start_slot = man.queue.startSlot, + finish_slot = man.queue.finalSlot, + topics = "syncman" proc averageSpeedTask() {.async: (raises: [CancelledError]).} = while true: @@ -775,9 +881,11 @@ proc syncLoop[A, B]( pending_workers_count = pending, wall_head_slot = wallSlot, local_head_slot = headSlot, - pause_time = $chronos.seconds(pauseTime), avg_sync_speed = man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4), - ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4) + ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4), + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" of SyncQueueKind.Backward: debug "Current syncing state", workers_map = map, sleeping_workers_count = sleeping, @@ -785,21 +893,25 @@ proc syncLoop[A, B]( pending_workers_count = pending, wall_head_slot = wallSlot, backfill_slot = man.getSafeSlot(), - pause_time = $chronos.seconds(pauseTime), avg_sync_speed = man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4), - ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4) + ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4), + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" let pivot = man.progressPivot progress = case man.queue.kind of SyncQueueKind.Forward: - if man.queue.outSlot >= pivot: - man.queue.outSlot - pivot + let outSlot = min(man.queue.finalSlot, man.queue.outSlot) + if outSlot >= pivot: + outSlot - pivot else: 0'u64 of SyncQueueKind.Backward: - if pivot >= man.queue.outSlot: - pivot - man.queue.outSlot + let outSlot = max(man.queue.finalSlot, man.queue.outSlot) + if pivot >= outSlot: + pivot - outSlot else: 0'u64 total = @@ -852,10 +964,17 @@ proc syncLoop[A, B]( # all sync workers are in `Sleeping` state. if pending > 0: debug "Synchronization loop waits for workers completion", - wall_head_slot = wallSlot, local_head_slot = headSlot, - difference = (wallSlot - headSlot), max_head_age = man.maxHeadAge, + wall_head_slot = wallSlot, + local_head_slot = headSlot, + difference = (wallSlot - headSlot), + max_head_age = man.maxHeadAge, sleeping_workers_count = sleeping, - waiting_workers_count = waiting, pending_workers_count = pending + waiting_workers_count = waiting, + pending_workers_count = pending, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" + # We already synced, so we should reset all the pending workers from # any state they have. 
man.queue.clearAndWakeup() @@ -868,21 +987,33 @@ proc syncLoop[A, B]( await man.syncClose(averageSpeedTaskFut) man.inProgress = false debug "Forward synchronization process finished, exiting", - wall_head_slot = wallSlot, local_head_slot = headSlot, + wall_head_slot = wallSlot, + local_head_slot = headSlot, difference = (wallSlot - headSlot), - max_head_age = man.maxHeadAge + max_head_age = man.maxHeadAge, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" break else: man.inProgress = false debug "Forward synchronization process finished, sleeping", - wall_head_slot = wallSlot, local_head_slot = headSlot, + wall_head_slot = wallSlot, + local_head_slot = headSlot, difference = (wallSlot - headSlot), - max_head_age = man.maxHeadAge + max_head_age = man.maxHeadAge, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" else: - debug "Synchronization loop sleeping", wall_head_slot = wallSlot, + debug "Synchronization loop sleeping", + wall_head_slot = wallSlot, local_head_slot = headSlot, difference = (wallSlot - headSlot), - max_head_age = man.maxHeadAge + max_head_age = man.maxHeadAge, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" of SyncQueueKind.Backward: # Backward syncing is going to be executed only once, so we exit loop # and stop all pending tasks which belongs to this instance (sync @@ -890,9 +1021,13 @@ proc syncLoop[A, B]( await man.syncClose(averageSpeedTaskFut) man.inProgress = false debug "Backward synchronization process finished, exiting", - wall_head_slot = wallSlot, local_head_slot = headSlot, + wall_head_slot = wallSlot, + local_head_slot = headSlot, backfill_slot = man.getLastSlot(), - max_head_age = man.maxHeadAge + max_head_age = man.maxHeadAge, + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" break else: if not(man.notInSyncEvent.isSet()): @@ -902,10 +1037,14 @@ proc syncLoop[A, B]( man.notInSyncEvent.fire() man.inProgress = true debug "Node lost sync for more then preset period", - period = man.maxHeadAge, wall_head_slot = wallSlot, + period = man.maxHeadAge, + wall_head_slot = wallSlot, local_head_slot = headSlot, missing_slots = man.remainingSlots(), - progress = float(man.queue.progress()) + progress = float(man.queue.progress()), + sync_ident = man.ident, + direction = man.direction, + topics = "syncman" else: man.notInSyncEvent.fire() man.inProgress = true diff --git a/beacon_chain/sync/sync_overseer.nim b/beacon_chain/sync/sync_overseer.nim index 4dbf70cc8e..225e740ca1 100644 --- a/beacon_chain/sync/sync_overseer.nim +++ b/beacon_chain/sync/sync_overseer.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
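The `syncLoop` changes in `sync_manager.nim` above clamp the queue's output slot to `finalSlot` before computing progress, so the reported progress can no longer overshoot the sync target. A small self-contained sketch of the forward-direction arithmetic, with plain `uint64` slots standing in for the Nimbus `Slot` type and an illustrative function name:

func forwardProgress(pivot, outSlot, finalSlot: uint64): uint64 =
  # Clamp to the final slot so progress never exceeds the target range and
  # never underflows when the queue has not yet reached the pivot.
  let clamped = min(finalSlot, outSlot)
  if clamped >= pivot: clamped - pivot else: 0'u64

when isMainModule:
  doAssert forwardProgress(100'u64, 150'u64, 140'u64) == 40'u64  # clamped at finalSlot
  doAssert forwardProgress(100'u64, 120'u64, 140'u64) == 20'u64
  doAssert forwardProgress(100'u64, 90'u64, 140'u64) == 0'u64    # behind the pivot

The backward direction mirrors this with `max(finalSlot, outSlot)`.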
@@ -89,7 +89,8 @@ proc getPeerBlock( let peer = await overseer.pool.acquire() try: let - res = (await getSyncBlockData(peer, slot)).valueOr: + maxBlobs = overseer.consensusManager.dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA + res = (await getSyncBlockData(peer, slot, maxBlobs)).valueOr: return err(error) blob = if res.blobs.isSome(): @@ -148,6 +149,22 @@ proc isWithinWeakSubjectivityPeriod( is_within_weak_subjectivity_period( dag.cfg, currentSlot, dag.headState, checkpoint) +proc getLastBlockRetentionPeriodSlot(overseer: SyncOverseerRef): Slot = + let + dag = overseer.consensusManager.dag + currentSlot = overseer.beaconClock.now().slotOrZero() + slotsCount = dag.cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS * SLOTS_PER_EPOCH + if currentSlot < slotsCount: + GENESIS_SLOT + else: + currentSlot - slotsCount + +proc isWithinBlockRetentionPeriod( + overseer: SyncOverseerRef, + slot: Slot +): bool = + slot >= overseer.getLastBlockRetentionPeriodSlot() + proc isUntrustedBackfillEmpty(clist: ChainListRef): bool = clist.tail.isNone() @@ -321,9 +338,9 @@ proc rebuildState(overseer: SyncOverseerRef): Future[void] {. let fork = - getStateField(dag.headState, fork) + getStateField(dag.clearanceState, fork) genesis_validators_root = - getStateField(dag.headState, genesis_validators_root) + getStateField(dag.clearanceState, genesis_validators_root) verifyBlockProposer(batchVerifier[], fork, genesis_validators_root, dag.db.immutableValidators, blocksOnly).isOkOr: @@ -418,6 +435,15 @@ proc mainLoop*( clist = overseer.clist currentSlot = overseer.beaconClock.now().slotOrZero() + info "Sync overseer starting", + wall_slot = currentSlot, + dag_head_slot = dag.head.slot, + dag_finalized_head_slot = dag.finalizedHead.slot, + dag_horizon = dag.horizon(), + dag_backfill_slot = dag.backfill.slot, + untrusted_tail = shortLog(clist.tail), + untrusted_head = shortLog(clist.head) + if overseer.isWithinWeakSubjectivityPeriod(currentSlot): # Starting forward sync manager/monitor. overseer.forwardSync.start() @@ -433,10 +459,12 @@ proc mainLoop*( if not(isUntrustedBackfillEmpty(clist)): let headSlot = clist.head.get().slot - if not(overseer.isWithinWeakSubjectivityPeriod(headSlot)): + if not(overseer.isWithinBlockRetentionPeriod(headSlot)): # Light forward sync file is too old. - warn "Light client sync was started too long time ago", - current_slot = currentSlot, backfill_data_slot = headSlot + warn "Light forward sync was started too long time ago", + current_slot = currentSlot, + backfill_data_slot = headSlot, + retention_period_slot = overseer.getLastBlockRetentionPeriodSlot() if overseer.config.longRangeSync == LongRangeSyncMode.Lenient: # Starting forward sync manager/monitor only. @@ -451,6 +479,13 @@ proc mainLoop*( altair_start_slot = dag.cfg.ALTAIR_FORK_EPOCH.start_slot quit 1 + if overseer.isWithinBlockRetentionPeriod(dagHead.slot): + fatal "Current database head slot is not in the block retention " & + "period range", + head_slot = dagHead.slot, + retention_period_slot = overseer.getLastBlockRetentionPeriodSlot() + quit 1 + if isUntrustedBackfillEmpty(clist): overseer.untrustedInProgress = true @@ -458,6 +493,7 @@ proc mainLoop*( await overseer.initUntrustedSync() except CancelledError: return + # We need to update pivot slot to enable timeleft calculation. overseer.untrustedSync.updatePivot(overseer.clist.tail.get().slot) # Note: We should not start forward sync manager! 
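`getLastBlockRetentionPeriodSlot` and `isWithinBlockRetentionPeriod` in `sync_overseer.nim` above gate light forward sync on whether the requested history can still be served by peers. A minimal sketch of the same computation, assuming the mainnet preset `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` and `SLOTS_PER_EPOCH = 32`, with plain `uint64` slots (0 standing in for `GENESIS_SLOT`) instead of the Nimbus `Slot` type:

const
  SLOTS_PER_EPOCH = 32'u64
  MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024'u64  # mainnet preset value

func lastRetainedSlot(currentSlot: uint64): uint64 =
  # Anything older than MIN_EPOCHS_FOR_BLOCK_REQUESTS worth of slots may no
  # longer be served by peers, so it marks the start of the retention window.
  let span = MIN_EPOCHS_FOR_BLOCK_REQUESTS * SLOTS_PER_EPOCH
  if currentSlot < span: 0'u64 else: currentSlot - span

func isWithinBlockRetentionPeriod(currentSlot, slot: uint64): bool =
  slot >= lastRetainedSlot(currentSlot)

when isMainModule:
  doAssert isWithinBlockRetentionPeriod(2_000_000'u64, 1_500_000'u64)
  doAssert not isWithinBlockRetentionPeriod(2_000_000'u64, 500_000'u64)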
diff --git a/beacon_chain/sync/sync_protocol.nim b/beacon_chain/sync/sync_protocol.nim index d79ecb10ad..4b1450655e 100644 --- a/beacon_chain/sync/sync_protocol.nim +++ b/beacon_chain/sync/sync_protocol.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,7 +9,6 @@ import chronicles, chronos, snappy, snappy/codec, - ../spec/datatypes/[phase0, altair, bellatrix, capella, deneb], ../spec/[helpers, forks, network], ".."/[beacon_clock], ../networking/eth2_network, @@ -24,6 +23,8 @@ const ## Allow syncing ~64 blocks/sec (minus request costs) blobResponseCost = allowedOpsPerSecondCost(1000) ## Multiple can exist per block, they are much smaller than blocks + dataColumnResponseCost = allowedOpsPerSecondCost(8000) + ## 8 data columns take the same memory as 1 blob approximately type BeaconSyncNetworkState* {.final.} = ref object of RootObj @@ -36,7 +37,10 @@ type slot: Slot BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS] - BlobIdentifierList* = List[BlobIdentifier, Limit (MAX_REQUEST_BLOB_SIDECARS)] + BlobIdentifierList* = List[ + BlobIdentifier, Limit MAX_SUPPORTED_REQUEST_BLOB_SIDECARS] + DataColumnIdentifierList* = List[ + DataColumnIdentifier, Limit (MAX_REQUEST_DATA_COLUMN_SIDECARS)] proc readChunkPayload*( conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)): @@ -80,8 +84,125 @@ proc readChunkPayload*( else: return neterr InvalidContextBytes +proc readChunkPayload*( + conn: Connection, peer: Peer, MsgType: type (ref DataColumnSidecar)): + Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} = + var contextBytes: ForkDigest + try: + await conn.readExactly(addr contextBytes, sizeof contextBytes) + except CatchableError: + return neterr UnexpectedEOF + let contextFork = + peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr: + return neterr InvalidContextBytes + + withConsensusFork(contextFork): + when consensusFork >= ConsensusFork.Fulu: + let res = await readChunkPayload(conn, peer, DataColumnSidecar) + if res.isOk: + return ok newClone(res.get) + else: + return err(res.error) + else: + return neterr InvalidContextBytes + {.pop.} # TODO fix p2p macro for raises +template getBlobSidecarsByRoot( + versionNumber: static string, peer: Peer, dag: ChainDAGRef, response: auto, + blobIds: BlobIdentifierList, maxReqSidecars: uint64) = + trace "got v" & versionNumber & " blobs range request", + peer, len = blobIds.len + if blobIds.len == 0: + raise newException(InvalidInputsError, "No blobs requested") + if blobIds.lenu64 > maxReqSidecars: + raise newException(InvalidInputsError, "Exceeding blob request limit") + + let count = blobIds.len + + var + found = 0 + bytes: seq[byte] + + for i in 0..= dag.head.slot.epoch: + GENESIS_EPOCH + else: + dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + + if startSlot.epoch < epochBoundary: + raise newException(ResourceUnavailableError, BlobsOutOfRange) + + var blockIds: array[MAX_SUPPORTED_REQUEST_BLOB_SIDECARS.int, BlockId] + let + count = int min(reqCount, maxReqSidecars) + endIndex = count - 1 + startIndex = + dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex)) + + var + found = 0 + bytes: seq[byte] + + for i in 
startIndex..endIndex: + for j in 0.. MAX_REQUEST_DATA_COLUMN_SIDECARS: + raise newException(InvalidInputsError, "Exceeding data column request limit") let dag = peer.networkState.dag - count = blobIds.len + count = colIds.len var found = 0 bytes: seq[byte] for i in 0..= dag.head.slot.epoch: GENESIS_EPOCH @@ -297,49 +468,51 @@ p2pProtocol BeaconSync(version = 1, dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS if startSlot.epoch < epochBoundary: - raise newException(ResourceUnavailableError, BlobsOutOfRange) + raise newException(ResourceUnavailableError, DataColumnsOutOfRange) - var blockIds: array[int(MAX_REQUEST_BLOB_SIDECARS), BlockId] + var blockIds: array[int(MAX_REQUEST_DATA_COLUMN_SIDECARS), BlockId] let count = int min(reqCount, blockIds.lenu64) endIndex = count - 1 startIndex = - dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex)) + dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex)) var found = 0 bytes: seq[byte] for i in startIndex..endIndex: - for j in 0..= dag.cfg.BELLATRIX_FORK_EPOCH and + for k in reqColumns: + if dag.db.getDataColumnSidecarSZ(blockIds[i].root, ColumnIndex k, bytes): + if blockIds[i].slot.epoch >= dag.cfg.DENEB_FORK_EPOCH and not dag.head.executionValid: continue let uncompressedLen = uncompressedLenFramed(bytes).valueOr: - warn "Cannot read blobs sidecar size, database corrupt?", - bytes = bytes.len(), blck = shortLog(blockIds[i]) + warn "Cannot read data column sidecar size, database corrup?", + bytes = bytes.len, blck = shortLog(blockIds[i]) continue - # TODO extract from libp2pProtocol - peer.awaitQuota(blobResponseCost, "blobs_sidecars_by_range/1") - peer.network.awaitQuota(blobResponseCost, "blobs_sidecars_by_range/1") + peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1") + peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1") await response.writeBytesSZ( uncompressedLen, bytes, peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data) inc found - else: - break - debug "BlobSidecar range request done", - peer, startSlot, count = reqCount, found + var + respondedCols: seq[ColumnIndex] + respondedCols.add(k) + + # additional logging for devnets + debug "responded to data column sidecar range request", + peer, blck = shortLog(blockIds[i]), columns = respondedCols + + debug "Data column range request done", + peer, startSlot, count = reqCount, columns = reqColumns, found -proc init*(T: type BeaconSync.NetworkState, dag: ChainDAGRef): T = +func init*(T: type BeaconSync.NetworkState, dag: ChainDAGRef): T = T( dag: dag, ) diff --git a/beacon_chain/sync/sync_queue.nim b/beacon_chain/sync/sync_queue.nim index 85b932e883..272935f78c 100644 --- a/beacon_chain/sync/sync_queue.nim +++ b/beacon_chain/sync/sync_queue.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
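The blob and data column by-range handlers in `sync_protocol.nim` above refuse requests that reach further back than the sidecar retention window relative to the current head epoch. A simplified sketch of that availability check, assuming the mainnet preset `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096` and plain `uint64` epochs rather than the Nimbus `Epoch` type:

const MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096'u64  # mainnet preset value

func sidecarsAvailable(headEpoch, requestEpoch: uint64): bool =
  # Requests older than the boundary are answered with
  # ResourceUnavailableError by the handlers above.
  let boundary =
    if MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= headEpoch:
      0'u64  # still within the first retention window after genesis
    else:
      headEpoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
  requestEpoch >= boundary

when isMainModule:
  doAssert sidecarsAvailable(10_000'u64, 9_000'u64)
  doAssert not sidecarsAvailable(10_000'u64, 5_000'u64)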
@@ -7,10 +7,9 @@ {.push raises: [].} -import std/[heapqueue, tables, strutils, sequtils, math] +import std/[deques, heapqueue, tables, strutils, sequtils, math, typetraits] import stew/base10, chronos, chronicles, results import - ../spec/datatypes/[base, phase0, altair], ../spec/[helpers, forks], ../networking/[peer_pool, eth2_network], ../gossip_processing/block_processor, @@ -19,9 +18,6 @@ import export base, phase0, altair, merge, chronos, chronicles, results, block_pools_types, helpers -logScope: - topics = "syncqueue" - type GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].} GetBoolCallback* = proc(): bool {.gcsafe, raises: [].} @@ -30,29 +26,52 @@ type blobs: Opt[BlobSidecars], maybeFinalized: bool): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} + SyncRange* = object + slot*: Slot + count*: uint64 + + SyncPosition* = object + qindex*: int + sindex*: int + SyncQueueKind* {.pure.} = enum Forward, Backward SyncRequest*[T] = object kind*: SyncQueueKind - index*: uint64 - slot*: Slot - count*: uint64 + data*: SyncRange item*: T - SyncResult*[T] = object - request*: SyncRequest[T] - data*: seq[ref ForkedSignedBeaconBlock] - blobs*: Opt[seq[BlobSidecars]] + SyncQueueItem[T] = object + requests: seq[SyncRequest[T]] + data: SyncRange + failuresCount: Natural - GapItem*[T] = object - start*: Slot - finish*: Slot - item*: T - - SyncWaiter* = ref object + SyncWaiterItem[T] = ref object future: Future[void].Raising([CancelledError]) - reset: bool + request: SyncRequest[T] + resetFlag: bool + + SyncProcessError {.pure.} = enum + Invalid, + MissingParent, + GoodAndMissingParent, + UnviableFork, + Duplicate, + Empty, + NoError + + SyncBlock = object + slot: Slot + root: Eth2Digest + + SyncProcessingResult = object + code: SyncProcessError + blck: Opt[SyncBlock] + + GapItem[T] = object + data: SyncRange + item: T RewindPoint = object failSlot: Slot @@ -64,39 +83,49 @@ type outSlot*: Slot startSlot*: Slot finalSlot*: Slot - chunkSize*: uint64 - queueSize*: int - counter*: uint64 - pending*: Table[uint64, SyncRequest[T]] - gapList*: seq[GapItem[T]] - waiters: seq[SyncWaiter] - getSafeSlot*: GetSlotCallback - debtsQueue: HeapQueue[SyncRequest[T]] - debtsCount: uint64 - readyQueue: HeapQueue[SyncResult[T]] - rewind: Option[RewindPoint] + rewind: Opt[RewindPoint] + chunkSize: uint64 + requestsCount: Natural + failureResetThreshold: Natural + requests: Deque[SyncQueueItem[T]] + getSafeSlot: GetSlotCallback blockVerifier: BlockVerifier - ident*: string + waiters: seq[SyncWaiterItem[T]] + gapList: seq[GapItem[T]] + lock: AsyncLock + ident: string chronicles.formatIt SyncQueueKind: toLowerAscii($it) -template shortLog*[T](req: SyncRequest[T]): string = - Base10.toString(uint64(req.slot)) & ":" & - Base10.toString(req.count) & "@" & - Base10.toString(req.index) +proc `$`*(srange: SyncRange): string = + "[" & Base10.toString(uint64(srange.slot)) & ":" & + Base10.toString(uint64(srange.slot + srange.count - 1)) & "]" + +template shortLog[T](req: SyncRequest[T]): string = + $req.data & "@" & Base10.toString(req.data.count) chronicles.expandIt SyncRequest: `it` = shortLog(it) peer = shortLog(it.item) direction = toLowerAscii($it.kind) -proc getShortMap*[T](req: SyncRequest[T], - data: openArray[ref ForkedSignedBeaconBlock]): string = +chronicles.formatIt Opt[SyncBlock]: + if it.isSome(): + Base10.toString(uint64(it.get().slot)) & "@" & shortLog(it.get().root) + else: + "" + +func getShortMap*[T]( + req: SyncRequest[T], + data: openArray[ref ForkedSignedBeaconBlock] +): string = ## 
Returns all slot numbers in ``data`` as placement map. - var res = newStringOfCap(req.count) - var slider = req.slot - var last = 0 - for i in 0 ..< req.count: + var + res = newStringOfCap(req.data.count) + slider = req.data.slot + last = 0 + + for i in 0 ..< req.data.count: if last < len(data): for k in last ..< len(data): if slider == data[k][].slot: @@ -113,345 +142,195 @@ proc getShortMap*[T](req: SyncRequest[T], proc getShortMap*[T](req: SyncRequest[T], data: openArray[ref BlobSidecar]): string = - ## Returns all slot numbers in ``data`` as placement map. - var res = newStringOfCap(req.count * MAX_BLOBS_PER_BLOCK) - var cur : uint64 = 0 - for slot in req.slot..= lenu64(data): - res.add('|') - continue - if slot == data[cur].signed_block_header.message.slot: - for k in cur..= lenu64(data) or slot != data[k].signed_block_header.message.slot: - res.add('|') + var + res = newStringOfCap(req.data.count) + slider = req.data.slot + last = 0 + + for i in 0 ..< req.data.count: + if last < len(data): + var counter = 0 + for k in last ..< len(data): + if slider < data[k][].signed_block_header.message.slot: break - else: - inc(cur) - res.add('x') + elif slider == data[k][].signed_block_header.message.slot: + inc(counter) + last = last + counter + if counter == 0: + res.add('.') + else: + res.add($counter) else: - res.add('|') + res.add('.') + slider = slider + 1 res -proc contains*[T](req: SyncRequest[T], slot: Slot): bool {.inline.} = - slot >= req.slot and slot < req.slot + req.count - -proc cmp*[T](a, b: SyncRequest[T]): int = - cmp(uint64(a.slot), uint64(b.slot)) - -proc checkResponse*[T](req: SyncRequest[T], - data: openArray[Slot]): bool = - if len(data) == 0: - # Impossible to verify empty response. - return true - - if uint64(len(data)) > req.count: - # Number of blocks in response should be less or equal to number of - # requested blocks. 
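# A minimal, self-contained sketch of the "placement map" idea behind the
# `getShortMap` helpers above: one character per requested slot, '.' where the
# response carries nothing for that slot and 'x' where a block is present.
# Plain uint64 slots and a seq stand in for the project's Slot and block
# types; the real helpers additionally print per-slot blob counts.
proc shortMap(startSlot, count: uint64, presentSlots: seq[uint64]): string =
  result = newStringOfCap(int(count))
  for slot in startSlot ..< (startSlot + count):
    if slot in presentSlots:
      result.add('x')
    else:
      result.add('.')

# A request for 8 slots starting at slot 100, answered with blocks at
# 100..102 and 105, renders as "xxx..x..".
doAssert shortMap(100, 8, @[100'u64, 101, 102, 105]) == "xxx..x.."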
- return false - - var slot = req.slot - var rindex = 0'u64 - var dindex = 0 - - while (rindex < req.count) and (dindex < len(data)): - if slot < data[dindex]: - discard - elif slot == data[dindex]: - inc(dindex) +proc getShortMap*[T]( + req: SyncRequest[T], + blobs: openArray[BlobSidecars] +): string = + var + res = newStringOfCap(req.data.count) + slider = req.data.slot + notFirst = false + + for i in 0 ..< int(req.data.count): + if i >= len(blobs): + res.add('.'.repeat(int(req.data.count) - len(res))) + return res + + if len(blobs[i]) > 0: + let slot = blobs[i][0][].signed_block_header.message.slot + if not(notFirst): + doAssert(slot >= slider, "Incorrect slot number in blobs list") + let firstCount = int(slot - slider) + res.add('.'.repeat(firstCount)) + res.add(Base10.toString(lenu64(blobs[i]))) + slider = slot + notFirst = true + else: + if slot == slider: + res.add(Base10.toString(lenu64(blobs[i]))) + else: + res.add('.') else: - return false - slot += 1'u64 - rindex += 1'u64 - - if dindex == len(data): - return true - else: - return false - -proc init[T](t1: typedesc[SyncRequest], kind: SyncQueueKind, start: Slot, - finish: Slot, t2: typedesc[T]): SyncRequest[T] = - let count = finish - start + 1'u64 - SyncRequest[T](kind: kind, slot: start, count: count) - -proc init[T](t1: typedesc[SyncRequest], kind: SyncQueueKind, slot: Slot, - count: uint64, item: T): SyncRequest[T] = - SyncRequest[T](kind: kind, slot: slot, count: count, item: item) - -proc init[T](t1: typedesc[SyncRequest], kind: SyncQueueKind, start: Slot, - finish: Slot, item: T): SyncRequest[T] = - let count = finish - start + 1'u64 - SyncRequest[T](kind: kind, slot: start, count: count, item: item) - -proc empty*[T](t: typedesc[SyncRequest], kind: SyncQueueKind, - t2: typedesc[T]): SyncRequest[T] {.inline.} = - SyncRequest[T](kind: kind, count: 0'u64) - -proc setItem*[T](sr: var SyncRequest[T], item: T) = - sr.item = item - -proc isEmpty*[T](sr: SyncRequest[T]): bool {.inline.} = - (sr.count == 0'u64) + if notFirst: res.add('.') + if notFirst: inc(slider) + res -proc init*[T](t1: typedesc[SyncQueue], t2: typedesc[T], - queueKind: SyncQueueKind, - start, final: Slot, chunkSize: uint64, - getSafeSlotCb: GetSlotCallback, - blockVerifier: BlockVerifier, - syncQueueSize: int = -1, - ident: string = "main"): SyncQueue[T] = - ## Create new synchronization queue with parameters - ## - ## ``start`` and ``final`` are starting and final Slots. - ## - ## ``chunkSize`` maximum number of slots in one request. - ## - ## ``syncQueueSize`` maximum queue size for incoming data. - ## If ``syncQueueSize > 0`` queue will help to keep backpressure under - ## control. If ``syncQueueSize <= 0`` then queue size is unlimited (default). - - # SyncQueue is the core of sync manager, this data structure distributes - # requests to peers and manages responses from peers. - # - # Because SyncQueue is async data structure it manages backpressure and - # order of incoming responses and it also resolves "joker's" problem. - # - # Joker's problem - # - # According to pre-v0.12.0 Ethereum consensus network specification - # > Clients MUST respond with at least one block, if they have it and it - # > exists in the range. Clients MAY limit the number of blocks in the - # > response. - # https://github.com/ethereum/consensus-specs/blob/v0.11.3/specs/phase0/p2p-interface.md#L590 - # - # Such rule can lead to very uncertain responses, for example let slots from - # 10 to 12 will be not empty. 
Client which follows specification can answer - # with any response from this list (X - block, `-` empty space): - # - # 1. X X X - # 2. - - X - # 3. - X - - # 4. - X X - # 5. X - - - # 6. X - X - # 7. X X - - # - # If peer answers with `1` everything will be fine and `block_processor` - # will be able to process all 3 blocks. - # In case of `2`, `3`, `4`, `6` - `block_processor` will fail immediately - # with chunk and report "parent is missing" error. - # But in case of `5` and `7` blocks will be processed by `block_processor` - # without any problems, however it will start producing problems right from - # this uncertain last slot. SyncQueue will start producing requests for next - # blocks, but all the responses from this point will fail with "parent is - # missing" error. Lets call such peers "jokers", because they are joking - # with responses. - # - # To fix "joker" problem we going to perform rollback to the latest finalized - # epoch's first slot. - # - # Note that as of spec v0.12.0, well-behaving clients are forbidden from - # answering this way. However, it still makes sense to attempt to handle - # this case to increase compatibility (e.g., with weak subjectivity nodes - # that are still backfilling blocks) - doAssert(chunkSize > 0'u64, "Chunk size should not be zero") - SyncQueue[T]( - kind: queueKind, - startSlot: start, - finalSlot: final, - chunkSize: chunkSize, - queueSize: syncQueueSize, - getSafeSlot: getSafeSlotCb, - waiters: newSeq[SyncWaiter](), - counter: 1'u64, - pending: initTable[uint64, SyncRequest[T]](), - debtsQueue: initHeapQueue[SyncRequest[T]](), - inpSlot: start, - outSlot: start, - blockVerifier: blockVerifier, - ident: ident +proc getShortMap*[T]( + req: SyncRequest[T], + data: Opt[seq[BlobSidecars]] +): string = + if data.isNone(): + return '.'.repeat(req.data.count) + getShortMap(req, data.get()) + +func init*(t: typedesc[SyncRange], slot: Slot, count: uint64): SyncRange = + SyncRange(slot: slot, count: count) + +func init(t: typedesc[SyncProcessError], + kind: VerifierError): SyncProcessError = + case kind + of VerifierError.Invalid: + SyncProcessError.Invalid + of VerifierError.MissingParent: + SyncProcessError.MissingParent + of VerifierError.UnviableFork: + SyncProcessError.UnviableFork + of VerifierError.Duplicate: + SyncProcessError.Duplicate + +func init(t: typedesc[SyncBlock], slot: Slot, root: Eth2Digest): SyncBlock = + SyncBlock(slot: slot, root: root) + +func init(t: typedesc[SyncProcessError]): SyncProcessError = + SyncProcessError.NoError + +func init(t: typedesc[SyncProcessingResult], se: SyncProcessError, + slot: Slot, root: Eth2Digest): SyncProcessingResult = + SyncProcessingResult(blck: Opt.some(SyncBlock.init(slot, root)), code: se) + +func init(t: typedesc[SyncProcessingResult], + se: SyncProcessError): SyncProcessingResult = + SyncProcessingResult(code: se) + +func init(t: typedesc[SyncProcessingResult], se: SyncProcessError, + sblck: SyncBlock): SyncProcessingResult = + SyncProcessingResult(blck: Opt.some(sblck), code: se) + +func init(t: typedesc[SyncProcessingResult], ve: VerifierError, + slot: Slot, root: Eth2Digest): SyncProcessingResult = + SyncProcessingResult(blck: Opt.some(SyncBlock.init(slot, root)), + code: SyncProcessError.init(ve)) + +func init(t: typedesc[SyncProcessingResult], ve: VerifierError, + sblck: SyncBlock): SyncProcessingResult = + SyncProcessingResult(blck: Opt.some(sblck), code: SyncProcessError.init(ve)) + +func init*[T](t: typedesc[SyncRequest], kind: SyncQueueKind, + item: T): SyncRequest[T] = + 
SyncRequest[T]( + kind: kind, + data: SyncRange(slot: FAR_FUTURE_SLOT, count: 0'u64), + item: item ) -proc `<`*[T](a, b: SyncRequest[T]): bool = - doAssert(a.kind == b.kind) - case a.kind - of SyncQueueKind.Forward: - a.slot < b.slot - of SyncQueueKind.Backward: - a.slot > b.slot - -proc `<`*[T](a, b: SyncResult[T]): bool = - doAssert(a.request.kind == b.request.kind) - case a.request.kind - of SyncQueueKind.Forward: - a.request.slot < b.request.slot - of SyncQueueKind.Backward: - a.request.slot > b.request.slot - -proc `==`*[T](a, b: SyncRequest[T]): bool = - (a.kind == b.kind) and (a.slot == b.slot) and (a.count == b.count) - -proc lastSlot*[T](req: SyncRequest[T]): Slot = - ## Returns last slot for request ``req``. - req.slot + req.count - 1'u64 - -proc makePending*[T](sq: SyncQueue[T], req: var SyncRequest[T]) = - req.index = sq.counter - sq.counter = sq.counter + 1'u64 - sq.pending[req.index] = req - -proc updateLastSlot*[T](sq: SyncQueue[T], last: Slot) {.inline.} = - ## Update last slot stored in queue ``sq`` with value ``last``. - sq.finalSlot = last +func init*[T](t: typedesc[SyncRequest], kind: SyncQueueKind, + data: SyncRange, item: T): SyncRequest[T] = + SyncRequest[T](kind: kind, data: data, item: item) + +func init[T](t: typedesc[SyncQueueItem], + req: SyncRequest[T]): SyncQueueItem[T] = + SyncQueueItem[T](data: req.data, requests: @[req]) + +func init[T](t: typedesc[GapItem], req: SyncRequest[T]): GapItem[T] = + GapItem[T](data: req.data, item: req.item) + +func next(srange: SyncRange): SyncRange {.inline.} = + let slot = srange.slot + srange.count + if slot == FAR_FUTURE_SLOT: + # Finish range + srange + elif slot < srange.slot: + # Range that causes uint64 overflow, fixing. + SyncRange.init(slot, uint64(FAR_FUTURE_SLOT - srange.count)) + else: + if slot + srange.count < slot: + SyncRange.init(slot, uint64(FAR_FUTURE_SLOT - srange.count)) + else: + SyncRange.init(slot, srange.count) -proc wakeupWaiters[T](sq: SyncQueue[T], reset = false) = - ## Wakeup one or all blocked waiters. - for item in sq.waiters: - if reset: - item.reset = true +func prev(srange: SyncRange): SyncRange {.inline.} = + if srange.slot == GENESIS_SLOT: + # Start range + srange + else: + let slot = srange.slot - srange.count + if slot > srange.slot: + # Range that causes uint64 underflow, fixing. + SyncRange.init(GENESIS_SLOT, uint64(srange.slot)) + else: + SyncRange.init(slot, srange.count) - if not(item.future.finished()): - item.future.complete() +func contains(srange: SyncRange, slot: Slot): bool {.inline.} = + ## Returns `true` if `slot` is in range of `srange`. + if (srange.slot + srange.count) < srange.slot: + (slot >= srange.slot) and (slot <= FAR_FUTURE_SLOT) + else: + (slot >= srange.slot) and (slot < (srange.slot + srange.count)) -proc waitForChanges[T](sq: SyncQueue[T]): Future[bool] {.async: (raises: [CancelledError]).} = - ## Create new waiter and wait for completion from `wakeupWaiters()`. - let waitfut = Future[void].Raising([CancelledError]).init("SyncQueue.waitForChanges") - let waititem = SyncWaiter(future: waitfut) - sq.waiters.add(waititem) - try: - await waitfut - return waititem.reset - finally: - sq.waiters.delete(sq.waiters.find(waititem)) +func `>`(a, b: SyncRange): bool {.inline.} = + ## Returns `true` if range `a` is above of range `b`. 
+ (a.slot > b.slot) and (a.slot + a.count - 1 > b.slot) -proc wakeupAndWaitWaiters[T](sq: SyncQueue[T]) {.async: (raises: [CancelledError]).} = - ## This procedure will perform wakeupWaiters(true) and blocks until last - ## waiter will be awakened. - var waitChanges = sq.waitForChanges() - sq.wakeupWaiters(true) - discard await waitChanges +func `<`(a, b: SyncRange): bool {.inline.} = + ## Returns `true` if range `a` is below of range `b`. + (a.slot < b.slot) and (a.slot + a.count - 1 < b.slot) -proc clearAndWakeup*[T](sq: SyncQueue[T]) = - sq.pending.clear() - sq.wakeupWaiters(true) +func `==`(a, b: SyncRange): bool {.inline.} = + (a.slot == b.slot) and (a.count == b.count) -proc resetWait*[T](sq: SyncQueue[T], toSlot: Option[Slot]) {.async: (raises: [CancelledError]).} = - ## Perform reset of all the blocked waiters in SyncQueue. - ## - ## We adding one more waiter to the waiters sequence and - ## call wakeupWaiters(true). Because our waiter is last in sequence of - ## waiters it will be resumed only after all waiters will be awakened and - ## finished. - - # We are clearing pending list, so that all requests that are still running - # around (still downloading, but not yet pushed to the SyncQueue) will be - # expired. Its important to perform this call first (before await), otherwise - # you can introduce race problem. - sq.pending.clear() - - # We calculating minimal slot number to which we will be able to reset, - # without missing any blocks. There 3 sources: - # 1. Debts queue. - # 2. Processing queue (`inpSlot`, `outSlot`). - # 3. Requested slot `toSlot`. - # - # Queue's `outSlot` is the lowest slot we added to `block_pool`, but - # `toSlot` slot can be less then `outSlot`. `debtsQueue` holds only not - # added slot requests, so it can't be bigger then `outSlot` value. - let minSlot = - case sq.kind - of SyncQueueKind.Forward: - if toSlot.isSome(): - min(toSlot.get(), sq.outSlot) - else: - sq.outSlot - of SyncQueueKind.Backward: - if toSlot.isSome(): - toSlot.get() - else: - sq.outSlot - sq.debtsQueue.clear() - sq.debtsCount = 0 - sq.readyQueue.clear() - sq.inpSlot = minSlot - sq.outSlot = minSlot - # We are going to wakeup all the waiters and wait for last one. - await sq.wakeupAndWaitWaiters() +func `==`[T](a, b: SyncRequest[T]): bool {.inline.} = + (a.kind == b.kind) and (a.item == b.item) and (a.data == b.data) -proc isEmpty*[T](sr: SyncResult[T]): bool {.inline.} = - ## Returns ``true`` if response chain of blocks is empty (has only empty - ## slots). - len(sr.data) == 0 - -proc hasEndGap*[T](sr: SyncResult[T]): bool {.inline.} = +proc hasEndGap*[T]( + sr: SyncRequest[T], + data: openArray[ref ForkedSignedBeaconBlock] +): bool {.inline.} = ## Returns ``true`` if response chain of blocks has gap at the end. - let lastslot = sr.request.slot + sr.request.count - 1'u64 - if len(sr.data) == 0: + if len(data) == 0: return true - if sr.data[^1][].slot != lastslot: + if data[^1][].slot != (sr.data.slot + sr.data.count - 1'u64): return true - return false - -proc getLastNonEmptySlot*[T](sr: SyncResult[T]): Slot {.inline.} = - ## Returns last non-empty slot from result ``sr``. If response has only - ## empty slots, original request slot will be returned. 
- if len(sr.data) == 0: - # If response has only empty slots we going to use original request slot - sr.request.slot - else: - sr.data[^1][].slot - -proc processGap[T](sq: SyncQueue[T], sr: SyncResult[T]) = - if sr.isEmpty(): - let gitem = GapItem[T](start: sr.request.slot, - finish: sr.request.slot + sr.request.count - 1'u64, - item: sr.request.item) - sq.gapList.add(gitem) - else: - if sr.hasEndGap(): - let gitem = GapItem[T](start: sr.getLastNonEmptySlot() + 1'u64, - finish: sr.request.slot + sr.request.count - 1'u64, - item: sr.request.item) - sq.gapList.add(gitem) - else: - sq.gapList.reset() - -proc rewardForGaps[T](sq: SyncQueue[T], score: int) = - mixin updateScore, getStats - logScope: - sync_ident = sq.ident - direction = sq.kind - topics = "syncman" - - for gap in sq.gapList: - if score < 0: - # Every empty response increases penalty by 25%, but not more than 200%. - let - emptyCount = gap.item.getStats(SyncResponseKind.Empty) - goodCount = gap.item.getStats(SyncResponseKind.Good) + false - if emptyCount <= goodCount: - gap.item.updateScore(score) - else: - let - weight = int(min(emptyCount - goodCount, 8'u64)) - newScore = score + score * weight div 4 - gap.item.updateScore(newScore) - debug "Peer received gap penalty", peer = gap.item, - penalty = newScore - else: - gap.item.updateScore(score) - -proc toDebtsQueue[T](sq: SyncQueue[T], sr: SyncRequest[T]) = - sq.debtsQueue.push(sr) - sq.debtsCount = sq.debtsCount + sr.count +proc updateLastSlot*[T](sq: SyncQueue[T], last: Slot) {.inline.} = + ## Update last slot stored in queue ``sq`` with value ``last``. + sq.finalSlot = last proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, safeSlot: Slot): Slot = - logScope: - sync_ident = sq.ident - direction = sq.kind - topics = "syncman" - case sq.kind of SyncQueueKind.Forward: # Calculate the latest finalized epoch. @@ -483,20 +362,30 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, rewindPoint else: warn "Trying to rewind over the last finalized epoch", - finalized_slot = safeSlot, fail_slot = failSlot, - finalized_epoch = finalizedEpoch, fail_epoch = failEpoch, + finalized_slot = safeSlot, + fail_slot = failSlot, + finalized_epoch = finalizedEpoch, + fail_epoch = failEpoch, rewind_epoch_count = rewind.epochCount, - finalized_epoch = finalizedEpoch + finalized_epoch = finalizedEpoch, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" 0'u64 else: # `MissingParent` happened at different slot so we going to rewind for # 1 epoch only. if (failEpoch < 1'u64) or (failEpoch - 1'u64 < finalizedEpoch): warn "Сould not rewind further than the last finalized epoch", - finalized_slot = safeSlot, fail_slot = failSlot, - finalized_epoch = finalizedEpoch, fail_epoch = failEpoch, + finalized_slot = safeSlot, + fail_slot = failSlot, + finalized_epoch = finalizedEpoch, + fail_epoch = failEpoch, rewind_epoch_count = rewind.epochCount, - finalized_epoch = finalizedEpoch + finalized_epoch = finalizedEpoch, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" 0'u64 else: 1'u64 @@ -504,18 +393,28 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, # `MissingParent` happened first time. 
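# A simplified, standalone illustration of the back-off idea behind
# `getRewindPoint`: when `MissingParent` keeps firing for the same slot, the
# number of epochs to rewind grows as a power of two, but never past the
# finalized epoch. Plain integers stand in for `Slot`/`Epoch`, and the slot is
# assumed to be past the finalized slot; this shows only the shape of the
# computation, not the exact project logic.
const slotsPerEpoch = 32'u64

proc rewindSlot(failSlot, safeSlot: uint64, attempt: int): uint64 =
  ## `attempt` is how many times MissingParent was already seen at `failSlot`.
  let
    failEpoch = failSlot div slotsPerEpoch
    finalizedEpoch = safeSlot div slotsPerEpoch
    epochCount = min(1'u64 shl attempt, failEpoch - finalizedEpoch)
  (failEpoch - epochCount) * slotsPerEpoch

# First failure at slot 1000 (epoch 31) with finalized epoch 25 rewinds one
# epoch; repeated failures rewind 2, then 4 epochs, and so on, but never
# beyond the finalized epoch.
doAssert rewindSlot(1000, 800, 0) == 30'u64 * slotsPerEpoch
doAssert rewindSlot(1000, 800, 1) == 29'u64 * slotsPerEpoch
doAssert rewindSlot(1000, 800, 4) == 25'u64 * slotsPerEpoch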
if (failEpoch < 1'u64) or (failEpoch - 1'u64 < finalizedEpoch): warn "Сould not rewind further than the last finalized epoch", - finalized_slot = safeSlot, fail_slot = failSlot, - finalized_epoch = finalizedEpoch, fail_epoch = failEpoch, - finalized_epoch = finalizedEpoch + finalized_slot = safeSlot, + fail_slot = failSlot, + finalized_epoch = finalizedEpoch, + fail_epoch = failEpoch, + finalized_epoch = finalizedEpoch, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" 0'u64 else: 1'u64 if epochCount == 0'u64: warn "Unable to continue syncing, please restart the node", - finalized_slot = safeSlot, fail_slot = failSlot, - finalized_epoch = finalizedEpoch, fail_epoch = failEpoch, - finalized_epoch = finalizedEpoch + finalized_slot = safeSlot, + fail_slot = failSlot, + finalized_epoch = finalizedEpoch, + fail_epoch = failEpoch, + finalized_epoch = finalizedEpoch, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" # Calculate the rewind epoch, which will be equal to last rewind point or # finalizedEpoch let rewindEpoch = @@ -529,468 +428,579 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot, # finalized epoch. let rewindEpoch = failEpoch - epochCount # Update and save new rewind point in SyncQueue. - sq.rewind = some(RewindPoint(failSlot: failSlot, epochCount: epochCount)) + sq.rewind = Opt.some( + RewindPoint(failSlot: failSlot, epochCount: epochCount)) rewindEpoch.start_slot() of SyncQueueKind.Backward: # While we perform backward sync, the only possible slot we could rewind is # latest stored block. if failSlot == safeSlot: warn "Unable to continue syncing, please restart the node", - safe_slot = safeSlot, fail_slot = failSlot + safe_slot = safeSlot, + fail_slot = failSlot, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" safeSlot -# This belongs inside the blocks iterator below, but can't be there due to -# https://github.com/nim-lang/Nim/issues/21242 -func getOpt(blobs: Opt[seq[BlobSidecars]], i: int): Opt[BlobSidecars] = - if blobs.isSome: - Opt.some(blobs.get()[i]) - else: - Opt.none(BlobSidecars) +func init*[T](t1: typedesc[SyncQueue], t2: typedesc[T], + queueKind: SyncQueueKind, + start, final: Slot, + chunkSize: uint64, + requestsCount: Natural, + failureResetThreshold: Natural, + getSafeSlotCb: GetSlotCallback, + blockVerifier: BlockVerifier, + ident: string = "main"): SyncQueue[T] = + doAssert(chunkSize > 0'u64, "Chunk size should not be zero") + doAssert(requestsCount > 0, "Number of requests should not be zero") -iterator blocks[T](sq: SyncQueue[T], - sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) = - case sq.kind - of SyncQueueKind.Forward: - for i in countup(0, len(sr.data) - 1): - yield (sr.data[i], sr.blobs.getOpt(i)) - of SyncQueueKind.Backward: - for i in countdown(len(sr.data) - 1, 0): - yield (sr.data[i], sr.blobs.getOpt(i)) + SyncQueue[T]( + kind: queueKind, + startSlot: start, + finalSlot: final, + chunkSize: chunkSize, + requestsCount: requestsCount, + failureResetThreshold: failureResetThreshold, + getSafeSlot: getSafeSlotCb, + inpSlot: start, + outSlot: start, + blockVerifier: blockVerifier, + requests: initDeque[SyncQueueItem[T]](), + lock: newAsyncLock(), + ident: ident + ) -proc advanceOutput*[T](sq: SyncQueue[T], number: uint64) = - case sq.kind - of SyncQueueKind.Forward: - sq.outSlot = sq.outSlot + number - of SyncQueueKind.Backward: - sq.outSlot = sq.outSlot - number +func contains[T](requests: openArray[SyncRequest[T]], source: T): bool = + for req in requests: + if 
req.item == source: + return true + false -proc advanceInput[T](sq: SyncQueue[T], number: uint64) = - case sq.kind - of SyncQueueKind.Forward: - sq.inpSlot = sq.inpSlot + number - of SyncQueueKind.Backward: - sq.inpSlot = sq.inpSlot - number +func find[T](sq: SyncQueue[T], req: SyncRequest[T]): Opt[SyncPosition] = + if len(sq.requests) == 0: + return Opt.none(SyncPosition) -proc notInRange[T](sq: SyncQueue[T], sr: SyncRequest[T]): bool = case sq.kind of SyncQueueKind.Forward: - (sq.queueSize > 0) and (sr.slot > sq.outSlot) + if (req.data < sq.requests[0].data) or (req.data > sq.requests[^1].data): + return Opt.none(SyncPosition) of SyncQueueKind.Backward: - (sq.queueSize > 0) and (sr.lastSlot < sq.outSlot) + if (req.data > sq.requests[0].data) or (req.data < sq.requests[^1].data) : + return Opt.none(SyncPosition) -func numAlreadyKnownSlots[T](sq: SyncQueue[T], sr: SyncRequest[T]): uint64 = - ## Compute the number of slots covered by a given `SyncRequest` that are - ## already known and, hence, no longer relevant for sync progression. - let - outSlot = sq.outSlot - lowSlot = sr.slot - highSlot = sr.lastSlot - case sq.kind - of SyncQueueKind.Forward: - if outSlot > highSlot: - # Entire request is no longer relevant. - sr.count - elif outSlot > lowSlot: - # Request is only partially relevant. - outSlot - lowSlot - else: - # Entire request is still relevant. - 0 - of SyncQueueKind.Backward: - if lowSlot > outSlot: - # Entire request is no longer relevant. - sr.count - elif highSlot > outSlot: - # Request is only partially relevant. - highSlot - outSlot - else: - # Entire request is still relevant. - 0 - -proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T], - data: seq[ref ForkedSignedBeaconBlock], - blobs: Opt[seq[BlobSidecars]], - maybeFinalized: bool = false, - processingCb: ProcessingCallback = nil) {.async: (raises: [CancelledError]).} = - logScope: - sync_ident = sq.ident - topics = "syncman" + for qindex, qitem in sq.requests.pairs(): + for sindex, request in qitem.requests.pairs(): + if request == req: + return Opt.some(SyncPosition(qindex: qindex, sindex: sindex)) - ## Push successful result to queue ``sq``. - mixin updateScore, updateStats, getStats + Opt.none(SyncPosition) - if sr.index notin sq.pending: - # If request `sr` not in our pending list, it only means that - # SyncQueue.resetWait() happens and all pending requests are expired, so - # we swallow `old` requests, and in such way sync-workers are able to get - # proper new requests from SyncQueue. +proc del[T](sq: SyncQueue[T], position: SyncPosition) = + doAssert(len(sq.requests) > position.qindex) + doAssert(len(sq.requests[position.qindex].requests) > position.sindex) + del(sq.requests[position.qindex].requests, position.sindex) + +proc del[T](sq: SyncQueue[T], request: SyncRequest[T]) = + let pos = sq.find(request).valueOr: return + sq.del(pos) - sq.pending.del(sr.index) +proc rewardForGaps[T](sq: SyncQueue[T], score: int) = + mixin updateScore, getStats + + for gap in sq.gapList: + if score < 0: + # Every empty response increases penalty by 25%, but not more than 200%. 
+ let + emptyCount = gap.item.getStats(SyncResponseKind.Empty) + goodCount = gap.item.getStats(SyncResponseKind.Good) + + if emptyCount <= goodCount: + gap.item.updateScore(score) + else: + let + weight = int(min(emptyCount - goodCount, 8'u64)) + newScore = score + score * weight div 4 + gap.item.updateScore(newScore) + debug "Peer received gap penalty", + peer = gap.item, + penalty = newScore, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" - # This is backpressure handling algorithm, this algorithm is blocking - # all pending `push` requests if `request.slot` not in range. - while true: - if sq.notInRange(sr): - let reset = await sq.waitForChanges() - if reset: - # SyncQueue reset happens. We are exiting to wake up sync-worker. - return else: - let syncres = SyncResult[T](request: sr, data: data, blobs: blobs) - sq.readyQueue.push(syncres) - break + gap.item.updateScore(score) + +proc pop*[T](sq: SyncQueue[T], peerMaxSlot: Slot, item: T): SyncRequest[T] = + # Searching requests queue for an empty space. + var count = 0 + for qitem in sq.requests.mitems(): + if len(qitem.requests) < sq.requestsCount: + if item notin qitem.requests: + return + if qitem.data.slot > peerMaxSlot: + # Peer could not satisfy our request, returning empty one. + SyncRequest.init(sq.kind, item) + else: + doAssert(count < sq.requestsCount, + "You should not pop so many requests for single peer") + let request = SyncRequest.init(sq.kind, qitem.data, item) + qitem.requests.add(request) + request + else: + inc(count) + + doAssert(count < sq.requestsCount, + "You should not pop so many requests for single peer") + + # No empty spaces has been found in queue, so we adding new request. + let newrange = + if len(sq.requests) > 0: + # All requests are filled, adding one more request. + let lastrange = sq.requests[^1].data + if sq.finalSlot in lastrange: + # Requests queue is already at finish position, we are not going to add + # one more request range. + return SyncRequest.init(sq.kind, item) - while len(sq.readyQueue) > 0: - let reqres = case sq.kind of SyncQueueKind.Forward: - let minSlot = sq.readyQueue[0].request.slot - if sq.outSlot < minSlot: - none[SyncResult[T]]() - else: - some(sq.readyQueue.pop()) + lastrange.next() of SyncQueueKind.Backward: - let maxslot = sq.readyQueue[0].request.slot + - (sq.readyQueue[0].request.count - 1'u64) - if sq.outSlot > maxslot: - none[SyncResult[T]]() - else: - some(sq.readyQueue.pop()) - - let item = - if reqres.isSome(): - reqres.get() - else: - let rewindSlot = sq.getRewindPoint(sq.outSlot, sq.getSafeSlot()) - warn "Got incorrect sync result in queue, rewind happens", - blocks_map = getShortMap(sq.readyQueue[0].request, - sq.readyQueue[0].data), - blocks_count = len(sq.readyQueue[0].data), - output_slot = sq.outSlot, input_slot = sq.inpSlot, - rewind_to_slot = rewindSlot, request = sq.readyQueue[0].request - await sq.resetWait(some(rewindSlot)) - break - - if processingCb != nil: - processingCb() + lastrange.prev() + else: + case sq.kind + of SyncQueueKind.Forward: + SyncRange.init(sq.inpSlot, sq.chunkSize) + of SyncQueueKind.Backward: + SyncRange.init(sq.inpSlot - (sq.chunkSize - 1), sq.chunkSize) - # Validating received blocks one by one - var - hasInvalidBlock = false - unviableBlock: Option[(Eth2Digest, Slot)] - missingParentSlot: Option[Slot] - goodBlock: Option[Slot] + if newrange.slot > peerMaxSlot: + # Peer could not satisfy our request, returning empty one. 
+ SyncRequest.init(sq.kind, item) + else: + let request = SyncRequest.init(sq.kind, newrange, item) + sq.requests.addLast(SyncQueueItem.init(request)) + request - # TODO when https://github.com/nim-lang/Nim/issues/21306 is fixed in used - # Nim versions, remove workaround and move `res` into for loop - res: Result[void, VerifierError] +proc wakeupWaiters[T](sq: SyncQueue[T], resetFlag = false) = + ## Wakeup one or all blocked waiters. + for item in sq.waiters: + item.resetFlag = resetFlag + if not(item.future.finished()): + item.future.complete() - var i=0 - for blk, blb in sq.blocks(item): - res = await sq.blockVerifier(blk[], blb, maybeFinalized) - inc(i) +proc waitForChanges[T]( + sq: SyncQueue[T] +): Future[bool] {.async: (raises: [CancelledError]).} = + ## Create new waiter and wait for completion from `wakeupWaiters()`. + let + future = + Future[void].Raising([CancelledError]).init("SyncQueue.waitForChanges") + item = SyncWaiterItem[T](future: future, resetFlag: false) - if res.isOk(): - goodBlock = some(blk[].slot) - else: - case res.error() - of VerifierError.MissingParent: - missingParentSlot = some(blk[].slot) - break - of VerifierError.Duplicate: - # Keep going, happens naturally - discard - of VerifierError.UnviableFork: - # Keep going so as to register other unviable blocks with the - # quarantine - if unviableBlock.isNone: - # Remember the first unviable block, so we can log it - unviableBlock = some((blk[].root, blk[].slot)) - - of VerifierError.Invalid: - hasInvalidBlock = true - - let req = item.request - notice "Received invalid sequence of blocks", request = req, - blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data) - req.item.updateScore(PeerScoreBadValues) - break + sq.waiters.add(item) - # When errors happen while processing blocks, we retry the same request - # with, hopefully, a different peer - let retryRequest = - hasInvalidBlock or unviableBlock.isSome() or missingParentSlot.isSome() - if not(retryRequest): - let numSlotsAdvanced = item.request.count - sq.numAlreadyKnownSlots(sr) - sq.advanceOutput(numSlotsAdvanced) - - if goodBlock.isSome(): - # If there no error and response was not empty we should reward peer - # with some bonus score - not for duplicate blocks though. - item.request.item.updateScore(PeerScoreGoodValues) - item.request.item.updateStats(SyncResponseKind.Good, 1'u64) - - # BlockProcessor reports good block, so we can reward all the peers - # who sent us empty responses. - sq.rewardForGaps(PeerScoreGoodValues) - sq.gapList.reset() - else: - # Response was empty - item.request.item.updateStats(SyncResponseKind.Empty, 1'u64) + try: + await future + item.resetFlag + finally: + sq.waiters.delete(sq.waiters.find(item)) - sq.processGap(item) +proc wakeupAndWaitWaiters[T]( + sq: SyncQueue[T] +) {.async: (raises: [CancelledError]).} = + ## This procedure will perform wakeupWaiters(true) and blocks until last + ## waiter will be awakened. 
+ let waitChanges = sq.waitForChanges() + sq.wakeupWaiters(true) + discard await waitChanges - if numSlotsAdvanced > 0: - sq.wakeupWaiters() +template advanceImpl(kind, slot: untyped, number: uint64) = + case kind + of SyncQueueKind.Forward: + if slot + number < slot: + slot = FAR_FUTURE_SLOT else: - debug "Block pool rejected peer's response", request = item.request, - blocks_map = getShortMap(item.request, item.data), - blocks_count = len(item.data), - ok = goodBlock.isSome(), - unviable = unviableBlock.isSome(), - missing_parent = missingParentSlot.isSome() - # We need to move failed response to the debts queue. - sq.toDebtsQueue(item.request) - - if unviableBlock.isSome(): - let req = item.request - notice "Received blocks from an unviable fork", request = req, - blockRoot = unviableBlock.get()[0], - blockSlot = unviableBlock.get()[1], - blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data) - req.item.updateScore(PeerScoreUnviableFork) - - if missingParentSlot.isSome(): - var - resetSlot: Option[Slot] - failSlot = missingParentSlot.get() - - # If we got `VerifierError.MissingParent` it means that peer returns - # chain of blocks with holes or `block_pool` is in incomplete state. We - # going to rewind the SyncQueue some distance back (2ⁿ, where n∈[0,∞], - # but no more than `finalized_epoch`). - let - req = item.request - safeSlot = sq.getSafeSlot() - gapsCount = len(sq.gapList) - - # We should penalize all the peers which responded with gaps. - sq.rewardForGaps(PeerScoreMissingValues) - sq.gapList.reset() - - case sq.kind - of SyncQueueKind.Forward: - if goodBlock.isSome(): - # `VerifierError.MissingParent` and `Success` present in response, - # it means that we just need to request this range one more time. - debug "Unexpected missing parent, but no rewind needed", - request = req, finalized_slot = safeSlot, - last_good_slot = goodBlock.get(), - missing_parent_slot = missingParentSlot.get(), - blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data), - gaps_count = gapsCount - req.item.updateScore(PeerScoreMissingValues) - else: - if safeSlot < req.slot: - let rewindSlot = sq.getRewindPoint(failSlot, safeSlot) - debug "Unexpected missing parent, rewind happens", - request = req, rewind_to_slot = rewindSlot, - rewind_point = sq.rewind, finalized_slot = safeSlot, - blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data), - gaps_count = gapsCount - resetSlot = some(rewindSlot) - else: - error "Unexpected missing parent at finalized epoch slot", - request = req, rewind_to_slot = safeSlot, - blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data), - gaps_count = gapsCount - req.item.updateScore(PeerScoreBadValues) - of SyncQueueKind.Backward: - if safeSlot > failSlot: - let rewindSlot = sq.getRewindPoint(failSlot, safeSlot) - # It's quite common peers give us fewer blocks than we ask for - debug "Gap in block range response, rewinding", request = req, - rewind_to_slot = rewindSlot, rewind_fail_slot = failSlot, - finalized_slot = safeSlot, blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data) - resetSlot = some(rewindSlot) - req.item.updateScore(PeerScoreMissingValues) - else: - error "Unexpected missing parent at safe slot", request = req, - to_slot = safeSlot, blocks_count = len(item.data), - blocks_map = getShortMap(req, item.data) - req.item.updateScore(PeerScoreBadValues) - - if resetSlot.isSome(): - await sq.resetWait(resetSlot) - case sq.kind - of SyncQueueKind.Forward: - debug "Rewind to 
slot has happened", reset_slot = resetSlot.get(), - queue_input_slot = sq.inpSlot, queue_output_slot = sq.outSlot, - rewind_point = sq.rewind, direction = sq.kind - of SyncQueueKind.Backward: - debug "Rewind to slot has happened", reset_slot = resetSlot.get(), - queue_input_slot = sq.inpSlot, queue_output_slot = sq.outSlot, - direction = sq.kind - - break + slot = slot + number + of SyncQueueKind.Backward: + if slot - number > slot: + slot = GENESIS_SLOT + else: + slot = slot - number + +proc advanceOutput[T](sq: SyncQueue[T], number: uint64) = + advanceImpl(sq.kind, sq.outSlot, number) + +proc advanceInput[T](sq: SyncQueue[T], number: uint64) = + advanceImpl(sq.kind, sq.inpSlot, number) + +proc advanceQueue[T](sq: SyncQueue[T]) = + if len(sq.requests) > 0: + let item = sq.requests.popFirst() + sq.advanceInput(item.data.count) + sq.advanceOutput(item.data.count) + else: + sq.advanceInput(sq.chunkSize) + sq.advanceOutput(sq.chunkSize) + sq.wakeupWaiters() + +proc resetQueue[T](sq: SyncQueue[T]) = + sq.requests.reset() + +proc clearAndWakeup*[T](sq: SyncQueue[T]) = + # Reset queue and wakeup all the waiters. + sq.resetQueue() + sq.wakeupWaiters(true) + +proc isEmpty*[T](sr: SyncRequest[T]): bool = + # Returns `true` if request `sr` is empty. + sr.data.count == 0'u64 + +proc resetWait[T]( + sq: SyncQueue[T], + toSlot: Slot +) {.async: (raises: [CancelledError], raw: true).} = + sq.inpSlot = toSlot + sq.outSlot = toSlot + # We are going to wakeup all the waiters and wait for last one. + sq.resetQueue() + sq.wakeupAndWaitWaiters() + +func getOpt(blobs: Opt[seq[BlobSidecars]], i: int): Opt[BlobSidecars] = + if blobs.isSome: + Opt.some(blobs.get()[i]) + else: + Opt.none(BlobSidecars) + +iterator blocks( + kind: SyncQueueKind, + blcks: seq[ref ForkedSignedBeaconBlock], + blobs: Opt[seq[BlobSidecars]] +): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) = + case kind + of SyncQueueKind.Forward: + for i in countup(0, len(blcks) - 1): + yield (blcks[i], blobs.getOpt(i)) + of SyncQueueKind.Backward: + for i in countdown(len(blcks) - 1, 0): + yield (blcks[i], blobs.getOpt(i)) proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T]) = ## Push failed request back to queue. - if sr.index notin sq.pending: - # If request `sr` not in our pending list, it only means that - # SyncQueue.resetWait() happens and all pending requests are expired, so - # we swallow `old` requests, and in such way sync-workers are able to get - # proper new requests from SyncQueue. + let pos = sq.find(sr).valueOr: + debug "Request is not relevant anymore", request = sr return - sq.pending.del(sr.index) - sq.toDebtsQueue(sr) - -proc handlePotentialSafeSlotAdvancement[T](sq: SyncQueue[T]) = - # It may happen that sync progress advanced to a newer `safeSlot`, either - # by a response that started with good values and only had errors late, or - # through an out-of-band mechanism, e.g., VC / REST. - # If that happens, advance to the new `safeSlot` to avoid repeating requests - # for data that is considered immutable and no longer relevant. 
- let safeSlot = sq.getSafeSlot() - func numSlotsBehindSafeSlot(slot: Slot): uint64 = - case sq.kind - of SyncQueueKind.Forward: - if safeSlot > slot: - safeSlot - slot - else: - 0 - of SyncQueueKind.Backward: - if slot > safeSlot: - slot - safeSlot - else: - 0 - - let - numOutSlotsAdvanced = sq.outSlot.numSlotsBehindSafeSlot - numInpSlotsAdvanced = - case sq.kind - of SyncQueueKind.Forward: - sq.inpSlot.numSlotsBehindSafeSlot - of SyncQueueKind.Backward: - if sq.inpSlot == 0xFFFF_FFFF_FFFF_FFFF'u64: - 0'u64 + sq.del(pos) + +proc process[T]( + sq: SyncQueue[T], + sr: SyncRequest[T], + blcks: seq[ref ForkedSignedBeaconBlock], + blobs: Opt[seq[BlobSidecars]], + maybeFinalized: bool +): Future[SyncProcessingResult] {. + async: (raises: [CancelledError]).} = + var + slot: Opt[SyncBlock] + unviableBlock: Opt[SyncBlock] + dupBlock: Opt[SyncBlock] + + if len(blcks) == 0: + return SyncProcessingResult.init(SyncProcessError.Empty) + + for blk, blb in blocks(sq.kind, blcks, blobs): + let res = await sq.blockVerifier(blk[], blb, maybeFinalized) + if res.isOk(): + slot = Opt.some(SyncBlock.init(blk[].slot, blk[].root)) + else: + case res.error() + of VerifierError.MissingParent: + if slot.isSome() or dupBlock.isSome(): + return SyncProcessingResult.init( + SyncProcessError.GoodAndMissingParent, blk[].slot, blk[].root) else: - sq.inpSlot.numSlotsBehindSafeSlot - if numOutSlotsAdvanced != 0 or numInpSlotsAdvanced != 0: - debug "Sync progress advanced out-of-band", - safeSlot, outSlot = sq.outSlot, inpSlot = sq.inpSlot - if numOutSlotsAdvanced != 0: - sq.advanceOutput(numOutSlotsAdvanced) - if numInpSlotsAdvanced != 0: - sq.advanceInput(numInpSlotsAdvanced) - sq.wakeupWaiters() - -func updateRequestForNewSafeSlot[T](sq: SyncQueue[T], sr: var SyncRequest[T]) = - # Requests may have originated before the latest `safeSlot` advancement. - # Update it to not request any data prior to `safeSlot`. - let - outSlot = sq.outSlot - lowSlot = sr.slot - highSlot = sr.lastSlot - case sq.kind - of SyncQueueKind.Forward: - if outSlot <= lowSlot: - # Entire request is still relevant. + return SyncProcessingResult.init(res.error(), blk[].slot, blk[].root) + of VerifierError.Duplicate: + # Keep going, happens naturally + if dupBlock.isNone(): + dupBlock = Opt.some(SyncBlock.init(blk[].slot, blk[].root)) + of VerifierError.UnviableFork: + # Keep going so as to register other unviable blocks with the + # quarantine + if unviableBlock.isNone(): + # Remember the first unviable block, so we can log it + unviableBlock = Opt.some(SyncBlock.init(blk[].slot, blk[].root)) + of VerifierError.Invalid: + return SyncProcessingResult.init(res.error(), blk[].slot, blk[].root) + + if unviableBlock.isSome(): + return SyncProcessingResult.init(VerifierError.UnviableFork, + unviableBlock.get()) + if dupBlock.isSome(): + return SyncProcessingResult.init(VerifierError.Duplicate, + dupBlock.get()) + + SyncProcessingResult.init(SyncProcessError.NoError, slot.get()) + +func isError(e: SyncProcessError): bool = + case e + of SyncProcessError.Empty, SyncProcessError.NoError, + SyncProcessError.Duplicate, SyncProcessError.GoodAndMissingParent: + false + of SyncProcessError.Invalid, SyncProcessError.UnviableFork, + SyncProcessError.MissingParent: + true + +proc push*[T]( + sq: SyncQueue[T], + sr: SyncRequest[T], + data: seq[ref ForkedSignedBeaconBlock], + blobs: Opt[seq[BlobSidecars]], + maybeFinalized: bool = false, + processingCb: ProcessingCallback = nil +) {.async: (raises: [CancelledError]).} = + ## Push successful result to queue ``sq``. 
+ mixin updateScore, updateStats, getStats + + template findPosition(sq, sr: untyped): SyncPosition = + sq.find(sr).valueOr: + debug "Request is not relevant anymore", + request = sr, sync_ident = sq.ident, topics = "syncman" + # Request is not in queue anymore, probably reset happened. + return + + # This is backpressure handling algorithm, this algorithm is blocking + # all pending `push` requests if `request` is not in range. + var + position = + block: + var pos: SyncPosition + while true: + pos = sq.findPosition(sr) + + if pos.qindex == 0: + # Exiting loop when request is first in queue. + break + + try: + let res = await sq.waitForChanges() + if res: + # SyncQueue reset happen + debug "Request is not relevant anymore, reset has happened", + request = sr, + sync_ident = sq.ident, + topics = "syncman" + return + except CancelledError as exc: + # Removing request from queue. + sq.del(sr) + raise exc + pos + + await sq.lock.acquire() + try: + position = sq.findPosition(sr) + + if not(isNil(processingCb)): + processingCb() + + let pres = await sq.process(sr, data, blobs, maybeFinalized) + + # We need to update position, because while we waiting for `process()` to + # complete - clearAndWakeup() could be invoked which could clean whole the + # queue (invalidating all the positions). + position = sq.findPosition(sr) + + case pres.code + of SyncProcessError.Empty: + # Empty responses does not affect failures count + debug "Received empty response", + request = sr, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + blobs_map = getShortMap(sr, blobs), + sync_ident = sq.ident, + topics = "syncman" + + sr.item.updateStats(SyncResponseKind.Empty, 1'u64) + sq.gapList.add(GapItem.init(sr)) + sq.advanceQueue() + + of SyncProcessError.Duplicate: + # Duplicate responses does not affect failures count + debug "Received duplicate response", + request = sr, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + blobs_map = getShortMap(sr, blobs), + sync_ident = sq.ident, + topics = "syncman" + sq.gapList.reset() + sq.advanceQueue() + + of SyncProcessError.Invalid: + debug "Block pool rejected peer's response", + request = sr, + invalid_block = pres.blck, + failures_count = sq.requests[position.qindex].failuresCount, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + blobs_map = getShortMap(sr, blobs), + sync_ident = sq.ident, + topics = "syncman" + + inc(sq.requests[position.qindex].failuresCount) + sq.del(position) + + of SyncProcessError.UnviableFork: + notice "Received blocks from an unviable fork", + request = sr, + unviable_block = pres.blck, + failures_count = sq.requests[position.qindex].failuresCount, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + blobs_map = getShortMap(sr, blobs), + sync_ident = sq.ident, + topics = "syncman" + + sr.item.updateScore(PeerScoreUnviableFork) + inc(sq.requests[position.qindex].failuresCount) + sq.del(position) + + of SyncProcessError.MissingParent: + debug "Unexpected missing parent", + request = sr, + missing_parent_block = pres.blck, + failures_count = sq.requests[position.qindex].failuresCount, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + blobs_map = getShortMap(sr, blobs), + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" + + sr.item.updateScore(PeerScoreMissingValues) + sq.rewardForGaps(PeerScoreMissingValues) + sq.gapList.reset() + inc(sq.requests[position.qindex].failuresCount) + sq.del(position) + + of SyncProcessError.GoodAndMissingParent: + # 
Responses which has at least one good block and a gap does not affect + # failures count + debug "Unexpected missing parent, but no rewind needed", + request = sr, + finalized_slot = sq.getSafeSlot(), + missing_parent_block = pres.blck, + failures_count = sq.requests[position.qindex].failuresCount, + blocks_count = len(data), + blocks_map = getShortMap(sr, data), + blobs_map = getShortMap(sr, blobs), + sync_ident = sq.ident, + topics = "syncman" + + sr.item.updateScore(PeerScoreMissingValues) + sq.del(position) + + of SyncProcessError.NoError: + sr.item.updateScore(PeerScoreGoodValues) + sr.item.updateStats(SyncResponseKind.Good, 1'u64) + sq.rewardForGaps(PeerScoreGoodValues) + sq.gapList.reset() + + if sr.hasEndGap(data): + sq.gapList.add(GapItem.init(sr)) + + sq.advanceQueue() + + if pres.code.isError(): + if sq.requests[position.qindex].failuresCount >= sq.failureResetThreshold: + let point = sq.getRewindPoint(pres.blck.get().slot, sq.getSafeSlot()) + debug "Multiple repeating errors occured, rewinding", + failures_count = sq.requests[position.qindex].failuresCount, + rewind_slot = point, + sync_ident = sq.ident, + direction = sq.kind, + topics = "syncman" + await sq.resetWait(point) + + except CancelledError as exc: + sq.del(sr) + raise exc + finally: + try: + sq.lock.release() + except AsyncLockError: + raiseAssert "Lock is not acquired" + +proc checkResponse*[T](req: SyncRequest[T], + data: openArray[Slot]): Result[void, cstring] = + if len(data) == 0: + # Impossible to verify empty response. + return ok() + + if lenu64(data) > req.data.count: + # Number of blocks in response should be less or equal to number of + # requested blocks. + return err("Too many blocks received") + + var + slot = req.data.slot + rindex = 0'u64 + dindex = 0 + + while (rindex < req.data.count) and (dindex < len(data)): + if slot < data[dindex]: discard - elif outSlot <= highSlot: - # Request is only partially relevant. - let - numSlotsDone = outSlot - lowSlot - sr.slot += numSlotsDone - sr.count -= numSlotsDone + elif slot == data[dindex]: + inc(dindex) else: - # Entire request is no longer relevant. - sr.count = 0 - of SyncQueueKind.Backward: - if outSlot >= highSlot: - # Entire request is still relevant. - discard - elif outSlot >= lowSlot: - # Request is only partially relevant. - let - numSlotsDone = highSlot - outSlot - sr.count -= numSlotsDone + return err("Incorrect order or duplicate blocks found") + slot += 1'u64 + rindex += 1'u64 + + if dindex != len(data): + return err("Some of the blocks are outside the requested range") + + ok() + +proc checkBlobsResponse*[T]( + req: SyncRequest[T], + data: openArray[Slot], + maxBlobsPerBlockElectra: uint64): Result[void, cstring] = + if len(data) == 0: + # Impossible to verify empty response. + return ok() + + if lenu64(data) > (req.data.count * maxBlobsPerBlockElectra): + # Number of blobs in response should be less or equal to number of + # requested (blocks * MAX_BLOBS_PER_BLOCK_ELECTRA). + # NOTE: This is not strict check, proper check will be done in blobs + # validation. + return err("Too many blobs received") + + var + pslot = data[0] + counter = 0'u64 + for slot in data: + if slot notin req.data: + return err("Some of the blobs are not in requested range") + if slot < pslot: + return err("Incorrect order") + if slot == pslot: + inc(counter) + if counter > maxBlobsPerBlockElectra: + # NOTE: This is not strict check, proper check will be done in blobs + # validation. 
+ return err("Number of blobs in the block exceeds the limit") else: - # Entire request is no longer relevant. - sr.count = 0 - -proc pop*[T](sq: SyncQueue[T], maxslot: Slot, item: T): SyncRequest[T] = - ## Create new request according to current SyncQueue parameters. - sq.handlePotentialSafeSlotAdvancement() - while len(sq.debtsQueue) > 0: - if maxslot < sq.debtsQueue[0].slot: - # Peer's latest slot is less than starting request's slot. - return SyncRequest.empty(sq.kind, T) - if maxslot < sq.debtsQueue[0].lastSlot(): - # Peer's latest slot is less than finishing request's slot. - return SyncRequest.empty(sq.kind, T) - var sr = sq.debtsQueue.pop() - sq.debtsCount = sq.debtsCount - sr.count - sq.updateRequestForNewSafeSlot(sr) - if sr.isEmpty: - continue - sr.setItem(item) - sq.makePending(sr) - return sr + counter = 1'u64 + pslot = slot - case sq.kind - of SyncQueueKind.Forward: - if maxslot < sq.inpSlot: - # Peer's latest slot is less than queue's input slot. - return SyncRequest.empty(sq.kind, T) - if sq.inpSlot > sq.finalSlot: - # Queue's input slot is bigger than queue's final slot. - return SyncRequest.empty(sq.kind, T) - let lastSlot = min(maxslot, sq.finalSlot) - let count = min(sq.chunkSize, lastSlot + 1'u64 - sq.inpSlot) - var sr = SyncRequest.init(sq.kind, sq.inpSlot, count, item) - sq.advanceInput(count) - sq.makePending(sr) - sr - of SyncQueueKind.Backward: - if sq.inpSlot == 0xFFFF_FFFF_FFFF_FFFF'u64: - return SyncRequest.empty(sq.kind, T) - if sq.inpSlot < sq.finalSlot: - return SyncRequest.empty(sq.kind, T) - let (slot, count) = - block: - let baseSlot = sq.inpSlot + 1'u64 - if baseSlot - sq.finalSlot < sq.chunkSize: - let count = uint64(baseSlot - sq.finalSlot) - (baseSlot - count, count) - else: - (baseSlot - sq.chunkSize, sq.chunkSize) - if (maxslot + 1'u64) < slot + count: - # Peer's latest slot is less than queue's input slot. - return SyncRequest.empty(sq.kind, T) - var sr = SyncRequest.init(sq.kind, slot, count, item) - sq.advanceInput(count) - sq.makePending(sr) - sr - -proc debtLen*[T](sq: SyncQueue[T]): uint64 = - sq.debtsCount - -proc pendingLen*[T](sq: SyncQueue[T]): uint64 = - case sq.kind - of SyncQueueKind.Forward: - # When moving forward `outSlot` will be <= of `inpSlot`. - sq.inpSlot - sq.outSlot - of SyncQueueKind.Backward: - # When moving backward `outSlot` will be >= of `inpSlot` - sq.outSlot - sq.inpSlot + ok() proc len*[T](sq: SyncQueue[T]): uint64 {.inline.} = ## Returns number of slots left in queue ``sq``. @@ -1023,4 +1033,4 @@ proc total*[T](sq: SyncQueue[T]): uint64 {.inline.} = proc progress*[T](sq: SyncQueue[T]): uint64 = ## How many useful slots we've synced so far, adjusting for how much has ## become obsolete by time movements - sq.total - sq.len + sq.total() - len(sq) diff --git a/beacon_chain/trusted_node_sync.nim b/beacon_chain/trusted_node_sync.nim index 79d272b7af..c670c5b710 100644 --- a/beacon_chain/trusted_node_sync.nim +++ b/beacon_chain/trusted_node_sync.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
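# The sync_queue.nim changes above revolve around `SyncRange` (a fixed-size
# slice of the slot space) and a deque of such ranges served to peers. Below
# is a minimal, self-contained sketch of that idea with plain uint64 slots;
# it mirrors the shape of `SyncRange.init`/`contains`/`next`, not the exact
# project code (no FAR_FUTURE_SLOT clamping and no backward ranges here).
type SimpleRange = object
  slot: uint64
  count: uint64

func init(t: typedesc[SimpleRange], slot, count: uint64): SimpleRange =
  SimpleRange(slot: slot, count: count)

func contains(r: SimpleRange, slot: uint64): bool =
  slot >= r.slot and slot < r.slot + r.count

func next(r: SimpleRange): SimpleRange =
  ## The adjacent range that starts right after `r`.
  SimpleRange.init(r.slot + r.count, r.count)

# A forward queue slices the slot space into consecutive chunk-sized ranges:
var r = SimpleRange.init(0, 64)
doAssert 63'u64 in r and 64'u64 notin r
r = r.next()
doAssert r.slot == 64 and 127'u64 in r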
@@ -15,13 +15,14 @@ import ./spec/eth2_apis/rest_beacon_client, ./spec/[beaconstate, eth2_merkleization, forks, light_client_sync, network, presets, - state_transition, deposit_snapshots], - "."/[beacon_clock, beacon_chain_db, era_db] + state_transition, deposit_snapshots] from presto import RestDecodingError +from "."/beacon_clock import + BeaconClock, fromFloatSeconds, getBeaconTimeFn, init const - largeRequestsTimeout = 120.seconds # Downloading large items such as states. + largeRequestsTimeout = 3.minutes # Downloading large items such as states. smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots. proc fetchDepositSnapshot( @@ -178,7 +179,7 @@ proc doTrustedNodeSync*( let stateId = case syncTarget.kind of TrustedNodeSyncKind.TrustedBlockRoot: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/light-client/light-client.md#light-client-sync-process + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/light-client/light-client.md#light-client-sync-process const lcDataFork = LightClientDataFork.high var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]] func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) = @@ -337,7 +338,7 @@ proc doTrustedNodeSync*( else: tmp awaitWithTimeout(client.getStateV2(id, cfg), largeRequestsTimeout): - error "Attempt to download checkpoint state timed out" + error "Attempt to download checkpoint state timed out; https://nimbus.guide/trusted-node-sync.html#sync-from-checkpoint-files provides an alternative approach" quit 1 except CatchableError as exc: error "Unable to download checkpoint state", diff --git a/beacon_chain/validator_client/attestation_service.nim b/beacon_chain/validator_client/attestation_service.nim index 9e215feebd..d39df49c90 100644 --- a/beacon_chain/validator_client/attestation_service.nim +++ b/beacon_chain/validator_client/attestation_service.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
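# Among the trusted_node_sync.nim changes above is a bump of the timeout for
# large downloads (checkpoint states) to 3 minutes. A standalone sketch of
# that timeout pattern, using chronos' generic `withTimeout` instead of the
# project's own `awaitWithTimeout` helper, with a stand-in download proc:
import chronos

const
  largeRequestsTimeout = 3.minutes   # states
  smallRequestsTimeout = 30.seconds  # blocks, deposit snapshots

proc downloadState(): Future[seq[byte]] {.async.} =
  # Stand-in for a REST call such as `client.getStateV2(...)`.
  await sleepAsync(10.milliseconds)
  return @[0x00'u8, 0x01]

proc main() {.async.} =
  let fut = downloadState()
  if await fut.withTimeout(largeRequestsTimeout):
    echo "downloaded ", fut.read().len, " bytes"
  else:
    echo "state download timed out, consider syncing from checkpoint files"

waitFor main()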
@@ -72,10 +72,12 @@ proc serveAttestation( logScope: attestation = shortLog(atst) try: - when atst is electra.Attestation: + when atst is electra.SingleAttestation: await vc.submitPoolAttestationsV2(@[atst], ApiStrategyKind.First) - else: + elif atst is phase0.Attestation: await vc.submitPoolAttestations(@[atst], ApiStrategyKind.First) + else: + static: doAssert false except ValidatorApiError as exc: warn "Unable to publish attestation", reason = exc.getFailureReason() return false @@ -85,7 +87,7 @@ proc serveAttestation( let res = if afterElectra: - let attestation = registered.toElectraAttestation(signature) + let attestation = registered.toSingleAttestation(signature) submitAttestation(attestation) else: let attestation = registered.toAttestation(signature) @@ -279,6 +281,7 @@ proc produceAndPublishAttestations*( tmp.add(RegisteredAttestation( validator: validator, + validator_index: validator_index, committee_index: duty.data.committee_index, index_in_committee: duty.data.validator_committee_index, committee_len: int duty.data.committee_length, @@ -447,7 +450,7 @@ proc publishAttestationsAndAggregates( raise exc let aggregateTime = - # chronos.Duration substraction could not return negative value, in such + # chronos.Duration subtraction could not return negative value, in such # case it will return `ZeroDuration`. vc.beaconClock.durationToNextSlot() - OneThirdDuration if aggregateTime != ZeroDuration: @@ -512,6 +515,7 @@ proc produceAndPublishAttestationsV2*( tmp.add(RegisteredAttestation( validator: validator, + validator_index: validator_index, committee_index: duty.data.committee_index, index_in_committee: duty.data.validator_committee_index, committee_len: int(duty.data.committee_length), @@ -691,7 +695,7 @@ proc publishAttestationsAndAggregatesV2( raise exc let aggregateTime = - # chronos.Duration substraction could not return negative value, in such + # chronos.Duration subtraction could not return negative value, in such # case it will return `ZeroDuration`. vc.beaconClock.durationToNextSlot() - OneThirdDuration if aggregateTime != ZeroDuration: diff --git a/beacon_chain/validator_client/block_service.nim b/beacon_chain/validator_client/block_service.nim index 4f2fbe2762..5cbb8cc082 100644 --- a/beacon_chain/validator_client/block_service.nim +++ b/beacon_chain/validator_client/block_service.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -107,11 +107,7 @@ proc publishBlockV3( ) {.async: (raises: [CancelledError]).} = let genesisRoot = vc.beaconGenesis.genesis_validators_root - graffiti = - if vc.config.graffiti.isSome(): - vc.config.graffiti.get() - else: - defaultGraffitiBytes() + graffiti = vc.getGraffitiBytes(validator) vindex = validator.index.get() logScope: @@ -391,7 +387,7 @@ proc addOrReplaceProposers*(vc: ValidatorClientRef, epoch: Epoch, for task in epochDuties.duties: if task notin duties: - # Task is no more relevant, so cancel it. + # Task is not relevant anymore, so cancel it. 
debug "Cancelling running proposal duty tasks", slot = task.duty.slot, pubkey = shortLog(task.duty.pubkey) diff --git a/beacon_chain/validator_client/common.nim b/beacon_chain/validator_client/common.nim index 06b340cd5f..d3c5f17af7 100644 --- a/beacon_chain/validator_client/common.nim +++ b/beacon_chain/validator_client/common.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -236,7 +236,6 @@ type beaconGenesis*: RestGenesis proposerTasks*: Table[Slot, seq[ProposerTask]] dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore - validatorsRegCache*: Table[ValidatorPubKey, SignedValidatorRegistrationV1] blocksSeen*: Table[Slot, BlockDataItem] rootsSeen*: Table[Eth2Digest, Slot] processingDelay*: Opt[Duration] @@ -1059,18 +1058,17 @@ proc isExpired(vc: ValidatorClientRef, EPOCHS_BETWEEN_VALIDATOR_REGISTRATION proc getValidatorRegistration( - vc: ValidatorClientRef, - validator: AttachedValidator, - timestamp: Time, - fork: Fork - ): Result[PendingValidatorRegistration, RegistrationKind] = + vc: ValidatorClientRef, + validator: AttachedValidator, + timestamp: Time, + fork: Fork +): Result[PendingValidatorRegistration, RegistrationKind] = if validator.index.isNone(): debug "Validator registration missing validator index", validator = validatorLog(validator) return err(RegistrationKind.MissingIndex) let - cached = vc.validatorsRegCache.getOrDefault(validator.pubkey) currentSlot = block: let res = vc.beaconClock.toSlot(timestamp) @@ -1078,49 +1076,46 @@ proc getValidatorRegistration( return err(RegistrationKind.IncorrectTime) res.slot - if cached.isDefault() or vc.isExpired(cached, currentSlot): - if not cached.isDefault(): - # Want to send it to relay, but not recompute perfectly fine cache - return ok(PendingValidatorRegistration(registration: cached, future: nil)) + if validator.externalBuilderRegistration.isSome(): + let cached = validator.externalBuilderRegistration.get() + return + if not(vc.isExpired(cached, currentSlot)): + err(RegistrationKind.Cached) + else: + ok(PendingValidatorRegistration(registration: cached, future: nil)) - let - feeRecipient = vc.getFeeRecipient(validator, currentSlot.epoch()) - gasLimit = vc.getGasLimit(validator) - var registration = - SignedValidatorRegistrationV1( - message: ValidatorRegistrationV1( - fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)), - gas_limit: gasLimit, - timestamp: uint64(timestamp.toUnix()), - pubkey: validator.pubkey - ) + let + feeRecipient = vc.getFeeRecipient(validator, currentSlot.epoch()) + gasLimit = vc.getGasLimit(validator) + + var registration = + SignedValidatorRegistrationV1( + message: ValidatorRegistrationV1( + fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)), + gas_limit: gasLimit, + timestamp: uint64(timestamp.toUnix()), + pubkey: validator.pubkey ) - - let sigfut = validator.getBuilderSignature(fork, registration.message) - if sigfut.finished(): - # This is short-path if we able to create signature locally. 
- if not(sigfut.completed()): - let exc = sigfut.error() - debug "Got unexpected exception while signing validator registration", - validator = validatorLog(validator), error = exc.name, - reason = exc.msg - return err(RegistrationKind.ErrorSignature) - let sigres = sigfut.value() - if sigres.isErr(): - debug "Failed to get signature for validator registration", - validator = validatorLog(validator), reason = sigres.error() - return err(RegistrationKind.NoSignature) - registration.signature = sigres.get() - # Updating cache table with new signed registration data - vc.validatorsRegCache[registration.message.pubkey] = registration - ok(PendingValidatorRegistration(registration: registration, future: nil)) - else: - # Remote signature service involved, cache will be updated later. - ok(PendingValidatorRegistration(registration: registration, - future: sigfut)) + ) + + let sigfut = validator.getBuilderSignature(fork, registration.message) + if sigfut.finished(): + # This is short-path if we able to create signature locally. + if not(sigfut.completed()): + let exc = sigfut.error() + debug "Got unexpected exception while signing validator registration", + validator = validatorLog(validator), error = exc.name, + reason = exc.msg + return err(RegistrationKind.ErrorSignature) + + registration.signature = sigfut.value().valueOr: + debug "Failed to get signature for validator registration", + validator = validatorLog(validator), reason = error + return err(RegistrationKind.NoSignature) + + ok(PendingValidatorRegistration(registration: registration, future: nil)) else: - # Returning cached result. - err(RegistrationKind.Cached) + ok(PendingValidatorRegistration(registration: registration, future: sigfut)) proc prepareRegistrationList*( vc: ValidatorClientRef, @@ -1131,6 +1126,7 @@ proc prepareRegistrationList*( var messages: seq[SignedValidatorRegistrationV1] + validators: seq[AttachedValidator] futures: seq[Future[SignatureResult]] registrations: seq[SignedValidatorRegistrationV1] total = vc.attachedValidators[].count() @@ -1151,6 +1147,7 @@ proc prepareRegistrationList*( registrations.add(preg.registration) else: messages.add(preg.registration) + validators.add(validator) futures.add(preg.future) else: case res.error() @@ -1174,8 +1171,7 @@ proc prepareRegistrationList*( var reg = messages[index] reg.signature = sres.get() registrations.add(reg) - # Updating cache table - vc.validatorsRegCache[reg.message.pubkey] = reg + validators[index].externalBuilderRegistration = Opt.some(reg) inc(succeed) else: inc(bad) diff --git a/beacon_chain/validators/README.md b/beacon_chain/validators/README.md index 5f88d55072..6513f3190a 100644 --- a/beacon_chain/validators/README.md +++ b/beacon_chain/validators/README.md @@ -1,6 +1,6 @@ # Validators -This folder holds all modules related to a Beacon Chain Validator besides the binaries they interact directly with (nimbus_validator_cliant and nimbus_signing_process): +This folder holds all modules related to a Beacon Chain Validator besides the binaries they interact directly with (nimbus_validator_client and nimbus_signing_process): - Validator keystore - Validator slashing protection - Validator duties diff --git a/beacon_chain/validators/beacon_validators.nim b/beacon_chain/validators/beacon_validators.nim index 770cc4ab7d..963bf75b4d 100644 --- a/beacon_chain/validators/beacon_validators.nim +++ b/beacon_chain/validators/beacon_validators.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 
Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -43,7 +43,7 @@ import keystore_management, slashing_protection, validator_duties, validator_pool], ".."/spec/mev/[rest_deneb_mev_calls, rest_electra_mev_calls, rest_fulu_mev_calls] -from std/sequtils import countIt, foldl, mapIt +from std/sequtils import mapIt from eth/async_utils import awaitWithTimeout # Metrics for tracking attestation and beacon block loss @@ -84,6 +84,7 @@ type BuilderBid[SBBB] = object blindedBlckPart*: SBBB + executionRequests*: ExecutionRequests executionPayloadValue*: UInt256 consensusBlockValue*: UInt256 @@ -247,7 +248,7 @@ proc isSynced*(node: BeaconNode, head: BlockRef): bool = ## determine if we're in sync and should be producing blocks and ## attestations. Generally, the problem is that slot time keeps advancing ## even when there are no blocks being produced, so there's no way to - ## distinguish validators geniunely going missing from the node not being + ## distinguish validators genuinely going missing from the node not being ## well connected (during a network split or an internet outage for ## example). It would generally be correct to simply keep running as if ## we were the only legit node left alive, but then we run into issues: @@ -357,24 +358,14 @@ proc createAndSendAttestation(node: BeaconNode, registered.validator.doppelgangerActivity(epoch) # Logged in the router - let - consensusFork = node.dag.cfg.consensusForkAtEpoch(epoch) - res = - if consensusFork >= ConsensusFork.Electra: - await node.router.routeAttestation( - registered.toElectraAttestation(signature), subnet_id, - checkSignature = false, checkValidator = false) - else: - await node.router.routeAttestation( - registered.toAttestation(signature), subnet_id, - checkSignature = false, checkValidator = false) - if not res.isOk(): - return - - if node.config.dumpEnabled: - dump( - node.config.dumpDirOutgoing, registered.data, - registered.validator.pubkey) + if node.dag.cfg.consensusForkAtEpoch(epoch) >= ConsensusFork.Electra: + discard await node.router.routeAttestation( + registered.toSingleAttestation(signature), subnet_id, + checkSignature = false, checkValidator = false) + else: + discard await node.router.routeAttestation( + registered.toAttestation(signature), subnet_id, + checkSignature = false, checkValidator = false) proc getBlockProposalEth1Data*(node: BeaconNode, state: ForkedHashedBeaconState): @@ -458,7 +449,7 @@ proc makeBeaconBlockForHeadAndSlot*( execution_payload_root: Opt[Eth2Digest], withdrawals_root: Opt[Eth2Digest], kzg_commitments: Opt[KzgCommitments], - execution_requests: ExecutionRequests): # TODO probably need this for builder API, otherwise remove, maybe needs to be Opt + execution_requests: ExecutionRequests): Future[ForkedBlockResult] {.async: (raises: [CancelledError]).} = # Advance state to the slot that we're proposing for var cache = StateCache() @@ -541,19 +532,44 @@ proc makeBeaconBlockForHeadAndSlot*( let execution_requests_actual = when PayloadType.kind >= ConsensusFork.Electra: # Don't want un-decoded SSZ going any further/deeper + var + execution_requests_buffer: ExecutionRequests + prev_type: Opt[byte] try: - ExecutionRequests( - deposits: SSZ.decode( - payload.executionRequests[0], - List[DepositRequest, Limit MAX_DEPOSIT_REQUESTS_PER_PAYLOAD]), - withdrawals: 
SSZ.decode( - payload.executionRequests[1], - List[WithdrawalRequest, Limit MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]), - consolidations: SSZ.decode( - payload.executionRequests[2], - List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD])) + for request_type_and_payload in payload.executionRequests: + if request_type_and_payload.len < 2: + return err("Execution layer request too short") + + let request_type = request_type_and_payload[0] + if prev_type.isSome: + if request_type < prev_type.get: + return err("Execution layer request types not sorted") + if request_type == prev_type.get: + return err("Execution layer request types duplicated") + prev_type.ok request_type + + template request_payload: untyped = + request_type_and_payload.toOpenArray( + 1, request_type_and_payload.len - 1) + case request_type_and_payload[0] + of DEPOSIT_REQUEST_TYPE: + execution_requests_buffer.deposits = + SSZ.decode(request_payload, + List[DepositRequest, Limit MAX_DEPOSIT_REQUESTS_PER_PAYLOAD]) + of WITHDRAWAL_REQUEST_TYPE: + execution_requests_buffer.withdrawals = + SSZ.decode(request_payload, + List[WithdrawalRequest, Limit MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]) + of CONSOLIDATION_REQUEST_TYPE: + execution_requests_buffer.consolidations = + SSZ.decode(request_payload, + List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]) + else: + return err("Execution layer invalid request type") except CatchableError: return err("Unable to deserialize execution layer requests") + + execution_requests_buffer else: default(ExecutionRequests) # won't be used by block builder @@ -584,7 +600,7 @@ proc makeBeaconBlockForHeadAndSlot*( slot, head = shortLog(head), error $error - var blobsBundleOpt = Opt.none(BlobsBundle) + var blobsBundleOpt = Opt.none(deneb.BlobsBundle) when typeof(payload).kind >= ConsensusFork.Deneb: blobsBundleOpt = Opt.some(payload.blobsBundle) @@ -630,8 +646,9 @@ proc getBlindedExecutionPayload[ BUILDER_PROPOSAL_DELAY_TOLERANCE): return err "Timeout obtaining Deneb blinded header from builder" - res = decodeBytes( - GetHeaderResponseDeneb, response.data, response.contentType) + res = decodeBytesJsonOrSsz( + GetHeaderResponseDeneb, response.data, response.contentType, + response.headers.getString("eth-consensus-version")) blindedHeader = res.valueOr: return err( @@ -646,8 +663,9 @@ proc getBlindedExecutionPayload[ BUILDER_PROPOSAL_DELAY_TOLERANCE): return err "Timeout obtaining Electra blinded header from builder" - res = decodeBytes( - GetHeaderResponseElectra, response.data, response.contentType) + res = decodeBytesJsonOrSsz( + GetHeaderResponseElectra, response.data, response.contentType, + response.headers.getString("eth-consensus-version")) blindedHeader = res.valueOr: return err( @@ -664,8 +682,9 @@ proc getBlindedExecutionPayload[ BUILDER_PROPOSAL_DELAY_TOLERANCE): return err "Timeout obtaining Fulu blinded header from builder" - res = decodeBytes( - GetHeaderResponseFulu, response.data, response.contentType) + res = decodeBytesJsonOrSsz( + GetHeaderResponseFulu, response.data, response.contentType, + response.headers.getString("eth-consensus-version")) blindedHeader = res.valueOr: return err( @@ -685,11 +704,23 @@ proc getBlindedExecutionPayload[ return err "getBlindedExecutionPayload: signature verification failed" template builderBid: untyped = blindedHeader.data.message - return ok(BuilderBid[EPH]( - blindedBlckPart: EPH( - execution_payload_header: builderBid.header, - blob_kzg_commitments: builderBid.blob_kzg_commitments), - executionPayloadValue: 
builderBid.value)) + when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle: + return ok(BuilderBid[EPH]( + blindedBlckPart: EPH( + execution_payload_header: builderBid.header, + blob_kzg_commitments: builderBid.blob_kzg_commitments), + executionRequests: default(ExecutionRequests), + executionPayloadValue: builderBid.value)) + elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle or + EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle: + return ok(BuilderBid[EPH]( + blindedBlckPart: EPH( + execution_payload_header: builderBid.header, + blob_kzg_commitments: builderBid.blob_kzg_commitments), + executionRequests: builderBid.execution_requests, + executionPayloadValue: builderBid.value)) + else: + static: doAssert false from ./message_router_mev import copyFields, getFieldNames, unblindAndRouteBlockMEV @@ -762,54 +793,6 @@ func constructSignableBlindedBlock[T: fulu_mev.SignedBlindedBeaconBlock]( blindedBlock -func constructPlainBlindedBlock[T: deneb_mev.BlindedBeaconBlock]( - blck: ForkyBeaconBlock, - blindedBundle: deneb_mev.BlindedExecutionPayloadAndBlobsBundle): T = - # https://github.com/nim-lang/Nim/issues/23020 workaround - static: doAssert T is deneb_mev.BlindedBeaconBlock - - const - blckFields = getFieldNames(typeof(blck)) - blckBodyFields = getFieldNames(typeof(blck.body)) - - var blindedBlock: T - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal - copyFields(blindedBlock, blck, blckFields) - copyFields(blindedBlock.body, blck.body, blckBodyFields) - assign( - blindedBlock.body.execution_payload_header, - blindedBundle.execution_payload_header) - assign( - blindedBlock.body.blob_kzg_commitments, - blindedBundle.blob_kzg_commitments) - - blindedBlock - -func constructPlainBlindedBlock[T: electra_mev.BlindedBeaconBlock]( - blck: ForkyBeaconBlock, - blindedBundle: electra_mev.BlindedExecutionPayloadAndBlobsBundle): T = - # https://github.com/nim-lang/Nim/issues/23020 workaround - static: doAssert T is electra_mev.BlindedBeaconBlock - - const - blckFields = getFieldNames(typeof(blck)) - blckBodyFields = getFieldNames(typeof(blck.body)) - - var blindedBlock: T - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal - copyFields(blindedBlock, blck, blckFields) - copyFields(blindedBlock.body, blck.body, blckBodyFields) - assign( - blindedBlock.body.execution_payload_header, - blindedBundle.execution_payload_header) - assign( - blindedBlock.body.blob_kzg_commitments, - blindedBundle.blob_kzg_commitments) - - blindedBlock - func constructPlainBlindedBlock[T: fulu_mev.BlindedBeaconBlock]( blck: ForkyBeaconBlock, blindedBundle: fulu_mev.BlindedExecutionPayloadAndBlobsBundle): T = @@ -913,7 +896,7 @@ proc getBlindedBlockParts[ slot, validator_index, head = shortLog(head) return err("loadExecutionBlockHash failed") - executionPayloadHeader = + blindedBlockRes = try: awaitWithTimeout( getBlindedExecutionPayload[EPH]( @@ -927,12 +910,12 @@ proc getBlindedBlockParts[ BlindedBlockResult[EPH].err( "getBlindedExecutionPayload REST error: " & exc.msg) - if executionPayloadHeader.isErr: + if blindedBlockRes.isErr: warn "Could not obtain blinded execution payload header", - error = executionPayloadHeader.error, slot, validator_index, + error = blindedBlockRes.error, slot, validator_index, head = shortLog(head) # Haven't committed to the MEV block, so allow EL fallback. 
- return err(executionPayloadHeader.error) + return err(blindedBlockRes.error) # When creating this block, need to ensure it uses the MEV-provided execution # payload, both to avoid repeated calls to network services and to ensure the @@ -946,11 +929,12 @@ proc getBlindedBlockParts[ when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle: type PayloadType = deneb.ExecutionPayloadForSigning template actualEPH: untyped = - executionPayloadHeader.get.blindedBlckPart.execution_payload_header + blindedBlockRes.get.blindedBlckPart.execution_payload_header let withdrawals_root = Opt.some actualEPH.withdrawals_root kzg_commitments = Opt.some( - executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments) + blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments) + execution_requests = default(ExecutionRequests) var shimExecutionPayload: PayloadType type DenebEPH = @@ -958,14 +942,14 @@ proc getBlindedBlockParts[ copyFields( shimExecutionPayload.executionPayload, actualEPH, getFieldNames(DenebEPH)) elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle: - debugComment "verify (again, after change) this is what builder API needs" type PayloadType = electra.ExecutionPayloadForSigning template actualEPH: untyped = - executionPayloadHeader.get.blindedBlckPart.execution_payload_header + blindedBlockRes.get.blindedBlckPart.execution_payload_header let withdrawals_root = Opt.some actualEPH.withdrawals_root kzg_commitments = Opt.some( - executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments) + blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments) + execution_requests = blindedBlockRes.get.executionRequests var shimExecutionPayload: PayloadType type ElectraEPH = @@ -976,11 +960,12 @@ proc getBlindedBlockParts[ debugFuluComment "verify (again, after change) this is what builder API needs" type PayloadType = fulu.ExecutionPayloadForSigning template actualEPH: untyped = - executionPayloadHeader.get.blindedBlckPart.execution_payload_header + blindedBlockRes.get.blindedBlckPart.execution_payload_header let withdrawals_root = Opt.some actualEPH.withdrawals_root kzg_commitments = Opt.some( - executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments) + blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments) + execution_requests = blindedBlockRes.get.executionRequests var shimExecutionPayload: PayloadType type FuluEPH = @@ -990,7 +975,6 @@ proc getBlindedBlockParts[ else: static: doAssert false - debugComment "the electra builder API bids have these requests" let newBlock = await makeBeaconBlockForHeadAndSlot( PayloadType, node, randao, validator_index, graffiti, head, slot, execution_payload = Opt.some shimExecutionPayload, @@ -998,7 +982,7 @@ proc getBlindedBlockParts[ execution_payload_root = Opt.some hash_tree_root(actualEPH), withdrawals_root = withdrawals_root, kzg_commitments = kzg_commitments, - execution_requests = default(ExecutionRequests)) + execution_requests = execution_requests) if newBlock.isErr(): # Haven't committed to the MEV block, so allow EL fallback. 
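The request-parsing loop added to `makeBeaconBlockForHeadAndSlot` earlier in this file treats each engine-API `executionRequests` entry as a one-byte type prefix followed by an SSZ payload, and rejects entries that are too short, out of order, duplicated, or of unknown type. A standalone sketch of just that framing check (hypothetical helper; request-type constants restated per EIP-7685; SSZ decoding omitted):

```nim
import std/options

const
  DEPOSIT_REQUEST_TYPE = 0x00'u8
  WITHDRAWAL_REQUEST_TYPE = 0x01'u8
  CONSOLIDATION_REQUEST_TYPE = 0x02'u8

proc validateRequestFraming(requests: openArray[seq[byte]]): Option[string] =
  ## Returns an error message for a malformed request list, `none` otherwise.
  var prevType = none(byte)
  for blob in requests:
    if blob.len < 2:
      return some("execution layer request too short")
    let requestType = blob[0]
    if requestType notin [DEPOSIT_REQUEST_TYPE, WITHDRAWAL_REQUEST_TYPE,
                          CONSOLIDATION_REQUEST_TYPE]:
      return some("execution layer invalid request type")
    if prevType.isSome:
      if requestType < prevType.get:
        return some("execution layer request types not sorted")
      if requestType == prevType.get:
        return some("execution layer request types duplicated")
    prevType = some(requestType)
  return none(string)

when isMainModule:
  doAssert validateRequestFraming(
    [@[0x00'u8, 0xaa], @[0x01'u8, 0xbb]]).isNone
  doAssert validateRequestFraming(
    [@[0x01'u8, 0xbb], @[0x00'u8, 0xaa]]).isSome  # not sorted
```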
@@ -1007,8 +991,8 @@ proc getBlindedBlockParts[ let forkedBlck = newBlock.get() return ok( - (executionPayloadHeader.get.blindedBlckPart, - executionPayloadHeader.get.executionPayloadValue, + (blindedBlockRes.get.blindedBlckPart, + blindedBlockRes.get.executionPayloadValue, forkedBlck.consensusBlockValue, forkedBlck.blck)) @@ -1050,11 +1034,23 @@ proc getBuilderBid[ if unsignedBlindedBlock.isErr: return err unsignedBlindedBlock.error() - ok(BuilderBid[SBBB]( - blindedBlckPart: unsignedBlindedBlock.get, - executionPayloadValue: bidValue, - consensusBlockValue: consensusValue - )) + template execution_requests: untyped = + unsignedBlindedBlock.get.message.body.execution_requests + when SBBB is deneb_mev.SignedBlindedBeaconBlock: + return ok(BuilderBid[SBBB]( + blindedBlckPart: unsignedBlindedBlock.get, + executionRequests: default(ExecutionRequests), + executionPayloadValue: bidValue, + consensusBlockValue: consensusValue)) + elif SBBB is electra_mev.SignedBlindedBeaconBlock or + SBBB is fulu_mev.SignedBlindedBeaconBlock: + return ok(BuilderBid[SBBB]( + blindedBlckPart: unsignedBlindedBlock.get, + executionRequests: execution_requests, + executionPayloadValue: bidValue, + consensusBlockValue: consensusValue)) + else: + static: doAssert false proc proposeBlockMEV( node: BeaconNode, payloadBuilderClient: RestClientRef, @@ -1093,72 +1089,6 @@ proc proposeBlockMEV( func isEFMainnet(cfg: RuntimeConfig): bool = cfg.DEPOSIT_CHAIN_ID == 1 and cfg.DEPOSIT_NETWORK_ID == 1 -proc makeBlindedBeaconBlockForHeadAndSlot*[BBB: ForkyBlindedBeaconBlock]( - node: BeaconNode, payloadBuilderClient: RestClientRef, - randao_reveal: ValidatorSig, validator_index: ValidatorIndex, - graffiti: GraffitiBytes, head: BlockRef, slot: Slot): - Future[BlindedBlockResult[BBB]] {.async: (raises: [CancelledError]).} = - ## Requests a beacon node to produce a valid blinded block, which can then be - ## signed by a validator. A blinded block is a block with only a transactions - ## root, rather than a full transactions list. - ## - ## This function is used by the validator client, but not the beacon node for - ## its own validators. 
- when BBB is fulu_mev.BlindedBeaconBlock: - type EPH = fulu_mev.BlindedExecutionPayloadAndBlobsBundle - elif BBB is electra_mev.BlindedBeaconBlock: - type EPH = electra_mev.BlindedExecutionPayloadAndBlobsBundle - elif BBB is deneb_mev.BlindedBeaconBlock: - type EPH = deneb_mev.BlindedExecutionPayloadAndBlobsBundle - else: - static: doAssert false - - let - pubkey = - # Relevant state for knowledge of validators - withState(node.dag.headState): - if node.dag.cfg.isEFMainnet and livenessFailsafeInEffect( - forkyState.data.block_roots.data, forkyState.data.slot): - # It's head block's slot which matters here, not proposal slot - return err("Builder API liveness failsafe in effect") - - if distinctBase(validator_index) >= forkyState.data.validators.lenu64: - debug "makeBlindedBeaconBlockForHeadAndSlot: invalid validator index", - head = shortLog(head), - validator_index, - validators_len = forkyState.data.validators.len - return err("Invalid validator index") - - forkyState.data.validators.item(validator_index).pubkey - - blindedBlockParts = await getBlindedBlockParts[EPH]( - node, payloadBuilderClient, head, pubkey, slot, randao_reveal, - validator_index, graffiti) - if blindedBlockParts.isErr: - # Don't try EL fallback -- VC specifically requested a blinded block - return err("Unable to create blinded block") - - let (executionPayloadHeader, bidValue, consensusValue, forkedBlck) = - blindedBlockParts.get - withBlck(forkedBlck): - when consensusFork >= ConsensusFork.Deneb: - when ((consensusFork == ConsensusFork.Deneb and - EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle) or - (consensusFork == ConsensusFork.Electra and - EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle) or - (consensusFork == ConsensusFork.Fulu and - EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle)): - return ok( - BuilderBid[BBB]( - blindedBlckPart: - constructPlainBlindedBlock[BBB](forkyBlck, executionPayloadHeader), - executionPayloadValue: bidValue, - consensusBlockValue: consensusValue)) - else: - return err("makeBlindedBeaconBlockForHeadAndSlot: mismatched block/payload types") - else: - return err("Attempt to create pre-Deneb blinded block") - proc collectBids( SBBB: typedesc, EPS: typedesc, node: BeaconNode, payloadBuilderClient: RestClientRef, validator_pubkey: ValidatorPubKey, @@ -1559,7 +1489,8 @@ proc sendAttestations(node: BeaconNode, head: BlockRef, slot: Slot) = continue tmp.add((RegisteredAttestation( - validator: validator, committee_index: committee_index, + validator: validator, validator_index: validator_index, + committee_index: committee_index, index_in_committee: uint64 index_in_committee, committee_len: committee.len(), data: data), subnet_id )) @@ -1747,8 +1678,8 @@ proc signAndSendAggregate( signAndSendAggregatedAttestations() else: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#construct-aggregate - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregateandproof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#construct-aggregate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#aggregateandproof var msg = phase0.SignedAggregateAndProof( message: phase0.AggregateAndProof( aggregator_index: distinctBase validator_index, @@ -2103,8 +2034,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers - # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#broadcast-aggregate - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/altair/validator.md#broadcast-sync-committee-contribution + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#broadcast-aggregate + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#broadcast-sync-committee-contribution # Wait 2 / 3 of the slot time to allow messages to propagate, then collect # the result in aggregates static: @@ -2243,4 +2174,4 @@ proc makeMaybeBlindedBeaconBlockForHeadAndSlot*( makeMaybeBlindedBeaconBlockForHeadAndSlotImpl[ResultType]( node, consensusFork, randao_reveal, graffiti, head, slot, - builderBoostFactor) + builderBoostFactor) \ No newline at end of file diff --git a/beacon_chain/validators/keystore_management.nim b/beacon_chain/validators/keystore_management.nim index a1ab62ef52..22f3000f1b 100644 --- a/beacon_chain/validators/keystore_management.nim +++ b/beacon_chain/validators/keystore_management.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -1256,9 +1256,6 @@ proc saveLockedKeystore( keystoreDir = validatorsDir / keyName keystoreFile = keystoreDir / KeystoreFileName - if dirExists(keystoreDir): - return err(KeystoreGenerationError(kind: DuplicateKeystoreDir, - error: "Keystore directory already exists")) if fileExists(keystoreFile): return err(KeystoreGenerationError(kind: DuplicateKeystoreFile, error: "Keystore file already exists")) @@ -1335,9 +1332,6 @@ proc saveLockedKeystore( remotes: urls, flags: flags) - if dirExists(keystoreDir): - return err(KeystoreGenerationError(kind: DuplicateKeystoreDir, - error: "Keystore directory already exists")) if fileExists(keystoreFile): return err(KeystoreGenerationError(kind: DuplicateKeystoreFile, error: "Keystore file already exists")) @@ -1491,6 +1485,7 @@ proc removeGasLimitFile*(host: KeymanagerHost, if fileExists(path): io2.removeFile(path).isOkOr: return err($uint(error) & " " & ioErrorMsg(error)) + host.validatorPool[].invalidateValidatorRegistration(pubkey) ok() proc removeGraffitiFile*(host: KeymanagerHost, @@ -1525,9 +1520,14 @@ proc setGasLimit*(host: KeymanagerHost, ? 
secureCreatePath(validatorKeystoreDir).mapErr(proc(e: auto): string = "Could not create wallet directory [" & validatorKeystoreDir & "]: " & $e) - io2.writeFile(validatorKeystoreDir / GasLimitFilename, $gasLimit) + let res = io2.writeFile(validatorKeystoreDir / GasLimitFilename, $gasLimit) .mapErr(proc(e: auto): string = "Failed to write gas limit file: " & $e) + if res.isOk: + host.validatorPool[].invalidateValidatorRegistration(pubkey) + + res + proc setGraffiti*(host: KeymanagerHost, pubkey: ValidatorPubKey, graffiti: GraffitiBytes): Result[void, string] = @@ -1573,10 +1573,18 @@ func getPerValidatorDefaultFeeRecipient*( (static(default(Eth1Address))) proc getSuggestedFeeRecipient*( - host: KeymanagerHost, pubkey: ValidatorPubKey, - defaultFeeRecipient: Eth1Address): - Result[Eth1Address, ValidatorConfigFileStatus] = - host.validatorsDir.getSuggestedFeeRecipient(pubkey, defaultFeeRecipient) + host: KeymanagerHost, + pubkey: ValidatorPubKey, + defaultFeeRecipient: Eth1Address +): Result[Eth1Address, ValidatorConfigFileStatus] = + let res = getSuggestedFeeRecipient( + host.validatorsDir, pubkey, defaultFeeRecipient).valueOr: + if error == ValidatorConfigFileStatus.noSuchValidator: + # Dynamic validators do not have directories. + if host.validatorPool[].isDynamic(pubkey): + return ok(defaultFeeRecipient) + return err(error) + ok(res) proc getSuggestedFeeRecipient( host: KeymanagerHost, pubkey: ValidatorPubKey, @@ -1590,8 +1598,16 @@ proc getSuggestedFeeRecipient( proc getSuggestedGasLimit*( host: KeymanagerHost, - pubkey: ValidatorPubKey): Result[uint64, ValidatorConfigFileStatus] = - host.validatorsDir.getSuggestedGasLimit(pubkey, host.defaultGasLimit) + pubkey: ValidatorPubKey +): Result[uint64, ValidatorConfigFileStatus] = + let res = getSuggestedGasLimit( + host.validatorsDir, pubkey, host.defaultGasLimit).valueOr: + if error == ValidatorConfigFileStatus.noSuchValidator: + # Dynamic validators do not have directories. + if host.validatorPool[].isDynamic(pubkey): + return ok(host.defaultGasLimit) + return err(error) + ok(res) proc getSuggestedGraffiti*( host: KeymanagerHost, diff --git a/beacon_chain/validators/message_router.nim b/beacon_chain/validators/message_router.nim index 5cde690eb8..31c9fca380 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
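The keymanager changes just above pair every gas-limit update or removal with `invalidateValidatorRegistration`, because the signed builder registration caches the gas limit and fee recipient and must be re-signed once either changes (the cache itself now lives on the validator as `externalBuilderRegistration`, per the `common.nim` hunks earlier). A much-simplified sketch of that idea, using hypothetical stand-in types rather than the real `AttachedValidator`/`ValidatorPool`:

```nim
import std/[options, tables]

type
  SignedRegistration = object
    gasLimit: uint64
    feeRecipient: string   # signature omitted in this sketch
  Validator = object
    externalBuilderRegistration: Option[SignedRegistration]
  ValidatorPool = object
    validators: Table[string, Validator]

proc invalidateValidatorRegistration(pool: var ValidatorPool, pubkey: string) =
  # Drop the cached signed registration so the next registration pass
  # rebuilds and re-signs it with the freshly configured values.
  if pubkey in pool.validators:
    pool.validators[pubkey].externalBuilderRegistration =
      none(SignedRegistration)

proc setGasLimit(pool: var ValidatorPool, pubkey: string, gasLimit: uint64) =
  # ... persist the per-validator gas-limit file here ...
  pool.invalidateValidatorRegistration(pubkey)

when isMainModule:
  var pool = ValidatorPool()
  pool.validators["0xabc"] = Validator(
    externalBuilderRegistration: some(SignedRegistration(
      gasLimit: 30_000_000, feeRecipient: "0xfee")))
  pool.setGasLimit("0xabc", 36_000_000)
  doAssert pool.validators["0xabc"].externalBuilderRegistration.isNone
```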
@@ -157,7 +157,9 @@ proc routeSignedBeaconBlock*( let blobs = blobsOpt.get() var workers = newSeq[Future[SendResult]](blobs.len) for i in 0..- - --network=prater - --data-dir=/home/user/nimbus-eth2/build/data/shared_prater_0 - --web3-url=wss://goerli.infura.io/ws/v3/YOUR_TOKEN + --network=hoodi + --data-dir=/home/user/nimbus-eth2/build/data/shared_hoodi_0 + --web3-url=wss://hoodi.infura.io/ws/v3/YOUR_TOKEN --nat=extip:YOUR_EXTERNAL_IP --log-level=info --tcp-port=9000 @@ -34,4 +34,3 @@ services: --metrics --metrics-address=0.0.0.0 --metrics-port=8008 - diff --git a/docker/dist/binaries/docker-compose-example2.yml b/docker/dist/binaries/docker-compose-example2.yml index b9f29b00b8..3c3b20a1be 100644 --- a/docker/dist/binaries/docker-compose-example2.yml +++ b/docker/dist/binaries/docker-compose-example2.yml @@ -19,9 +19,9 @@ services: - 127.0.0.1:8008:8008/tcp volumes: - ./data:/home/user/nimbus-eth2/build/data - entrypoint: /home/user/nimbus-eth2/run-prater-beacon-node.sh + entrypoint: /home/user/nimbus-eth2/run-holesky-beacon-node.sh environment: - WEB3_URL: wss://goerli.infura.io/ws/v3/YOUR_TOKEN + WEB3_URL: wss://holesky.infura.io/ws/v3/YOUR_TOKEN # you need to make sure that port 9000 is accesible from outside; no automagic port forwarding here command: >- --nat=extip:YOUR_EXTERNAL_IP @@ -29,4 +29,3 @@ services: --rest-address=0.0.0.0 --metrics --metrics-address=0.0.0.0 - diff --git a/docker/dist/binaries/docker-compose-example3.yml b/docker/dist/binaries/docker-compose-example3.yml index b3f6cde113..9dce8662d0 100644 --- a/docker/dist/binaries/docker-compose-example3.yml +++ b/docker/dist/binaries/docker-compose-example3.yml @@ -16,9 +16,8 @@ services: network_mode: host volumes: - ./data:/home/user/nimbus-eth2/build/data - entrypoint: /home/user/nimbus-eth2/run-prater-beacon-node.sh + entrypoint: /home/user/nimbus-eth2/run-holesky-beacon-node.sh environment: - WEB3_URL: wss://goerli.infura.io/ws/v3/YOUR_TOKEN + WEB3_URL: wss://holesky.infura.io/ws/v3/YOUR_TOKEN #command: >- #--nat=any - diff --git a/docker/dist/entry_point.sh b/docker/dist/entry_point.sh index 6b828a2792..966503f761 100755 --- a/docker/dist/entry_point.sh +++ b/docker/dist/entry_point.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright (c) 2020-2024 Status Research & Development GmbH. Licensed under +# Copyright (c) 2020-2025 Status Research & Development GmbH. 
Licensed under # either of: # - Apache License, version 2.0 # - MIT license @@ -140,7 +140,6 @@ elif [[ "${PLATFORM}" == "macOS_amd64" ]]; then CC="${CC}" \ AR="x86_64-apple-darwin${DARWIN_VER}-ar" \ RANLIB="x86_64-apple-darwin${DARWIN_VER}-ranlib" \ - CMAKE="x86_64-apple-darwin${DARWIN_VER}-cmake" \ DSYMUTIL="x86_64-apple-darwin${DARWIN_VER}-dsymutil" \ FORCE_DSYMUTIL=1 \ USE_VENDORED_LIBUNWIND=1 \ @@ -172,7 +171,6 @@ elif [[ "${PLATFORM}" == "macOS_arm64" ]]; then CC="${CC}" \ AR="arm64-apple-darwin${DARWIN_VER}-ar" \ RANLIB="arm64-apple-darwin${DARWIN_VER}-ranlib" \ - CMAKE="arm64-apple-darwin${DARWIN_VER}-cmake" \ DSYMUTIL="arm64-apple-darwin${DARWIN_VER}-dsymutil" \ FORCE_DSYMUTIL=1 \ USE_VENDORED_LIBUNWIND=1 \ diff --git a/docs/attestation_flow.md b/docs/attestation_flow.md index ae90c35e67..37ef6499a2 100644 --- a/docs/attestation_flow.md +++ b/docs/attestation_flow.md @@ -9,7 +9,7 @@ It is important to distinguish attestation `validation` from attestation `verifi - Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block. - - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations + - https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#attestations From the specs it seems like gossip attestation `validation` is a superset of consensus attestation `verification`. @@ -51,7 +51,7 @@ These GossipSub topics are used to listen for attestations: - Unaggregated: `/eth2/{$forkDigest}/beacon_attestation_{subnetIndex}/ssz_snappy` The attestations are then validated by `validateAttestation()` or `validateAggregate()` in either `attestationValidator()` or `aggregateValidator()` according to the P2P specs. -- https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof +- https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnets Finally, valid attestations are added to the local `attestationPool`. diff --git a/docs/block_flow.md b/docs/block_flow.md index f8002ccef3..5f15b8b321 100644 --- a/docs/block_flow.md +++ b/docs/block_flow.md @@ -6,7 +6,7 @@ This is a WIP document to explain the beacon block flows. Important distinction: - We distinguish block `validation` which is defined in the P2P specs: - https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block. + https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#beacon_block. A validated block can be forwarded on gossipsub. 
- and we distinguish `verification` which is defined in consensus specs: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#block-processing diff --git a/docs/e2store.md b/docs/e2store.md index 492a672c0a..36bf2ca384 100644 --- a/docs/e2store.md +++ b/docs/e2store.md @@ -99,7 +99,7 @@ type: [0x01, 0x00] data: snappyFramed(ssz(SignedBeaconBlock)) ``` -`CompressedSignedBeackBlock` contain `SignedBeaconBlock` objects encoded using `SSZ` then compressed using the snappy [framing format](https://github.com/google/snappy/blob/master/framing_format.txt). +`CompressedSignedBeaconBlock` contains `SignedBeaconBlock` objects encoded using `SSZ` then compressed using the snappy [framing format](https://github.com/google/snappy/blob/master/framing_format.txt). The encoding matches that of the `BeaconBlocksByRoot` and `BeaconBlocksByRange` requests from the p2p specification. @@ -183,7 +183,7 @@ Each era is identified by when it ends. Thus, the genesis era is era `0`, follow `.era` file names follow a simple convention: `---.era`: -* `config-name` is the `CONFIG_NAME` field of the runtime configation (`mainnet`, `sepolia`, `holesky`, etc) +* `config-name` is the `CONFIG_NAME` field of the runtime configuration (`mainnet`, `sepolia`, `holesky`, `hoodi`, etc) * `era-number` is the number of the _first_ era stored in the file - for example, the genesis era file has number 0 - as a 5-digit 0-filled decimal integer * `short-era-root` is the first 4 bytes of the last historical root in the _last_ state in the era file, lower-case hex-encoded (8 characters), except the genesis era which instead uses the `genesis_validators_root` field from the genesis state. * The root is available as `state.historical_roots[era - 1]` except for genesis, which is `state.genesis_validators_root` @@ -217,8 +217,8 @@ The `era-state` is the state in the era transition slot. The genesis group conta The structure of the era file gives it the following properties: * the indices at the end are fixed-length: they can be used to discover the beginning of an era if the end of it is known -* the start slot field of the state slot index idenfifies which era the group pertains to -* the state in the era file is the end state after having applied all the blocks in the era and, if applicable, the block at the first slot - the `block_roots` entries in the state can be used to discover the digest of the blocks - either to verify the intergrity of the era file or to quickly load block roots without computing them. +* the start slot field of the state slot index identifies which era the group pertains to +* the state in the era file is the end state after having applied all the blocks in the era and, if applicable, the block at the first slot - the `block_roots` entries in the state can be used to discover the digest of the blocks - either to verify the integrity of the era file or to quickly load block roots without computing them. 
* each group in the era file is full, independent era file - groups can freely be split and combined ## Reading era files diff --git a/docs/requirements.txt b/docs/requirements.txt index 5dd17e85f7..67f0fabc3f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -22,7 +22,7 @@ ghp-import==2.1.0 # via mkdocs idna==3.10 # via requests -jinja2==3.1.4 +jinja2==3.1.6 # via # mkdocs # mkdocs-material diff --git a/docs/the_nimbus_book/mkdocs.yml b/docs/the_nimbus_book/mkdocs.yml index 8544bf677e..db0dc5be19 100644 --- a/docs/the_nimbus_book/mkdocs.yml +++ b/docs/the_nimbus_book/mkdocs.yml @@ -86,7 +86,6 @@ nav: - General: - 'keep-updated.md' - 'eth1.md' - - 'goerli-eth.md' - 'beacon-node-systemd.md' - 'log-rotate.md' - 'metrics-pretty-pictures.md' @@ -94,7 +93,7 @@ nav: - 'email-notifications.md' - 'profits.md' - 'health.md' - - 'holesky.md' + - 'hoodi.md' - Security: - 'preparation.md' diff --git a/docs/the_nimbus_book/src/attestation-performance.md b/docs/the_nimbus_book/src/attestation-performance.md index df08b5710c..eb6c6ce001 100644 --- a/docs/the_nimbus_book/src/attestation-performance.md +++ b/docs/the_nimbus_book/src/attestation-performance.md @@ -35,9 +35,9 @@ The following options are available: Where: -- The `network` can be `mainnet`, `holesky`, or `sepolia`. +- The `network` can be `mainnet`, `hoodi`, `holesky`, or `sepolia`. -- The default location of the `db` is `build/data/shared_mainnet_0/db` for `mainnet`, `build/data/shared_holesky_0/db` for `holesky`, etc. +- The default location of the `db` is `build/data/shared_mainnet_0/db` for `mainnet`, `build/data/shared_hoodi_0/db` for `hoodi`, etc. Near the bottom, you should see: @@ -55,11 +55,12 @@ Use `start-slot` and `slots` to restrict the analysis on a specific block range. ### 3. Run -To view the performance of all validators on Holesky so far across the entire block range stored in your database, run: +To view the performance of all validators on Hoodi so far across the entire block range stored in your database, run: + ```sh build/ncli_db validatorPerf \ ---network=holesky \ ---db=build/data/shared_holesky_0/db +--network=hoodi \ +--db=build/data/shared_hoodi_0/db ``` You should see output that looks like to the following: @@ -75,10 +76,11 @@ validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_a ### 4. Adjust to target a specific block range To restrict the analysis to the performance between slots 0 and 128, say, run: + ```sh build/ncli_db validatorPerf \ ---network=holesky \ ---db=build/data/shared_holesky_0/db \ +--network=hoodi \ +--db=build/data/shared_hoodi_0/db \ --start-slot=0 \ --slots=128 ``` @@ -104,4 +106,3 @@ This workbook consists of three inter-related spreadsheets: `Summary`, `My Valid ## Resources The workbook's method is explained [here](https://hackmd.io/xQfi83kHQpm05-aAFVV0DA?view). - diff --git a/docs/the_nimbus_book/src/beacon-node-systemd.md b/docs/the_nimbus_book/src/beacon-node-systemd.md index f165693d0c..9d2a102100 100644 --- a/docs/the_nimbus_book/src/beacon-node-systemd.md +++ b/docs/the_nimbus_book/src/beacon-node-systemd.md @@ -56,7 +56,7 @@ sudo systemctl edit nimbus_beacon_node.service The service file contains several options for controlling Nimbus. 
Important options include: -* `Environment=NETWORK`: set this to `mainnet`, `holesky` or `sepolia`, depending on which network you want to connect to +* `Environment=NETWORK`: set this to `mainnet`, `hoodi`, `holesky`, or `sepolia`, depending on which network you want to connect to * `Environment=WEB3_URL`: point this to your execution client, see the [Execution Client](./eth1.md) setup guide * `Environment=REST_ENABLED`: REST is used to interact with the beacon node, in particular when setting up a separate Validator Client, see the [REST API](./rest-api.md) guide * `Environment=METRICS_ENABLED`: metrics are used for monitoring the node, see the [metrics](./metrics-pretty-pictures.md) setup guide @@ -141,5 +141,5 @@ When running multiple beacon nodes, make sure that each service: ## Further examples -- A [service template file](https://github.com/chfast/ethereum-node/blob/main/nimbus%40.service) by Pawel Bylica which allows you to start two services at the same time, e.g. `nimbus@holesky.service` and `nimbus@mainnet.service`. +- A [service template file](https://github.com/chfast/ethereum-node/blob/main/nimbus%40.service) by Pawel Bylica which allows you to start two services at the same time, e.g. `nimbus@hoodi.service` and `nimbus@mainnet.service`. - The [EthereumOnARM](https://github.com/EOA-Blockchain-Labs/ethereumonarm/blob/main/fpm-package-builder/l1-clients/consensus-layer/nimbus/extras/nimbus-beacon.service) project maintains a service file as part of their Ethereum installation package repository. diff --git a/docs/the_nimbus_book/src/data-dir.md b/docs/the_nimbus_book/src/data-dir.md index 4006dda0b5..5eb63ae9a0 100644 --- a/docs/the_nimbus_book/src/data-dir.md +++ b/docs/the_nimbus_book/src/data-dir.md @@ -3,7 +3,7 @@ Nimbus stores all the information it needs to run in a data directory. In this directory, you'll find a database, your validator keys and secrets, and several other items. -When following the installation guide, the chain data will be stored in `build/data` with separate directories for each chain (mainnet, holesky, etc). +When following the installation guide, the chain data will be stored in `build/data` with separate directories for each chain (mainnet, hoodi, etc). !!! tip "The `--data-dir` option" The `--data-dir=/path/to/data` allows picking a specific data directory to store the chain. diff --git a/docs/the_nimbus_book/src/database-backup.md b/docs/the_nimbus_book/src/database-backup.md index 9dcf01901c..3cdb95fa6e 100644 --- a/docs/the_nimbus_book/src/database-backup.md +++ b/docs/the_nimbus_book/src/database-backup.md @@ -3,11 +3,10 @@ The best way to do this is to use `.backup` sqlite command: -1. Navigate to either `build/data/shared_mainnet_0/db/` (if you're running Holesky: `shared_holesky_0`) or the directory you supplied to the `--data-dir` argument when you launched Nimbus. +1. Navigate to either `build/data/shared_mainnet_0/db/` (if you're running Hoodi: `shared_hoodi_0`) or the directory you supplied to the `--data-dir` argument when you launched Nimbus. 2. Run the following command: ``` sqlite3 nbc.sqlite3 ".backup 'backup_nbc.sqlite3'" ``` Make sure to correctly type both single and double quotes, as written above. 
- diff --git a/docs/the_nimbus_book/src/developers.md b/docs/the_nimbus_book/src/developers.md index 2218bf09b1..9f2d427825 100644 --- a/docs/the_nimbus_book/src/developers.md +++ b/docs/the_nimbus_book/src/developers.md @@ -29,14 +29,14 @@ The `unstable` branch contains features and bugfixes that are actively being tes * Features and bugfixes are generally pushed to individual branches, each with their own pull request against the `unstable` branch. * Once the branch has been reviewed and passed CI, the developer or reviewer merges the branch to `unstable`. -* The `unstable` branch is regularly deployed to the Nimbus Prater fleet where additional testing happens. +* The `unstable` branch is regularly deployed to the Nimbus Hoodi fleet where additional testing happens. ### Testing The `testing` branch contains features and bugfixes that have gone through CI and initial testing on the `unstable` branch and are ready to be included in the next release. * After testing a bugfix or feature on `unstable`, the features and fixes that are planned for the next release get merged to the `testing` branch either by the release manager or team members. -* The `testing` branch is regularly deployed to the Nimbus prater fleet as well as a smaller mainnet fleet. +* The `testing` branch is regularly deployed to the Nimbus Hoodi fleet as well as a smaller mainnet fleet. * The branch should remain release-ready at most times. ### Stable @@ -170,7 +170,7 @@ nim --version # Nimbus is tested and supported on 1.2.12 at the moment ## Stress-testing the client by limiting the CPU power ```bash -make prater CPU_LIMIT=20 +make sepolia CPU_LIMIT=20 ``` The limiting is provided by the `cpulimit` utility, available on Linux and macOS. diff --git a/docs/the_nimbus_book/src/docker.md b/docs/the_nimbus_book/src/docker.md index 7034ab7d70..a62a1d4869 100644 --- a/docs/the_nimbus_book/src/docker.md +++ b/docs/the_nimbus_book/src/docker.md @@ -4,7 +4,7 @@ Docker images for the [Nimbus beacon node](https://hub.docker.com/r/statusim/nim We have version-specific Docker tags (e.g. `statusim/nimbus-eth2:amd64-v1.2.3`) and a tag for the latest image (e.g. `statusim/nimbus-eth2:amd64-latest`). -These images contain the same binaries as the [release tarballs](./binaries.md) inside a `debian:bullseye-slim` image, running under a user imaginatively named `user`, with UID:GID of 1000:1000. +These images contain the same binaries as the [release tarballs](./binaries.md) inside a `debian:bookworm-slim` image, running under a user imaginatively named `user`, with UID:GID of 1000:1000. The binaries are placed under the `/home/user/` directory which is also the default *WORKDIR*. The *ENTRYPOINT* of the image is configured to directly launch the respective binary without any extra arguments. diff --git a/docs/the_nimbus_book/src/el-light-client.md b/docs/the_nimbus_book/src/el-light-client.md index c3fda25918..09b84c466c 100644 --- a/docs/the_nimbus_book/src/el-light-client.md +++ b/docs/the_nimbus_book/src/el-light-client.md @@ -59,7 +59,7 @@ Follow the [regular instructions](./eth1.md) for running the execution client, t ## Running the light client The light client starts syncing from a trusted block. -This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client. 
+This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client. ### 1. Obtaining a trusted block root @@ -74,7 +74,7 @@ A block root may be obtained from another trusted beacon node, or from a trusted ``` === "Beaconcha.in" - On the [beaconcha.in](https://beaconcha.in) website ([Holesky](https://holesky.beaconcha.in)), navigate to the `Epochs` section and select a recent `Finalized` epoch. + On the [beaconcha.in](https://beaconcha.in) website ([Hoodi](https://hoodi.beaconcha.in)), navigate to the `Epochs` section and select a recent `Finalized` epoch. Then, scroll down to the bottom of the page. If the bottom-most slot has a `Proposed` status, copy its `Root Hash`. Otherwise, for example if the bottom-most slot was `Missed`, go back and pick a different epoch. @@ -97,11 +97,11 @@ To start the light client, run the following commands (inserting your own truste --trusted-block-root=$TRUSTED_BLOCK_ROOT ``` -=== "Holesky" +=== "Hoodi" ```sh TRUSTED_BLOCK_ROOT=0x1234567890123456789012345678901234567890123456789012345678901234 JWTSECRET=path/to/execution/client/jwt.hex - build/nimbus_light_client --network=holesky \ + build/nimbus_light_client --network=hoodi \ --web3-url=http://127.0.0.1:8551 --jwt-secret="$JWTSECRET" \ --trusted-block-root=$TRUSTED_BLOCK_ROOT ``` diff --git a/docs/the_nimbus_book/src/eth1.md b/docs/the_nimbus_book/src/eth1.md index cd6af53267..09b8696e41 100644 --- a/docs/the_nimbus_book/src/eth1.md +++ b/docs/the_nimbus_book/src/eth1.md @@ -15,7 +15,7 @@ See the [execution client comparison](https://ethereum.org/en/developers/docs/no ### 1. Install execution client -Select an execution client and install it, configuring it such that that the authenticated JSON-RPC interface is enabled and a JWT secret file is created. +Select an execution client and install it, configuring it such that the authenticated JSON-RPC interface is enabled and a JWT secret file is created. === "Nimbus" @@ -36,9 +36,9 @@ Select an execution client and install it, configuring it such that that the aut geth --authrpc.addr localhost --authrpc.port 8551 --authrpc.vhosts localhost --authrpc.jwtsecret /tmp/jwtsecret ``` - === "Holesky" + === "Hoodi" ``` - geth --holesky --authrpc.addr localhost --authrpc.port 8551 --authrpc.vhosts localhost --authrpc.jwtsecret /tmp/jwtsecret + geth --hoodi --authrpc.addr localhost --authrpc.port 8551 --authrpc.vhosts localhost --authrpc.jwtsecret /tmp/jwtsecret ``` === "Nethermind" @@ -82,20 +82,19 @@ You will need to pass the path to the token file to Nimbus together with the web === "Mainnet" ```sh ./run-mainnet-beacon-node.sh \ - --el=http://127.0.0.1:8551 \ - --jwt-secret=/tmp/jwtsecret + --el=http://127.0.0.1:8551 \ + --jwt-secret=/tmp/jwtsecret ``` -=== "Holesky" +=== "Hoodi" ```sh - ./run-holesky-beacon-node.sh \ - --el=http://127.0.0.1:8551 \ - --jwt-secret=/tmp/jwtsecret + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --el=http://127.0.0.1:8551 \ + --jwt-secret=/tmp/jwtsecret ``` - - - ## Upgrade execution client === "Nimbus" @@ -140,9 +139,9 @@ To enable this mode, just specify multiple URLs through the `--el` option when s ```sh ./run-mainnet-beacon-node.sh \ - --el=http://127.0.0.1:8551 \ - --el=ws://other:8551 \ - --jwt-secret=/tmp/jwtsecret + --el=http://127.0.0.1:8551 \ + --el=ws://other:8551 \ + --jwt-secret=/tmp/jwtsecret ``` !!! 
tip diff --git a/docs/the_nimbus_book/src/execution-client.md b/docs/the_nimbus_book/src/execution-client.md index 256f953819..1f9c5ba41e 100644 --- a/docs/the_nimbus_book/src/execution-client.md +++ b/docs/the_nimbus_book/src/execution-client.md @@ -45,6 +45,11 @@ In addition to the era files themselves, you will need at least 200GB of free sp * https://mainnet.era.nimbus.team/ * https://mainnet.era1.nimbus.team/ + === "Hoodi" + * https://hoodi.era.nimbus.team/ + + The Hoodi network does not have `era1` files since it never operated as a proof-of-work chain + === "Holesky" * https://holesky.era.nimbus.team/ @@ -70,6 +75,11 @@ See the [era file guide](./era-store.md) for more information. ``` +=== "Hoodi" + ```sh + build/nimbus_execution_client --network=hoodi --data-dir=build/hoodi import + ``` + === "Holesky" ```sh build/nimbus_execution_client --network=holesky --data-dir=build/holesky import @@ -93,6 +103,11 @@ During startup, a `jwt.hex` file will be placed in the data directory containing build/nimbus_execution_client --data-dir=build/mainnet --engine-api ``` +=== "Hoodi" + ```sh + build/nimbus_execution_client --network=hoodi --data-dir=build/hoodi --engine-api + ``` + === "Holesky" ```sh build/nimbus_execution_client --network=holesky --data-dir=build/holesky --engine-api @@ -115,6 +130,12 @@ This method of syncing loads blocks from the consensus node and passes them to t while true; do build/nrpc sync --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/mainnet/jwt.hex; sleep 2; done ``` +=== "Hoodi" + ```sh + # Start `nrpc` every 2 seconds in case there is a fork or the execution client goes out of sync + while true; do build/nrpc sync --network=hoodi --beacon-api=http://localhost:5052 --el-engine-api=http://localhost:8550 --jwt-secret=build/hoodi/jwt.hex; sleep 2; done + ``` + === "Holesky" ```sh # Start `nrpc` every 2 seconds in case there is a fork or the execution client goes out of sync diff --git a/docs/the_nimbus_book/src/external-block-builder.md b/docs/the_nimbus_book/src/external-block-builder.md index 7fcf62bcca..f8accaf4d1 100644 --- a/docs/the_nimbus_book/src/external-block-builder.md +++ b/docs/the_nimbus_book/src/external-block-builder.md @@ -30,17 +30,24 @@ Additionally, the URL of the service exposing the [builder API](https://ethereum === "Mainnet Beacon Node" ```sh - ./run-mainnet-beacon-node.sh --payload-builder=true --payload-builder-url=https://${HOST}:${PORT}/ + ./run-mainnet-beacon-node.sh \ + --payload-builder=true \ + --payload-builder-url=https://${HOST}:${PORT}/ ``` -=== "Holesky Beacon Node" +=== "Hoodi Beacon Node" ```sh - ./run-holesky-beacon-node.sh --payload-builder=true --payload-builder-url=https://${HOST}:${PORT}/ + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --payload-builder=true \ + --payload-builder-url=https://${HOST}:${PORT}/ ``` === "Validator Client" ```sh - build/nimbus_validator_client --payload-builder=true + build/nimbus_validator_client \ + --payload-builder=true ``` ## Useful resources @@ -51,4 +58,4 @@ Additionally, the URL of the service exposing the [builder API](https://ethereum - [Mainnet Relay Overview](https://beaconcha.in/relays) -- [Holesky Relay Overview](https://holesky.beaconcha.in/relays) +- [Hoodi Relay Overview](https://hoodi.beaconcha.in/relays) diff --git a/docs/the_nimbus_book/src/faq.md b/docs/the_nimbus_book/src/faq.md index 95d833766d..fcfd3dc38f 100644 --- a/docs/the_nimbus_book/src/faq.md +++ 
b/docs/the_nimbus_book/src/faq.md @@ -62,7 +62,7 @@ While we strongly recommend against it, you can disable doppelganger detection w ### What is the best way to stress test my execution+consensus setup before committing with real ETH? -We recommend running [a Nimbus beacon node](./quick-start.md) on [Holesky](./holesky.md) and a mainnet [execution client](./eth1.md) on the same machine. +We recommend running [a Nimbus beacon node](./quick-start.md) on [Hoodi](./hoodi.md) and a mainnet [execution client](./eth1.md) on the same machine. This will simulate the load of running a mainnet validator. ### How do I add an additional validator? @@ -181,7 +181,7 @@ In other words, if you stood to earn ≈0.01 ETH, you would instead be penalized ### How can I keep track of my validator? -One way of keeping track is using an online service such as beaconcha.in: [Mainnet](https://beaconcha.in/) or [Holesky](https://holesky.beaconcha.in). +One way of keeping track is using an online service such as beaconcha.in: [Mainnet](https://beaconcha.in/) or [Hoodi](https://hoodi.beaconcha.in). Another way is to set up [validator monitoring](./validator-monitor.md) together with a [dashboard](./metrics-pretty-pictures.md) to keep track of its performance. @@ -334,4 +334,3 @@ In a nutshell, security. The signing key must be available at all times. As such, it will need to be held online. Since anything online is vulnerable to being hacked, it's not a good idea to use the same key for withdrawals. - diff --git a/docs/the_nimbus_book/src/goerli-eth.md b/docs/the_nimbus_book/src/goerli-eth.md index b3ce66d8cb..e49296e45d 100644 --- a/docs/the_nimbus_book/src/goerli-eth.md +++ b/docs/the_nimbus_book/src/goerli-eth.md @@ -1,3 +1,3 @@ -# Obtain Goerli ETH +This page has been removed. -To participate in an eth2 testnet, you need to stake 32 testnet ETH. You can request this testnet ETH by joining the [ethstaker discord](https://discord.gg/fxsECtdUCB) - look for the `#request-goerli-eth` channel. +Use the [Hoodi testnet](./hoodi.md). diff --git a/docs/the_nimbus_book/src/graffiti.md b/docs/the_nimbus_book/src/graffiti.md index d62972fda5..879148bf7e 100644 --- a/docs/the_nimbus_book/src/graffiti.md +++ b/docs/the_nimbus_book/src/graffiti.md @@ -9,11 +9,14 @@ The graffiti can be either a string or, if you want to specify raw bytes, you ca === "Mainnet" ```sh - ./run-mainnet-beacon-node.sh --graffiti="" + ./run-mainnet-beacon-node.sh \ + --graffiti="" ``` -=== "Holesky" +=== "Hoodi" ```sh - ./run-holesky-beacon-node.sh --graffiti="" + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --graffiti="" ``` - diff --git a/docs/the_nimbus_book/src/history.md b/docs/the_nimbus_book/src/history.md index a9689a912c..82654ff1be 100644 --- a/docs/the_nimbus_book/src/history.md +++ b/docs/the_nimbus_book/src/history.md @@ -41,10 +41,14 @@ In order to recreate deep history in a pruned node, download the [era archive of === "Mainnet" ```sh - ./run-mainnet-beacon-node.sh --history=prune ... + ./run-mainnet-beacon-node.sh \ + --history=prune ``` -=== "Holesky" +=== "Hoodi" ```sh - ./run-holesky-beacon-node.sh --history=prune ... 
+ build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --history=prune ``` diff --git a/docs/the_nimbus_book/src/holesky.md b/docs/the_nimbus_book/src/holesky.md index 68cc9cb5fc..e49296e45d 100644 --- a/docs/the_nimbus_book/src/holesky.md +++ b/docs/the_nimbus_book/src/holesky.md @@ -1,122 +1,3 @@ -# Holešky testnet +This page has been removed. -`holesky` is the main long-running Ethereum staking, infrastructure and protocol-developer testnet. -For testing decentralized applications, smart contracts, and other EVM functionality, please use Sepolia testnet! -`holesky` replaces the Prater/Görli network which has been deprecated since early 2023. - - -It provides an opportunity to verify your setup works as expected through the proof-of-stake transition and in a post-merge context as well as to safely practice node operations such as adding and removing validators, migrating between clients, and performing upgrades and backups. -If you come across any issues, please [report them here](https://github.com/status-im/nimbus-eth2/issues). - - - - - - -## General Preparation - -1. Generate the JWT secret with `openssl rand -hex 32 | tr -d "\n" > "/opt/jwtsecret"`. This file needs to be passed to both the execution client and the consensus client. - -2. Choose an Ethereum address to receive transaction fees. - This ETH will be immediately available, not part of the staking contract. - -3. Download the [latest release](./binaries.md) and install it by unpacking the archive. - -4. Choose one of Nethermind, Besu, Erigon, or Geth as an execution client. - Download, install, and [run it](https://notes.ethereum.org/@launchpad/holesky#Run-an-Execution-Layer-Client). - - === "Nethermind" - - ```sh - cd nethermind/src/Nethermind/Nethermind.Runner - dotnet run -c Release -- --config holesky \ - --JsonRpc.Host=0.0.0.0 \ - --JsonRpc.JwtSecretFile=/opt/jwtsecret - ``` - - === "Erigon" - - ```sh - ./build/bin/erigon --chain=holesky \ - --datadir holesky-testnet \ - --authrpc.jwtsecret=/opt/jwtsecret \ - --http --http.api=engine,net,eth - ``` - - === "Besu" - - ```sh - build/install/besu/bin/besu \ - --network=holesky \ - --rpc-http-enabled=true \ - --rpc-http-host="0.0.0.0" \ - --rpc-http-cors-origins="*" \ - --sync-mode="X_SNAP" \ - --data-storage-format="BONSAI"\ - --Xmerge-support=true \ - --rpc-ws-host="0.0.0.0" \ - --host-allowlist="*" \ - --engine-rpc-enabled=true \ - --engine-host-allowlist="*" \ - --engine-jwt-enabled=true \ - --engine-jwt-secret=/opt/jwtsecret - ``` - - - - - - ## Sync the beacon node and execution client - -5. [Start syncing](./start-syncing.md) the node consisting of Nimbus and chosen execution client, for example by running: - ```sh - nimbus-eth2/build/nimbus_beacon_node \ - --network=holesky \ - --web3-url=http://127.0.0.1:8551 \ - --rest \ - --metrics \ - --jwt-secret="/opt/jwtsecret" \ - --suggested-fee-recipient= - ``` - - !!! tip - If you want the syncing process to complete much faster, you can [sync from a trusted node](./trusted-node-sync.md). - - One might consider here to [set up a systemd service](./beacon-node-systemd.md) to ensure this runs automatically, including after restarts. - - - - - - ## Obtaining genesis file (optional) - - By default, Nimbus will automatically download the genesis state of Holešky from Github through the HTTPS protocol. 
- If something prevents you from using this method, you may be able to work-around the issue by either instructing Nimbus to use a different URL by specifying the `--genesis-state-url` command-line parameter (for example, you can point it to the `/eth/v2/debug/beacon/states/genesis` endpoint of a trusted beacon node or a checkpoint provider) or by downloading the `genesis.ssz` file of the network through some other means and then supplying its path through the `--genesis-state` command-line parameter. - - - - - - ## Begin validating - -6. Once this Holešky node is [completely synced](./keep-an-eye.md#keep-track-of-your-syncing-progress), use the [Holesky launchpad](https://holesky.launchpad.ethereum.org/en/) to obtain Holesky validators. -It might require some time before these enter and are activated on the beacon chain. -If one does this before the node which will attest and propose using those validators has synced, one might miss attestations and block proposals. - -7. Follow our validating guide from [step 2 (import the validator keys) onward](./run-a-validator.md#2-import-your-validator-keys). - - - - - - - -## Useful resources - -- Holesky [landing page](https://holesky.ethpandaops.io): view block explorers, request funds from the faucet, and connect to a JSON RPC endpoint. - -- Holesky [EF launchpad notes](https://notes.ethereum.org/@launchpad/holesky): how to run a node; contains instructions for how to build Nimbus from source for this purpose - -- Holesky consensus layer [beacon chain explorer](https://holesky.beaconcha.in/) - -- Holesky execution layer [transaction explorer](https://holesky.etherscan.io/) +Use the [Hoodi testnet](./hoodi.md). diff --git a/docs/the_nimbus_book/src/hoodi.md b/docs/the_nimbus_book/src/hoodi.md new file mode 100644 index 0000000000..cdc9c32209 --- /dev/null +++ b/docs/the_nimbus_book/src/hoodi.md @@ -0,0 +1,122 @@ +# Hoodi testnet + +`Hoodi` is the main long-running Ethereum staking, infrastructure and protocol-developer testnet. +For testing decentralized applications, smart contracts, and other EVM functionality, please use Sepolia testnet! + + +It provides an opportunity to verify your setup works as expected through the proof-of-stake transition and in a post-merge context as well as to safely practice node operations such as adding and removing validators, migrating between clients, and performing upgrades and backups. +If you come across any issues, please [report them here](https://github.com/status-im/nimbus-eth2/issues). + + + + + + +## General Preparation + +1. Generate the JWT secret with `openssl rand -hex 32 | tr -d "\n" > "/opt/jwtsecret"`. This file needs to be passed to both the execution client and the consensus client. + +2. Choose an Ethereum address to receive transaction fees. + This ETH will be immediately available, not part of the staking contract. + +3. Download the [latest release](./binaries.md) and install it by unpacking the archive. + +4. Choose one of Nethermind, Erigon, or Besu as an execution client. + Download, install, and [run it](https://notes.ethereum.org/@launchpad/hoodi#Run-an-Execution-Layer-Client). 
+ + === "Nethermind" + + ```sh + cd nethermind/src/Nethermind/Nethermind.Runner + dotnet run -c Release -- --config hoodi \ + --JsonRpc.Host=0.0.0.0 \ + --JsonRpc.JwtSecretFile=/opt/jwtsecret + ``` + + === "Erigon" + + ```sh + ./build/bin/erigon --chain=hoodi \ + --datadir hoodi-testnet \ + --authrpc.jwtsecret=/opt/jwtsecret \ + --http --http.api=engine,net,eth \ + --externalcl + ``` + + === "Besu" + + ```sh + build/install/besu/bin/besu \ + --network=hoodi \ + --rpc-http-enabled=true \ + --rpc-http-host="0.0.0.0" \ + --rpc-http-cors-origins="*" \ + --sync-mode="X_SNAP" \ + --data-storage-format="BONSAI" \ + --Xmerge-support=true \ + --rpc-ws-host="0.0.0.0" \ + --host-allowlist="*" \ + --engine-rpc-enabled=true \ + --engine-host-allowlist="*" \ + --engine-jwt-enabled=true \ + --engine-jwt-secret=/opt/jwtsecret + ``` + + + + + + ## Sync the beacon node and execution client + +5. [Start syncing](./start-syncing.md) the node consisting of Nimbus and chosen execution client, for example by running: + ```sh + nimbus-eth2/build/nimbus_beacon_node \ + --network=hoodi \ + --web3-url=http://127.0.0.1:8551 \ + --rest \ + --metrics \ + --jwt-secret="/opt/jwtsecret" \ + --suggested-fee-recipient= + ``` + + !!! tip + If you want the syncing process to complete much faster, you can [sync from a trusted node](./trusted-node-sync.md). + + One might consider here to [set up a systemd service](./beacon-node-systemd.md) to ensure this runs automatically, including after restarts. + + + + + + ## Obtaining genesis file (optional) + + By default, Nimbus will automatically download the genesis state of Hoodi from Github through the HTTPS protocol. + If something prevents you from using this method, you may be able to work-around the issue by either instructing Nimbus to use a different URL by specifying the `--genesis-state-url` command-line parameter (for example, you can point it to the `/eth/v2/debug/beacon/states/genesis` endpoint of a trusted beacon node or a checkpoint provider) or by downloading the `genesis.ssz` file of the network through some other means and then supplying its path through the `--genesis-state` command-line parameter. + + + + + + ## Begin validating + +6. Once this Hoodi node is [completely synced](./keep-an-eye.md#keep-track-of-your-syncing-progress), use the [Hoodi launchpad](https://hoodi.launchpad.ethereum.org/en/) to obtain Hoodi validators. +It might require some time before these enter and are activated on the beacon chain. +If one does this before the node which will attest and propose using those validators has synced, one might miss attestations and block proposals. + +7. Follow our validating guide from [step 2 (import the validator keys) onward](./run-a-validator.md#2-import-your-validator-keys). + + + + + + + +## Useful resources + +- Hoodi [landing page](https://hoodi.ethpandaops.io): view block explorers, request funds from the faucet, and connect to a JSON RPC endpoint. 
+ +- Hoodi [EF launchpad notes](https://notes.ethereum.org/@launchpad/hoodi): how to run a node; contains instructions for how to build Nimbus from source for this purpose + +- Hoodi consensus layer [beacon chain explorer](https://hoodi.beaconcha.in/) + +- Hoodi execution layer [transaction explorer](https://hoodi.etherscan.io/) diff --git a/docs/the_nimbus_book/src/install.md b/docs/the_nimbus_book/src/install.md index 309ef21498..5c8c78749d 100644 --- a/docs/the_nimbus_book/src/install.md +++ b/docs/the_nimbus_book/src/install.md @@ -14,7 +14,6 @@ Check that your machine matches the [minimal system requirements](./hardware.md) When building from source, you will need additional build dependencies to be installed: - Developer tools (C compiler, Make, Bash, Git) -- [CMake](https://cmake.org/) @@ -26,21 +25,21 @@ When building from source, you will need additional build dependencies to be ins ```sh # Debian and Ubuntu - sudo apt-get install build-essential git-lfs cmake + sudo apt-get install build-essential git-lfs # Fedora - dnf install @development-tools cmake + dnf install @development-tools # Arch Linux, using an AUR manager - yourAURmanager -S base-devel git-lfs cmake + yourAURmanager -S base-devel git-lfs ``` === "macOS" - With [Homebrew](https://brew.sh/): + The Command Line Tools package is available as part of Xcode and can be installed via the Terminal application: ```sh - brew install cmake + xcode-select --install ``` === "Windows" diff --git a/docs/the_nimbus_book/src/keep-an-eye.md b/docs/the_nimbus_book/src/keep-an-eye.md index d409b359fe..e7cca43507 100644 --- a/docs/the_nimbus_book/src/keep-an-eye.md +++ b/docs/the_nimbus_book/src/keep-an-eye.md @@ -2,7 +2,7 @@ Once your validator has been activated, you can set up [validator monitoring](./validator-monitor.md) together with a [dashboard](./metrics-pretty-pictures.md) to keep track of its performance. -Another way of keeping track is using an online service such as beaconcha.in: [Mainnet](https://beaconcha.in/) or [Holesky](https://holesky.beaconcha.in). +Another way of keeping track is using an online service such as beaconcha.in: [Mainnet](https://beaconcha.in/) or [Hoodi](https://hoodi.beaconcha.in). Both online services and dashboards allow setting up alerts for when the validator is offline. @@ -53,4 +53,4 @@ The string of letters -- what we call the `sync worker map` (in the above case r ``` !!! tip - You can also use you calls outlined in the [REST API page](./rest-api.md) to retrieve similar information. + You can also use the calls outlined in the [REST API page](./rest-api.md) to retrieve similar information. diff --git a/docs/the_nimbus_book/src/keymanager-api.md b/docs/the_nimbus_book/src/keymanager-api.md index 03241ab1a5..b8c231900b 100644 --- a/docs/the_nimbus_book/src/keymanager-api.md +++ b/docs/the_nimbus_book/src/keymanager-api.md @@ -8,7 +8,10 @@ By default, we disable the Keymanager API. To enable it, start the beacon node with the `--keymanager` option enabled: ``` -./run-holesky-beacon-node.sh --keymanager +build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --keymanager ``` Once the node is running, you'll be able to access the API from [http://localhost:5052/](http://localhost:5052/). 
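With the Keymanager API enabled as in the `keymanager-api.md` hunk above, the standard keymanager endpoints are served alongside the REST API. A minimal sketch of listing the imported keystores follows; the token file path is an assumption here, so point it at whichever file your beacon node actually writes or is configured to use for keymanager authentication:

```sh
# List the keystores the node currently manages (standard keymanager API).
# The token path below is assumed; adjust it to your node's keymanager token file.
TOKEN=$(cat build/data/shared_hoodi_0/keymanager-token)
curl -s -H "Authorization: Bearer ${TOKEN}" \
  http://localhost:5052/eth/v1/keystores
```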
diff --git a/docs/the_nimbus_book/src/log-rotate.md b/docs/the_nimbus_book/src/log-rotate.md index 501e5131eb..6fa039fbe6 100644 --- a/docs/the_nimbus_book/src/log-rotate.md +++ b/docs/the_nimbus_book/src/log-rotate.md @@ -70,7 +70,7 @@ The final step is to redirect logs to `rotatelogs` using a pipe when starting Ni ```bash build/nimbus_beacon_node \ - --network:holesky \ + --network:hoodi \ --web3-url="$WEB3URL" \ --data-dir:$DATADIR 2>&1 | rotatelogs -L "$DATADIR/nbc_bn.log" -p "/path/to/rotatelogs-compress.sh" -D -f -c "$DATADIR/log/nbc_bn_%Y%m%d%H%M%S.log" 3600 ``` diff --git a/docs/the_nimbus_book/src/metrics-pretty-pictures.md b/docs/the_nimbus_book/src/metrics-pretty-pictures.md index c17412ebca..54830c3122 100644 --- a/docs/the_nimbus_book/src/metrics-pretty-pictures.md +++ b/docs/the_nimbus_book/src/metrics-pretty-pictures.md @@ -14,7 +14,10 @@ You can learn more about Grafana [here](https://github.com/grafana/grafana). To enable the metrics server, run the beacon node with the `--metrics` flag: ``` -./run-holesky-beacon-node.sh --metrics +build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --metrics ``` Visit [http://127.0.0.1:8008/metrics](http://127.0.0.1:8008/metrics) with a browser or `curl`. @@ -212,4 +215,3 @@ Note that this dashboard does rely heavily on three prometheus exporter tools: ` The good news is that you don't need to use all these tools, as long as you take care of removing the related panels. See [here](https://github.com/metanull-operator/eth2-grafana/tree/master/nimbus) for a detailed guide explaining how to use it. - diff --git a/docs/the_nimbus_book/src/more-keys.md b/docs/the_nimbus_book/src/more-keys.md index b9ed06ca74..e97453cede 100644 --- a/docs/the_nimbus_book/src/more-keys.md +++ b/docs/the_nimbus_book/src/more-keys.md @@ -37,12 +37,12 @@ Run the following command from the directory which contains the `deposit` execut --chain mainnet ``` -=== "Holesky" +=== "Hoodi" ```sh ./deposit existing-mnemonic \ --validator_start_index 0 \ --num_validators 1 \ - --chain holesky + --chain hoodi ``` You'll be prompted to enter your mnemonic, and a new password for your keystore. @@ -70,12 +70,12 @@ Run the following command from the directory which contains the `deposit` execut --chain mainnet ``` -=== "Holesky" +=== "Hoodi" ``` ./deposit existing-mnemonic \ --validator_start_index 1 \ --num_validators 1 \ - --chain holesky + --chain hoodi ``` You'll be prompted to enter your mnemonic and a new password for your keystore. diff --git a/docs/the_nimbus_book/src/options.md b/docs/the_nimbus_book/src/options.md index 670bf79557..e2e0711edc 100644 --- a/docs/the_nimbus_book/src/options.md +++ b/docs/the_nimbus_book/src/options.md @@ -116,7 +116,7 @@ The following options are available: keys of the validators what to sign and when) and load the validators in the beacon node itself [=true]. --discv5 Enable Discovery v5 [=true]. - --dump Write SSZ dumps of blocks, attestations and states to data dir [=false]. + --dump Write SSZ dumps of blocks and states to data dir [=false]. --direct-peer The list of privileged, secure and known peers to connect and maintain the connection to. This requires a not random netkey-file. In the multiaddress format like: /ip4/
/tcp//p2p/, or enr format diff --git a/docs/the_nimbus_book/src/pi-guide.md b/docs/the_nimbus_book/src/pi-guide.md index 0c754834c9..6c8ed708bc 100644 --- a/docs/the_nimbus_book/src/pi-guide.md +++ b/docs/the_nimbus_book/src/pi-guide.md @@ -357,7 +357,7 @@ Depending on your installation method, run these commands to import your signing ```sh # Run import command as the `nimbus` user - sudo -u nimbus /usr/bin/nimbus_beacon_node deposits import --data-dir=/var/lib/nimbus/shared_holesky_0 /path/to/keys + sudo -u nimbus /usr/bin/nimbus_beacon_node deposits import --data-dir=/var/lib/nimbus/shared_hoodi_0 /path/to/keys ``` === "Manual installation" @@ -365,7 +365,7 @@ Depending on your installation method, run these commands to import your signing To import your signing key into Nimbus, from the `nimbus-eth2` directory run. ```sh - build/nimbus_beacon_node deposits import --data-dir=build/data/shared_holesky_0 ../validator_keys + build/nimbus_beacon_node deposits import --data-dir=build/data/shared_hoodi_0 ../validator_keys ``` You'll be asked to enter the password you created to encrypt your keystore(s). @@ -373,26 +373,30 @@ Depending on your installation method, run these commands to import your signing Your validator client needs both your signing keystore(s) and the password encrypting it to import your [key](https://blog.ethereum.org/2020/05/21/keys/) (since it needs to decrypt the keystore in order to be able to use it to sign on your behalf). -### 15. Connect to Holesky +### 15. Connect to Hoodi -We're finally ready to connect to the Holesky testnet! +We're finally ready to connect to the Hoodi testnet! !!! note If you haven't already, we recommend registering for, and running, your own eth1 node in parallel. For instructions on how to do so, see the [eth1 page](./eth1.md). -To connect to Holesky, run: +To connect to Hoodi, run: === "Using package manager" ```sh - sudo -u nimbus /usr/bin/nimbus_beacon_node --network=holesky --data-dir=/var/lib/nimbus/shared_holesky_0 + sudo -u nimbus /usr/bin/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=/var/lib/nimbus/shared_hoodi_0 ``` === "Manual installation" ```sh - ./run-holesky-beacon-node.sh + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 ``` @@ -403,7 +407,7 @@ If you look near the top of the logs printed to your console, you should see con ``` INF 2023-10-01 11:25:33.487+01:00 Launching beacon node ... -INF 2023-10-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 file=nimbus_beacon_node.nim:198 path=build/data/shared_holesky_0/db +INF 2023-10-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 file=nimbus_beacon_node.nim:198 path=build/data/shared_hoodi_0/db INF 2023-10-01 11:25:35.921+01:00 Block dag initialized INF 2023-10-01 11:25:37.073+01:00 Generating new networking key ... @@ -426,7 +430,7 @@ peers: 15 ❯ finalized: ada7228a:8765 ❯ head: b2fe11cd:8767:2 ❯ time: 9900: Keep an eye on the number of peers you're currently connected to (in the above case that's `15`), as well as your [sync progress](./keep-an-eye.md#keep-track-of-your-syncing-progress). !!! note - 15 - 20 peers and an average sync speed of **0.5 - 1.0** blocks per second is normal on `Holesky` with a Pi. + 15 - 20 peers and an average sync speed of **0.5 - 1.0** blocks per second is normal on `Hoodi` with a Pi. If your sync speed is much slower than this, the root of the problem may be your USB3.0 to SSD adapter. 
See [this post](https://forums.raspberrypi.com/viewtopic.php?f=28&t=245931) for a recommended workaround. @@ -436,8 +440,8 @@ Keep an eye on the number of peers you're currently connected to (in the above c Whether or not your Pi is up to the task will depend on a number of factors such as SSD speed, network connectivity, etc. As such, it's best to verify performance on a testnet first. -The best thing you can do is to set your Pi to run Holesky. -If you have no trouble syncing and attesting on Holesky, your setup should good enough for mainnet as well. +The best thing you can do is to set your Pi to run Hoodi. +If you have no trouble syncing and attesting on Hoodi, your setup should be good enough for mainnet as well. diff --git a/docs/the_nimbus_book/src/prater.md b/docs/the_nimbus_book/src/prater.md index 6f226ad09e..e49296e45d 100644 --- a/docs/the_nimbus_book/src/prater.md +++ b/docs/the_nimbus_book/src/prater.md @@ -1,3 +1,3 @@ This page has been removed. -Use the [Holešky testnet](./holesky.md). +Use the [Hoodi testnet](./hoodi.md). diff --git a/docs/the_nimbus_book/src/quick-start.md b/docs/the_nimbus_book/src/quick-start.md index e13d1cb41f..8f7bd3385c 100644 --- a/docs/the_nimbus_book/src/quick-start.md +++ b/docs/the_nimbus_book/src/quick-start.md @@ -13,8 +13,8 @@ Running a beacon node is a [worthwhile endeavor](https://vitalik.eth.limo/genera The guide assumes [Ubuntu Linux](https://ubuntu.com/download/server) is being used, and therefore some familiarity with [the Linux command line](https://ubuntu.com/tutorials/command-line-for-beginners) is needed. !!! tip - You can practice running the node safely on the [Holesky testnet](./holesky.md). - Throughout, we'll provide instructions for both Holesky and Mainnet. + You can practice running the node safely on the [Hoodi testnet](./hoodi.md). + Throughout, we'll provide instructions for both Hoodi and Mainnet. 
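The Pi guide hunk above suggests keeping an eye on peer count and sync progress, and the quick-start hunk below starts a node in the same way; one way to check both without scraping logs is the standard beacon node API. A minimal sketch, assuming the REST API is enabled (for example with `--rest`) on the default port 5052:

```sh
# Check sync progress and peer count via the standard beacon API.
curl -s http://localhost:5052/eth/v1/node/syncing     # head_slot, sync_distance, is_syncing
curl -s http://localhost:5052/eth/v1/node/peer_count  # connected / connecting / disconnected counts
```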
## Steps @@ -65,17 +65,21 @@ cd nimbus-eth2 ``` === "Mainnet" - ```sh # Start a mainnet node - ./run-mainnet-beacon-node.sh --web3-url=http://127.0.0.1:8551 --jwt-secret=/tmp/jwtsecret + ./run-mainnet-beacon-node.sh \ + --web3-url=http://127.0.0.1:8551 \ + --jwt-secret=/tmp/jwtsecret ``` -=== "Holesky" - +=== "Hoodi" ```sh - # Start a holesky testnet node - ./run-holesky-beacon-node.sh --web3-url=http://127.0.0.1:8551 --jwt-secret=/tmp/jwtsecret + # Start a hoodi testnet node + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --web3-url=http://127.0.0.1:8551 \ + --jwt-secret=/tmp/jwtsecret ``` Once the beacon node starts, you'll see it logging information to the console, like so: diff --git a/docs/the_nimbus_book/src/rest-api.md b/docs/the_nimbus_book/src/rest-api.md index 57381bfd43..4fe0f4dbef 100644 --- a/docs/the_nimbus_book/src/rest-api.md +++ b/docs/the_nimbus_book/src/rest-api.md @@ -15,6 +15,7 @@ If you wish to expose the beacon node to the public internet, it is recommended * `http://testing.mainnet.beacon-api.nimbus.team/` * `http://unstable.mainnet.beacon-api.nimbus.team/` +* `http://unstable.hoodi.beacon-api.nimbus.team/` * `http://unstable.holesky.beacon-api.nimbus.team/` * `http://unstable.sepolia.beacon-api.nimbus.team/` @@ -30,6 +31,11 @@ You can make requests as follows (here we are requesting the version the Nimbus curl -X GET http://unstable.mainnet.beacon-api.nimbus.team/eth/v1/node/version ``` +=== "Hoodi unstable branch" + ``` + curl -X GET http://unstable.hoodi.beacon-api.nimbus.team/eth/v1/node/version + ``` + === "Holesky unstable branch" ``` curl -X GET http://unstable.holesky.beacon-api.nimbus.team/eth/v1/node/version @@ -184,4 +190,3 @@ In addition to supporting the standard endpoints, Nimbus has a set of specific e - The complete API specification is well documented [here](https://ethereum.github.io/beacon-APIs/) - See the repository Readme [here](https://github.com/ethereum/beacon-APIs) - diff --git a/docs/the_nimbus_book/src/run-a-validator.md b/docs/the_nimbus_book/src/run-a-validator.md index d5081e8169..afa5f1fb30 100644 --- a/docs/the_nimbus_book/src/run-a-validator.md +++ b/docs/the_nimbus_book/src/run-a-validator.md @@ -29,10 +29,10 @@ To make a deposit, you will need to generate keys then submit a deposit transact The process of setting up a validator is also documented at the Ethereum launchpad site: * [Mainnet](https://launchpad.ethereum.org/) - * [Holesky EthStaker Launchpad](https://holesky.launchpad.ethstaker.cc/en/) or [Holesky EF Launchpad](https://holesky.launchpad.ethereum.org/) + * [Hoodi EthStaker Launchpad](https://hoodi.launchpad.ethstaker.cc/en/) or [Hoodi EF Launchpad](https://hoodi.launchpad.ethereum.org/) !!! tip - Before running your validator on Mainnet, you can (and should) verify that your setup works as expected by running it on the [Holesky testnet](./holesky.md). + Before running your validator on Mainnet, you can (and should) verify that your setup works as expected by running it on the [Hoodi testnet](./hoodi.md). ### 1. Download the deposit tool @@ -153,14 +153,6 @@ If your `validator_keys` folder is stored elsewhere, you can pass its location t Replacing `/path/to/keys` with the full pathname of where the `validator_keys` directory is found. -### Optimized import for a large number of validators - -If you plan to use a large number of validators (e.g. 
more than 100) on a single beacon node or a validator client, you might benefit from running the `deposits import` command with the option `--method=single-salt`. -This will force Nimbus to use the same password and random salt value when encrypting all of the imported keystores which will later enable it to load the large number of validator keys almost instantly. -The theoretical downside of using this approach is that it makes the brute-force cracking of all imported keystores computationally equivalent to cracking just one of them. -Nevertheless, the security parameters used by Ethereum are such that cracking even a single keystore is considered computationally infeasible with current hardware. - - ### Troubleshooting If you come across an error, make sure that: @@ -191,12 +183,18 @@ Press `Ctrl-c` to stop the beacon node if it's running, then use the same command === "Mainnet" ```sh - ./run-mainnet-beacon-node.sh --web3-url=http://127.0.0.1:8551 --suggested-fee-recipient=0x... + ./run-mainnet-beacon-node.sh \ + --web3-url=http://127.0.0.1:8551 \ + --suggested-fee-recipient=0x... ``` -=== "Holesky" +=== "Hoodi" ```sh - ./run-holesky-beacon-node.sh --web3-url=http://127.0.0.1:8551 --suggested-fee-recipient=0x... + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --web3-url=http://127.0.0.1:8551 \ + --suggested-fee-recipient=0x... ``` ### 3. Check the logs diff --git a/docs/the_nimbus_book/src/start-syncing.md b/docs/the_nimbus_book/src/start-syncing.md index 01d159793d..70d261685d 100644 --- a/docs/the_nimbus_book/src/start-syncing.md +++ b/docs/the_nimbus_book/src/start-syncing.md @@ -24,10 +24,12 @@ Later, when everything is working, you can easily switch to mainnet. === "Testnet" - To start syncing the `holesky` testnet from the `nimbus-eth2` repository, run: + To start syncing the `hoodi` testnet from the `nimbus-eth2` repository, run: ``` - ./run-holesky-beacon-node.sh + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 ``` === "Mainnet" @@ -35,7 +37,7 @@ Later, when everything is working, you can easily switch to mainnet. To start syncing the Ethereum beacon chain mainnet, run: ``` - ./run-mainnet-beacon-node.sh + ./run-mainnet-beacon-node.sh ``` ## Log output @@ -45,7 +47,7 @@ You should see the following output: ``` INF 2023-10-01 11:25:33.487+01:00 Launching beacon node ... -INF 2023-10-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 path=build/data/shared_holesky_0/db +INF 2023-10-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 path=build/data/shared_hoodi_0/db INF 2023-10-01 11:25:35.921+01:00 Block dag initialized INF 2023-10-01 11:25:37.073+01:00 Generating new networking key ... @@ -68,7 +70,11 @@ You can add command line options to the startup command. 
For example, to change the port to 9100, use: ```sh -./run-holesky-beacon-node.sh --tcp-port=9100 --udp-port=9100 +build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --tcp-port=9100 \ + --udp-port=9100 ``` To see a list of the command line options available to you, with descriptions, run: @@ -101,11 +107,13 @@ You can use an existing synced node or a third-party service to accelerate sync To use checkpoint sync, run the following commands (inserting the checkpoint sync endpoint and your own trusted block root): -=== "Holesky" +=== "Hoodi" ```sh CHECKPOINT_SYNC_ENDPOINT=http://127.0.0.1:8551 TRUSTED_BLOCK_ROOT=0x1234567890123456789012345678901234567890123456789012345678901234 - ./run-holesky-beacon-node.sh \ + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ --external-beacon-api-url=$CHECKPOINT_SYNC_ENDPOINT \ --trusted-block-root=$TRUSTED_BLOCK_ROOT ``` diff --git a/docs/the_nimbus_book/src/suggested-fee-recipient.md b/docs/the_nimbus_book/src/suggested-fee-recipient.md index 289eb3308e..c6c063f32b 100644 --- a/docs/the_nimbus_book/src/suggested-fee-recipient.md +++ b/docs/the_nimbus_book/src/suggested-fee-recipient.md @@ -34,17 +34,22 @@ Fee recipients are recorded publicly on-chain as part of proposed blocks, so sug === "Mainnet" ```sh - ./run-mainnet-beacon-node.sh --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842 + ./run-mainnet-beacon-node.sh \ + --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842 ``` -=== "Holesky" +=== "Hoodi" ```sh - ./run-holesky-beacon-node.sh --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842 + build/nimbus_beacon_node \ + --network=hoodi \ + --data-dir=build/data/shared_hoodi_0 \ + --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842 ``` === "Validator Client" ```sh - ./nimbus_validator_client --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842 + ./nimbus_validator_client \ + --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842 ``` ## Logs diff --git a/docs/the_nimbus_book/src/troubleshooting.md b/docs/the_nimbus_book/src/troubleshooting.md index 461ef7dffd..e955aefe58 100644 --- a/docs/the_nimbus_book/src/troubleshooting.md +++ b/docs/the_nimbus_book/src/troubleshooting.md @@ -2,7 +2,7 @@ !!! note The commands on this page refer to mainnet. - If you're running on `holesky` or another testnet, replace `mainnet` accordingly. + If you're running on `hoodi` or another testnet, replace `mainnet` accordingly. We are continuously making improvements to both stability and resource usage. If you run into any problem with Nimbus and are not running the latest version, chances are they have already been fixed. @@ -162,4 +162,3 @@ See our page on [monitoring the health of your node](./health.md) for more. We have seen reports of degraded performance when using several types of USB3.0 to SSD adapters or when using native USB3.0 disk drives. [This post on RPi forums](https://forums.raspberrypi.com/viewtopic.php?t=245931#p1501426 ) details why there is a difference in behaviour from models prior to Pi 4 and the recommended workaround. 
- diff --git a/docs/the_nimbus_book/src/trusted-node-sync.md b/docs/the_nimbus_book/src/trusted-node-sync.md index 27863bf7b9..ae7dc75e52 100644 --- a/docs/the_nimbus_book/src/trusted-node-sync.md +++ b/docs/the_nimbus_book/src/trusted-node-sync.md @@ -35,11 +35,11 @@ To start trusted node sync, run: --trusted-node-url=http://localhost:5052 ``` -=== "Holesky" +=== "Hoodi" ```sh build/nimbus_beacon_node trustedNodeSync \ - --network:holesky \ - --data-dir=build/data/shared_holesky_0 \ + --network:hoodi \ + --data-dir=build/data/shared_hoodi_0 \ --trusted-node-url=http://localhost:5052 ``` @@ -139,7 +139,7 @@ curl -o state.finalized.ssz \ # Start the beacon node using the downloaded state as starting point ./run-mainnet-beacon-node.sh \ - --finalized-checkpoint-state=state.finalized.ssz + --finalized-checkpoint-state=state.finalized.ssz ``` ## Recreate historical state access indices diff --git a/docs/the_nimbus_book/src/validator-client-options.md b/docs/the_nimbus_book/src/validator-client-options.md index 4c78677a5a..b0908662b2 100644 --- a/docs/the_nimbus_book/src/validator-client-options.md +++ b/docs/the_nimbus_book/src/validator-client-options.md @@ -1,8 +1,8 @@ # Validator client -In the most simple setup, a single beacon node paired with an execution client is all that is needed to run a successful validator setup. +In the simplest setup, a single beacon node paired with an execution client is all that is needed to run a successful validator setup. -Nimbus however also provides options for running advanded setups that provide additional security and redundancy. +Nimbus however also provides options for running advanced setups that provide additional security and redundancy. See the [validator client page](./validator-client.md) to get started! diff --git a/docs/the_nimbus_book/src/validator-client.md b/docs/the_nimbus_book/src/validator-client.md index 9699fe6c75..3af135e118 100644 --- a/docs/the_nimbus_book/src/validator-client.md +++ b/docs/the_nimbus_book/src/validator-client.md @@ -21,7 +21,7 @@ Next, choose a data directory for the validator client and import the keys there ```sh build/nimbus_beacon_node deposits import \ - --data-dir:build/data/vc_shared_holesky_0 "" + --data-dir:build/data/vc_shared_hoodi_0 "" ``` !!! warning @@ -35,7 +35,7 @@ With the keys imported, you are ready to start validator client: ```sh build/nimbus_validator_client \ - --data-dir:build/data/vc_shared_holesky_0 + --data-dir:build/data/vc_shared_hoodi_0 ``` # Options diff --git a/ncli/ncli.nim b/ncli/ncli.nim index b38d668c01..a13dcf1263 100644 --- a/ncli/ncli.nim +++ b/ncli/ncli.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -80,15 +80,15 @@ type of slots: preState2* {. argument - desc: "State to which to apply specified block"}: string + desc: "State to which to apply specified empty slots"}: string slot* {. argument - desc: "Block to apply to preState"}: uint64 + desc: "Empty slots to apply to preState"}: uint64 postState2* {. 
argument - desc: "Filename of state resulting from applying blck to preState"}: string + desc: "Filename of state resulting from empty slots to preState"}: string template saveSSZFile(filename: string, value: ForkedHashedBeaconState) = try: diff --git a/ncli/ncli_db.nim b/ncli/ncli_db.nim index 2f67345b04..8a6a8e2a6f 100644 --- a/ncli/ncli_db.nim +++ b/ncli/ncli_db.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,7 +8,7 @@ {.push raises: [].} import - std/[os, stats, tables], + std/tables, snappy, chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3, ../beacon_chain/networking/network_metadata, @@ -21,6 +21,9 @@ import ../research/simutils, ./era, ./ncli_common, ./validator_db_aggregator +from std/os import createDir, dirExists, moveFile, `/` +from std/stats import RunningStat + when defined(posix): import system/ansi_c @@ -447,19 +450,14 @@ proc cmdDumpBlock(conf: DbConf) = if shouldShutDown: quit QuitSuccess try: let root = Eth2Digest.fromHex(blockRoot) - if (let blck = db.getBlock( - root, phase0.TrustedSignedBeaconBlock); blck.isSome): - dump("./", blck.get()) - elif (let blck = db.getBlock( - root, altair.TrustedSignedBeaconBlock); blck.isSome): - dump("./", blck.get()) - elif (let blck = db.getBlock(root, bellatrix.TrustedSignedBeaconBlock); blck.isSome): - dump("./", blck.get()) - elif (let blck = db.getBlock(root, capella.TrustedSignedBeaconBlock); blck.isSome): - dump("./", blck.get()) - elif (let blck = db.getBlock(root, deneb.TrustedSignedBeaconBlock); blck.isSome): - dump("./", blck.get()) - else: + var found = false + withAll(ConsensusFork): + if not found: + let blck = db.getBlock(root, consensusFork.TrustedSignedBeaconBlock) + if blck.isSome: + found = true + dump("./", blck.get()) + if not found: echo "Couldn't load ", blockRoot except CatchableError as e: echo "Couldn't load ", blockRoot, ": ", e.msg @@ -641,7 +639,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) = if firstSlot.isSome(): withTimer(timers[tBlocks]): var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId] - for i in dag.getBlockRange(firstSlot.get(), 1, blocks).. conf.remoteSignersUrls.len.uint32: - error "The specified treshold must be lower or equal to the number of signers" + error "The specified threshold must be lower or equal to the number of signers" quit 1 let rng = HmacDrbgContext.new() @@ -102,4 +109,4 @@ proc main = error "Failed to generate distributed keystore", err = status.error quit 1 -main() +main() \ No newline at end of file diff --git a/ncli/ncli_testnet.nim b/ncli/ncli_testnet.nim index 36ab4df559..464ced1041 100644 --- a/ncli/ncli_testnet.nim +++ b/ncli/ncli_testnet.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -258,12 +258,12 @@ func `as`(blk: BlockObject, T: type bellatrix.ExecutionPayloadHeader): T = state_root: blk.stateRoot as Eth2Digest, receipts_root: blk.receiptsRoot as Eth2Digest, logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)), - prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), # Is BE correct here? + prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE), # Is BE correct here? block_number: uint64 blk.number, gas_limit: uint64 blk.gasLimit, gas_used: uint64 blk.gasUsed, timestamp: uint64 blk.timestamp, - extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes), + extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.data), base_fee_per_gas: blk.baseFeePerGas.getOrDefault(), block_hash: blk.hash as Eth2Digest, transactions_root: blk.transactionsRoot as Eth2Digest) @@ -274,12 +274,12 @@ func `as`(blk: BlockObject, T: type capella.ExecutionPayloadHeader): T = state_root: blk.stateRoot as Eth2Digest, receipts_root: blk.receiptsRoot as Eth2Digest, logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)), - prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), + prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE), block_number: uint64 blk.number, gas_limit: uint64 blk.gasLimit, gas_used: uint64 blk.gasUsed, timestamp: uint64 blk.timestamp, - extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes), + extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.data), base_fee_per_gas: blk.baseFeePerGas.getOrDefault(), block_hash: blk.hash as Eth2Digest, transactions_root: blk.transactionsRoot as Eth2Digest, @@ -291,12 +291,12 @@ func `as`(blk: BlockObject, T: type deneb.ExecutionPayloadHeader): T = state_root: blk.stateRoot as Eth2Digest, receipts_root: blk.receiptsRoot as Eth2Digest, logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)), - prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), + prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE), block_number: uint64 blk.number, gas_limit: uint64 blk.gasLimit, gas_used: uint64 blk.gasUsed, timestamp: uint64 blk.timestamp, - extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes), + extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.data), base_fee_per_gas: blk.baseFeePerGas.getOrDefault(), block_hash: blk.hash as Eth2Digest, transactions_root: blk.transactionsRoot as Eth2Digest, @@ -310,12 +310,12 @@ func `as`(blk: BlockObject, T: type electra.ExecutionPayloadHeader): T = state_root: blk.stateRoot as Eth2Digest, receipts_root: blk.receiptsRoot as Eth2Digest, logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)), - prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), + prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE), block_number: uint64 blk.number, gas_limit: uint64 blk.gasLimit, gas_used: uint64 blk.gasUsed, timestamp: uint64 blk.timestamp, - extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes), + extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.data), base_fee_per_gas: blk.baseFeePerGas.getOrDefault(), block_hash: blk.hash as Eth2Digest, transactions_root: blk.transactionsRoot as Eth2Digest, @@ -562,7 +562,7 @@ when isMainModule: let r = await web3.send(tr) result = await web3.getMinedTransactionReceipt(r) - proc sendEth(web3: Web3, to: Eth1Address, valueEth: int): Future[TxHash] = + proc sendEth(web3: Web3, to: Eth1Address, valueEth: int): Future[Hash32] = let tr = TransactionArgs( `from`: Opt.some web3.defaultAccount, # TODO: Force json-rpc to generate 'data' field @@ 
-727,4 +727,4 @@ when isMainModule: # This is handled above before the case statement discard - waitFor main() + waitFor main() \ No newline at end of file diff --git a/ncli/requirements.txt b/ncli/requirements.txt index 9e488cb90e..63a0aedc6c 100644 --- a/ncli/requirements.txt +++ b/ncli/requirements.txt @@ -22,7 +22,7 @@ ipython==8.10.0 ipython-genutils==0.2.0 ipywidgets==7.6.5 jedi==0.18.1 -Jinja2==3.1.4 +Jinja2==3.1.6 jsonschema==4.4.0 jupyter==1.0.0 jupyter-autotime==1.1.0 @@ -79,7 +79,7 @@ stack-data==0.1.4 terminado==0.12.1 testpath==0.5.0 tomli==1.2.3 -tornado==6.4.1 +tornado==6.4.2 traitlets==5.1.1 typing_extensions==4.0.1 wcwidth==0.2.5 diff --git a/ncli/resttest-rules.json b/ncli/resttest-rules.json index 12f8683150..3f26bc8be4 100644 --- a/ncli/resttest-rules.json +++ b/ncli/resttest-rules.json @@ -2623,6 +2623,30 @@ "body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}] } }, + { + "topics": ["beacon", "states_pending_deposits"], + "request": { + "url": "/eth/v1/beacon/states/head/pending_deposits", + "headers": {"Accept": "application/json"} + }, + "response": { + "status": {"operator": "equals", "value": "400"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], + "body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}] + } + }, + { + "topics": ["beacon", "states_pending_partial_withdrawals"], + "request": { + "url": "/eth/v1/beacon/states/head/pending_partial_withdrawals", + "headers": {"Accept": "application/json"} + }, + "response": { + "status": {"operator": "equals", "value": "400"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], + "body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}] + } + }, { "topics": ["beacon", "beacon_headers"], "request": { @@ -4180,7 +4204,7 @@ "response": { "status": {"operator": "equals", "value": "200"}, "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], - "body": [{"operator": "jstructcmps", "start": ["data"], "value": 
{"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_A
ND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":""}}] + "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","FULU_FORK_VERSION":"","FULU_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","MAX_PAYLOAD_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INV
ALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","MAX_BLOBS_PER_BLOCK":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":"","MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":"","BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":"","MAX_BLOBS_PER_BLOCK_ELECTRA":"","MAX_REQUEST_BLOB_SIDECARS_ELECTRA":"","NUMBER_OF_COLUMNS":"","NUMBER_OF_CUSTODY_GROUPS":"","DATA_COLUMN_SIDECAR_SUBNET_COUNT":"","MAX_REQUEST_DATA_COLUMN_SIDECARS":"","SAMPLES_PER_SLOT":"","CUSTODY_REQUIREMENT":"","VALIDATOR_CUSTODY_REQUIREMENT":"","BALANCE_PER_ADDITIONAL_CUSTODY_GROUP":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":"","UNSET_DEPOSIT_REQUESTS_START_INDEX":"","FULL_EXIT_REQUEST_AMOUNT":"","COMPOUNDING_WITHDRAWAL_PREFIX":"","DEPOSIT_REQUEST_TYPE":"","WITHDRAWAL_REQUEST_TYPE":"","CONSOLIDATION_REQUEST_TYPE":"","MIN_ACTIVATION_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA":"","WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA":"","PENDING_DEPOSITS_LIMIT":"","PENDING_PARTIAL_WITHDRAWALS_LIMIT":"","PENDING_CONSOLIDATIONS_LIMIT":"","MAX_ATTESTER_SLASHINGS_ELECTRA":"","MAX_ATTESTATIONS_ELECTRA":"","MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":"","MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD":"","MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD":"","MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP":"","MAX_PENDING_DEPOSITS_PER_EPOCH":""}}] } }, { @@ -4241,7 +4265,7 @@ "response": { "status": {"operator": "equals", "value": "200"}, "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], - "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"peer_id": "", "enr": "", "p2p_addresses": [""], "discovery_addresses": [""], "metadata": {"seq_number": "", "attnets": "", "syncnets": ""}}}] + "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"peer_id": "", "enr": "", "p2p_addresses": [""], "discovery_addresses": [""], "metadata": {"seq_number": "", "attnets": "", "syncnets": "", "custody_group_count": ""}}}] } }, { @@ -4526,7 +4550,7 @@ "headers": {"Accept": "application/json"} }, "response": { - "status": {"operator": "equals", "value": "400"} + "status": {"operator": "equals", "value": "410"} } }, { @@ -4536,7 +4560,7 @@ "headers": {"Accept": "application/json"} }, "response": { - "status": {"operator": "equals", "value": "400"} + "status": {"operator": "equals", "value": "410"} } }, { @@ -5000,6 +5024,31 @@ "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}] } }, + { + "topics": ["validator", "register_validators"], + "request": { + "url": "/eth/v1/validator/register_validator", + "method": "POST", + "headers": {"Accept": "application/json"}, + 
"body": {"content-type": "application/octet-stream", "data": "64000000000000000000000000000000000000000000000000000000b5b5b76700000000a81da27d35ce91ddd3129042e47d0bf91a0ff60ed415ed25cda46b42c8d9cb91eb727966e5054828417bda9a1e5e8ba79043ce8b1542c118846537dc33ec2bfe8fa63858d8104b94b7673dd1390f05ac8ff949ba709cfde534857ddfbf757faa0af4a9e1e3743add7c51ca36223a580d2fa25b4dc46623e57f8d75fc647e6966c4828ee90c8eaf4f1babaa4b2f40d82bc8000000000000000000000000000000000000000000000000000000b5b5b76700000000886979da71af92933ebb6055a8c4a43a0c5e4ab645f9272ab6d441a5c2645c3278893439890eeda72f5580f05ae0898b8ba37e74091eb05ab1f66bf02f03ab7bfe0ee8f471ff8de8e10ef71164cef57a2ea694cde397d678a2ca18994cb2e154170e2a71a40fca2505c1e375c2ec05f9bc0a4f987968822177fe8d6326890362e0e55d18d0cf30b171ae44f969dbdf33"} + }, + "response": { + "status": {"operator": "equals", "value": "200"} + } + }, + { + "topics": ["validator", "register_validators"], + "request": { + "url": "/eth/v1/validator/register_validator", + "method": "POST", + "headers": {"Accept": "application/json"}, + "body": {"content-type": "application/octet-stream", "data": "64000000000000000000000000000000000000000000000000000000b5b5b76700000000a81da27d35ce91ddd3129042e47d0bf91a0ff60ed415ed25cda46b42c8d9cb91eb727966e5054828417bda9a1e5e8ba79043ce8b1542c118846537dc33ec2bfe8fa63858d8104b94b7673dd1390f05ac8ff949ba709cfde534857ddfbf757faa0af4a9e1e3743add7c51ca36223a580d2fa25b4dc46623e57f8d75fc647e6966c4828ee90c8eaf4f1babaa4b2f40d82bc8000000000000000000000000000000000000000000000000000000b5b5b76700000000886979da71af92933ebb6055a8c4a43a0c5e4ab645f9272ab6d441a5c2645c3278893439890eeda72f5580f05ae0898b8ba37e74091eb05ab1f66bf02f03ab7bfe0ee8f471ff8de8e10ef71164cef57a2ea694cde397d678a2ca18994cb2e154170e2a71a40fca2505c1e375c2ec05f9bc0a4f987968822177fe8d6326890362e0e55d18d0cf30b171ae44f969dbdf"} + }, + "response": { + "status": {"operator": "equals", "value": "400"}, + "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}] + } + }, { "topics": ["key_management", "list_keys"], "request": { diff --git a/ncli/resttest.nim b/ncli/resttest.nim index 3193d5f5d8..8b1b6192cf 100644 --- a/ncli/resttest.nim +++ b/ncli/resttest.nim @@ -1,14 +1,16 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
+{.push raises: [].} + import std/[strutils, os, options, uri, json, tables], results, - stew/[io2, base10], + stew/[io2, base10, byteutils], confutils, chronicles, httputils, chronos, chronos/streams/[asyncstream, tlsstream] @@ -453,7 +455,15 @@ proc prepareRequest(uri: Uri, return err("Field `body.data` must be present") if bdata.kind != JString: return err("Field `body.data` should be string") - (btype.str, bdata.str) + if toLowerAscii(btype.str) == "application/octet-stream": + let data = + try: + string.fromBytes(hexToSeqByte(bdata.str)) + except ValueError: + return err("Field `body.data` should be valid hexadecimal string") + (btype.str, data) + else: + (btype.str, bdata.str) var res = meth & " " & uri.path & requestUri & " HTTP/1.1\r\n" res.add("Content-Length: " & Base10.toString(uint64(len(requestBodyData))) & @@ -598,7 +608,7 @@ proc getResponseHeadersExpect(rule: JsonNode): Result[HeadersExpect, cstring] = block: var vres: seq[string] let jvalue = jitem.getOrDefault("value") - if not isnil(jvalue): + if not isNil(jvalue): case jvalue.kind of JArray: if len(jvalue.elems) == 0: @@ -1031,7 +1041,7 @@ proc workerLoop(address: TransportAddress, uri: Uri, worker: int, worker = worker return except CatchableError as exc: - warn "Unexpected exception while running test test run", host = hostname, + warn "Unexpected exception while running test run", host = hostname, error_name = exc.name, error_msg = exc.msg, index = index, worker = worker return @@ -1156,6 +1166,9 @@ proc run(conf: RestTesterConf): int = waitFor(checkConnection(conf, uri)) except ConnectionError: return 1 + except CatchableError as exc: + fatal "Unexpected test failure", error_name = exc.name, error_msg = exc.msg + return 1 try: return waitFor(startTests(conf, uri, jnodes)) diff --git a/nfuzz/libnfuzz.nim b/nfuzz/libnfuzz.nim index 1fdd4e4c30..1259057319 100644 --- a/nfuzz/libnfuzz.nim +++ b/nfuzz/libnfuzz.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -167,7 +167,7 @@ proc nfuzz_voluntary_exit(input: openArray[byte], xoutput: ptr byte, # Note: Could also accept raw input pointer and access list_size + seed here. # However, list_size needs to be known also outside this proc to allocate xoutput. -# TODO: rework to copy immediatly in an uint8 openArray, considering we have to +# TODO: rework to copy immediately in an uint8 openArray, considering we have to # go over the list anyhow? 
func nfuzz_shuffle(input_seed: ptr byte, xoutput: var openArray[uint64]): bool {.exportc, raises: [].} = diff --git a/nimble.lock b/nimble.lock deleted file mode 100644 index 5033fa9a5b..0000000000 --- a/nimble.lock +++ /dev/null @@ -1,554 +0,0 @@ -{ - "version": 1, - "packages": { - "stew": { - "version": "0.1.0", - "vcsRevision": "bb705bf17b46d2c8f9bfb106d9cc7437009a2501", - "url": "https://github.com/status-im/nim-stew", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "adac4b56a0dc35b1f5fc8fb66e8bb182100c857a" - } - }, - "unittest2": { - "version": "0.0.3", - "vcsRevision": "24eb5e829ed07e71e3a5d09786d5f80aa988874f", - "url": "https://github.com/status-im/nim-unittest2.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "0140709df36bca378cfa8678e04082f9a1fccabf" - } - }, - "httputils": { - "version": "0.3.0", - "vcsRevision": "40048e8b3e69284bdb5d4daa0a16ad93402c55db", - "url": "https://github.com/status-im/nim-http-utils", - "downloadMethod": "git", - "dependencies": [ - "stew", - "unittest2" - ], - "checksums": { - "sha1": "7a3ddd118b1f5eecd413a2effcc289cd19cb812e" - } - }, - "nimcrypto": { - "version": "0.5.4", - "vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00", - "url": "https://github.com/cheatfate/nimcrypto", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08" - } - }, - "taskpools": { - "version": "0.0.3", - "vcsRevision": "8d408ac6cfc9c24ec8b7b65d5993e85050dcbaa9", - "url": "https://github.com/status-im/nim-taskpools.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "37bbbbb03d9b893af6980592624211ab057392c0" - } - }, - "blscurve": { - "version": "0.0.1", - "vcsRevision": "0237e4e0e914fc19359c18a66406d33bc942775c", - "url": "https://github.com/status-im/nim-blscurve", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "stew", - "taskpools" - ], - "checksums": { - "sha1": "65f58854ffd2098e0d0ca08f6ea0efb3c27529e0" - } - }, - "stint": { - "version": "0.0.1", - "vcsRevision": "ddfa6c608a6c2a843d7b405f377a22703947267a", - "url": "https://github.com/status-im/nim-stint", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "b08fe1fc190d9e07016cd4047d872836264adebe" - } - }, - "bearssl": { - "version": "0.1.5", - "vcsRevision": "ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7", - "url": "https://github.com/status-im/nim-bearssl", - "downloadMethod": "git", - "dependencies": [ - "unittest2" - ], - "checksums": { - "sha1": "383abd5becc77bf8e365b780a29d20529e1d9c4c" - } - }, - "chronos": { - "version": "3.0.11", - "vcsRevision": "87197230779002a2bfa8642f0e2ae07e2349e304", - "url": "https://github.com/status-im/nim-chronos", - "downloadMethod": "git", - "dependencies": [ - "stew", - "bearssl", - "httputils", - "unittest2" - ], - "checksums": { - "sha1": "badd688fa1121e2685e7927e0f2f4fe62a0ed62e" - } - }, - "testutils": { - "version": "0.4.2", - "vcsRevision": "aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2", - "url": "https://github.com/status-im/nim-testutils", - "downloadMethod": "git", - "dependencies": [ - "unittest2" - ], - "checksums": { - "sha1": "94427e0cce0e0c5841edcd3a6530b4e6b857a3cb" - } - }, - "faststreams": { - "version": "0.3.0", - "vcsRevision": "37a183153c071539ab870f427c09a1376ba311b9", - "url": "https://github.com/status-im/nim-faststreams", - "downloadMethod": "git", - "dependencies": [ - "stew", - "testutils", - "chronos", - "unittest2" - ], - "checksums": { - "sha1": 
"5f7dbee99012f64e5d2365337e0a1fa056af63a0" - } - }, - "serialization": { - "version": "0.1.0", - "vcsRevision": "37bc0db558d85711967acb16e9bb822b06911d46", - "url": "https://github.com/status-im/nim-serialization", - "downloadMethod": "git", - "dependencies": [ - "faststreams", - "unittest2", - "stew" - ], - "checksums": { - "sha1": "5420a09b1d955e77971389852951892f42150241" - } - }, - "json_serialization": { - "version": "0.1.0", - "vcsRevision": "ea965bbe00c4ebc6d4d23bb3dee501a7474c0014", - "url": "https://github.com/status-im/nim-json-serialization", - "downloadMethod": "git", - "dependencies": [ - "serialization", - "stew" - ], - "checksums": { - "sha1": "adcecf3fa6cc3a1ba02f710314cd683de36d3880" - } - }, - "chronicles": { - "version": "0.10.2", - "vcsRevision": "1682096306ddba8185dcfac360a8c3f952d721e4", - "url": "https://github.com/status-im/nim-chronicles", - "downloadMethod": "git", - "dependencies": [ - "testutils", - "json_serialization" - ], - "checksums": { - "sha1": "9a5bebb76b0f7d587a31e621d260119279e91c76" - } - }, - "news": { - "version": "0.5", - "vcsRevision": "9094ae314754908838979fe0840b9b33c0e0a603", - "url": "https://github.com/status-im/news", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "db9d3265b01c039bb9c7e7d304c23c744344c507" - } - }, - "asynctest": { - "version": "0.3.0", - "vcsRevision": "3882ed64ed3159578f796bc5ae0c6b13837fe798", - "url": "https://github.com/markspanbroek/asynctest", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "7a3182c135b291e8e486d83accfc6d960cf75191" - } - }, - "zlib": { - "version": "0.1.0", - "vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2", - "url": "https://github.com/status-im/nim-zlib", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c" - } - }, - "websock": { - "version": "0.1.0", - "vcsRevision": "73edde4417f7b45003113b7a34212c3ccd95b9fd", - "url": "https://github.com/status-im/nim-websock", - "downloadMethod": "git", - "dependencies": [ - "chronos", - "httputils", - "chronicles", - "stew", - "asynctest", - "nimcrypto", - "bearssl", - "zlib" - ], - "checksums": { - "sha1": "ec2b137543f280298ca48de9ed4461a033ba88d3" - } - }, - "json_rpc": { - "version": "0.0.2", - "vcsRevision": "733a05b00c01f90e805225a8c65396fbf0374403", - "url": "https://github.com/status-im/nim-json-rpc", - "downloadMethod": "git", - "dependencies": [ - "stew", - "nimcrypto", - "stint", - "chronos", - "httputils", - "chronicles", - "news", - "websock", - "json_serialization" - ], - "checksums": { - "sha1": "1b6f2cfe9b1012be712de3d7daed8a11d4364d19" - } - }, - "snappy": { - "version": "0.1.0", - "vcsRevision": "3d39a6228af6204af21ad6eaa693f1661716ae2a", - "url": "https://github.com/status-im/nim-snappy", - "downloadMethod": "git", - "dependencies": [ - "faststreams", - "stew" - ], - "checksums": { - "sha1": "ce2e3f10151630d72fac12b927f0f1c238f651e2" - } - }, - "websocket": { - "version": "0.5.0", - "vcsRevision": "28cc44c8defc0b248b3abbc8205759b69a98f7f6", - "url": "https://github.com/niv/websocket.nim", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "cd7f7d3ec4d800f5c920c769ecafc38837319e57" - } - }, - "unicodedb": { - "version": "0.10.0", - "vcsRevision": "675407fa4b6e701ebf9f94e5370ce18c3af40cc3", - "url": "https://github.com/nitely/nim-unicodedb", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "49a66e3391badf050aab2c7bddd60ddac00a2290" - } - }, - 
"tempfile": { - "version": "0.1.7", - "vcsRevision": "26e0239441755e5edcfd170e9aa566bb9c9eb6f3", - "url": "https://github.com/OpenSystemsLab/tempfile.nim", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "d7c473819bbf0e38cad32e00598bd42366606e08" - } - }, - "ws": { - "version": "0.5.0", - "vcsRevision": "9536bf99ddf5948db221ccb7bb3663aa238a8e21", - "url": "https://github.com/treeform/ws", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "ae4daf4ae302d0431f3c2d385ae9d2fe767a3246" - } - }, - "nat_traversal": { - "version": "0.0.1", - "vcsRevision": "8994b67b07813955c61bebddf4bd2325439c3535", - "url": "https://github.com/status-im/nim-nat-traversal", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "74ceb2eb41dca5252d04d3dd8369b7eab517281b" - } - }, - "libbacktrace": { - "version": "0.0.8", - "vcsRevision": "ce966b1c469dda179b54346feaaf1a62202c984f", - "url": "https://github.com/status-im/nim-libbacktrace", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "ba7a2f3d21db894ace7bb4ebe0a5b06af995d68b" - } - }, - "dnsclient": { - "version": "0.1.0", - "vcsRevision": "647ed10deca7758a147c7f0af90d5addfe514c0f", - "url": "https://github.com/ba0f3/dnsclient.nim", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "ddf1f02944db2802fbb9dfe8b11085681b3b2a46" - } - }, - "metrics": { - "version": "0.0.1", - "vcsRevision": "858f73b7d3ae992333a7ffab35da87e3b7b81356", - "url": "https://github.com/status-im/nim-metrics", - "downloadMethod": "git", - "dependencies": [ - "chronos" - ], - "checksums": { - "sha1": "fb332b70d824bf3f714bfdc06e37371c52b59ff5" - } - }, - "secp256k1": { - "version": "0.5.2", - "vcsRevision": "e092373a5cbe1fa25abfc62e0f2a5f138dc3fb13", - "url": "https://github.com/status-im/nim-secp256k1", - "downloadMethod": "git", - "dependencies": [ - "stew", - "nimcrypto" - ], - "checksums": { - "sha1": "75aafb13984bb6006e3c2a1e74be19ff40a08b0f" - } - }, - "libp2p": { - "version": "0.0.2", - "vcsRevision": "58f383e661521314df314e7096c24db5a7490372", - "url": "https://github.com/status-im/nim-libp2p", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "dnsclient", - "bearssl", - "chronicles", - "chronos", - "metrics", - "secp256k1", - "stew", - "websock" - ], - "checksums": { - "sha1": "58b1e544b91c886a4633946e9685d043417eaf16" - } - }, - "asynctools": { - "version": "0.1.1", - "vcsRevision": "84ced6d002789567f2396c75800ffd6dff2866f7", - "url": "https://github.com/cheatfate/asynctools", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "7b83755829059030a15a9c5ad39cf118366c56d9" - } - }, - "rocksdb": { - "version": "0.3.1", - "vcsRevision": "c565aa88b963ae1e6ca8bf1296d2ff9ce2847295", - "url": "https://github.com/status-im/nim-rocksdb", - "downloadMethod": "git", - "dependencies": [ - "stew", - "tempfile" - ], - "checksums": { - "sha1": "eca1de31383f69137e4830cd89199d55a85e3d1a" - } - }, - "sqlite3_abi": { - "version": "3.34.0", - "vcsRevision": "07039dd887c4e5b57367a16f4be3c18763be1d7b", - "url": "https://github.com/arnetheduck/nim-sqlite3-abi", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "5a08191466f05542e49d36d2672ad14f5d1c0a6a" - } - }, - "confutils": { - "version": "0.1.0", - "vcsRevision": "0435e67832b6bb8dfdf0ddb102903e9d820206d2", - "url": "https://github.com/status-im/nim-confutils", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": 
"1edab14b434aca6ae28e2385982fa60d623c600a" - } - }, - "eth": { - "version": "1.0.0", - "vcsRevision": "779d767b024175a51cf74c79ec7513301ebe2f46", - "url": "https://github.com/status-im/nim-eth", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "stint", - "secp256k1", - "rocksdb", - "chronos", - "chronicles", - "stew", - "nat_traversal", - "metrics", - "sqlite3_abi", - "confutils", - "testutils", - "unittest2" - ], - "checksums": { - "sha1": "4a9a2140196046c7cbaa539d119099c355567c88" - } - }, - "zxcvbn": { - "version": "0.1.0", - "vcsRevision": "4794baca09cd971d9723e0371cb3de27a5f3722b", - "url": "https://github.com/status-im/nim-zxcvbn", - "downloadMethod": "git", - "dependencies": [ - "testutils" - ], - "checksums": { - "sha1": "b8e5b0dd6db2b9d69604c1afd484aea1fd0b22d6" - } - }, - "nimbus_security_resources": { - "version": "0.1.0", - "vcsRevision": "577fbc483f9ffeb715731c9f105f5ac81b3f703a", - "url": "ssh://git@gitlab.com/metacraft-labs/nimbus/nimbus-security-resources.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "6a754ed71c7e8d3ce8f0a91a5d46b46dda7bec09" - } - }, - "presto": { - "version": "0.0.4", - "vcsRevision": "1dba6dd6f466cd4e7b793b0e473c237ce453d82a", - "url": "https://github.com/status-im/nim-presto", - "downloadMethod": "git", - "dependencies": [ - "chronos", - "chronicles", - "stew" - ], - "checksums": { - "sha1": "0d94d20991fe38ae252d2e1feb81044caa7e6f38" - } - }, - "ssz_serialization": { - "version": "0.1.0", - "vcsRevision": "cd500484e054ead951f2d07aeb81c1c8c695db26", - "url": "https://github.com/status-im/nim-ssz-serialization", - "downloadMethod": "git", - "dependencies": [ - "serialization", - "json_serialization", - "stew", - "stint", - "nimcrypto", - "blscurve", - "unittest2" - ], - "checksums": { - "sha1": "f7f024ee895d2212ba71b5ca757e899a62e4b774" - } - }, - "web3": { - "version": "0.0.1", - "vcsRevision": "0012deda7965d8a3f464c29763ef26623b6776f8", - "url": "https://github.com/status-im/nim-web3", - "downloadMethod": "git", - "dependencies": [ - "chronicles", - "chronos", - "eth", - "faststreams", - "json_rpc", - "json_serialization", - "nimcrypto", - "stew", - "stint" - ], - "checksums": { - "sha1": "e89378aacfd71b950b0fc9556a3936fcb56982dc" - } - }, - "dotenv": { - "version": "2.0.1", - "vcsRevision": "4b8613cb4c46331729e88f594c5ae2b727381a57", - "url": "https://github.com/euantorano/dotenv.nim", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "9a9e39544f129e6666935796610da79fbe724510" - } - }, - "yaml": { - "version": "0.14.0", - "vcsRevision": "27642432ca66c97b07730b5a84e9565f5b69be13", - "url": "https://github.com/status-im/NimYAML", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "2ba8d30ede073325688ae899b972974ed6d9341b" - } - }, - "normalize": { - "version": "0.8.0", - "vcsRevision": "30948551be113d809b28bf6131c925caff3af515", - "url": "https://github.com/nitely/nim-normalize", - "downloadMethod": "git", - "dependencies": [ - "unicodedb" - ], - "checksums": { - "sha1": "9ad415f090f6e8e40cbbcc72efd5a925ac1aa725" - } - } - } -} diff --git a/nix/README.md b/nix/README.md index 17825a941c..2f063599c3 100644 --- a/nix/README.md +++ b/nix/README.md @@ -19,11 +19,12 @@ https://github.com/NixOS/nix/issues/4423 It can be also done without even cloning the repo: ```sh -nix build 'github:status-im/nimbus-eth2?submodules=1' +nix build 'github:status-im/nimbus-eth2?submodules=1#' ``` +The trailing `#` is required due to [URI parsing bug in 
Nix](https://github.com/NixOS/nix/issues/6633). ## Running ```sh -nix run 'github:status-im/nimbus-eth2?submodules=1' +nix run 'github:status-im/nimbus-eth2?submodules=1#' ``` diff --git a/nix/default.nix b/nix/default.nix index a84a538e2b..ee9ffeed75 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -5,7 +5,7 @@ # Options: nimbus_light_client, nimbus_validator_client, nimbus_signing_node, all targets ? ["nimbus_beacon_node"], # Options: 0,1,2 - verbosity ? 0, + verbosity ? 1, # Perform 2-stage bootstrap instead of 3-stage to save time. quickAndDirty ? true, # These are the only platforms tested in CI and considered stable. @@ -16,6 +16,10 @@ ], }: +# The 'or' is to handle src fallback to ../. which lack submodules attribue. +assert pkgs.lib.assertMsg ((src.submodules or true) == true) + "Unable to build without submodules. Append '?submodules=1#' to the URI."; + let inherit (pkgs) stdenv lib writeScriptBin callPackage; @@ -31,7 +35,7 @@ in stdenv.mkDerivation rec { fakeGit = writeScriptBin "git" "echo ${version}"; fakeLsbRelease = writeScriptBin "lsb_release" "echo nix"; in - with pkgs; [ fakeGit fakeLsbRelease which cmake ] + with pkgs; [ fakeGit fakeLsbRelease which ] ++ lib.optionals stdenv.isDarwin [ pkgs.darwin.cctools ]; enableParallelBuilding = true; @@ -59,11 +63,7 @@ in stdenv.mkDerivation rec { pushd vendor/nimbus-build-system/vendor/Nim mkdir dist cp -r ${callPackage ./nimble.nix {}} dist/nimble - chmod 777 -R dist/nimble - mkdir -p dist/nimble/dist - cp -r ${callPackage ./sat.nix {}} dist/nimble/dist/sat - cp -r ${callPackage ./checksums.nix {}} dist/checksums # need both - cp -r ${callPackage ./checksums.nix {}} dist/nimble/dist/checksums + cp -r ${callPackage ./checksums.nix {}} dist/checksums cp -r ${callPackage ./csources.nix {}} csources_v2 chmod 777 -R dist/nimble csources_v2 popd diff --git a/nix/nimble.nix b/nix/nimble.nix index 5343aaa813..39c5e0fff7 100644 --- a/nix/nimble.nix +++ b/nix/nimble.nix @@ -6,7 +6,8 @@ let in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "nimble"; + fetchSubmodules = true; rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile; # WARNING: Requires manual updates when Nim compiler version changes. - hash = "sha256-sa0irAZjQRZLduEMBPf7sHlY1FigBJTR/vIH4ihii/w="; + hash = "sha256-Rz48sGUKZEAp+UySla+MlsOfsERekuGKw69Tm11fDz8="; } diff --git a/research/block_sim.nim b/research/block_sim.nim index af9ae5b7e6..0f5af2e552 100644 --- a/research/block_sim.nim +++ b/research/block_sim.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -16,12 +16,10 @@ import confutils, chronicles, eth/db/kvstore_sqlite3, - chronos/timer, taskpools, + chronos, chronos/timer, taskpools, ../tests/testblockutil, - ../beacon_chain/el/eth1_chain, ../beacon_chain/spec/[forks, state_transition], ../beacon_chain/beacon_chain_db, - ../beacon_chain/validators/validator_pool, ../beacon_chain/gossip_processing/[batch_validation, gossip_validation], ../beacon_chain/consensus_object_pools/[blockchain_dag, block_clearance], ./simutils @@ -29,13 +27,16 @@ import from std/random import Rand, gauss, initRand, rand from std/stats import RunningStat from ../beacon_chain/consensus_object_pools/attestation_pool import - AttestationPool, addAttestation, addForkChoice, getAttestationsForBlock, + AttestationPool, addAttestation, addForkChoice, getElectraAttestationsForBlock, init, prune from ../beacon_chain/consensus_object_pools/block_quarantine import Quarantine, init from ../beacon_chain/consensus_object_pools/sync_committee_msg_pool import SyncCommitteeMsgPool, addContribution, addSyncCommitteeMessage, init, produceContribution, produceSyncAggregate, pruneData +from ../beacon_chain/el/eth1_chain import + Eth1Block, Eth1BlockNumber, Eth1BlockTimestamp, Eth1Chain, addBlock, + getBlockProposalData, init from ../beacon_chain/spec/beaconstate import get_beacon_committee, get_beacon_proposer_index, get_committee_count_per_slot, get_committee_indices @@ -51,55 +52,6 @@ type Timers = enum tSyncCommittees = "Produce sync committee actions" tReplay = "Replay all produced blocks" -# TODO The rest of nimbus-eth2 uses only the forked version of these, and in -# general it's better for the validator_duties caller to use the forkedstate -# version, so isolate these here pending refactoring of block_sim to prefer, -# when possible, to also use the forked version. It'll be worth keeping some -# example of the non-forked version because it enables fork bootstrapping. -proc makeSimulationBlock( - cfg: RuntimeConfig, - state: var deneb.HashedBeaconState, - proposer_index: ValidatorIndex, - randao_reveal: ValidatorSig, - eth1_data: Eth1Data, - graffiti: GraffitiBytes, - attestations: seq[phase0.Attestation], - deposits: seq[Deposit], - exits: BeaconBlockValidatorChanges, - sync_aggregate: SyncAggregate, - execution_payload: deneb.ExecutionPayloadForSigning, - bls_to_execution_changes: SignedBLSToExecutionChangeList, - rollback: RollbackHashedProc[deneb.HashedBeaconState], - cache: var StateCache, - # TODO: - # `verificationFlags` is needed only in tests and can be - # removed if we don't use invalid signatures there - verificationFlags: UpdateFlags = {}): Result[deneb.BeaconBlock, cstring] = - ## Create a block for the given state. The latest block applied to it will - ## be used for the parent_root value, and the slot will be take from - ## state.slot meaning process_slots must be called up to the slot for which - ## the block is to be created. - - # To create a block, we'll first apply a partial block to the state, skipping - # some validations. 
- - var blck = partialBeaconBlock( - cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, - attestations, deposits, exits, sync_aggregate, execution_payload, - default(ExecutionRequests)) - - let res = process_block( - cfg, state.data, blck.asSigVerified(), verificationFlags, cache) - - if res.isErr: - rollback(state) - return err(res.error()) - - state.root = hash_tree_root(state.data) - blck.state_root = state.root - - ok(blck) - proc makeSimulationBlock( cfg: RuntimeConfig, state: var electra.HashedBeaconState, @@ -268,31 +220,24 @@ cli do(slots = SLOTS_PER_EPOCH * 7, sig.toValidatorSig()).expect("valid data") attPool.addAttestation( - attestation, [validator_index], sig, data.slot.start_beacon_time) + attestation, [validator_index], attestation.aggregation_bits.len, + -1, sig, data.slot.start_beacon_time) else: - var - data = makeAttestationData( - updatedState, slot, committee_index, bid.root) - committee_bits: BitArray[static(MAX_COMMITTEES_PER_SLOT.int)] - aggregation_bits = ElectraCommitteeValidatorsBits.init(committee.len) - let committeeidx = data.index - aggregation_bits.setBit(index_in_committee) - committee_bits.setBit(committeeidx) - data.index = 0 # obviously, fix in makeAttestationData for Electra + var data = makeAttestationData( + updatedState, slot, committee_index, bid.root) + data.index = 0 # fix in makeAttestationData for Electra let - sig = - get_attestation_signature( - fork, genesis_validators_root, data, - MockPrivKeys[validator_index]) - attestation = electra.Attestation( - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/electra/validator.md#construct-attestation - aggregation_bits: aggregation_bits, - data: data, - committee_bits: committee_bits, + sig = get_attestation_signature( + fork, genesis_validators_root, data, + MockPrivKeys[validator_index]) + attestation = SingleAttestation( + committee_index: committee_index.distinctBase, + attester_index: validator_index.uint64, data: data, signature: sig.toValidatorSig()) attPool.addAttestation( - attestation, [validator_index], sig, data.slot.start_beacon_time) + attestation, [validator_index], committee.len, + index_in_committee, sig, data.slot.start_beacon_time) do: raiseAssert "withUpdatedState failed" @@ -409,9 +354,7 @@ cli do(slots = SLOTS_PER_EPOCH * 7, sync_aggregate = syncCommitteePool[].produceSyncAggregate(dag.head.bid, slot) hashedState = - when T is deneb.SignedBeaconBlock: - addr state.denebData - elif T is electra.SignedBeaconBlock: + when T is electra.SignedBeaconBlock: addr state.electraData elif T is fulu.SignedBeaconBlock: addr state.fuluData @@ -427,12 +370,7 @@ cli do(slots = SLOTS_PER_EPOCH * 7, slot.epoch, privKey).toValidatorSig(), eth1ProposalData.vote, default(GraffitiBytes), - when T is electra.SignedBeaconBlock: - attPool.getElectraAttestationsForBlock(state, cache) - elif T is fulu.SignedBeaconBlock: - attPool.getElectraAttestationsForBlock(state, cache) - else: - attPool.getAttestationsForBlock(state, cache), + attPool.getElectraAttestationsForBlock(state, cache), eth1ProposalData.deposits, BeaconBlockValidatorChanges(), sync_aggregate, @@ -440,8 +378,6 @@ cli do(slots = SLOTS_PER_EPOCH * 7, default(electra.ExecutionPayloadForSigning) elif T is fulu.SignedBeaconBlock: default(fulu.ExecutionPayloadForSigning) - elif T is deneb.SignedBeaconBlock: - default(deneb.ExecutionPayloadForSigning) else: static: doAssert false), static(default(SignedBLSToExecutionChangeList)), @@ -467,28 +403,6 @@ cli do(slots = SLOTS_PER_EPOCH * 7, # HTTP server's state 
function, combine all proposeForkBlock functions into a # single generic function. Until https://github.com/nim-lang/Nim/issues/20811 # is fixed, that generic function must take `blockRatio` as a parameter. - proc proposeDenebBlock(slot: Slot) = - if rand(r, 1.0) > blockRatio: - return - - dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do: - let - newBlock = getNewBlock[deneb.SignedBeaconBlock](updatedState, slot, cache) - added = dag.addHeadBlock(verifier, newBlock) do ( - blckRef: BlockRef, signedBlock: deneb.TrustedSignedBeaconBlock, - epochRef: EpochRef, unrealized: FinalityCheckpoints): - # Callback add to fork choice if valid - attPool.addForkChoice( - epochRef, blckRef, unrealized, signedBlock.message, - blckRef.slot.start_beacon_time) - - dag.updateHead(added[], quarantine[], []) - if dag.needStateCachesAndForkChoicePruning(): - dag.pruneStateCachesDAG() - attPool.prune() - do: - raiseAssert "withUpdatedState failed" - proc proposeElectraBlock(slot: Slot) = if rand(r, 1.0) > blockRatio: return @@ -573,10 +487,9 @@ cli do(slots = SLOTS_PER_EPOCH * 7, if blockRatio > 0.0: withTimer(timers[t]): case dag.cfg.consensusForkAtEpoch(slot.epoch) - of ConsensusFork.Fulu: proposeFuluBlock(slot) - of ConsensusFork.Electra: proposeElectraBlock(slot) - of ConsensusFork.Deneb: proposeDenebBlock(slot) - of ConsensusFork.Phase0 .. ConsensusFork.Capella: + of ConsensusFork.Fulu: proposeFuluBlock(slot) + of ConsensusFork.Electra: proposeElectraBlock(slot) + of ConsensusFork.Phase0 .. ConsensusFork.Deneb: doAssert false if attesterRatio > 0.0: withTimer(timers[tAttest]): @@ -609,4 +522,4 @@ cli do(slots = SLOTS_PER_EPOCH * 7, echo "Done!" - printTimers(dag.headState, attesters, true, timers) + printTimers(dag.headState, attesters, true, timers) \ No newline at end of file diff --git a/research/fakeee.nim b/research/fakeee.nim index ab2cddab3a..d6280a07ec 100644 --- a/research/fakeee.nim +++ b/research/fakeee.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -17,7 +17,7 @@ import chronicles proc setupEngineAPI*(server: RpcServer) = - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#engine_newpayloadv1 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#engine_newpayloadv1 # cannot use `params` as param name. 
see https:#github.com/status-im/nim-json-rpc/issues/128 server.rpc("engine_newPayloadV1") do(payload: ExecutionPayloadV1) -> PayloadStatusV1: info "engine_newPayloadV1", @@ -27,7 +27,7 @@ proc setupEngineAPI*(server: RpcServer) = status: PayloadExecutionStatus.syncing, ) - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#engine_newpayloadv2 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#engine_newpayloadv2 server.rpc("engine_newPayloadV2") do(payload: ExecutionPayloadV2) -> PayloadStatusV1: info "engine_newPayloadV2", payload @@ -35,7 +35,7 @@ proc setupEngineAPI*(server: RpcServer) = status: PayloadExecutionStatus.syncing, ) - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#engine_getpayloadv1 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#engine_getpayloadv1 server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1: info "engine_getPayloadV1", id = payloadId.toHex @@ -45,16 +45,7 @@ proc setupEngineAPI*(server: RpcServer) = msg: "Unknown payload" ) - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#engine_exchangetransitionconfigurationv1 - server.rpc("engine_exchangeTransitionConfigurationV1") do(conf: TransitionConfigurationV1) -> TransitionConfigurationV1: - info "engine_exchangeTransitionConfigurationV1", - ttd = conf.terminalTotalDifficulty, - number = uint64(conf.terminalBlockNumber), - blockHash = conf.terminalBlockHash - - return conf - - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#engine_forkchoiceupdatedv1 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#engine_forkchoiceupdatedv1 server.rpc("engine_forkchoiceUpdatedV1") do( update: ForkchoiceStateV1, payloadAttributes: Opt[PayloadAttributesV1]) -> ForkchoiceUpdatedResponse: @@ -66,7 +57,7 @@ proc setupEngineAPI*(server: RpcServer) = payloadStatus: PayloadStatusV1( status: PayloadExecutionStatus.syncing)) - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#engine_forkchoiceupdatedv2 + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#engine_forkchoiceupdatedv2 server.rpc("engine_forkchoiceUpdatedV2") do( forkchoiceState: ForkchoiceStateV1, payloadAttributes: Opt[PayloadAttributesV2]) -> ForkchoiceUpdatedResponse: info "engine_forkchoiceUpdatedV2", @@ -91,10 +82,10 @@ proc setupEngineAPI*(server: RpcServer) = return BlockObject(number: 1000.Quantity) - server.rpc("eth_chainId") do() -> Quantity: + server.rpc("eth_chainId") do() -> UInt256: info "eth_chainId" - return 1.Quantity + return 1.u256 when isMainModule: let server = newRpcHttpServer( diff --git a/research/simutils.nim b/research/simutils.nim index ed268559a0..fd85ae6a32 100644 --- a/research/simutils.nim +++ b/research/simutils.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -61,7 +61,8 @@ func getSimulationConfig*(): RuntimeConfig {.compileTime.} = cfg.BELLATRIX_FORK_EPOCH = 0.Epoch cfg.CAPELLA_FORK_EPOCH = 0.Epoch cfg.DENEB_FORK_EPOCH = 0.Epoch - cfg.ELECTRA_FORK_EPOCH = 3.Epoch + cfg.ELECTRA_FORK_EPOCH = 0.Epoch + cfg.FULU_FORK_EPOCH = 3.Epoch cfg proc loadGenesis*( @@ -139,15 +140,15 @@ proc loadGenesis*( depositContractState: merkleizer.toDepositContractState) let res = (ref ForkedHashedBeaconState)( - kind: ConsensusFork.Deneb, - denebData: deneb.HashedBeaconState( + kind: ConsensusFork.Electra, + electraData: electra.HashedBeaconState( data: initialize_beacon_state_from_eth1( cfg, ZERO_HASH, 0, deposits, - default(deneb.ExecutionPayloadHeader), {skipBlsValidation}))) + default(electra.ExecutionPayloadHeader), {skipBlsValidation}))) info "Saving genesis file", fileName = genesisFn try: - SSZ.saveFile(genesisFn, res.denebData.data) + SSZ.saveFile(genesisFn, res.electraData.data) except IOError as exc: fatal "Genesis file failed to save", fileName = genesisFn, exc = exc.msg diff --git a/scripts/clean-devnet-dir.sh b/scripts/clean-devnet-dir.sh deleted file mode 100755 index 3dfbf9f8a1..0000000000 --- a/scripts/clean-devnet-dir.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -if [ -z "$1" ]; then - echo "Usage: run-devnet-el-cl-pair.sh " - exit 1 -fi - -if [ ! -d "$1" ]; then - echo "Please supply a valid network metadata directory" - exit 1 -fi - -set -Eeu - -NETWORK=$(cd "$1"; pwd) - -cd $(dirname "$0") - -source ./repo_paths.sh -rm -rf "$(data_dir_for_network)" diff --git a/scripts/execution_genesis.json.template b/scripts/execution_genesis.json.template index bfde370983..bda9859168 100644 --- a/scripts/execution_genesis.json.template +++ b/scripts/execution_genesis.json.template @@ -14,6 +14,23 @@ "londonBlock":0, "shanghaiTime":SHANGHAI_FORK_TIME, "cancunTime":CANCUN_FORK_TIME, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + }, + "osaka": { + "target": 9, + "max": 12, + "baseFeeUpdateFraction": 5007716 + } + }, "pragueTime":PRAGUE_FORK_TIME, "mergeForkBlock":0, "mergeNetsplitBlock":0, diff --git a/scripts/geth_binaries.sh b/scripts/geth_binaries.sh index cf5df3201d..c1f8ec6955 100644 --- a/scripts/geth_binaries.sh +++ b/scripts/geth_binaries.sh @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -16,12 +16,10 @@ source "${SCRIPTS_DIR}/bash_utils.sh" : ${CURL_BINARY:="curl"} : ${STABLE_GETH_BINARY:="${BUILD_DIR}/downloads/geth$EXE_EXTENSION"} -: ${GETH_CAPELLA_BINARY:="$STABLE_GETH_BINARY"} -: ${GETH_DENEB_BINARY:="$STABLE_GETH_BINARY"} download_geth_stable() { if [[ ! 
-e "${STABLE_GETH_BINARY}" ]]; then - GETH_VERSION="1.14.11-f3c696fa" # https://geth.ethereum.org/downloads + GETH_VERSION="1.15.6-19d2b4c8" # https://geth.ethereum.org/downloads GETH_URL="https://gethstore.blob.core.windows.net/builds/" case "${OS}-${ARCH}" in @@ -106,12 +104,4 @@ download_status_geth_binary() { fi } -download_geth_capella() { - download_geth_stable -} - -download_geth_deneb() { - download_geth_stable -} - fi diff --git a/scripts/launch_local_testnet.sh b/scripts/launch_local_testnet.sh index 99d2e0e7ee..c423eba69b 100755 --- a/scripts/launch_local_testnet.sh +++ b/scripts/launch_local_testnet.sh @@ -1,16 +1,13 @@ #!/usr/bin/env bash -# Copyright (c) 2020-2024 Status Research & Development GmbH. Licensed under +# Copyright (c) 2020-2025 Status Research & Development GmbH. Licensed under # either of: # - Apache License, version 2.0 # - MIT license # at your option. This file may not be copied, modified, or distributed except # according to those terms. -# Mostly a duplication of "tests/simulation/{start.sh,run_node.sh}", but with a focus on -# replicating testnets as closely as possible, which means following the Docker execution labyrinth. - -set -euo pipefail +set -Eeuo pipefail SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")" cd "$SCRIPTS_DIR/.." @@ -446,13 +443,8 @@ LAST_SIGNER_NODE_IDX=$(( SIGNER_NODES - 1 )) if [[ "${RUN_GETH}" == "1" ]]; then source "${SCRIPTS_DIR}/geth_binaries.sh" - if [[ $DENEB_FORK_EPOCH -lt $STOP_AT_EPOCH ]]; then - download_geth_deneb - GETH_BINARY="$GETH_DENEB_BINARY" - else - download_geth_capella - GETH_BINARY="$GETH_CAPELLA_BINARY" - fi + download_geth_stable + GETH_BINARY="$STABLE_GETH_BINARY" source ./scripts/geth_vars.sh fi @@ -813,7 +805,7 @@ if [[ "$REUSE_EXISTING_DATA_DIR" == "0" ]]; then --out-secrets-dir="${SECRETS_DIR}" \ --out-deposits-file="${DEPOSITS_FILE}" \ --threshold=${REMOTE_SIGNER_THRESHOLD} \ - --remote-validators-count=${REMOTE_VALIDATORS_COUNT} \ + --remote-validators-count="${REMOTE_VALIDATORS_COUNT}" \ ${REMOTE_URLS} fi @@ -881,7 +873,7 @@ fi jq -r '.hash' "$EXECUTION_GENESIS_BLOCK_JSON" > "${DATA_DIR}/deposit_contract_block_hash.txt" -for NUM_NODE in $(seq 1 $NUM_NODES); do +for NUM_NODE in $(seq 1 "${NUM_NODES}"); do NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}" rm -rf "${NODE_DATA_DIR}" scripts/makedir.sh "${NODE_DATA_DIR}" 2>&1 @@ -901,7 +893,7 @@ done --genesis-time=$GENESIS_TIME \ --capella-fork-epoch=0 \ --deneb-fork-epoch=$DENEB_FORK_EPOCH \ - --electra-fork-epoch=$ELECTRA_FORK_EPOCH \ + --electra-fork-epoch="${ELECTRA_FORK_EPOCH}" \ --execution-genesis-block="$EXECUTION_GENESIS_BLOCK_JSON" DIRECTPEER_ENR=$( @@ -922,7 +914,7 @@ DIRECTPEER_ENR=$( cp "$SCRIPTS_DIR/$CONST_PRESET-non-overriden-config.yaml" "$RUNTIME_CONFIG_FILE" # TODO the runtime config file should be used during deposit generation as well! -echo Wrote $RUNTIME_CONFIG_FILE: +echo Wrote "${RUNTIME_CONFIG_FILE}": tee -a "$RUNTIME_CONFIG_FILE" < "$DATA_DIR/log_deposit_maker.txt" 2>&1 & -for NUM_NODE in $(seq 1 $NUM_NODES); do +for NUM_NODE in $(seq 1 "${NUM_NODES}"); do # Copy validators to individual nodes. # The first $NODES_WITH_VALIDATORS nodes split them equally between them, # after skipping the first $USER_VALIDATORS. 
@@ -1080,19 +1072,19 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}" CONTAINER_NODE_DATA_DIR="${CONTAINER_DATA_DIR}/node${NUM_NODE}" VALIDATOR_DATA_DIR="${DATA_DIR}/validator${NUM_NODE}" - if [[ ${NUM_NODE} == ${BOOTSTRAP_NODE} ]]; then + if [[ ${NUM_NODE} == "${BOOTSTRAP_NODE}" ]]; then # Due to star topology, the bootstrap node must relay all attestations, # even if it itself is not interested. --subscribe-all-subnets could be # removed by switching to a fully-connected topology. BOOTSTRAP_ARG="--netkey-file=${CONTAINER_BOOTSTRAP_NETWORK_KEYFILE} --insecure-netkey-password=true --subscribe-all-subnets --direct-peer=$DIRECTPEER_ENR" - elif [[ ${NUM_NODE} == ${DIRECTPEER_NODE} ]]; then + elif [[ ${NUM_NODE} == "${DIRECTPEER_NODE}" ]]; then # Start a node using the Direct Peer functionality instead of regular bootstraping BOOTSTRAP_ARG="--netkey-file=${DIRECTPEER_NETWORK_KEYFILE} --direct-peer=$(cat $CONTAINER_BOOTSTRAP_ENR) --insecure-netkey-password=true" else BOOTSTRAP_ARG="--bootstrap-file=${CONTAINER_BOOTSTRAP_ENR}" fi - if [[ ${NUM_NODE} != ${BOOTSTRAP_NODE} ]]; then + if [[ ${NUM_NODE} != "${BOOTSTRAP_NODE}" ]]; then if [[ "${CONST_PRESET}" == "minimal" ]]; then # The fast epoch and slot times in the minimal config might cause the # mesh to break down due to re-subscriptions happening within the prune @@ -1157,7 +1149,7 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do fi done - ./build/${LH_BINARY} vc \ + ./build/"${LH_BINARY}" vc \ --debug-level "debug" \ --logfile-max-number 0 \ --log-format "JSON" \ @@ -1171,7 +1163,7 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do else ./build/nimbus_validator_client \ --log-level="${LOG_LEVEL}" \ - ${STOP_AT_EPOCH_FLAG} \ + "${STOP_AT_EPOCH_FLAG}" \ --data-dir="${VALIDATOR_DATA_DIR}" \ --metrics \ --metrics-port=$(( BASE_VC_METRICS_PORT + NUM_NODE - 1 )) \ @@ -1257,7 +1249,7 @@ if [ "$LC_NODES" -ge "1" ]; then --trusted-block-root="${LC_TRUSTED_BLOCK_ROOT}" \ --jwt-secret="${JWT_FILE}" \ "${WEB3_ARG[@]}" \ - ${STOP_AT_EPOCH_FLAG} \ + "${STOP_AT_EPOCH_FLAG}" \ &> "${DATA_DIR}/logs/nimbus_light_client.${NUM_LC}.jsonl" & PID=$! 
PIDS_TO_WAIT="${PIDS_TO_WAIT},${PID}" diff --git a/scripts/mainnet-non-overriden-config.yaml b/scripts/mainnet-non-overriden-config.yaml index 07dd383cd8..b6ccbfeed8 100644 --- a/scripts/mainnet-non-overriden-config.yaml +++ b/scripts/mainnet-non-overriden-config.yaml @@ -53,13 +53,16 @@ BELLATRIX_FORK_VERSION: 0x02000000 # (overriden in launch_local_testnet.sh) BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -# (overriden in launch_local_testnet.sh) CAPELLA_FORK_EPOCH: 18446744073709551615 +# (overriden in launch_local_testnet.sh) CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 -# (overriden in launch_local_testnet.sh) DENEB_FORK_EPOCH: 18446744073709551615 +# (overriden in launch_local_testnet.sh) DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC # Electra ELECTRA_FORK_VERSION: 0x05000000 -# (overriden in launch_local_testnet.sh) ELECTRA_FORK_EPOCH: 18446744073709551615 +# (overriden in launch_local_testnet.sh) ELECTRA_FORK_EPOCH: 18446744073709551615 # temporary stub +# Fulu +FULU_FORK_VERSION: 0x06000000 +# (overriden in launch_local_testnet.sh) FULU_FORK_EPOCH: 18446744073709551615 # temporary stub # Time parameters # --------------------------------------------------------------- @@ -87,7 +90,6 @@ EJECTION_BALANCE: 16000000000 MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 - # [New in Deneb:EIP7514] 2**3 (= 8) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 @@ -95,7 +97,6 @@ MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # --------------------------------------------------------------- # 40% PROPOSER_SCORE_BOOST: 40 - # 20% REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% @@ -103,6 +104,7 @@ REORG_PARENT_WEIGHT_THRESHOLD: 160 # `2` epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + # Deposit contract # --------------------------------------------------------------- # Ethereum PoW Mainnet @@ -110,18 +112,17 @@ DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 # (overriden in launch_local_testnet.sh) DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s @@ -142,9 +143,35 @@ ATTESTATION_SUBNET_PREFIX_BITS: 6 # Deneb # `2**7` (=128) MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 # `2**12` (= 4096 epochs, ~18 days) MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Fulu +NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 
+SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MAX_BLOBS_PER_BLOCK_FULU: 12 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 diff --git a/scripts/minimal-non-overriden-config.yaml b/scripts/minimal-non-overriden-config.yaml index 8acd453784..b264220897 100644 --- a/scripts/minimal-non-overriden-config.yaml +++ b/scripts/minimal-non-overriden-config.yaml @@ -59,6 +59,9 @@ DENEB_FORK_VERSION: 0x04000001 # Electra ELECTRA_FORK_VERSION: 0x05000001 # (overriden in launch_local_testnet.sh) ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x06000001 +# (overriden in launch_local_testnet.sh) FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -82,19 +85,18 @@ INACTIVITY_SCORE_BIAS: 4 INACTIVITY_SCORE_RECOVERY_RATE: 16 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 +# [customized] more easily demonstrate the difference between this value and the activation churn limit +MIN_PER_EPOCH_CHURN_LIMIT: 2 # [customized] scale queue churn at much lower validator counts for testing CHURN_LIMIT_QUOTIENT: 32 - # [New in Deneb:EIP7514] [customized] MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4 + # Fork choice # --------------------------------------------------------------- # 40% PROPOSER_SCORE_BOOST: 40 - # 20% REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% @@ -102,6 +104,7 @@ REORG_PARENT_WEIGHT_THRESHOLD: 160 # `2` epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + # Deposit contract # --------------------------------------------------------------- # Ethereum Goerli testnet @@ -110,18 +113,17 @@ DEPOSIT_NETWORK_ID: 5 # Configured on a per testnet basis # (overriden in launch_local_testnet.sh) DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 + # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s @@ -142,9 +144,35 @@ ATTESTATION_SUBNET_PREFIX_BITS: 6 # Deneb # `2**7` (=128) MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 # `2**12` (= 4096 epochs, ~18 days) MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 + +# Electra +# [customized] 2**6 * 10**9 (= 64,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000 +# [customized] 2**7 * 10**9 (= 128,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 128000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Fulu +NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +VALIDATOR_CUSTODY_REQUIREMENT: 8 +BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +MAX_BLOBS_PER_BLOCK_FULU: 12 
+MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 diff --git a/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service b/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service index 9dbc5b554e..452a34c20b 100644 --- a/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service +++ b/scripts/package_src/nimbus_beacon_node/image/lib/systemd/system/nimbus_beacon_node.service @@ -24,7 +24,7 @@ After=network-online.target WantedBy=multi-user.target [Service] -# Network - use `prater` to connect to the prater testnet +# Network - use `hoodi` to connect to the hoodi testnet Environment=NETWORK=mainnet # You need to have have access to an execution client - by default, we assume @@ -38,7 +38,7 @@ Environment=JWT_SECRET=/tmp/jwtsecret Environment=DATA_DIR_PREFIX=/var/lib/nimbus # Default ports - if you want to run multiple instances of nimbus, for example -# to run both prater and mainnet, separate ports must be used +# to run both hoodi and mainnet, separate ports must be used Environment=TCP_PORT=9000 Environment=UDP_PORT=9000 Environment=REST_PORT=5052 diff --git a/scripts/package_src/nimbus_validator_client/image/lib/systemd/system/nimbus_validator_client.service b/scripts/package_src/nimbus_validator_client/image/lib/systemd/system/nimbus_validator_client.service index a336e56d01..d517db91f3 100644 --- a/scripts/package_src/nimbus_validator_client/image/lib/systemd/system/nimbus_validator_client.service +++ b/scripts/package_src/nimbus_validator_client/image/lib/systemd/system/nimbus_validator_client.service @@ -33,7 +33,7 @@ Environment=BEACON_NODE=http://127.0.0.1:5052 Environment=DATA_DIR_PREFIX=/var/lib/nimbus # Default ports - if you want to run multiple instances of nimbus, for example -# to run both prater and mainnet, separate ports must be used +# to run both hoodi and mainnet, separate ports must be used Environment=METRICS_PORT=8108 # Interaction and monitoring diff --git a/scripts/repo_paths.sh b/scripts/repo_paths.sh deleted file mode 100644 index ccd817de7a..0000000000 --- a/scripts/repo_paths.sh +++ /dev/null @@ -1,24 +0,0 @@ -if [ -z "${REPO_PATHS_SOURCED:-}" ]; then -REPO_PATHS_SOURCED=1 - -SCRIPTS_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) -BUILD_DIR=$(cd "$SCRIPTS_DIR/../build" &> /dev/null && pwd) - -data_dir_for_network() { - NETWORK_ID=$(cat "$NETWORK/genesis.json" | jq '.config.chainId') - echo "$BUILD_DIR/data/$NETWORK_ID" -} - -create_data_dir_for_network() { - NETWORK_DIR=$(data_dir_for_network) - mkdir -p "$NETWORK_DIR" - echo "$NETWORK_DIR" -} - -create_jwt_token() { - if [ ! -f "$1" ]; then - openssl rand -hex 32 | tr -d "\n" > "$1" - fi -} - -fi diff --git a/scripts/run-geth-in-devnet.sh b/scripts/run-geth-in-devnet.sh deleted file mode 100755 index 5e61b9d01d..0000000000 --- a/scripts/run-geth-in-devnet.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash -# Via Adrian Sutton - -if [ -z "$1" ]; then - echo "Usage: run-geth-el.sh " - exit 1 -fi - -if [ ! 
-d "$1" ]; then - echo "Please supply a valid network metadata directory" - exit 1 -fi - -set -Eeu - -NETWORK=$(cd "$1"; pwd) - -cd $(dirname "$0") - -source geth_binaries.sh -source repo_paths.sh - -download_geth_capella - -: ${GETH_AUTH_RPC_PORT:=18550} -: ${GETH_WS_PORT:=18551} - -DATA_DIR="$(create_data_dir_for_network "$NETWORK")" - -JWT_TOKEN="$DATA_DIR/jwt-token" -create_jwt_token "$JWT_TOKEN" - -NETWORK_ID=$(cat "$NETWORK/genesis.json" | jq '.config.chainId') - -EXECUTION_BOOTNODES="" -if [[ -f "$NETWORK/el_bootnode.txt" ]]; then - EXECUTION_BOOTNODES+=$(awk '{print $1}' "$NETWORK/el_bootnode.txt" "$NETWORK/el_bootnode.txt" | paste -s -d, -) -fi - -if [[ -f "$NETWORK/el_bootnodes.txt" ]]; then - EXECUTION_BOOTNODES+=$(awk '{print $1}' "$NETWORK/el_bootnodes.txt" "$NETWORK/el_bootnodes.txt" | paste -s -d, -) -fi - -if [[ -f "$NETWORK/bootnodes.txt" ]]; then - EXECUTION_BOOTNODES+=$(awk '{print $1}' "$NETWORK/bootnodes.txt" "$NETWORK/bootnodes.txt" | paste -s -d, -) -fi - -GETH_DATA_DIR="$DATA_DIR/geth" -EXECUTION_GENESIS_JSON="${NETWORK}/genesis.json" - -set -x - -if [[ ! -d "$GETH_DATA_DIR/geth" ]]; then - # Initialize the genesis - $GETH_CAPELLA_BINARY --http --ws -http.api "engine" --datadir "${GETH_DATA_DIR}" init "${EXECUTION_GENESIS_JSON}" -fi - -echo "Logging to $DATA_DIR/geth_output.log" - -$GETH_CAPELLA_BINARY \ - --authrpc.port ${GETH_AUTH_RPC_PORT} \ - --authrpc.jwtsecret "$JWT_TOKEN" \ - --allow-insecure-unlock \ - --datadir "${GETH_DATA_DIR}" \ - --bootnodes "${EXECUTION_BOOTNODES}" \ - --port 30308 \ - --password "" \ - --metrics \ - --syncmode snap \ - --networkid $NETWORK_ID 2>&1 | tee "$DATA_DIR/geth_output.log" diff --git a/scripts/run-nimbus-in-devnet.sh b/scripts/run-nimbus-in-devnet.sh deleted file mode 100755 index 6a86b061e0..0000000000 --- a/scripts/run-nimbus-in-devnet.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -if [ -z "$1" ]; then - echo "Usage: run-nimbus-eth2-in-withdrawal-testnet.sh " - exit 1 -fi - -if [ ! -d "$1" ]; then - echo "Please supply a valid network metadata directory" - exit 1 -fi - -set -Eeu - -NETWORK=$(cd "$1"; pwd) - -cd $(dirname "$0") - -source repo_paths.sh - -DATA_DIR="$(create_data_dir_for_network "$NETWORK")" - -JWT_TOKEN="$DATA_DIR/jwt-token" -create_jwt_token "$JWT_TOKEN" - -"$BUILD_DIR/nimbus_beacon_node" \ - --non-interactive \ - --udp-port=19000 \ - --tcp-port=19000 \ - --network="$NETWORK" \ - --log-level=DEBUG \ - --data-dir="$DATA_DIR/nimbus_bn" \ - --web3-url=http://localhost:18550/ \ - --rest:on \ - --rest-port=15052 \ - --metrics=on \ - --metrics-port=18008 \ - --doppelganger-detection=no \ - --jwt-secret="$JWT_TOKEN" diff --git a/scripts/signers/web3signer.sh b/scripts/signers/web3signer.sh index 4fa1dce4f3..5d8723f2ae 100755 --- a/scripts/signers/web3signer.sh +++ b/scripts/signers/web3signer.sh @@ -1,6 +1,6 @@ #!/usr/bin/bash -# Copyright (c) 2023 Status Research & Development GmbH. +# Copyright (c) 2023-2025 Status Research & Development GmbH. # Licensed under either of: # - Apache License, version 2.0 # - MIT license @@ -12,7 +12,7 @@ if ! command javac > /dev/null || ! javac -version > /dev/null; then # Instead, macOS ships with a stub executable that displays a message that # Java is not installed (javac -version exits with an error code 1). 
# If the user is running under these default settings, but a homebrew - # installation is disovered, we are happy to use it just in this script: + # installation is discovered, we are happy to use it just in this script: if [[ -d /opt/homebrew/opt/openjdk/bin ]]; then export PATH="/opt/homebrew/opt/openjdk/bin:$PATH" fi @@ -44,4 +44,4 @@ done --keystores-path="${KEYSTORES_DIR}" \ --network="${RUNTIME_CONFIG_FILE}" &> "${DATA_DIR}/logs/web3signer.${WEB3SIGNER_NODE_IDX}.log" & -echo $! > "${DATA_DIR}/pids/web3signer.${WEB3SIGNER_NODE_IDX}" +echo $! > "${DATA_DIR}/pids/web3signer.${WEB3SIGNER_NODE_IDX}" \ No newline at end of file diff --git a/tests/all_tests.nim b/tests/all_tests.nim index c9df26277b..b3f86ada9b 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -27,7 +27,6 @@ import # Unit test ./test_discovery, ./test_engine_api_conversions, ./test_engine_authentication, - ./test_eip7594_helpers, ./test_el_manager, ./test_el_conf, ./test_eth2_ssz_serialization, @@ -44,6 +43,7 @@ import # Unit test ./test_message_signatures, ./test_network_metadata, ./test_peer_pool, + ./test_peerdas_helpers, ./test_remote_keystore, ./test_rest_json_serialization, ./test_serialization, @@ -62,9 +62,8 @@ import # Unit test ./slashing_protection/test_slashing_protection_db, ./test_validator_client, ./test_beacon_validators, - ./test_beacon_chain_file - -when not defined(windows): - import ./test_keymanager_api + ./test_beacon_chain_file, + ./test_mev_calls, + ./test_keymanager_api # currently has to run after test_remote_keystore summarizeLongTests("AllTests") diff --git a/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim b/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim index 99c4d67dd2..a37968c1dc 100644 --- a/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim +++ b/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -23,7 +23,7 @@ import # Test utilities ../../testutil, ../../testblockutil -# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44 +# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44 proc compute_aggregate_sync_committee_signature( cfg: RuntimeConfig, forked: ForkedHashedBeaconState, diff --git a/tests/consensus_spec/altair/test_fixture_operations.nim b/tests/consensus_spec/altair/test_fixture_operations.nim index 4bd39a28ee..74031d8ba1 100644 --- a/tests/consensus_spec/altair/test_fixture_operations.nim +++ b/tests/consensus_spec/altair/test_fixture_operations.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -149,9 +149,9 @@ suite baseDescription & "Sync Aggregate " & preset(): preState: var altair.BeaconState, syncAggregate: SyncAggregate): Result[void, cstring] = var cache: StateCache - doAssert (? process_sync_aggregate( + discard ? process_sync_aggregate( preState, syncAggregate, get_total_active_balance(preState, cache), - {}, cache)) > 0.Gwei + {}, cache) ok() for path in walkTests(OpSyncAggregateDir): diff --git a/tests/consensus_spec/bellatrix/test_fixture_operations.nim b/tests/consensus_spec/bellatrix/test_fixture_operations.nim index f4187c295f..b2cfb33ff6 100644 --- a/tests/consensus_spec/bellatrix/test_fixture_operations.nim +++ b/tests/consensus_spec/bellatrix/test_fixture_operations.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -20,11 +20,11 @@ import ../fixtures_utils, ../os_ops, ../../helpers/debug_state -from std/sequtils import mapIt, toSeq +from std/sequtils import anyIt, mapIt, toSeq from std/strutils import contains from ../../../beacon_chain/spec/beaconstate import get_base_reward_per_increment, get_state_exit_queue_info, - get_total_active_balance, process_attestation + get_total_active_balance, latest_block_root, process_attestation const OpDir = SszTestsDir/const_preset/"bellatrix"/"operations" @@ -110,9 +110,12 @@ suite baseDescription & "Attester Slashing " & preset(): applyAttesterSlashing, path) suite baseDescription & "Block Header " & preset(): - func applyBlockHeader( + proc applyBlockHeader( preState: var bellatrix.BeaconState, blck: bellatrix.BeaconBlock): Result[void, cstring] = + if blck.is_execution_block: + check blck.body.execution_payload.block_hash == + blck.compute_execution_block_hash() var cache: StateCache process_block_header(preState, blck, {}, cache) @@ -144,6 +147,13 @@ suite baseDescription & "Execution Payload " & preset(): let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") + if payloadValid and body.is_execution_block and + not body.execution_payload.transactions.anyIt(it.len == 0): + let expectedOk = (path != "incorrect_block_hash") + check expectedOk == (body.execution_payload.block_hash == + body.compute_execution_block_hash( + preState.latest_block_root( + assignClone(preState)[].hash_tree_root()))) func executePayload(_: bellatrix.ExecutionPayload): bool = payloadValid process_execution_payload( preState, body.execution_payload, executePayload) @@ -174,9 +184,9 @@ suite baseDescription & "Sync Aggregate " & preset(): preState: var bellatrix.BeaconState, syncAggregate: SyncAggregate): Result[void, cstring] = var cache: StateCache - doAssert (? process_sync_aggregate( + discard ? process_sync_aggregate( preState, syncAggregate, get_total_active_balance(preState, cache), - {}, cache)) > 0.Gwei + {}, cache) ok() for path in walkTests(OpSyncAggregateDir): @@ -199,4 +209,4 @@ suite baseDescription & "Voluntary Exit " & preset(): for path in walkTests(OpVoluntaryExitDir): runTest[SignedVoluntaryExit, typeof applyVoluntaryExit]( OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit", - applyVoluntaryExit, path) \ No newline at end of file + applyVoluntaryExit, path) diff --git a/tests/consensus_spec/capella/test_fixture_operations.nim b/tests/consensus_spec/capella/test_fixture_operations.nim index 47f9b751d0..c5a0ec17ed 100644 --- a/tests/consensus_spec/capella/test_fixture_operations.nim +++ b/tests/consensus_spec/capella/test_fixture_operations.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -20,11 +20,11 @@ import ../fixtures_utils, ../os_ops, ../../helpers/debug_state -from std/sequtils import mapIt, toSeq +from std/sequtils import anyIt, mapIt, toSeq from std/strutils import contains from ../../../beacon_chain/spec/beaconstate import get_base_reward_per_increment, get_state_exit_queue_info, - get_total_active_balance, process_attestation + get_total_active_balance, latest_block_root, process_attestation const OpDir = SszTestsDir/const_preset/"capella"/"operations" @@ -114,9 +114,12 @@ suite baseDescription & "Attester Slashing " & preset(): applyAttesterSlashing, path) suite baseDescription & "Block Header " & preset(): - func applyBlockHeader( + proc applyBlockHeader( preState: var capella.BeaconState, blck: capella.BeaconBlock): Result[void, cstring] = + if blck.is_execution_block: + check blck.body.execution_payload.block_hash == + blck.compute_execution_block_hash() var cache: StateCache process_block_header(preState, blck, {}, cache) @@ -161,6 +164,13 @@ suite baseDescription & "Execution Payload " & preset(): let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") + if payloadValid and body.is_execution_block and + not body.execution_payload.transactions.anyIt(it.len == 0): + let expectedOk = (path != "incorrect_block_hash") + check expectedOk == (body.execution_payload.block_hash == + body.compute_execution_block_hash( + preState.latest_block_root( + assignClone(preState)[].hash_tree_root()))) func executePayload(_: capella.ExecutionPayload): bool = payloadValid process_execution_payload( preState, body.execution_payload, executePayload) @@ -191,9 +201,9 @@ suite baseDescription & "Sync Aggregate " & preset(): preState: var capella.BeaconState, syncAggregate: SyncAggregate): Result[void, cstring] = var cache: StateCache - doAssert (? process_sync_aggregate( + discard ? process_sync_aggregate( preState, syncAggregate, get_total_active_balance(preState, cache), - {}, cache)) > 0.Gwei + {}, cache) ok() for path in walkTests(OpSyncAggregateDir): @@ -227,4 +237,4 @@ suite baseDescription & "Withdrawals " & preset(): for path in walkTests(OpWithdrawalsDir): runTest[capella.ExecutionPayload, typeof applyWithdrawals]( OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", - applyWithdrawals, path) \ No newline at end of file + applyWithdrawals, path) diff --git a/tests/consensus_spec/consensus_spec_tests_preset.nim b/tests/consensus_spec/consensus_spec_tests_preset.nim index 1aa23a82e6..a71079906a 100644 --- a/tests/consensus_spec/consensus_spec_tests_preset.nim +++ b/tests/consensus_spec/consensus_spec_tests_preset.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -17,10 +17,11 @@ import ./bellatrix/all_bellatrix_fixtures, ./capella/all_capella_fixtures, ./deneb/all_deneb_fixtures, - ./eip7594/all_eip7594_fixtures, ./electra/all_electra_fixtures, + ./fulu/all_fulu_fixtures, ./test_fixture_fork, ./test_fixture_fork_choice, + ./test_fixture_light_client_data_collection, ./test_fixture_light_client_single_merkle_proof, ./test_fixture_light_client_sync, ./test_fixture_light_client_update_ranking, diff --git a/tests/consensus_spec/deneb/test_fixture_operations.nim b/tests/consensus_spec/deneb/test_fixture_operations.nim index 56aeb97684..f98c6e68ad 100644 --- a/tests/consensus_spec/deneb/test_fixture_operations.nim +++ b/tests/consensus_spec/deneb/test_fixture_operations.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -20,11 +20,11 @@ import ../fixtures_utils, ../os_ops, ../../helpers/debug_state -from std/sequtils import mapIt, toSeq +from std/sequtils import anyIt, mapIt, toSeq from std/strutils import contains from ../../../beacon_chain/spec/beaconstate import get_base_reward_per_increment, get_state_exit_queue_info, - get_total_active_balance, process_attestation + get_total_active_balance, latest_block_root, process_attestation const OpDir = SszTestsDir/const_preset/"deneb"/"operations" @@ -114,9 +114,12 @@ suite baseDescription & "Attester Slashing " & preset(): applyAttesterSlashing, path) suite baseDescription & "Block Header " & preset(): - func applyBlockHeader( + proc applyBlockHeader( preState: var deneb.BeaconState, blck: deneb.BeaconBlock): Result[void, cstring] = + if blck.is_execution_block: + check blck.body.execution_payload.block_hash == + blck.compute_execution_block_hash() var cache: StateCache process_block_header(preState, blck, {}, cache) @@ -164,8 +167,16 @@ suite baseDescription & "Execution Payload " & preset(): let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") + if payloadValid and body.is_execution_block and + not body.execution_payload.transactions.anyIt(it.len == 0): + let expectedOk = (path != "incorrect_block_hash") + check expectedOk == (body.execution_payload.block_hash == + body.compute_execution_block_hash( + preState.latest_block_root( + assignClone(preState)[].hash_tree_root()))) func executePayload(_: deneb.ExecutionPayload): bool = payloadValid - process_execution_payload(preState, body, executePayload) + process_execution_payload( + defaultRuntimeConfig, preState, body, executePayload) for path in walkTests(OpExecutionPayloadDir): let applyExecutionPayload = makeApplyExecutionPayloadCb(path) @@ -193,9 +204,9 @@ suite baseDescription & "Sync Aggregate " & preset(): preState: var deneb.BeaconState, syncAggregate: SyncAggregate): Result[void, cstring] = var cache: StateCache - doAssert (? process_sync_aggregate( + discard ? 
process_sync_aggregate( preState, syncAggregate, get_total_active_balance(preState, cache), - {}, cache)) > 0.Gwei + {}, cache) ok() for path in walkTests(OpSyncAggregateDir): @@ -229,4 +240,4 @@ suite baseDescription & "Withdrawals " & preset(): for path in walkTests(OpWithdrawalsDir): runTest[deneb.ExecutionPayload, typeof applyWithdrawals]( OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", - applyWithdrawals, path) \ No newline at end of file + applyWithdrawals, path) diff --git a/tests/consensus_spec/electra/test_fixture_operations.nim b/tests/consensus_spec/electra/test_fixture_operations.nim index c739715aca..109dbf9394 100644 --- a/tests/consensus_spec/electra/test_fixture_operations.nim +++ b/tests/consensus_spec/electra/test_fixture_operations.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -20,11 +20,11 @@ import ../fixtures_utils, ../os_ops, ../../helpers/debug_state -from std/sequtils import mapIt, toSeq +from std/sequtils import anyIt, mapIt, toSeq from std/strutils import contains from ../../../beacon_chain/spec/beaconstate import get_base_reward_per_increment, get_state_exit_queue_info, - get_total_active_balance, process_attestation + get_total_active_balance, latest_block_root, process_attestation const OpDir = SszTestsDir/const_preset/"electra"/"operations" @@ -121,9 +121,12 @@ suite baseDescription & "Attester Slashing " & preset(): applyAttesterSlashing, path) suite baseDescription & "Block Header " & preset(): - func applyBlockHeader( + proc applyBlockHeader( preState: var electra.BeaconState, blck: electra.BeaconBlock): Result[void, cstring] = + if blck.is_execution_block: + check blck.body.execution_payload.block_hash == + blck.compute_execution_block_hash() var cache: StateCache process_block_header(preState, blck, {}, cache) @@ -199,8 +202,16 @@ suite baseDescription & "Execution Payload " & preset(): let payloadValid = os_ops.readFile( OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" ).contains("execution_valid: true") + if payloadValid and body.is_execution_block and + not body.execution_payload.transactions.anyIt(it.len == 0): + let expectedOk = (path != "incorrect_block_hash") + check expectedOk == (body.execution_payload.block_hash == + body.compute_execution_block_hash( + preState.latest_block_root( + assignClone(preState)[].hash_tree_root()))) func executePayload(_: electra.ExecutionPayload): bool = payloadValid - process_execution_payload(preState, body, executePayload) + process_execution_payload( + defaultRuntimeConfig, preState, body, executePayload) for path in walkTests(OpExecutionPayloadDir): let applyExecutionPayload = makeApplyExecutionPayloadCb(path) @@ -244,9 +255,9 @@ suite baseDescription & "Sync Aggregate " & preset(): preState: var electra.BeaconState, syncAggregate: SyncAggregate): Result[void, cstring] = var cache: StateCache - doAssert (? process_sync_aggregate( + discard ? 
process_sync_aggregate( preState, syncAggregate, get_total_active_balance(preState, cache), - {}, cache)) > 0.Gwei + {}, cache) ok() for path in walkTests(OpSyncAggregateDir): @@ -280,4 +291,4 @@ suite baseDescription & "Withdrawals " & preset(): for path in walkTests(OpWithdrawalsDir): runTest[electra.ExecutionPayload, typeof applyWithdrawals]( OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", - applyWithdrawals, path) \ No newline at end of file + applyWithdrawals, path) diff --git a/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim index 14e75ed943..9a3b92d83c 100644 --- a/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/electra/test_fixture_ssz_consensus_objects.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -175,6 +175,7 @@ suite "EF - Electra - SSZ consensus objects " & preset(): checkSSZ(SignedContributionAndProof, path, hash) of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash) of "SigningData": checkSSZ(SigningData, path, hash) + of "SingleAttestation": checkSSZ(SingleAttestation, path, hash) of "SyncAggregate": checkSSZ(SyncAggregate, path, hash) of "SyncAggregatorSelectionData": checkSSZ(SyncAggregatorSelectionData, path, hash) @@ -187,4 +188,4 @@ suite "EF - Electra - SSZ consensus objects " & preset(): of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash) of "WithdrawalRequest": checkSSZ(WithdrawalRequest, path, hash) else: - raise newException(ValueError, "Unsupported test: " & sszType) + raise newException(ValueError, "Unsupported test: " & sszType) \ No newline at end of file diff --git a/tests/consensus_spec/fixtures_utils.nim b/tests/consensus_spec/fixtures_utils.nim index fbd999568e..b8a4602710 100644 --- a/tests/consensus_spec/fixtures_utils.nim +++ b/tests/consensus_spec/fixtures_utils.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -9,18 +9,18 @@ import # Standard library - std/[strutils, typetraits], + std/[sequtils, strutils, typetraits], # Internals ./os_ops, ../../beacon_chain/spec/datatypes/[phase0, altair, bellatrix], ../../beacon_chain/spec/[ - eth2_merkleization, eth2_ssz_serialization, forks], + eth2_merkleization, eth2_ssz_serialization, forks, helpers], # Status libs, snappy, stew/byteutils export - eth2_merkleization, eth2_ssz_serialization + eth2_merkleization, eth2_ssz_serialization, helpers # Process current EF test format # --------------------------------------------- @@ -90,7 +90,7 @@ type rewards*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] penalties*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/validator.md#eth1block + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#eth1block Eth1Block* = object timestamp*: uint64 deposit_root*: Eth2Digest @@ -173,4 +173,22 @@ proc loadForkedState*( withState(state[]): forkyState.data = parseTest(path, SSZ, consensusFork.BeaconState) forkyState.root = hash_tree_root(forkyState.data) - state \ No newline at end of file + state + +proc loadBlock*( + path: string, + consensusFork: static ConsensusFork, + validateBlockHash = true): auto = + var blck = parseTest(path, SSZ, consensusFork.SignedBeaconBlock) + blck.root = hash_tree_root(blck.message) + when consensusFork >= ConsensusFork.Bellatrix: + if blck.message.is_execution_block and + not blck.message.body.execution_payload.transactions.anyIt(it.len == 0): + if blck.message.body.execution_payload.block_hash != + blck.message.compute_execution_block_hash(): + try: + stderr.write "Invalid `block_hash`: ", path, "\n" + except IOError: + discard + quit 1 + blck diff --git a/tests/consensus_spec/eip7594/all_eip7594_fixtures.nim b/tests/consensus_spec/fulu/all_fulu_fixtures.nim similarity index 70% rename from tests/consensus_spec/eip7594/all_eip7594_fixtures.nim rename to tests/consensus_spec/fulu/all_fulu_fixtures.nim index 952c08f92f..40dcdc7c6b 100644 --- a/tests/consensus_spec/eip7594/all_eip7594_fixtures.nim +++ b/tests/consensus_spec/fulu/all_fulu_fixtures.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,4 +9,6 @@ {.used.} import - ./test_fixture_ssz_consensus_objects + ./test_fixture_operations, + ./test_fixture_ssz_consensus_objects, + ./test_fixture_state_transition_epoch diff --git a/tests/consensus_spec/fulu/test_fixture_operations.nim b/tests/consensus_spec/fulu/test_fixture_operations.nim new file mode 100644 index 0000000000..5620db0c20 --- /dev/null +++ b/tests/consensus_spec/fulu/test_fixture_operations.nim @@ -0,0 +1,294 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [].} +{.used.} + +import + # Utilities + chronicles, + unittest2, + # Beacon chain internals + ../../../beacon_chain/spec/state_transition_block, + ../../../beacon_chain/spec/datatypes/fulu, + # Test utilities + ../../testutil, + ../fixtures_utils, ../os_ops, + ../../helpers/debug_state + +from std/sequtils import anyIt, mapIt, toSeq +from std/strutils import contains +from ../../../beacon_chain/spec/beaconstate import + get_base_reward_per_increment, get_state_exit_queue_info, + get_total_active_balance, latest_block_root, process_attestation + +const + OpDir = SszTestsDir/const_preset/"fulu"/"operations" + OpAttestationsDir = OpDir/"attestation" + OpAttSlashingDir = OpDir/"attester_slashing" + OpBlockHeaderDir = OpDir/"block_header" + OpBlsToExecutionChangeDir = OpDir/"bls_to_execution_change" + OpConsolidationRequestDir = OpDir/"consolidation_request" + OpDepositRequestDir = OpDir/"deposit_request" + OpDepositsDir = OpDir/"deposit" + OpWithdrawalRequestDir = OpDir/"withdrawal_request" + OpExecutionPayloadDir = OpDir/"execution_payload" + OpProposerSlashingDir = OpDir/"proposer_slashing" + OpSyncAggregateDir = OpDir/"sync_aggregate" + OpVoluntaryExitDir = OpDir/"voluntary_exit" + OpWithdrawalsDir = OpDir/"withdrawals" + + baseDescription = "EF - Fulu - Operations - " + + +const testDirs = toHashSet([ + OpAttestationsDir, OpAttSlashingDir, OpBlockHeaderDir, + OpBlsToExecutionChangeDir, OpConsolidationRequestDir, OpDepositRequestDir, + OpDepositsDir, OpWithdrawalRequestDir, OpExecutionPayloadDir, + OpProposerSlashingDir, OpSyncAggregateDir, OpVoluntaryExitDir, + OpWithdrawalsDir]) + +doAssert toHashSet( + mapIt(toSeq(walkDir(OpDir, relative = false)), it.path)) == testDirs + +proc runTest[T, U]( + testSuiteDir, suiteName, opName, applyFile: string, + applyProc: U, identifier: string) = + let testDir = testSuiteDir / "pyspec_tests" / identifier + + let prefix = + if fileExists(testDir/"post.ssz_snappy"): + "[Valid] " + else: + "[Invalid] " + + test prefix & baseDescription & opName & " - " & identifier: + let preState = newClone( + parseTest(testDir/"pre.ssz_snappy", SSZ, fulu.BeaconState)) + let done = applyProc( + preState[], parseTest(testDir/(applyFile & ".ssz_snappy"), SSZ, T)) + + if fileExists(testDir/"post.ssz_snappy"): + let postState = + newClone(parseTest( + testDir/"post.ssz_snappy", SSZ, fulu.BeaconState)) + + reportDiff(preState, postState) + check: + done.isOk() + preState[].hash_tree_root() == postState[].hash_tree_root() + else: + check: done.isErr() # No post state = processing should fail + +suite baseDescription & "Attestation " & preset(): + proc applyAttestation( + preState: var fulu.BeaconState, attestation: electra.Attestation): + Result[void, cstring] = + var cache: StateCache + let + total_active_balance = get_total_active_balance(preState, cache) + base_reward_per_increment = + get_base_reward_per_increment(total_active_balance) + + # This returns the proposer reward for including the attestation, which + # isn't tested here. + discard ? 
process_attestation( + preState, attestation, {strictVerification}, base_reward_per_increment, cache) + ok() + + for path in walkTests(OpAttestationsDir): + runTest[electra.Attestation, typeof applyAttestation]( + OpAttestationsDir, suiteName, "Attestation", "attestation", + applyAttestation, path) + +suite baseDescription & "Attester Slashing " & preset(): + proc applyAttesterSlashing( + preState: var fulu.BeaconState, + attesterSlashing: electra.AttesterSlashing): Result[void, cstring] = + var cache: StateCache + doAssert (? process_attester_slashing( + defaultRuntimeConfig, preState, attesterSlashing, {}, + get_state_exit_queue_info(preState), cache))[0] > 0.Gwei + ok() + + for path in walkTests(OpAttSlashingDir): + runTest[electra.AttesterSlashing, typeof applyAttesterSlashing]( + OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing", + applyAttesterSlashing, path) + +suite baseDescription & "Block Header " & preset(): + proc applyBlockHeader( + preState: var fulu.BeaconState, blck: fulu.BeaconBlock): + Result[void, cstring] = + if blck.is_execution_block: + check blck.body.execution_payload.block_hash == + blck.compute_execution_block_hash() + var cache: StateCache + process_block_header(preState, blck, {}, cache) + + for path in walkTests(OpBlockHeaderDir): + runTest[fulu.BeaconBlock, typeof applyBlockHeader]( + OpBlockHeaderDir, suiteName, "Block Header", "block", + applyBlockHeader, path) + +from ../../../beacon_chain/spec/datatypes/capella import + SignedBLSToExecutionChange + +suite baseDescription & "BLS to execution change " & preset(): + proc applyBlsToExecutionChange( + preState: var fulu.BeaconState, + signed_address_change: SignedBLSToExecutionChange): + Result[void, cstring] = + process_bls_to_execution_change( + defaultRuntimeConfig, preState, signed_address_change) + + for path in walkTests(OpBlsToExecutionChangeDir): + runTest[SignedBLSToExecutionChange, typeof applyBlsToExecutionChange]( + OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change", + applyBlsToExecutionChange, path) + +from ".."/".."/".."/beacon_chain/validator_bucket_sort import + sortValidatorBuckets + +suite baseDescription & "Consolidation Request " & preset(): + proc applyConsolidationRequest( + preState: var fulu.BeaconState, + consolidation_request: ConsolidationRequest): Result[void, cstring] = + var cache: StateCache + process_consolidation_request( + defaultRuntimeConfig, preState, + sortValidatorBuckets(preState.validators.asSeq)[], + consolidation_request, cache) + ok() + + for path in walkTests(OpConsolidationRequestDir): + runTest[ConsolidationRequest, typeof applyConsolidationRequest]( + OpConsolidationRequestDir, suiteName, "Consolidation Request", + "consolidation_request", applyConsolidationRequest, path) + +suite baseDescription & "Deposit " & preset(): + func applyDeposit( + preState: var fulu.BeaconState, deposit: Deposit): + Result[void, cstring] = + process_deposit( + defaultRuntimeConfig, preState, + sortValidatorBuckets(preState.validators.asSeq)[], deposit, {}) + + for path in walkTests(OpDepositsDir): + runTest[Deposit, typeof applyDeposit]( + OpDepositsDir, suiteName, "Deposit", "deposit", applyDeposit, path) + +suite baseDescription & "Deposit Request " & preset(): + func applyDepositRequest( + preState: var fulu.BeaconState, depositRequest: DepositRequest): + Result[void, cstring] = + process_deposit_request( + defaultRuntimeConfig, preState, depositRequest, {}) + + for path in walkTests(OpDepositRequestDir): + runTest[DepositRequest, 
typeof applyDepositRequest]( + OpDepositRequestDir, suiteName, "Deposit Request", "deposit_request", + applyDepositRequest, path) + +suite baseDescription & "Execution Payload " & preset(): + func makeApplyExecutionPayloadCb(path: string): auto = + return proc( + preState: var fulu.BeaconState, body: fulu.BeaconBlockBody): + Result[void, cstring] {.raises: [IOError].} = + let payloadValid = os_ops.readFile( + OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml" + ).contains("execution_valid: true") + if payloadValid and body.is_execution_block and + not body.execution_payload.transactions.anyIt(it.len == 0): + let expectedOk = (path != "incorrect_block_hash") + check expectedOk == (body.execution_payload.block_hash == + body.compute_execution_block_hash( + preState.latest_block_root( + assignClone(preState)[].hash_tree_root()))) + func executePayload(_: fulu.ExecutionPayload): bool = payloadValid + process_execution_payload( + defaultRuntimeConfig, preState, body, executePayload) + + for path in walkTests(OpExecutionPayloadDir): + let applyExecutionPayload = makeApplyExecutionPayloadCb(path) + runTest[fulu.BeaconBlockBody, typeof applyExecutionPayload]( + OpExecutionPayloadDir, suiteName, "Execution Payload", "body", + applyExecutionPayload, path) + +suite baseDescription & "Withdrawal Request " & preset(): + func applyWithdrawalRequest( + preState: var fulu.BeaconState, withdrawalRequest: WithdrawalRequest): + Result[void, cstring] = + var cache: StateCache + process_withdrawal_request( + defaultRuntimeConfig, preState, + sortValidatorBuckets(preState.validators.asSeq)[], withdrawalRequest, + cache) + ok() + + for path in walkTests(OpWithdrawalRequestDir): + runTest[WithdrawalRequest, typeof applyWithdrawalRequest]( + OpWithdrawalRequestDir, suiteName, "Withdrawal Request", + "withdrawal_request", applyWithdrawalRequest, path) + +suite baseDescription & "Proposer Slashing " & preset(): + proc applyProposerSlashing( + preState: var fulu.BeaconState, proposerSlashing: ProposerSlashing): + Result[void, cstring] = + var cache: StateCache + doAssert (? process_proposer_slashing( + defaultRuntimeConfig, preState, proposerSlashing, {}, + get_state_exit_queue_info(preState), cache))[0] > 0.Gwei + ok() + + for path in walkTests(OpProposerSlashingDir): + runTest[ProposerSlashing, typeof applyProposerSlashing]( + OpProposerSlashingDir, suiteName, "Proposer Slashing", "proposer_slashing", + applyProposerSlashing, path) + +suite baseDescription & "Sync Aggregate " & preset(): + proc applySyncAggregate( + preState: var fulu.BeaconState, syncAggregate: SyncAggregate): + Result[void, cstring] = + var cache: StateCache + discard ? 
process_sync_aggregate( + preState, syncAggregate, get_total_active_balance(preState, cache), + {}, cache) + ok() + + for path in walkTests(OpSyncAggregateDir): + runTest[SyncAggregate, typeof applySyncAggregate]( + OpSyncAggregateDir, suiteName, "Sync Aggregate", "sync_aggregate", + applySyncAggregate, path) + +suite baseDescription & "Voluntary Exit " & preset(): + proc applyVoluntaryExit( + preState: var fulu.BeaconState, voluntaryExit: SignedVoluntaryExit): + Result[void, cstring] = + var cache: StateCache + if process_voluntary_exit( + defaultRuntimeConfig, preState, voluntaryExit, {}, + get_state_exit_queue_info(preState), cache).isOk: + ok() + else: + err("") + + for path in walkTests(OpVoluntaryExitDir): + runTest[SignedVoluntaryExit, typeof applyVoluntaryExit]( + OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit", + applyVoluntaryExit, path) + +suite baseDescription & "Withdrawals " & preset(): + func applyWithdrawals( + preState: var fulu.BeaconState, + executionPayload: fulu.ExecutionPayload): Result[void, cstring] = + process_withdrawals(preState, executionPayload) + + for path in walkTests(OpWithdrawalsDir): + runTest[fulu.ExecutionPayload, typeof applyWithdrawals]( + OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload", + applyWithdrawals, path) \ No newline at end of file diff --git a/tests/consensus_spec/fulu/test_fixture_rewards.nim b/tests/consensus_spec/fulu/test_fixture_rewards.nim new file mode 100644 index 0000000000..071f048a87 --- /dev/null +++ b/tests/consensus_spec/fulu/test_fixture_rewards.nim @@ -0,0 +1,88 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [].} +{.used.} + +import + # Beacon chain internals + ../../../beacon_chain/spec/[beaconstate, validator, helpers, state_transition_epoch], + ../../../beacon_chain/spec/datatypes/[altair, fulu], + # Test utilities + ../../testutil, + ../fixtures_utils, ../os_ops + +const + RewardsDirBase = SszTestsDir/const_preset/"fulu"/"rewards" + RewardsDirBasic = RewardsDirBase/"basic"/"pyspec_tests" + RewardsDirLeak = RewardsDirBase/"leak"/"pyspec_tests" + RewardsDirRandom = RewardsDirBase/"random"/"pyspec_tests" + +func init(T: type Deltas, len: int): T = + if not result.rewards.setLen(len): + raiseAssert "setLen" + if not result.penalties.setLen(len): + raiseAssert "setLen" + +proc runTest(rewardsDir, identifier: string) = + let testDir = rewardsDir / identifier + + var info: altair.EpochInfo + + let + state = newClone( + parseTest(testDir/"pre.ssz_snappy", SSZ, fulu.BeaconState)) + flagDeltas = [ + parseTest(testDir/"source_deltas.ssz_snappy", SSZ, Deltas), + parseTest(testDir/"target_deltas.ssz_snappy", SSZ, Deltas), + parseTest(testDir/"head_deltas.ssz_snappy", SSZ, Deltas)] + inactivityPenaltyDeltas = + parseTest(testDir/"inactivity_penalty_deltas.ssz_snappy", SSZ, Deltas) + + info.init(state[]) + let + total_balance = info.balances.current_epoch + base_reward_per_increment = get_base_reward_per_increment(total_balance) + + var + flagDeltas2: array[TimelyFlag, Deltas] = [ + Deltas.init(state[].validators.len), + Deltas.init(state[].validators.len), + Deltas.init(state[].validators.len)] + inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len) + + let finality_delay = get_finality_delay(state[]) + + for validator_index, reward0, reward1, reward2, penalty0, penalty1, penalty2 + in get_flag_and_inactivity_deltas( + defaultRuntimeConfig, state[], base_reward_per_increment, info, + finality_delay): + if not is_eligible_validator(info.validators[validator_index]): + continue + flagDeltas2[TimelyFlag.TIMELY_SOURCE_FLAG_INDEX].rewards[validator_index] = + reward0 + flagDeltas2[TimelyFlag.TIMELY_TARGET_FLAG_INDEX].rewards[validator_index] = + reward1 + flagDeltas2[TimelyFlag.TIMELY_HEAD_FLAG_INDEX].rewards[validator_index] = + reward2 + flagDeltas2[TimelyFlag.TIMELY_SOURCE_FLAG_INDEX].penalties[validator_index] = + penalty0 + flagDeltas2[TimelyFlag.TIMELY_TARGET_FLAG_INDEX].penalties[validator_index] = + penalty1 + flagDeltas2[TimelyFlag.TIMELY_HEAD_FLAG_INDEX].penalties[validator_index] = + 0.Gwei + inactivityPenaltyDeltas2.penalties[validator_index] = penalty2 + + check: + flagDeltas == flagDeltas2 + inactivityPenaltyDeltas == inactivityPenaltyDeltas2 + +suite "EF - Fulu - Rewards " & preset(): + for rewardsDir in [RewardsDirBasic, RewardsDirLeak, RewardsDirRandom]: + for kind, path in walkDir(rewardsDir, relative = true, checkDir = true): + test "EF - Fulu - Rewards - " & path & preset(): + runTest(rewardsDir, path) diff --git a/tests/consensus_spec/eip7594/test_fixture_ssz_consensus_objects.nim b/tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim similarity index 77% rename from tests/consensus_spec/eip7594/test_fixture_ssz_consensus_objects.nim rename to tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim index 03fae0ca2d..5d235760c3 100644 --- a/tests/consensus_spec/eip7594/test_fixture_ssz_consensus_objects.nim +++ b/tests/consensus_spec/fulu/test_fixture_ssz_consensus_objects.nim @@ -17,8 +17,8 @@ import yaml, # Beacon chain internals ../../../beacon_chain/spec/datatypes/[ - altair, - deneb, + altair, + electra, fulu], # Status libraries 
snappy, @@ -29,7 +29,8 @@ from ../../../beacon_chain/spec/datatypes/bellatrix import PowBlock from ../../../beacon_chain/spec/datatypes/capella import BLSToExecutionChange, SignedBLSToExecutionChange, HistoricalSummary, Withdrawal - +from ../../../beacon_chain/spec/datatypes/deneb import + BlobIdentifier, BlobSidecar # SSZ tests of consensus objects (minimal/mainnet preset specific) @@ -37,7 +38,7 @@ from ../../../beacon_chain/spec/datatypes/capella import # ---------------------------------------------------------------- const - SSZDir = SszTestsDir/const_preset/"eip7594"/"ssz_static" + SSZDir = SszTestsDir/const_preset/"fulu"/"ssz_static" type SSZHashTreeRoot = object @@ -51,7 +52,7 @@ type # Checking the values against the yaml file is TODO (require more flexible Yaml parser) proc checkSSZ( - T: type deneb.SignedBeaconBlock, + T: type electra.SignedBeaconBlock, dir: string, expectedHash: SSZHashTreeRoot ) {.raises: [IOError, SerializationError, UnconsumedInput].} = @@ -100,7 +101,7 @@ proc loadExpectedHashTreeRoot( # Test runner # ---------------------------------------------------------------- -suite "EF - EIP7594 - SSZ consensus objects " & preset(): +suite "EF - Fulu - SSZ consensus objects " & preset(): doAssert dirExists(SSZDir), "You need to run the \"download_test_vectors.sh\" script to retrieve the consensus spec test vectors." for pathKind, sszType in walkDir(SSZDir, relative = true, checkDir = true): doAssert pathKind == pcDir @@ -117,55 +118,63 @@ suite "EF - EIP7594 - SSZ consensus objects " & preset(): let hash = loadExpectedHashTreeRoot(path) case sszType: - of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash) - of "Attestation": checkSSZ(phase0.Attestation, path, hash) + of "AggregateAndProof": checkSSZ(electra.AggregateAndProof, path, hash) + of "Attestation": checkSSZ(electra.Attestation, path, hash) of "AttestationData": checkSSZ(AttestationData, path, hash) - of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash) - of "BeaconBlock": checkSSZ(deneb.BeaconBlock, path, hash) - of "BeaconBlockBody": checkSSZ(deneb.BeaconBlockBody, path, hash) + of "AttesterSlashing": checkSSZ(electra.AttesterSlashing, path, hash) + of "BeaconBlock": checkSSZ(electra.BeaconBlock, path, hash) + of "BeaconBlockBody": checkSSZ(electra.BeaconBlockBody, path, hash) of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash) - of "BeaconState": checkSSZ(deneb.BeaconState, path, hash) + of "BeaconState": checkSSZ(electra.BeaconState, path, hash) of "BlobIdentifier": checkSSZ(BlobIdentifier, path, hash) of "BlobSidecar": checkSSZ(BlobSidecar, path, hash) of "BLSToExecutionChange": checkSSZ(BLSToExecutionChange, path, hash) of "Checkpoint": checkSSZ(Checkpoint, path, hash) + of "ConsolidationRequest": checkSSZ(ConsolidationRequest, path, hash) of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash) of "DataColumnSidecar": checkSSZ(DataColumnSidecar, path, hash) of "DataColumnIdentifier": checkSSZ(DataColumnIdentifier, path, hash) of "Deposit": checkSSZ(Deposit, path, hash) of "DepositData": checkSSZ(DepositData, path, hash) of "DepositMessage": checkSSZ(DepositMessage, path, hash) + of "DepositRequest": checkSSZ(DepositRequest, path, hash) of "Eth1Block": checkSSZ(Eth1Block, path, hash) of "Eth1Data": checkSSZ(Eth1Data, path, hash) of "ExecutionPayload": - checkSSZ(deneb.ExecutionPayload, path, hash) + checkSSZ(electra.ExecutionPayload, path, hash) of "ExecutionPayloadHeader": - checkSSZ(deneb.ExecutionPayloadHeader, path, hash) + 
checkSSZ(electra.ExecutionPayloadHeader, path, hash) + of "ExecutionRequests": + checkSSZ(electra.ExecutionRequests, path, hash) of "Fork": checkSSZ(Fork, path, hash) of "ForkData": checkSSZ(ForkData, path, hash) of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash) of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash) of "IndexedAttestation": - checkSSZ(phase0.IndexedAttestation, path, hash) + checkSSZ(electra.IndexedAttestation, path, hash) of "LightClientBootstrap": - checkSSZ(deneb.LightClientBootstrap, path, hash) + checkSSZ(electra.LightClientBootstrap, path, hash) of "LightClientHeader": - checkSSZ(deneb.LightClientHeader, path, hash) + checkSSZ(electra.LightClientHeader, path, hash) of "LightClientUpdate": - checkSSZ(deneb.LightClientUpdate, path, hash) + checkSSZ(electra.LightClientUpdate, path, hash) of "LightClientFinalityUpdate": - checkSSZ(deneb.LightClientFinalityUpdate, path, hash) + checkSSZ(electra.LightClientFinalityUpdate, path, hash) of "LightClientOptimisticUpdate": - checkSSZ(deneb.LightClientOptimisticUpdate, path, hash) + checkSSZ(electra.LightClientOptimisticUpdate, path, hash) of "MatrixEntry": checkSSZ(MatrixEntry, path, hash) of "PendingAttestation": checkSSZ(PendingAttestation, path, hash) + of "PendingConsolidation": checkSSZ(PendingConsolidation, path, hash) + of "PendingDeposit": checkSSZ(PendingDeposit, path, hash) + of "PendingPartialWithdrawal": + checkSSZ(PendingPartialWithdrawal, path, hash) of "PowBlock": checkSSZ(PowBlock, path, hash) of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash) of "SignedAggregateAndProof": - checkSSZ(phase0.SignedAggregateAndProof, path, hash) + checkSSZ(electra.SignedAggregateAndProof, path, hash) of "SignedBeaconBlock": - checkSSZ(deneb.SignedBeaconBlock, path, hash) + checkSSZ(electra.SignedBeaconBlock, path, hash) of "SignedBeaconBlockHeader": checkSSZ(SignedBeaconBlockHeader, path, hash) of "SignedBLSToExecutionChange": @@ -174,6 +183,7 @@ suite "EF - EIP7594 - SSZ consensus objects " & preset(): checkSSZ(SignedContributionAndProof, path, hash) of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash) of "SigningData": checkSSZ(SigningData, path, hash) + of "SingleAttestation": checkSSZ(SingleAttestation, path, hash) of "SyncAggregate": checkSSZ(SyncAggregate, path, hash) of "SyncAggregatorSelectionData": checkSSZ(SyncAggregatorSelectionData, path, hash) @@ -182,6 +192,7 @@ suite "EF - EIP7594 - SSZ consensus objects " & preset(): checkSSZ(SyncCommitteeContribution, path, hash) of "SyncCommitteeMessage": checkSSZ(SyncCommitteeMessage, path, hash) of "Withdrawal": checkSSZ(Withdrawal, path, hash) + of "WithdrawalRequest": checkSSZ(WithdrawalRequest, path, hash) of "Validator": checkSSZ(Validator, path, hash) of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash) else: diff --git a/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim b/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim new file mode 100644 index 0000000000..465cbe9982 --- /dev/null +++ b/tests/consensus_spec/fulu/test_fixture_state_transition_epoch.nim @@ -0,0 +1,165 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. 
This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} +{.used.} + +import + # Status internals + chronicles, + # Beacon chain internals + ../../../beacon_chain/spec/[presets, state_transition_epoch], + ../../../beacon_chain/spec/datatypes/altair, + # Test utilities + ../../testutil, + ../fixtures_utils, ../os_ops, + ./test_fixture_rewards, + ../../helpers/debug_state + +from std/sequtils import mapIt, toSeq +from std/strutils import rsplit +from ../../../beacon_chain/spec/datatypes/fulu import BeaconState + +const + RootDir = SszTestsDir/const_preset/"fulu"/"epoch_processing" + + JustificationFinalizationDir = RootDir/"justification_and_finalization" + InactivityDir = RootDir/"inactivity_updates" + RegistryUpdatesDir = RootDir/"registry_updates" + SlashingsDir = RootDir/"slashings" + Eth1DataResetDir = RootDir/"eth1_data_reset" + EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates" + SlashingsResetDir = RootDir/"slashings_reset" + RandaoMixesResetDir = RootDir/"randao_mixes_reset" + ParticipationFlagDir = RootDir/"participation_flag_updates" + SyncCommitteeDir = RootDir/"sync_committee_updates" + RewardsAndPenaltiesDir = RootDir/"rewards_and_penalties" + HistoricalSummariesUpdateDir = RootDir/"historical_summaries_update" + PendingConsolidationsDir = RootDir/"pending_consolidations" + PendingDepositsDir = RootDir/"pending_deposits" + +doAssert (toHashSet(mapIt(toSeq(walkDir(RootDir, relative = false)), it.path)) - + toHashSet([SyncCommitteeDir])) == + toHashSet([ + JustificationFinalizationDir, InactivityDir, RegistryUpdatesDir, + SlashingsDir, Eth1DataResetDir, EffectiveBalanceUpdatesDir, + SlashingsResetDir, RandaoMixesResetDir, ParticipationFlagDir, + RewardsAndPenaltiesDir, HistoricalSummariesUpdateDir, + PendingDepositsDir, PendingConsolidationsDir]) + +template runSuite( + suiteDir, testName: string, transitionProc: untyped): untyped = + suite "EF - Fulu - Epoch Processing - " & testName & preset(): + for testDir in walkDirRec( + suiteDir / "pyspec_tests", yieldFilter = {pcDir}, checkDir = true): + let unitTestName = testDir.rsplit(DirSep, 1)[1] + test testName & " - " & unitTestName & preset(): + # BeaconState objects are stored on the heap to avoid stack overflow + type T = fulu.BeaconState + let preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T)) + var cache {.inject, used.} = StateCache() + template state: untyped {.inject, used.} = preState[] + template cfg: untyped {.inject, used.} = defaultRuntimeConfig + + if transitionProc.isOk: + let postState = + newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T)) + check: hash_tree_root(preState[]) == hash_tree_root(postState[]) + reportDiff(preState, postState) + else: + check: not fileExists(testDir/"post.ssz_snappy") + +# Justification & Finalization +# --------------------------------------------------------------- +runSuite(JustificationFinalizationDir, "Justification & Finalization"): + let info = altair.EpochInfo.init(state) + process_justification_and_finalization(state, info.balances) + Result[void, cstring].ok() + +# Inactivity updates +# --------------------------------------------------------------- +runSuite(InactivityDir, "Inactivity"): + let info = altair.EpochInfo.init(state) + process_inactivity_updates(cfg, state, info) + Result[void, cstring].ok() + +# Rewards & Penalties +# --------------------------------------------------------------- +runSuite(RewardsAndPenaltiesDir, "Rewards and penalties"): + var info = 
altair.EpochInfo.init(state) + process_rewards_and_penalties(cfg, state, info) + Result[void, cstring].ok() + +# rest in test_fixture_rewards + +# Registry updates +# --------------------------------------------------------------- +runSuite(RegistryUpdatesDir, "Registry updates"): + process_registry_updates(cfg, state, cache) + +# Slashings +# --------------------------------------------------------------- +runSuite(SlashingsDir, "Slashings"): + let info = altair.EpochInfo.init(state) + process_slashings(state, info.balances.current_epoch) + Result[void, cstring].ok() + +# Eth1 data reset +# --------------------------------------------------------------- +runSuite(Eth1DataResetDir, "Eth1 data reset"): + process_eth1_data_reset(state) + Result[void, cstring].ok() + +# Effective balance updates +# --------------------------------------------------------------- +runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"): + process_effective_balance_updates(state) + Result[void, cstring].ok() + +# Slashings reset +# --------------------------------------------------------------- +runSuite(SlashingsResetDir, "Slashings reset"): + process_slashings_reset(state) + Result[void, cstring].ok() + +# RANDAO mixes reset +# --------------------------------------------------------------- +runSuite(RandaoMixesResetDir, "RANDAO mixes reset"): + process_randao_mixes_reset(state) + Result[void, cstring].ok() + +# Historical roots update +# --------------------------------------------------------------- +runSuite(HistoricalSummariesUpdateDir, "Historical summaries update"): + process_historical_summaries_update(state) + +# Participation flag updates +# --------------------------------------------------------------- +runSuite(ParticipationFlagDir, "Participation flag updates"): + process_participation_flag_updates(state) + Result[void, cstring].ok() + +# Pending deposits +# --------------------------------------------------------------- +runSuite(PendingDepositsDir, "Pending deposits"): + process_pending_deposits(cfg, state, cache) + +# Pending consolidations +# --------------------------------------------------------------- +runSuite(PendingConsolidationsDir, "Pending consolidations"): + process_pending_consolidations(cfg, state) + +# Sync committee updates +# --------------------------------------------------------------- + +# These are only for minimal, not mainnet +when const_preset == "minimal": + runSuite(SyncCommitteeDir, "Sync committee updates"): + process_sync_committee_updates(state) + Result[void, cstring].ok() +else: + doAssert not dirExists(SyncCommitteeDir) \ No newline at end of file diff --git a/tests/consensus_spec/test_fixture_fork.nim b/tests/consensus_spec/test_fixture_fork.nim index a4c7e236ba..6db9a733e7 100644 --- a/tests/consensus_spec/test_fixture_fork.nim +++ b/tests/consensus_spec/test_fixture_fork.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -85,4 +85,13 @@ suite "EF - Electra - Fork " & preset(): SszTestsDir/const_preset/"electra"/"fork"/"fork"/"pyspec_tests" for kind, path in walkDir(OpForkDir, relative = true, checkDir = true): runTest(deneb.BeaconState, electra.BeaconState, "Electra", OpForkDir, - upgrade_to_electra, suiteName, path) \ No newline at end of file + upgrade_to_electra, suiteName, path) + +from ../../beacon_chain/spec/datatypes/fulu import BeaconState + +suite "EF - Fulu - Fork " & preset(): + const OpForkDir = + SszTestsDir/const_preset/"fulu"/"fork"/"fork"/"pyspec_tests" + for kind, path in walkDir(OpForkDir, relative = true, checkDir = true): + runTest(electra.BeaconState, fulu.BeaconState, "Fulu", OpForkDir, + upgrade_to_fulu, suiteName, path) diff --git a/tests/consensus_spec/test_fixture_fork_choice.nim b/tests/consensus_spec/test_fixture_fork_choice.nim index b735640c36..b2ab66ced1 100644 --- a/tests/consensus_spec/test_fixture_fork_choice.nim +++ b/tests/consensus_spec/test_fixture_fork_choice.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,12 +10,12 @@ import # Status libraries - stew/byteutils, chronicles, + chronicles, taskpools, # Internals - ../../beacon_chain/spec/[helpers, forks, state_transition_block], + ../../beacon_chain/spec/forks, ../../beacon_chain/fork_choice/[fork_choice, fork_choice_types], - ../../beacon_chain/[beacon_chain_db, beacon_clock], + ../../beacon_chain/beacon_chain_db, ../../beacon_chain/consensus_object_pools/[ blockchain_dag, block_clearance, block_quarantine, spec_cache], # Third-party @@ -28,7 +28,10 @@ from std/json import JsonNode, getBool, getInt, getStr, hasKey, items, len, pairs, `$`, `[]` from std/sequtils import mapIt, toSeq from std/strutils import contains +from stew/byteutils import fromHex from ../testbcutil import addHeadBlock +from ../../beacon_chain/spec/state_transition_block import + check_attester_slashing, validate_blobs # Test format described at https://github.com/ethereum/consensus-specs/tree/v1.3.0/tests/formats/fork_choice # Note that our implementation has been optimized with "ProtoArray" @@ -37,10 +40,12 @@ from ../testbcutil import addHeadBlock type OpKind = enum opOnTick - opOnAttestation + opOnPhase0Attestation + opOnElectraAttestation opOnBlock opOnMergeBlock - opOnAttesterSlashing + opOnPhase0AttesterSlashing + opOnElectraAttesterSlashing opInvalidateHash opChecks @@ -54,15 +59,19 @@ type case kind: OpKind of opOnTick: tick: int - of opOnAttestation: - att: phase0.Attestation + of opOnPhase0Attestation: + phase0Att: phase0.Attestation + of opOnElectraAttestation: + electraAtt: electra.Attestation of opOnBlock: blck: ForkedSignedBeaconBlock blobData: Opt[BlobData] of opOnMergeBlock: powBlock: PowBlock - of opOnAttesterSlashing: - attesterSlashing: phase0.AttesterSlashing + of opOnPhase0AttesterSlashing: + phase0AttesterSlashing: phase0.AttesterSlashing + of opOnElectraAttesterSlashing: + electraAttesterSlashing: electra.AttesterSlashing of opInvalidateHash: invalidatedHash: Eth2Digest latestValidHash: Eth2Digest @@ -108,21 +117,19 @@ proc loadOps( tick: step["tick"].getInt()) elif step.hasKey"attestation": let filename = step["attestation"].getStr() - let att = parseTest( - 
path/filename & ".ssz_snappy", - SSZ, phase0.Attestation - ) - result.add Operation(kind: opOnAttestation, - att: att) + if fork >= ConsensusFork.Electra: + result.add Operation( + kind: opOnElectraAttestation, electraAtt: parseTest( + path/filename & ".ssz_snappy", SSZ, electra.Attestation)) + else: + result.add Operation(kind: opOnPhase0Attestation, phase0Att: parseTest( + path/filename & ".ssz_snappy", SSZ, phase0.Attestation)) elif step.hasKey"block": let filename = step["block"].getStr() doAssert step.hasKey"blobs" == step.hasKey"proofs" withConsensusFork(fork): let - blck = parseTest( - path/filename & ".ssz_snappy", - SSZ, consensusFork.SignedBeaconBlock) - + blck = loadBlock(path/filename & ".ssz_snappy", consensusFork) blobData = when consensusFork >= ConsensusFork.Deneb: if step.hasKey"blobs": @@ -130,7 +137,7 @@ proc loadOps( Opt.some BlobData( blobs: distinctBase(parseTest( path/(step["blobs"].getStr()) & ".ssz_snappy", - SSZ, List[KzgBlob, Limit MAX_BLOBS_PER_BLOCK])), + SSZ, List[KzgBlob, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK])), proofs: step["proofs"].mapIt( KzgProof(bytes: fromHex(array[48, byte], it.getStr())))) else: @@ -144,12 +151,14 @@ proc loadOps( blobData: blobData) elif step.hasKey"attester_slashing": let filename = step["attester_slashing"].getStr() - let attesterSlashing = parseTest( - path/filename & ".ssz_snappy", - SSZ, phase0.AttesterSlashing - ) - result.add Operation(kind: opOnAttesterSlashing, - attesterSlashing: attesterSlashing) + if fork >= ConsensusFork.Electra: + result.add Operation(kind: opOnElectraAttesterSlashing, + electraAttesterSlashing: parseTest( + path/filename & ".ssz_snappy", SSZ, electra.AttesterSlashing)) + else: + result.add Operation(kind: opOnPhase0AttesterSlashing, + phase0AttesterSlashing: parseTest( + path/filename & ".ssz_snappy", SSZ, phase0.AttesterSlashing)) elif step.hasKey"payload_status": if step["payload_status"]["status"].getStr() == "INVALID": result.add Operation(kind: opInvalidateHash, @@ -325,10 +334,16 @@ proc doRunTest( time = BeaconTime(ns_since_genesis: step.tick.seconds.nanoseconds) let status = stores.fkChoice[].update_time(stores.dag, time) doAssert status.isOk == step.valid - of opOnAttestation: + of opOnPhase0Attestation: + let status = stores.fkChoice[].on_attestation( + stores.dag, step.phase0Att.data.slot, step.phase0Att.data.beacon_block_root, + toSeq(stores.dag.get_attesting_indices(step.phase0Att.asTrusted)), time) + doAssert status.isOk == step.valid + of opOnElectraAttestation: let status = stores.fkChoice[].on_attestation( - stores.dag, step.att.data.slot, step.att.data.beacon_block_root, - toSeq(stores.dag.get_attesting_indices(step.att.asTrusted)), time) + stores.dag, step.electraAtt.data.slot, + step.electraAtt.data.beacon_block_root, + toSeq(stores.dag.get_attesting_indices(step.electraAtt, true)), time) doAssert status.isOk == step.valid of opOnBlock: withBlck(step.blck): @@ -337,9 +352,16 @@ proc doRunTest( verifier, state[], stateCache, forkyBlck, step.blobData, time, invalidatedHashes) doAssert status.isOk == step.valid - of opOnAttesterSlashing: - let indices = - check_attester_slashing(state[], step.attesterSlashing, flags = {}) + of opOnPhase0AttesterSlashing: + let indices = check_attester_slashing( + state[], step.phase0AttesterSlashing, flags = {}) + if indices.isOk: + for idx in indices.get: + stores.fkChoice[].process_equivocation(idx) + doAssert indices.isOk == step.valid + of opOnElectraAttesterSlashing: + let indices = check_attester_slashing( + state[], step.electraAttesterSlashing, 
flags = {}) if indices.isOk: for idx in indices.get: stores.fkChoice[].process_equivocation(idx) @@ -389,8 +411,6 @@ template fcSuite(suiteName: static[string], testPathElem: static[string]) = let testsPath = presetPath/path/testPathElem if kind != pcDir or not os_ops.dirExists(testsPath): continue - if testsPath.contains("/electra/") or testsPath.contains("\\electra\\"): - continue let fork = forkForPathComponent(path).valueOr: raiseAssert "Unknown test fork: " & testsPath for kind, path in walkDir(testsPath, relative = true, checkDir = true): diff --git a/tests/consensus_spec/test_fixture_kzg.nim b/tests/consensus_spec/test_fixture_kzg.nim index f7b202545d..9a2f51c6e5 100644 --- a/tests/consensus_spec/test_fixture_kzg.nim +++ b/tests/consensus_spec/test_fixture_kzg.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2024 Status Research & Development GmbH +# Copyright (c) 2024-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -12,12 +12,12 @@ import std/json, yaml/tojson, kzg4844/[kzg, kzg_abi], - stew/byteutils, ../testutil, ./fixtures_utils, ./os_ops from std/sequtils import anyIt, mapIt, toSeq from std/strutils import rsplit +from stew/byteutils import fromHex func toUInt64(s: int): Opt[uint64] = if s < 0: @@ -76,7 +76,7 @@ proc runVerifyKzgProofTest(suiteName, suitePath, path: string) = y = fromHex[32](data["input"]["y"].getStr) proof = fromHex[48](data["input"]["proof"].getStr) - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/tests/formats/kzg/verify_kzg_proof.md#condition + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/tests/formats/kzg_4844/verify_kzg_proof.md#condition # "If the commitment or proof is invalid (e.g. not on the curve or not in # the G1 subgroup of the BLS curve) or `z` or `y` are not a valid BLS # field element, it should error, i.e. the output should be `null`." @@ -201,10 +201,30 @@ proc runComputeBlobKzgProofTest(suiteName, suitePath, path: string) = else: check p.get.bytes == fromHex[48](output.getStr).get +proc runComputeCellsTest(suiteName, suitePath, path: string) = + let relativePathComponent = path.relativeTestPathComponent(suitePath) + test "KZG - Compute Cells - " & relativePathComponent: + let + data = loadToJson(os_ops.readFile(path/"data.yaml"))[0] + output = data["output"] + blob = fromHex[131072](data["input"]["blob"].getStr) + + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/tests/formats/kzg_7594/compute_cells.md#condition + if blob.isNone: + check output.kind == JNull + else: + let p = newClone computeCells(KzgBlob(bytes: blob.get)) + if p[].isErr: + check output.kind == JNull + else: + let p_val = newClone p[].get + for i in 0.. 
maxSlot: + maxSlot = att.data.slot + + check process_slots( + defaultRuntimeConfig, state[], + maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, cache, + info, {}).isOk() + + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) + check: + attestations.len() == 2 + attestations[0].aggregation_bits.countOnes() == 4 + attestations[0].committee_bits.countOnes() == 2 + attestations[1].aggregation_bits.countOnes() == 4 + attestations[1].committee_bits.countOnes() == 2 + check_attestation( + state[].electraData.data, attestations[0], {}, cache, true).isOk + check_attestation( + state[].electraData.data, attestations[1], {}, cache, true).isOk + pool[].verifyAttestationSignature(state, cache, attestations[0]) + pool[].verifyAttestationSignature(state, cache, attestations[1]) + + test "Simple add and get with electra nonzero committee" & preset(): + let + bc0 = get_beacon_committee( + state[], getStateField(state[], slot), 0.CommitteeIndex, cache) + + bc1 = get_beacon_committee( + state[], getStateField(state[], slot), 1.CommitteeIndex, cache) + + attestation_1 = makeElectraAttestation( + state[], state[].latest_block_root, bc0[0], cache) + + attestation_2 = makeElectraAttestation( + state[], state[].latest_block_root, bc1[0], cache) + + pool[].addAttestation( + attestation_1, @[bc0[0]], attestation_1.aggregation_bits.len, + attestation_1.loadSig, attestation_1.data.slot.start_beacon_time) + + pool[].addAttestation( + attestation_2, @[bc1[0]], attestation_2.aggregation_bits.len, + attestation_2.loadSig, attestation_2.data.slot.start_beacon_time) + + check: + process_slots( + defaultRuntimeConfig, state[], + getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, + info, {}).isOk() + + check: + pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(attestation_1.data), + 0.CommitteeIndex).isOk + pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(attestation_2.data), + 1.CommitteeIndex).isOk + + test "Cache coherence on chain aggregates" & preset(): + # Add attestation from different committee + var maxSlot = 0.Slot + + for i in 0 ..< 4: + let + bc = get_beacon_committee( + state[], getStateField(state[], slot), i.CommitteeIndex, cache) + att = makeElectraAttestation( + state[], state[].latest_block_root, bc[0], cache) + var att2 = makeElectraAttestation( + state[], state[].latest_block_root, bc[1], cache) + + pool[].addAttestation( + att, @[bc[0]], att.aggregation_bits.len, att.loadSig, + att.data.slot.start_beacon_time) + + if att.data.slot < 2: + pool[].addAttestation( + att2, @[bc[1]], att2.aggregation_bits.len, att2.loadSig, + att2.data.slot.start_beacon_time) + + if att.data.slot > maxSlot: + maxSlot = att.data.slot + + check process_slots( + defaultRuntimeConfig, state[], + maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, cache, + info, {}).isOk() + + let attestations = pool[].getElectraAttestationsForBlock(state[], cache) + check: + ## Considering that all structures in getElectraAttestationsForBlock + ## are sorted, the most relevant should be at sequence head. 
+ ## Given the attestations added, the most "scored" is on + ## slot 1 + attestations.len() == 2 + + attestations[0].aggregation_bits.countOnes() == 4 + attestations[0].committee_bits.countOnes() == 2 + attestations[0].data.slot == 1.Slot + + + attestations[1].aggregation_bits.countOnes() == 2 + attestations[1].committee_bits.countOnes() == 2 + attestations[1].data.slot == 2.Slot + + check_attestation( + state[].electraData.data, attestations[0], {}, cache, true).isOk + check_attestation( + state[].electraData.data, attestations[1], {}, cache, true).isOk + pool[].verifyAttestationSignature(state, cache, attestations[0]) + pool[].verifyAttestationSignature(state, cache, attestations[1]) diff --git a/tests/test_beacon_chain_db.nim b/tests/test_beacon_chain_db.nim index 772e05e093..cfa0e8f95c 100644 --- a/tests/test_beacon_chain_db.nim +++ b/tests/test_beacon_chain_db.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) # * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) @@ -1140,6 +1140,106 @@ suite "Beacon chain DB" & preset(): db.close() + test "sanity check data columns" & preset(): + const + blockHeader0 = SignedBeaconBlockHeader( + message: BeaconBlockHeader(slot: Slot(0))) + blockHeader1 = SignedBeaconBlockHeader( + message: BeaconBlockHeader(slot: Slot(1))) + + let + blockRoot0 = hash_tree_root(blockHeader0.message) + blockRoot1 = hash_tree_root(blockHeader1.message) + + # Ensure minimal-difference pairs on both block root and + # data column index to verify that the columnkey uses both + dataColumnSidecar0 = DataColumnSidecar(signed_block_header: blockHeader0, index: 3) + dataColumnSidecar1 = DataColumnSidecar(signed_block_header: blockHeader0, index: 2) + dataColumnSidecar2 = DataColumnSidecar(signed_block_header: blockHeader1, index: 2) + + db = makeTestDB(SLOTS_PER_EPOCH) + + var + buf: seq[byte] + dataColumnSidecar: DataColumnSidecar + + check: + not db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) + not db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + not db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + not db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + not db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + not db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + db.putDataColumnSidecar(dataColumnSidecar0) + + check: + db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar0 + not db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + not db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + not db.getDataColumnSidecarSZ(blockRoot0, 2, buf) + not db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + db.putDataColumnSidecar(dataColumnSidecar1) + + check: + db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar0 + db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar1 + not db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + db.getDataColumnSidecarSZ(blockRoot0, 2, buf) + not db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + check db.delDataColumnSidecar(blockRoot0, 3) + + check: + not db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) 
+ db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar1 + not db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + not db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + db.getDataColumnSidecarSZ(blockRoot0, 2, buf) + not db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + db.putDataColumnSidecar(dataColumnSidecar2) + + check: + not db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) + db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar1 + db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar2 + not db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + db.getDataColumnSidecarSZ(blockRoot0, 2, buf) + db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + check db.delDataColumnSidecar(blockRoot0, 2) + + check: + not db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) + not db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + dataColumnSidecar == dataColumnSidecar2 + not db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + not db.getDataColumnSidecarSZ(blockRoot0, 2, buf) + db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + check db.delDataColumnSidecar(blockRoot1, 2) + + check: + not db.getDataColumnSidecar(blockRoot0, 3, dataColumnSidecar) + not db.getDataColumnSidecar(blockRoot0, 2, dataColumnSidecar) + not db.getDataColumnSidecar(blockRoot1, 2, dataColumnSidecar) + not db.getDataColumnSidecarSZ(blockRoot0, 3, buf) + not db.getDataColumnSidecarSZ(blockRoot0, 2, buf) + not db.getDataColumnSidecarSZ(blockRoot1, 2, buf) + + db.close() + suite "FinalizedBlocks" & preset(): test "Basic ops" & preset(): var diff --git a/tests/test_block_quarantine.nim b/tests/test_block_quarantine.nim index 64accd7884..85b6c140b7 100644 --- a/tests/test_block_quarantine.nim +++ b/tests/test_block_quarantine.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2022-2024 Status Research & Development GmbH +# Copyright (c) 2022-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -10,6 +10,7 @@ import unittest2, + chronicles, ../beacon_chain/spec/forks, ../beacon_chain/spec/datatypes/[phase0, deneb], ../beacon_chain/consensus_object_pools/block_quarantine @@ -118,3 +119,39 @@ suite "Block quarantine": b0.root in quarantine.missing b1.root notin quarantine.missing b2.root notin quarantine.missing + + test "Keep downloading parent chain even if we hit missing limit": + var quarantine: Quarantine + var blocks = @[makeBlock(Slot 0, ZERO_HASH)] + for i in 0.. 
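Editorial note (not part of the diff): the "sanity check data columns" test above relies on the database keying each sidecar by both block root and column index, so minimal-difference pairs must not collide. A simplified sketch of that composite-key behaviour, with hypothetical names (Root, putColumn, getColumn, delColumn) and a plain Table standing in for the actual on-disk store; Root is shortened to uint64 purely for the sketch:

import std/tables

type
  Root = uint64                 # stands in for the 32-byte block root
  ColumnKey = (Root, uint64)    # block root plus column index

var store: Table[ColumnKey, seq[byte]]

proc putColumn(root: Root, index: uint64, data: seq[byte]) =
  store[(root, index)] = data

proc getColumn(root: Root, index: uint64, data: var seq[byte]): bool =
  if (root, index) in store:
    data = store[(root, index)]
    true
  else:
    false

proc delColumn(root: Root, index: uint64) =
  store.del((root, index))

when isMainModule:
  var buf: seq[byte]
  putColumn(0, 3, @[1'u8, 2, 3])
  doAssert getColumn(0, 3, buf)       # same root and index: hit
  doAssert not getColumn(0, 2, buf)   # same root, different index: miss
  doAssert not getColumn(1, 3, buf)   # different root, same index: miss
  delColumn(0, 3)
  doAssert not getColumn(0, 3, buf)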
RestApiResponse: + + if contentBody.isNone: + return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) + + let registrations = + decodeBodyJsonOrSsz(seq[SignedValidatorRegistrationV1], + contentBody.get()).valueOr: + return RestApiResponse.jsonError(error) + + for item in registrations: + if not(verify_builder_signature(emptyFork, item.message, + item.message.pubkey, item.signature)): + return RestApiResponse.jsonError(Http400, + "Signature verification failed") + RestApiResponse.jsonResponse(Http200) + + router.api2(MethodGet, + "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}") do ( + slot: Slot, parent_hash: Eth2Digest, + pubkey: ValidatorPubKey) -> RestApiResponse: + let + qslot = slot.valueOr: + return RestApiResponse.jsonError(Http400, "Invalid slot", $error) + qhash = parent_hash.valueOr: + return RestApiResponse.jsonError(Http400, "Invalid parent_hash", $error) + qpubkey {.used.} = pubkey.valueOr: + return RestApiResponse.jsonError(Http400, "Invalid pubkey", $error) + contentType = preferredContentType(jsonMediaType, + sszMediaType).valueOr: + return RestApiResponse.jsonError(Http406, "Content is not acceptable") + + template respondSszOrJson(contentType, bid: auto): RestApiResponse = + if contentType == sszMediaType: + RestApiResponse.sszResponseSignedBuilderBid(bid) + elif contentType == jsonMediaType: + RestApiResponse.jsonResponseSignedBuilderBid(bid) + else: + RestApiResponse.jsonError(Http415, "Invalid Accept") + + if qslot == DenebSlot: + let bid = deneb_mev.SignedBuilderBid( + message: deneb_mev.BuilderBid( + header: deneb.ExecutionPayloadHeader(parent_hash: qhash)) + ) + respondSszOrJson(contentType, bid) + elif qslot == ElectraSlot: + let bid = electra_mev.SignedBuilderBid( + message: electra_mev.BuilderBid( + header: electra.ExecutionPayloadHeader(parent_hash: qhash)) + ) + respondSszOrJson(contentType, bid) + elif qslot == FuluSlot: + let bid = fulu_mev.SignedBuilderBid( + message: fulu_mev.BuilderBid( + header: fulu.ExecutionPayloadHeader(parent_hash: qhash)) + ) + respondSszOrJson(contentType, bid) + else: + RestApiResponse.jsonError(Http500, "Unsupported slot number") + + router.api2(MethodPost, "/eth/v1/builder/blinded_blocks") do ( + contentBody: Option[ContentBody]) -> RestApiResponse: + + if contentBody.isNone: + return RestApiResponse.jsonError(Http400, EmptyRequestBodyError) + + let + rawVersion = request.headers.getString("eth-consensus-version") + consensusFork = ConsensusFork.decodeString(rawVersion).valueOr: + return RestApiResponse.jsonError(Http400, "Invalid consensus version") + contentType = preferredContentType(jsonMediaType, + sszMediaType).valueOr: + return RestApiResponse.jsonError(Http406, "Content type not acceptable") + + if consensusFork < ConsensusFork.Deneb: + return RestApiResponse.jsonError(Http400, "Unsupported fork version") + + template respondSszOrJson(contentType, payload: auto): RestApiResponse = + if contentType == sszMediaType: + RestApiResponse.sszResponseExecutionPayloadAndBlobsBundle(payload) + elif contentType == jsonMediaType: + RestApiResponse.jsonResponseExecutionPayloadAndBlobsBundle(payload) + else: + RestApiResponse.jsonError(Http415, "Invalid Accept") + + if consensusFork == ConsensusFork.Deneb: + let + blck = + decodeBodyJsonOrSsz(deneb_mev.SignedBlindedBeaconBlock, + contentBody.get()).valueOr: + return RestApiResponse.jsonError(error) + proposerKey = + if int(blck.message.proposer_index) < len(node.validators): + node.validators[int(blck.message.proposer_index)] + else: + ValidatorPubKey() + slot = 
blck.message.slot + blockRoot = hash_tree_root(blck.message) + + if not(verify_block_signature(emptyFork, emptyRoot, slot, blockRoot, + proposerKey, blck.signature)): + return RestApiResponse.jsonError(Http400, "Invalid signature") + + let + payload = deneb_mev.ExecutionPayloadAndBlobsBundle( + execution_payload: deneb.ExecutionPayload( + parent_hash: blck.message.body.execution_payload_header.parent_hash + ), + blobs_bundle: BlobsBundle() + ) + respondSszOrJson(contentType, payload) + elif consensusFork == ConsensusFork.Electra: + let + blck = + decodeBodyJsonOrSsz(electra_mev.SignedBlindedBeaconBlock, + contentBody.get()).valueOr: + return RestApiResponse.jsonError(error) + payload = electra_mev.ExecutionPayloadAndBlobsBundle( + execution_payload: electra.ExecutionPayload( + parent_hash: blck.message.body.execution_payload_header.parent_hash + ), + blobs_bundle: BlobsBundle() + ) + respondSszOrJson(contentType, payload) + elif consensusFork == ConsensusFork.Fulu: + let + blck = + decodeBodyJsonOrSsz(fulu_mev.SignedBlindedBeaconBlock, + contentBody.get()).valueOr: + return RestApiResponse.jsonError(error) + payload = fulu_mev.ExecutionPayloadAndBlobsBundle( + execution_payload: fulu.ExecutionPayload( + parent_hash: blck.message.body.execution_payload_header.parent_hash + ), + blobs_bundle: BlobsBundle() + ) + respondSszOrJson(contentType, payload) + else: + raiseAssert "Unsupported fork version" + + router.api2(MethodGet, "/eth/v1/builder/status") do () -> RestApiResponse: + RestApiResponse.response(Http200) + +proc testSuite() = + + suite "MEV calls serialization/deserialization and behavior test suite": + let + rng = HmacDrbgContext.new() + node = TestNodeRef() + var router = RestRouter.init(proc(pattern: string, value: string): int = 0) + setupEngineAPI(router, node) + + let + bindAddress = try: + initTAddress("127.0.0.1", Port(0)) + except TransportAddressError as exc: + raiseAssert "Unexpected error, reason " & $exc.msg + + server = RestServerRef.new(router, bindAddress).valueOr: + raiseAssert "Unable to establish REST server, reason " & $error + address = server.localAddress() + + server.start() + + setup: + let + httpFlags: set[HttpClientFlag] = {} + prestoFlags = {RestClientFlag.CommaSeparatedArray} + socketFlags = {SocketFlags.TcpNoDelay} + remoteUri = "http://" & $address & "/" + client = RestClientRef.new( + remoteUri, prestoFlags, httpFlags, socketFlags = socketFlags).valueOr: + raiseAssert "Unable to resolve distributed signer address " & $address + + teardown: + waitFor client.closeWait() + + template getHeaderTest(responseKind: TestKind): untyped = + let + bytes = rng[].generate(array[32, byte]) + parent_hash = Eth2Digest(data: bytes) + privateKey = keyGen(rng[]).valueOr: + raiseAssert "Unable to generate private key" + publicKey = privateKey.toPubKey().toPubKey() + (restAcceptType1, responseMediaType1) = + if responseKind == TestKind.Ssz: + ("application/octet-stream", OctetStreamMediaType) + else: + ("application/json", ApplicationJsonMediaType) + (restAcceptType2, responseMediaType2) = + if responseKind == TestKind.Ssz: + ("application/json;q=0.9,application/octet-stream", + OctetStreamMediaType) + else: + ("application/octet-stream;q=0.9,application/json", + ApplicationJsonMediaType) + (restAcceptType3, responseMediaType3) = + if responseKind == TestKind.Ssz: + ("application/json;q=0.5,application/octet-stream;q=1.0", + OctetStreamMediaType) + else: + ("application/octet-stream;q=0.5,application/json;q=1.0", + ApplicationJsonMediaType) + + let + response1 = + await 
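Editorial note (not part of the diff): the mock builder endpoints and the getHeaderTest/submitBlindedBlockTest templates pick SSZ versus JSON responses from Accept headers with q-values, via preferredContentType(jsonMediaType, sszMediaType). A simplified, self-contained stand-in for that selection logic; preferredMediaType is a hypothetical helper, and real negotiation additionally honours wildcards and header ordering:

import std/[strutils, algorithm]

proc preferredMediaType(accept: string, supported: openArray[string]): string =
  ## Return the supported media type carrying the highest q-value in `accept`.
  var scored: seq[(float, string)]
  for entry in accept.split(','):
    let parts = entry.strip().split(';')
    let mediaType = parts[0].strip()
    var q = 1.0
    for param in parts[1 .. ^1]:
      let kv = param.strip().split('=')
      if kv.len == 2 and kv[0].strip() == "q":
        q = parseFloat(kv[1].strip())
    if mediaType in supported:
      scored.add((q, mediaType))
  scored.sort(SortOrder.Descending)
  if scored.len > 0: scored[0][1] else: ""

when isMainModule:
  doAssert preferredMediaType(
    "application/json;q=0.5,application/octet-stream;q=1.0",
    ["application/json", "application/octet-stream"]) == "application/octet-stream"
  doAssert preferredMediaType(
    "application/json", ["application/json", "application/octet-stream"]) ==
      "application/json"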
client.getHeaderDenebPlain(DenebSlot, parent_hash, + publicKey, restAcceptType = restAcceptType1) + response2 = + await client.getHeaderElectraPlain(ElectraSlot, parent_hash, + publicKey, restAcceptType = restAcceptType2) + response3 = + await client.getHeaderFuluPlain(FuluSlot, parent_hash, + publicKey, restAcceptType = restAcceptType3) + + check: + response1.status == 200 + response2.status == 200 + response3.status == 200 + response1.contentType.isSome() + response2.contentType.isSome() + response3.contentType.isSome() + response1.contentType.get().mediaType == responseMediaType1 + response2.contentType.get().mediaType == responseMediaType2 + response3.contentType.get().mediaType == responseMediaType3 + + let + version1 = response1.headers.getString("eth-consensus-version") + version2 = response2.headers.getString("eth-consensus-version") + version3 = response3.headers.getString("eth-consensus-version") + + check: + version1 == ConsensusFork.Deneb.toString() + version2 == ConsensusFork.Electra.toString() + version3 == ConsensusFork.Fulu.toString() + + let + bid1res = + decodeBytesJsonOrSsz(GetHeaderResponseDeneb, response1.data, + response1.contentType, version1) + bid2res = + decodeBytesJsonOrSsz(GetHeaderResponseElectra, response2.data, + response2.contentType, version2) + bid3res = + decodeBytesJsonOrSsz(GetHeaderResponseFulu, response3.data, + response3.contentType, version3) + + check: + bid1res.isOk() + bid2res.isOk() + bid3res.isOk() + bid1res.get().data.message.header.parent_hash == parent_hash + bid2res.get().data.message.header.parent_hash == parent_hash + bid3res.get().data.message.header.parent_hash == parent_hash + + template submitBlindedBlockTest( + requestKind: TestKind, + responseKind: TestKind + ): untyped = + let + parent_hash1 = Eth2Digest(data: rng[].generate(array[32, byte])) + parent_hash2 = Eth2Digest(data: rng[].generate(array[32, byte])) + parent_hash3 = Eth2Digest(data: rng[].generate(array[32, byte])) + privateKey1 = keyGen(rng[]).valueOr: + raiseAssert "Unable to generate private key" + privateKey2 = keyGen(rng[]).valueOr: + raiseAssert "Unable to generate private key" + privateKey3 = keyGen(rng[]).valueOr: + raiseAssert "Unable to generate private key" + publicKey1 = privateKey1.toPubKey().toPubKey() + publicKey2 = privateKey1.toPubKey().toPubKey() + publicKey3 = privateKey1.toPubKey().toPubKey() + + node.validators.reset() + node.validators.add(publicKey1) + node.validators.add(publicKey2) + node.validators.add(publicKey3) + + let + blck1 = + prepare(deneb_mev.SignedBlindedBeaconBlock, DenebSlot, parent_hash1, + 0'u64, privateKey1) + blck2 = + prepare(electra_mev.SignedBlindedBeaconBlock, ElectraSlot, parent_hash2, + 1'u64, privateKey2) + blck3 = + prepare(fulu_mev.SignedBlindedBeaconBlock, FuluSlot, parent_hash3, + 2'u64, privateKey3) + + restContentType1 = + if requestKind == TestKind.Ssz: + "application/octet-stream" + else: + "application/json" + restContentType2 = + if requestKind == TestKind.Ssz: + "application/octet-stream" + else: + "application/json" + restContentType3 = + if requestKind == TestKind.Ssz: + "application/octet-stream" + else: + "application/json" + (restAcceptType1, responseMediaType1) = + if responseKind == TestKind.Ssz: + ("application/octet-stream", OctetStreamMediaType) + else: + ("application/json", ApplicationJsonMediaType) + (restAcceptType2, responseMediaType2) = + if responseKind == TestKind.Ssz: + ("application/octet-stream,application/json;q=0.9", + OctetStreamMediaType) + else: + 
("application/json,application/octet-stream;q=0.9", + ApplicationJsonMediaType) + (restAcceptType3, responseMediaType3) = + if responseKind == TestKind.Ssz: + ("application/json;q=0.5,application/octet-stream;q=1.0", + OctetStreamMediaType) + else: + ("application/octet-stream;q=0.5,application/json;q=1.0", + ApplicationJsonMediaType) + + response1 = + await client.submitBlindedBlockPlain( + blck1, + restContentType = restContentType1, + restAcceptType = restAcceptType1, + extraHeaders = @[("eth-consensus-version", + toString(ConsensusFork.Deneb))]) + response2 = + await client.submitBlindedBlockPlain( + blck2, + restContentType = restContentType2, + restAcceptType = restAcceptType2, + extraHeaders = @[("eth-consensus-version", + toString(ConsensusFork.Electra))]) + response3 = + await client.submitBlindedBlockPlain( + blck3, + restContentType = restContentType3, + restAcceptType = restAcceptType3, + extraHeaders = @[("eth-consensus-version", + toString(ConsensusFork.Fulu))]) + check: + response1.status == 200 + response2.status == 200 + response3.status == 200 + + let + version1 = response1.headers.getString("eth-consensus-version") + version2 = response2.headers.getString("eth-consensus-version") + version3 = response3.headers.getString("eth-consensus-version") + + check: + response1.contentType.isSome() + response2.contentType.isSome() + response3.contentType.isSome() + response1.contentType.get().mediaType == responseMediaType1 + response2.contentType.get().mediaType == responseMediaType2 + response3.contentType.get().mediaType == responseMediaType3 + version1 == ConsensusFork.Deneb.toString() + version2 == ConsensusFork.Electra.toString() + version3 == ConsensusFork.Fulu.toString() + + let + payload1res = + decodeBytesJsonOrSsz(SubmitBlindedBlockResponseDeneb, + response1.data, response1.contentType, version1) + payload2res = + decodeBytesJsonOrSsz(SubmitBlindedBlockResponseElectra, + response2.data, response2.contentType, version2) + payload3res = + decodeBytesJsonOrSsz(SubmitBlindedBlockResponseFulu, + response3.data, response3.contentType, version3) + + check: + payload1res.isOk() + payload2res.isOk() + payload3res.isOk() + payload1res.get().data.execution_payload.parent_hash == parent_hash1 + payload2res.get().data.execution_payload.parent_hash == parent_hash2 + payload3res.get().data.execution_payload.parent_hash == parent_hash3 + + asyncTest "/eth/v1/builder/status test": + let response = await client.getStatus() + check response.status == 200 + + asyncTest "/eth/v1/builder/validators [json] test": + let + response1 = + await client.registerValidator( + generateRegistrations(rng[], 5)) + response2 = + await client.registerValidator( + generateRegistrations(rng[], 5), + restContentType = "application/json") + check: + response1.status == 200 + response2.status == 200 + + asyncTest "/eth/v1/builder/validators [ssz] test": + let + response = + await client.registerValidator( + generateRegistrations(rng[], 5), + restContentType = "application/octet-stream") + check response.status == 200 + + asyncTest "/eth/v1/builder/header [json] test": + getHeaderTest(TestKind.Json) + + asyncTest "/eth/v1/builder/header [ssz] test": + getHeaderTest(TestKind.Ssz) + + asyncTest "/eth/v1/builder/blinded_blocks [json/json] test": + submitBlindedBlockTest(TestKind.Json, TestKind.Json) + + asyncTest "/eth/v1/builder/blinded_blocks [json/ssz] test": + submitBlindedBlockTest(TestKind.Json, TestKind.Ssz) + + asyncTest "/eth/v1/builder/blinded_blocks [ssz/ssz] test": + submitBlindedBlockTest(TestKind.Ssz, 
TestKind.Ssz) + + asyncTest "/eth/v1/builder/blinded_blocks [ssz/json] test": + submitBlindedBlockTest(TestKind.Ssz, TestKind.Json) + + suiteTeardown: + waitFor server.stop() + +testSuite() diff --git a/tests/test_peer_pool.nim b/tests/test_peer_pool.nim index bf648e07b1..a623d8f954 100644 --- a/tests/test_peer_pool.nim +++ b/tests/test_peer_pool.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2019-2024 Status Research & Development GmbH +# Copyright (c) 2019-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -8,19 +8,17 @@ {.push raises: [].} {.used.} -import std/[random, heapqueue, tables] -import chronos +import std/[random, heapqueue, tables, sequtils, strutils] +import chronos, chronos/unittest2/asynctests import ../beacon_chain/networking/peer_pool import ./testutil -template closureScope(raisesAnnotation: untyped, body: untyped): untyped = - (proc() {.raises: raisesAnnotation} = body)() - type PeerTestID = string - PeerTest = object + PeerTest = ref object id: PeerTestID weight: int + metadata: uint64 future: Future[void] func getKey(peer: PeerTest): PeerTestID = @@ -29,13 +27,24 @@ func getKey(peer: PeerTest): PeerTestID = func getFuture(peer: PeerTest): Future[void] = peer.future -func `<`(a, b: PeerTest): bool = - `<`(a.weight, b.weight) +func getMetadata(peer: PeerTest): uint64 = + peer.metadata + +func cmp*(a, b: PeerTest): int = + cmp(a.weight, b.weight) proc init*(t: typedesc[PeerTest], id: string = "", weight: int = 0): PeerTest = PeerTest(id: id, weight: weight, future: newFuture[void]()) +proc init*(t: typedesc[PeerTest], id: string = "", + weight: int = 0, metadata: uint64): PeerTest = + PeerTest(id: id, weight: weight, future: newFuture[void](), + metadata: metadata) + +proc toString(a: openArray[PeerTest]): string = + "[" & a.mapIt(it.getKey()).join(",") & "]" + proc close(peer: PeerTest) = peer.future.complete() @@ -241,7 +250,7 @@ suite "PeerPool testing suite": itemFut23.finished == false itemFut24.finished == false - test "Acquire/Sorting and consistency test": closureScope([CatchableError]): + test "Acquire/Sorting and consistency test": const TestsCount = 1000 MaxNumber = 1_000_000 @@ -310,61 +319,140 @@ suite "PeerPool testing suite": check waitFor(testAcquireRelease()) == TestsCount - test "deletePeer() test": - proc testDeletePeer(): Future[bool] {.async.} = - var pool = newPeerPool[PeerTest, PeerTestID]() - var peer = PeerTest.init("deletePeer") - - ## Delete available peer - doAssert(pool.addPeerNoWait(peer, - PeerType.Incoming) == PeerStatus.Success) - doAssert(pool.len == 1) - doAssert(pool.lenAvailable == 1) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) == 1) - doAssert(pool.deletePeer(peer) == true) - doAssert(pool.len == 0) - doAssert(pool.lenAvailable == 0) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) == 0) + asyncTest "deletePeer() test": + var pool = newPeerPool[PeerTest, PeerTestID]() - ## Delete acquired peer - peer = PeerTest.init("closingPeer") - doAssert(pool.addPeerNoWait(peer, - PeerType.Incoming) == PeerStatus.Success) - doAssert(pool.len == 1) - doAssert(pool.lenAvailable == 1) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) 
== 1) - var apeer = await pool.acquire() - doAssert(pool.deletePeer(peer) == true) - doAssert(pool.len == 1) - doAssert(pool.lenAvailable == 0) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) == 0) + ## Delete available peer + block: + let peer = PeerTest.init("deletePeer") + check: + pool.addPeerNoWait(peer, PeerType.Incoming) == PeerStatus.Success + pool.len == 1 + pool.lenAvailable == 1 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 1 + pool.deletePeer(peer) == true + pool.len == 0 + pool.lenAvailable == 0 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 0 + + ## Delete acquired peer + block: + let peer = PeerTest.init("closingPeer") + check: + pool.addPeerNoWait(peer, PeerType.Incoming) == PeerStatus.Success + pool.len == 1 + pool.lenAvailable == 1 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 1 + let apeer = await pool.acquire() + check: + pool.deletePeer(peer) == true + pool.len == 1 + pool.lenAvailable == 0 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 0 pool.release(apeer) - doAssert(pool.len == 0) - doAssert(pool.lenAvailable == 0) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) == 0) + check: + pool.len == 0 + pool.lenAvailable == 0 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 0 + + ## Force delete acquired peer + block: + let peer = PeerTest.init("closingPeer") + check: + pool.addPeerNoWait(peer, PeerType.Incoming) == PeerStatus.Success + pool.len == 1 + pool.lenAvailable == 1 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 1 + let apeer = await pool.acquire() + check: + pool.deletePeer(apeer, true) == true + pool.len == 0 + pool.lenAvailable == 0 + pool.lenAvailable({PeerType.Outgoing}) == 0 + pool.lenAvailable({PeerType.Incoming}) == 0 + + ## Delete single available peer in pool full of peers + block: + for i in 0 ..< 100: + let peer = PeerTest.init("peer" & $i) + check pool.addPeerNoWait(peer, PeerType.Incoming) == PeerStatus.Success + for i in 100 ..< 200: + let peer = PeerTest.init("peer" & $i) + check pool.addPeerNoWait(peer, PeerType.Outgoing) == PeerStatus.Success + check: + pool.len == 200 + pool.lenAvailable == 200 + pool.lenAvailable({PeerType.Outgoing}) == 100 + pool.lenAvailable({PeerType.Incoming}) == 100 + for i in 0 ..< 20: + let + index = 90 + i + peerKey = "peer" & $index + dpeer = pool.getOrDefault(peerKey, default(PeerTest)) + check: + pool.deletePeer(dpeer) == true + pool.hasPeer(peerKey) == false + check: + pool.len == 180 + pool.lenAvailable == 180 + pool.lenAvailable({PeerType.Outgoing}) == 90 + pool.lenAvailable({PeerType.Incoming}) == 90 + pool.clear() + + ## Delete single acquired peer in pool full of peers + block: + for i in 0 ..< 100: + let peer = PeerTest.init("peer" & $i) + check pool.addPeerNoWait(peer, PeerType.Incoming) == PeerStatus.Success + for i in 100 ..< 200: + let peer = PeerTest.init("peer" & $i) + check pool.addPeerNoWait(peer, PeerType.Outgoing) == PeerStatus.Success + check: + pool.len == 200 + pool.lenAvailable == 200 + pool.lenAvailable({PeerType.Outgoing}) == 100 + pool.lenAvailable({PeerType.Incoming}) == 100 - ## Force delete acquired peer - peer = PeerTest.init("closingPeer") - doAssert(pool.addPeerNoWait(peer, - PeerType.Incoming) == PeerStatus.Success) - 
doAssert(pool.len == 1) - doAssert(pool.lenAvailable == 1) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) == 1) - apeer = await pool.acquire() - doAssert(pool.deletePeer(peer, true) == true) - doAssert(pool.len == 0) - doAssert(pool.lenAvailable == 0) - doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0) - doAssert(pool.lenAvailable({PeerType.Incoming}) == 0) + for i in 0 ..< 20: + let apeer = await pool.acquire() + check pool.deletePeer(apeer) == true + pool.release(apeer) + check pool.hasPeer(apeer.getKey()) == false - result = true - check waitFor(testDeletePeer()) == true + check: + pool.len == 180 + pool.lenAvailable == 180 + pool.clear() + + ## Force delete single acquired peer in pool full of peers + block: + for i in 0 ..< 100: + let peer = PeerTest.init("peer" & $i) + check pool.addPeerNoWait(peer, PeerType.Incoming) == PeerStatus.Success + for i in 100 ..< 200: + let peer = PeerTest.init("peer" & $i) + check pool.addPeerNoWait(peer, PeerType.Outgoing) == PeerStatus.Success + check: + pool.len == 200 + pool.lenAvailable == 200 + pool.lenAvailable({PeerType.Outgoing}) == 100 + pool.lenAvailable({PeerType.Incoming}) == 100 + + for i in 0 ..< 20: + let apeer = await pool.acquire() + check: + pool.deletePeer(apeer, true) == true + pool.hasPeer(apeer.getKey()) == false + + check: + pool.len == 180 + pool.lenAvailable == 180 test "Peer lifetime test": proc testPeerLifetime(): Future[bool] {.async.} = @@ -415,7 +503,7 @@ suite "PeerPool testing suite": check waitFor(testPeerLifetime()) == true - test "Safe/Clear test": closureScope([CatchableError]): + test "Safe/Clear test": var pool = newPeerPool[PeerTest, PeerTestID]() var peer1 = PeerTest.init("peer1", 10) var peer2 = PeerTest.init("peer2", 9) @@ -462,7 +550,7 @@ suite "PeerPool testing suite": asyncSpawn testConsumer() check waitFor(testClose()) == true - test "Access peers by key test": closureScope([CatchableError]): + test "Access peers by key test": var pool = newPeerPool[PeerTest, PeerTestID]() var peer1 = PeerTest.init("peer1", 10) var peer2 = PeerTest.init("peer2", 9) @@ -591,6 +679,285 @@ suite "PeerPool testing suite": len(acqui2) == 2 len(acqui3) == 1 + asyncTest "Custom filters test": + var pool = newPeerPool[PeerTest, PeerTestID]() + let + peer1 = PeerTest.init("peer1", 10, 256'u64) + peer2 = PeerTest.init("peer2", 9, 0'u64) + peer3 = PeerTest.init("peer3", 8, 4'u64) + peer4 = PeerTest.init("peer4", 7, 2'u64) + peer5 = PeerTest.init("peer5", 6, 2'u64) + peer6 = PeerTest.init("peer6", 5, 2'u64) + peer7 = PeerTest.init("peer7", 4, 4'u64) + peer8 = PeerTest.init("peer8", 3, 128'u64) + peer9 = PeerTest.init("peer9", 2, 4'u64) + peer10 = PeerTest.init("peer10", 1, 256'u64) + + proc custom1(peer: PeerTest): bool = + true + + proc custom2(peer: PeerTest): bool = + if peer.getMetadata() == 2'u64: + true + else: + false + + proc custom3(peer: PeerTest): bool = + if peer.getMetadata() in [2'u64, 4'u64]: + true + else: + false + + check: + pool.addPeerNoWait(peer2, PeerType.Incoming) == PeerStatus.Success + pool.addPeerNoWait(peer3, PeerType.Incoming) == PeerStatus.Success + pool.addPeerNoWait(peer1, PeerType.Incoming) == PeerStatus.Success + pool.addPeerNoWait(peer4, PeerType.Incoming) == PeerStatus.Success + pool.addPeerNoWait(peer5, PeerType.Incoming) == PeerStatus.Success + + pool.addPeerNoWait(peer10, PeerType.Outgoing) == PeerStatus.Success + pool.addPeerNoWait(peer7, PeerType.Outgoing) == PeerStatus.Success + pool.addPeerNoWait(peer6, PeerType.Outgoing) == 
PeerStatus.Success + pool.addPeerNoWait(peer8, PeerType.Outgoing) == PeerStatus.Success + pool.addPeerNoWait(peer9, PeerType.Outgoing) == PeerStatus.Success + + template checkTotal() = + let + total1 = + pool.peers({PeerType.Incoming, PeerType.Outgoing}, custom1).toSeq() + total2 = + pool.peers({PeerType.Incoming}, custom1).toSeq() + total3 = + pool.peers({PeerType.Outgoing}, custom1).toSeq() + total4 = + pool.peers({PeerType.Incoming, PeerType.Outgoing}, custom2).toSeq() + total5 = + pool.peers({PeerType.Incoming}, custom2).toSeq() + total6 = + pool.peers({PeerType.Outgoing}, custom2).toSeq() + total7 = + pool.peers({PeerType.Incoming, PeerType.Outgoing}, custom3).toSeq() + total8 = + pool.peers({PeerType.Incoming}, custom3).toSeq() + total9 = + pool.peers({PeerType.Outgoing}, custom3).toSeq() + + check: + total1.toString() == + "[peer1,peer2,peer3,peer4,peer5,peer6,peer7,peer8,peer9,peer10]" + total2.toString() == "[peer1,peer2,peer3,peer4,peer5]" + total3.toString() == "[peer6,peer7,peer8,peer9,peer10]" + total4.toString() == "[peer4,peer5,peer6]" + total5.toString() == "[peer4,peer5]" + total6.toString() == "[peer6]" + total7.toString() == "[peer3,peer4,peer5,peer6,peer7,peer9]" + total8.toString() == "[peer3,peer4,peer5]" + total9.toString() == "[peer6,peer7,peer9]" + + checkTotal() + + block: + let + avail1 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom1).toSeq() + avail2 = + pool.availablePeers({PeerType.Incoming}, custom1).toSeq() + avail3 = + pool.availablePeers({PeerType.Outgoing}, custom1).toSeq() + avail4 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom2).toSeq() + avail5 = + pool.availablePeers({PeerType.Incoming}, custom2).toSeq() + avail6 = + pool.availablePeers({PeerType.Outgoing}, custom2).toSeq() + avail7 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom3).toSeq() + avail8 = + pool.availablePeers({PeerType.Incoming}, custom3).toSeq() + avail9 = + pool.availablePeers({PeerType.Outgoing}, custom3).toSeq() + + check: + avail1.toString() == + "[peer1,peer2,peer3,peer4,peer5,peer6,peer7,peer8,peer9,peer10]" + avail2.toString() == "[peer1,peer2,peer3,peer4,peer5]" + avail3.toString() == "[peer6,peer7,peer8,peer9,peer10]" + avail4.toString() == "[peer4,peer5,peer6]" + avail5.toString() == "[peer4,peer5]" + avail6.toString() == "[peer6]" + avail7.toString() == "[peer3,peer4,peer5,peer6,peer7,peer9]" + avail8.toString() == "[peer3,peer4,peer5]" + avail9.toString() == "[peer6,peer7,peer9]" + + let + tpeer1 = await pool.acquire({PeerType.Incoming, PeerType.Outgoing}, + custom1) + tpeer2 = await pool.acquire({PeerType.Incoming}, custom2) + tpeer3 = await pool.acquire({PeerType.Outgoing}, custom2) + tpeer4 = await pool.acquire({PeerType.Incoming}, custom3) + tpeer5 = await pool.acquire({PeerType.Outgoing}, custom3) + + check: + tpeer1.getKey() == "peer1" + tpeer2.getKey() == "peer4" + tpeer3.getKey() == "peer6" + tpeer4.getKey() == "peer3" + tpeer5.getKey() == "peer7" + + checkTotal() + + block: + let + avail1 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom1).toSeq() + avail2 = + pool.availablePeers({PeerType.Incoming}, custom1).toSeq() + avail3 = + pool.availablePeers({PeerType.Outgoing}, custom1).toSeq() + avail4 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom2).toSeq() + avail5 = + pool.availablePeers({PeerType.Incoming}, custom2).toSeq() + avail6 = + pool.availablePeers({PeerType.Outgoing}, custom2).toSeq() + avail7 = + pool.availablePeers({PeerType.Incoming, 
PeerType.Outgoing}, + custom3).toSeq() + avail8 = + pool.availablePeers({PeerType.Incoming}, custom3).toSeq() + avail9 = + pool.availablePeers({PeerType.Outgoing}, custom3).toSeq() + + check: + avail1.toString() == "[peer2,peer5,peer8,peer9,peer10]" + avail2.toString() == "[peer2,peer5]" + avail3.toString() == "[peer8,peer9,peer10]" + + avail4.toString() == "[peer5]" + avail5.toString() == "[peer5]" + avail6.toString() == "[]" + + avail7.toString() == "[peer5,peer9]" + avail8.toString() == "[peer5]" + avail9.toString() == "[peer9]" + + let + tpeer6 = await pool.acquire({PeerType.Incoming, PeerType.Outgoing}, + custom1) + tpeer7 = await pool.acquire({PeerType.Incoming}, custom2) + tpeer8 = await pool.acquire({PeerType.Outgoing}, custom3) + tpeer9 = await pool.acquire({PeerType.Outgoing}, custom1) + tpeer10 = await pool.acquire({PeerType.Outgoing}, custom1) + + check: + tpeer6.getKey() == "peer2" + tpeer7.getKey() == "peer5" + tpeer8.getKey() == "peer9" + tpeer9.getKey() == "peer8" + tpeer10.getKey() == "peer10" + + checkTotal() + + block: + let + avail1 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom1).toSeq() + avail2 = + pool.availablePeers({PeerType.Incoming}, custom1).toSeq() + avail3 = + pool.availablePeers({PeerType.Outgoing}, custom1).toSeq() + avail4 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom2).toSeq() + avail5 = + pool.availablePeers({PeerType.Incoming}, custom2).toSeq() + avail6 = + pool.availablePeers({PeerType.Outgoing}, custom2).toSeq() + avail7 = + pool.availablePeers({PeerType.Incoming, PeerType.Outgoing}, + custom3).toSeq() + avail8 = + pool.availablePeers({PeerType.Incoming}, custom3).toSeq() + avail9 = + pool.availablePeers({PeerType.Outgoing}, custom3).toSeq() + + check: + avail1.toString() == "[]" + avail2.toString() == "[]" + avail3.toString() == "[]" + + avail4.toString() == "[]" + avail5.toString() == "[]" + avail6.toString() == "[]" + + avail7.toString() == "[]" + avail8.toString() == "[]" + avail9.toString() == "[]" + + let + fut1 = pool.acquire({PeerType.Incoming}, custom2) + fut2 = pool.acquire({PeerType.Incoming}, custom2) + fut3 = pool.acquire({PeerType.Outgoing}, custom3) + fut4 = pool.acquire({PeerType.Outgoing}, custom3) + + check: + fut1.finished == false + fut2.finished == false + fut3.finished == false + fut4.finished == false + + pool.release(tpeer1) + await sleepAsync(100.milliseconds) + check: + fut1.finished == false + fut2.finished == false + fut3.finished == false + fut4.finished == false + + pool.release(tpeer2) + await sleepAsync(100.milliseconds) + check: + fut1.finished == true + fut1.value.getKey() == "peer4" + fut2.finished == false + fut3.finished == false + fut4.finished == false + + pool.release(tpeer3) + await sleepAsync(100.milliseconds) + check: + fut2.finished == false + fut3.finished == true + fut3.value.getKey() == "peer6" + fut4.finished == false + + pool.release(tpeer5) + await sleepAsync(100.milliseconds) + check: + fut2.finished == false + fut4.finished == true + fut4.value.getKey() == "peer7" + + pool.release(tpeer4) + pool.release(tpeer6) + pool.release(tpeer8) + pool.release(tpeer10) + await sleepAsync(100.milliseconds) + check: + fut2.finished == false + + pool.release(tpeer7) + await sleepAsync(100.milliseconds) + check: + fut2.finished == true + fut2.value.getKey() == "peer5" + test "Score check test": var pool = newPeerPool[PeerTest, PeerTestID]() func scoreCheck(peer: PeerTest): bool = @@ -889,8 +1256,8 @@ suite "PeerPool testing suite": 
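Editorial note (not part of the diff): the "Custom filters test" above passes predicates (custom1, custom2, custom3) to peers(), availablePeers() and acquire() so that only peers with matching metadata are considered. A reduced sketch of the same predicate filtering over a plain seq, with a placeholder Peer type; the real pool additionally restricts by PeerType set and orders candidates by weight:

import std/sequtils

type Peer = object
  id: string
  metadata: uint64   # stands in for PeerTest.metadata

func custom2(p: Peer): bool = p.metadata == 2'u64
func custom3(p: Peer): bool = p.metadata in [2'u64, 4'u64]

when isMainModule:
  let peers = @[
    Peer(id: "peer3", metadata: 4), Peer(id: "peer4", metadata: 2),
    Peer(id: "peer5", metadata: 2), Peer(id: "peer8", metadata: 128)]
  doAssert peers.filter(custom2).mapIt(it.id) == @["peer4", "peer5"]
  doAssert peers.filter(custom3).mapIt(it.id) == @["peer3", "peer4", "peer5"]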
pool7.lenSpace({PeerType.Incoming}) == 0 pool7.lenSpace({PeerType.Outgoing}) == high(int) - 39 - # We could not check whole high(int), so we check 10_000 items - for i in 0 ..< 10_000: + # We could not check whole high(int), so we check 1000 items + for i in 0 ..< 1000: check: pool7.addPeerNoWait(PeerTest.init("idOut" & $i), PeerType.Outgoing) == PeerStatus.Success @@ -914,8 +1281,8 @@ suite "PeerPool testing suite": pool8.lenSpace({PeerType.Outgoing}) == 0 pool8.lenSpace({PeerType.Incoming}) == high(int) - 40 - # We could not check whole high(int), so we check 10_000 items - for i in 0 ..< 10_000: + # We could not check whole high(int), so we check 1000 items + for i in 0 ..< 1000: check: pool8.addPeerNoWait(PeerTest.init("idInc" & $i), PeerType.Incoming) == PeerStatus.Success @@ -924,8 +1291,8 @@ suite "PeerPool testing suite": pool8.lenSpace({PeerType.Incoming}) == high(int) - 40 - (i + 1) # POOL 9 - # We could not check whole high(int), so we check 10_000 items - for i in 0 ..< 10_000: + # We could not check whole high(int), so we check 1000 items + for i in 0 ..< 1000: check: pool9.addPeerNoWait(PeerTest.init("idInc" & $i), PeerType.Incoming) == PeerStatus.Success diff --git a/tests/test_eip7594_helpers.nim b/tests/test_peerdas_helpers.nim similarity index 95% rename from tests/test_eip7594_helpers.nim rename to tests/test_peerdas_helpers.nim index e535e663eb..1f7e1b97b4 100644 --- a/tests/test_eip7594_helpers.nim +++ b/tests/test_peerdas_helpers.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2024 Status Research & Development GmbH +# Copyright (c) 2018-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -14,7 +14,7 @@ import results, kzg4844/[kzg_abi, kzg], ./consensus_spec/[os_ops, fixtures_utils], - ../beacon_chain/spec/[helpers, eip7594_helpers], + ../beacon_chain/spec/[helpers, peerdas_helpers], ../beacon_chain/spec/datatypes/[fulu, deneb] from std/strutils import rsplit @@ -79,7 +79,7 @@ suite "EIP-7594 Unit Tests": blob_count = rng.rand(1..(NUMBER_OF_COLUMNS.int)) blobs = createSampleKzgBlobs(blob_count, rng.rand(int)) extended_matrix = compute_matrix(blobs) - + # Construct a matrix with some entries missing var partial_matrix: seq[MatrixEntry] for blob_entries in chunks(extended_matrix.get, kzg_abi.CELLS_PER_EXT_BLOB): @@ -94,8 +94,8 @@ suite "EIP-7594 Unit Tests": doAssert recovered_matrix.get == extended_matrix.get, "Both matrices don't match!" testRecoverMatrix() -suite "EIP-7594 Sampling Tests": - test "EIP7594: Extended Sample Count": +suite "PeerDAS Sampling Tests": + test "PeerDAS: Extended Sample Count": proc testExtendedSampleCount() = let samplesPerSlot = 16 const tests = [ diff --git a/tests/test_signing_node.nim b/tests/test_signing_node.nim index e99d4ee8db..e0030cc678 100644 --- a/tests/test_signing_node.nim +++ b/tests/test_signing_node.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2023-2024 Status Research & Development GmbH +# Copyright (c) 2023-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -9,15 +9,18 @@ {.used.} import - unittest2, chronicles, results, stew/[byteutils, io2], + unittest2, chronicles, results, chronos/asyncproc, chronos/unittest2/asynctests, - ../beacon_chain/spec/[signatures, crypto], + ../beacon_chain/spec/crypto, ../beacon_chain/spec/eth2_apis/rest_remote_signer_calls, - ../beacon_chain/filepath, ../beacon_chain/validators/validator_pool from std/os import getEnv, osErrorMsg +from stew/byteutils import hexToByteArray +from ../beacon_chain/filepath import secureCreatePath, secureWriteFile +from ../beacon_chain/spec/signatures import + get_aggregate_and_proof_signature, get_block_signature const TestDirectoryName = "test-signing-node" @@ -62,9 +65,10 @@ const AgAttestationPhase0 = "{\"data\":{\"aggregation_bits\":\"0x01\",\"signature\":\"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505\",\"data\":{\"slot\":\"1\",\"index\":\"1\",\"beacon_block_root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\",\"source\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"},\"target\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"}}}}" AgAttestationElectra = "{\"data\":{\"aggregation_bits\":\"0x01\",\"committee_bits\":\"0x0000000000000001\",\"signature\":\"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505\",\"data\":{\"slot\":\"1\",\"index\":\"1\",\"beacon_block_root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\",\"source\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"},\"target\":{\"epoch\":\"1\",\"root\":\"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2\"}}}}" - CapellaBlock = 
"{\"message\":{\"slot\":\"5297696\",\"proposer_index\":\"153094\",\"parent_root\":\"0xe6106533af9be918120ead7440a8006c7f123cc3cb7daf1f11d951864abea014\",\"state_root\":\"0xf86196d34500ca25d1f4e7431d4d52f6f85540bcaf97dd0d2ad9ecdb3eebcdf0\",\"body\":{\"randao_reveal\":\"0xa7efee3d5ddceb60810b23e3b5d39734696418f41dfd13a0851c7be7a72acbdceaa61e1db27513801917d72519d1c1040ccfed829faf06abe06d9964949554bf4369134b66de715ea49eb4fecf3e2b7e646f1764a1993e31e53dbc6557929c12\",\"eth1_data\":{\"deposit_root\":\"0x8ec87d7219a3c873fff3bfe206b4f923d1b471ce4ff9d6d6ecc162ef07825e14\",\"deposit_count\":\"259476\",\"block_hash\":\"0x877b6f8332c7397251ff3f0c5cecec105ff7d4cb78251b47f91fd15a86a565ab\"},\"graffiti\":\"\",\"proposer_slashings\":[],\"attester_slashings\":[],\"attestations\":[],\"deposits\":[],\"voluntary_exits\":[],\"sync_aggregate\":{\"sync_committee_bits\":\"0x733dfda7f5ffde5ade73367fcbf7fffeef7fe43777ffdffab9dbad6f7eed5fff9bfec4affdefbfaddf35bf5efbff9ffff9dfd7dbf97fbfcdfaddfeffbf95f75f\",\"sync_committee_signature\":\"0x81fdf76e797f81b0116a1c1ae5200b613c8041115223cd89e8bd5477aab13de6097a9ebf42b130c59527bbb4c96811b809353a17c717549f82d4bd336068ef0b99b1feebd4d2432a69fa77fac12b78f1fcc9d7b59edbeb381adf10b15bc4a520\"},\"execution_payload\":{\"parent_hash\":\"0x14c2242a8cfbce559e84c391f5f16d10d7719751b8558873012dc88ae5a193e8\",\"fee_recipient\":\"$1\",\"state_root\":\"0xdf8d96b2c292736d39e72e25802c2744d34d3d3c616de5b362425cab01f72fa5\",\"receipts_root\":\"0x4938a2bf640846d213b156a1a853548b369cd02917fa63d8766ab665d7930bac\",\"logs_bloom\":\"0x298610600038408c201080013832408850a00bc8f801920121840030a015310010e2a0e0108628110552062811441c84802f43825c4fc82140b036c58025a28800054c80a44025c052090a0f2c209a0400058040019ea0008e589084078048050880930113a2894082e0112408b088382402a851621042212aa40018a408d07e178c68691486411aa9a2809043b000a04c040000065a030028018540b04b1820271d00821b00c29059095022322c10a530060223240416140190056608200063c82248274ba8f0098e402041cd9f451031481a1010b8220824833520490221071898802d206348449116812280014a10a2d1c210100a30010802490f0a221849\",\"prev_randao\":\"0xc061711e135cd40531ec3ee29d17d3824c0e5f80d07f721e792ab83240aa0ab5\",\"block_number\":\"8737497\",\"gas_limit\":\"30000000\",\"gas_used\":\"16367052\",\"timestamp\":\"1680080352\",\"extra_data\":\"0xd883010b05846765746888676f312e32302e32856c696e7578\",\"base_fee_per_gas\":\"231613172261\",\"block_hash\":\"0x5aa9fd22a9238925adb2b038fd6eafc77adabf554051db5bc16ae5168a52eff6\",\"transactions\":[],\"withdrawals\":[]},\"bls_to_execution_changes\":[]}},\"signature\":\"$2\"}" DenebBlockContents = 
"{\"signed_block\":{\"message\":{\"slot\":\"5297696\",\"proposer_index\":\"153094\",\"parent_root\":\"0xe6106533af9be918120ead7440a8006c7f123cc3cb7daf1f11d951864abea014\",\"state_root\":\"0xf86196d34500ca25d1f4e7431d4d52f6f85540bcaf97dd0d2ad9ecdb3eebcdf0\",\"body\":{\"randao_reveal\":\"0xa7efee3d5ddceb60810b23e3b5d39734696418f41dfd13a0851c7be7a72acbdceaa61e1db27513801917d72519d1c1040ccfed829faf06abe06d9964949554bf4369134b66de715ea49eb4fecf3e2b7e646f1764a1993e31e53dbc6557929c12\",\"eth1_data\":{\"deposit_root\":\"0x8ec87d7219a3c873fff3bfe206b4f923d1b471ce4ff9d6d6ecc162ef07825e14\",\"deposit_count\":\"259476\",\"block_hash\":\"0x877b6f8332c7397251ff3f0c5cecec105ff7d4cb78251b47f91fd15a86a565ab\"},\"graffiti\":\"\",\"proposer_slashings\":[],\"attester_slashings\":[],\"attestations\":[],\"deposits\":[],\"voluntary_exits\":[],\"sync_aggregate\":{\"sync_committee_bits\":\"0x733dfda7f5ffde5ade73367fcbf7fffeef7fe43777ffdffab9dbad6f7eed5fff9bfec4affdefbfaddf35bf5efbff9ffff9dfd7dbf97fbfcdfaddfeffbf95f75f\",\"sync_committee_signature\":\"0x81fdf76e797f81b0116a1c1ae5200b613c8041115223cd89e8bd5477aab13de6097a9ebf42b130c59527bbb4c96811b809353a17c717549f82d4bd336068ef0b99b1feebd4d2432a69fa77fac12b78f1fcc9d7b59edbeb381adf10b15bc4a520\"},\"execution_payload\":{\"parent_hash\":\"0x14c2242a8cfbce559e84c391f5f16d10d7719751b8558873012dc88ae5a193e8\",\"fee_recipient\":\"$1\",\"state_root\":\"0xdf8d96b2c292736d39e72e25802c2744d34d3d3c616de5b362425cab01f72fa5\",\"receipts_root\":\"0x4938a2bf640846d213b156a1a853548b369cd02917fa63d8766ab665d7930bac\",\"logs_bloom\":\"0x298610600038408c201080013832408850a00bc8f801920121840030a015310010e2a0e0108628110552062811441c84802f43825c4fc82140b036c58025a28800054c80a44025c052090a0f2c209a0400058040019ea0008e589084078048050880930113a2894082e0112408b088382402a851621042212aa40018a408d07e178c68691486411aa9a2809043b000a04c040000065a030028018540b04b1820271d00821b00c29059095022322c10a530060223240416140190056608200063c82248274ba8f0098e402041cd9f451031481a1010b8220824833520490221071898802d206348449116812280014a10a2d1c210100a30010802490f0a221849\",\"prev_randao\":\"0xc061711e135cd40531ec3ee29d17d3824c0e5f80d07f721e792ab83240aa0ab5\",\"block_number\":\"8737497\",\"gas_limit\":\"30000000\",\"gas_used\":\"16367052\",\"timestamp\":\"1680080352\",\"extra_data\":\"0xd883010b05846765746888676f312e32302e32856c696e7578\",\"base_fee_per_gas\":\"231613172261\",\"block_hash\":\"0x5aa9fd22a9238925adb2b038fd6eafc77adabf554051db5bc16ae5168a52eff6\",\"transactions\":[],\"withdrawals\":[],\"blob_gas_used\":\"2316131761\",\"excess_blob_gas\":\"231613172261\"},\"bls_to_execution_changes\":[],\"blob_kzg_commitments\":[]}},\"signature\":\"$2\"},\"kzg_proofs\":[],\"blobs\":[]}" + ElectraBlockContents = 
"{\"signed_block\":{\"message\":{\"slot\":\"5297696\",\"proposer_index\":\"153094\",\"parent_root\":\"0xe6106533af9be918120ead7440a8006c7f123cc3cb7daf1f11d951864abea014\",\"state_root\":\"0xf86196d34500ca25d1f4e7431d4d52f6f85540bcaf97dd0d2ad9ecdb3eebcdf0\",\"body\":{\"randao_reveal\":\"0xa7efee3d5ddceb60810b23e3b5d39734696418f41dfd13a0851c7be7a72acbdceaa61e1db27513801917d72519d1c1040ccfed829faf06abe06d9964949554bf4369134b66de715ea49eb4fecf3e2b7e646f1764a1993e31e53dbc6557929c12\",\"eth1_data\":{\"deposit_root\":\"0x8ec87d7219a3c873fff3bfe206b4f923d1b471ce4ff9d6d6ecc162ef07825e14\",\"deposit_count\":\"259476\",\"block_hash\":\"0x877b6f8332c7397251ff3f0c5cecec105ff7d4cb78251b47f91fd15a86a565ab\"},\"graffiti\":\"\",\"proposer_slashings\":[],\"attester_slashings\":[],\"attestations\":[],\"deposits\":[],\"voluntary_exits\":[],\"sync_aggregate\":{\"sync_committee_bits\":\"0x733dfda7f5ffde5ade73367fcbf7fffeef7fe43777ffdffab9dbad6f7eed5fff9bfec4affdefbfaddf35bf5efbff9ffff9dfd7dbf97fbfcdfaddfeffbf95f75f\",\"sync_committee_signature\":\"0x81fdf76e797f81b0116a1c1ae5200b613c8041115223cd89e8bd5477aab13de6097a9ebf42b130c59527bbb4c96811b809353a17c717549f82d4bd336068ef0b99b1feebd4d2432a69fa77fac12b78f1fcc9d7b59edbeb381adf10b15bc4a520\"},\"execution_payload\":{\"parent_hash\":\"0x14c2242a8cfbce559e84c391f5f16d10d7719751b8558873012dc88ae5a193e8\",\"fee_recipient\":\"$1\",\"state_root\":\"0xdf8d96b2c292736d39e72e25802c2744d34d3d3c616de5b362425cab01f72fa5\",\"receipts_root\":\"0x4938a2bf640846d213b156a1a853548b369cd02917fa63d8766ab665d7930bac\",\"logs_bloom\":\"0x298610600038408c201080013832408850a00bc8f801920121840030a015310010e2a0e0108628110552062811441c84802f43825c4fc82140b036c58025a28800054c80a44025c052090a0f2c209a0400058040019ea0008e589084078048050880930113a2894082e0112408b088382402a851621042212aa40018a408d07e178c68691486411aa9a2809043b000a04c040000065a030028018540b04b1820271d00821b00c29059095022322c10a530060223240416140190056608200063c82248274ba8f0098e402041cd9f451031481a1010b8220824833520490221071898802d206348449116812280014a10a2d1c210100a30010802490f0a221849\",\"prev_randao\":\"0xc061711e135cd40531ec3ee29d17d3824c0e5f80d07f721e792ab83240aa0ab5\",\"block_number\":\"8737497\",\"gas_limit\":\"30000000\",\"gas_used\":\"16367052\",\"timestamp\":\"1680080352\",\"extra_data\":\"0xd883010b05846765746888676f312e32302e32856c696e7578\",\"base_fee_per_gas\":\"231613172261\",\"block_hash\":\"0x5aa9fd22a9238925adb2b038fd6eafc77adabf554051db5bc16ae5168a52eff6\",\"transactions\":[],\"withdrawals\":[],\"blob_gas_used\":\"2316131761\",\"excess_blob_gas\":\"231613172261\"},\"bls_to_execution_changes\":[],\"blob_kzg_commitments\":[],\"execution_requests\":{\"deposits\":[],\"withdrawals\":[],\"consolidations\":[]}}},\"signature\":\"$2\"},\"kzg_proofs\":[],\"blobs\":[]}" + SigningNodeAddress = "127.0.0.1" defaultSigningNodePort = 35333 @@ -92,19 +96,16 @@ proc getBlock( ): ForkedBeaconBlock {.raises: [ResultError[cstring]].} = try: case fork - of ConsensusFork.Phase0 .. ConsensusFork.Bellatrix: + of ConsensusFork.Phase0 .. 
ConsensusFork.Capella: raiseAssert "Unsupported fork" - of ConsensusFork.Capella: - ForkedBeaconBlock.init(RestJson.decode( - CapellaBlock % [feeRecipient, SomeSignature], - capella.SignedBeaconBlock).message) of ConsensusFork.Deneb: ForkedBeaconBlock.init(RestJson.decode( DenebBlockContents % [feeRecipient, SomeSignature], DenebSignedBlockContents).signed_block.message) of ConsensusFork.Electra: - debugComment "electra test signing node getblock" - raiseAssert "electra unsupported" + ForkedBeaconBlock.init(RestJson.decode( + ElectraBlockContents % [feeRecipient, SomeSignature], + ElectraSignedBlockContents).signed_block.message) of ConsensusFork.Fulu: debugFuluComment "electra test signing node getblock" raiseAssert "fulu unsupported" @@ -117,12 +118,8 @@ proc getBlock( func init(t: typedesc[Web3SignerForkedBeaconBlock], forked: ForkedBeaconBlock): Web3SignerForkedBeaconBlock = case forked.kind - of ConsensusFork.Phase0 .. ConsensusFork.Bellatrix: - raiseAssert "supports Capella and later forks" - of ConsensusFork.Capella: - Web3SignerForkedBeaconBlock( - kind: ConsensusFork.Capella, - data: forked.capellaData.toBeaconBlockHeader) + of ConsensusFork.Phase0 .. ConsensusFork.Capella: + raiseAssert "supports Deneb and later forks" of ConsensusFork.Deneb: Web3SignerForkedBeaconBlock( kind: ConsensusFork.Deneb, @@ -251,7 +248,6 @@ func getRemoteKeystoreData(data: string, basePort: int, pubkey: publicKey ) - debugComment "check electraIndex" ok case rt of RemoteSignerType.Web3Signer: KeystoreData( @@ -843,9 +839,9 @@ block: sres2.get() == rres2.get() sres3.get() == rres3.get() - asyncTest "Signing BeaconBlock (getBlockSignature(capella))": + asyncTest "Signing BeaconBlock (getBlockSignature(deneb))": let - forked = getBlock(ConsensusFork.Capella) + forked = getBlock(ConsensusFork.Deneb) blockRoot = withBlck(forked): hash_tree_root(forkyBlck) sres1 = @@ -878,9 +874,9 @@ block: sres2.get() == rres2.get() sres3.get() == rres3.get() - asyncTest "Signing BeaconBlock (getBlockSignature(deneb))": + asyncTest "Signing BeaconBlock (getBlockSignature(electra))": let - forked = getBlock(ConsensusFork.Deneb) + forked = getBlock(ConsensusFork.Electra) blockRoot = withBlck(forked): hash_tree_root(forkyBlck) sres1 = @@ -1045,9 +1041,9 @@ block: await client.closeWait() - asyncTest "Signing BeaconBlock (getBlockSignature(capella))": + asyncTest "Signing BeaconBlock (getBlockSignature(deneb))": let - fork = ConsensusFork.Capella + fork = ConsensusFork.Deneb forked1 = getBlock(fork) blockRoot1 = withBlck(forked1): hash_tree_root(forkyBlck) forked2 = getBlock(fork, SigningOtherFeeRecipient) @@ -1134,9 +1130,9 @@ block: finally: await client.closeWait() - asyncTest "Signing BeaconBlock (getBlockSignature(deneb))": + asyncTest "Signing BeaconBlock (getBlockSignature(electra))": let - fork = ConsensusFork.Deneb + fork = ConsensusFork.Electra forked1 = getBlock(fork) blockRoot1 = withBlck(forked1): hash_tree_root(forkyBlck) forked2 = getBlock(fork, SigningOtherFeeRecipient) diff --git a/tests/test_sync_committee_pool.nim b/tests/test_sync_committee_pool.nim index 0cd4fd74aa..bb12434d5f 100644 --- a/tests/test_sync_committee_pool.nim +++ b/tests/test_sync_committee_pool.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -176,11 +176,7 @@ suite "Sync committee pool": let fork = altairFork(cfg) genesis_validators_root = eth2digest(@[5.byte, 6, 7]) - - privkey1 = MockPrivKeys[1.ValidatorIndex] - privkey2 = MockPrivKeys[2.ValidatorIndex] - privkey3 = MockPrivKeys[3.ValidatorIndex] - privkey4 = MockPrivKeys[4.ValidatorIndex] + privkey = MockPrivKeys[1.ValidatorIndex] bid1 = BlockId(slot: Slot(100), root: eth2digest(@[1.byte])) bid2 = BlockId(slot: Slot(101), root: eth2digest(@[1.byte, 2])) @@ -190,16 +186,15 @@ suite "Sync committee pool": subcommittee2 = SyncSubcommitteeIndex(1) sig1 = get_sync_committee_message_signature( - fork, genesis_validators_root, bid1.slot, bid1.root, privkey1) + fork, genesis_validators_root, bid1.slot, bid1.root, privkey) sig2 = get_sync_committee_message_signature( - fork, genesis_validators_root, bid2.slot, bid2.root, privkey1) + fork, genesis_validators_root, bid2.slot, bid2.root, privkey) sig3 = get_sync_committee_message_signature( - fork, genesis_validators_root, bid3.slot, bid3.root, privkey1) + fork, genesis_validators_root, bid3.slot, bid3.root, privkey) sig4 = get_sync_committee_message_signature( - fork, genesis_validators_root, bid3.slot, bid2.root, privkey1) + fork, genesis_validators_root, bid3.slot, bid2.root, privkey) # Inserting sync committee messages - # pool.addSyncCommitteeMessage( bid1.slot, bid1, 1, sig1, subcommittee1, @[1'u64]) pool.addSyncCommitteeMessage( diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 3071aa92d5..2be73ecc51 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
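The hunks below replace the old template-based SyncQueue tests with scenario-driven ones: a scenario is a list of entries, each pairing a slot range with the verdict the mock verifier should report for every block in that range (Opt.none(VerifierError) to accept the block, or a specific VerifierError to simulate a failure), and setupVerifier turns such a scenario into a block collector plus a verifier future that each test awaits after pushing all ranges. A minimal sketch of that scenario shape follows, reusing the imports that the updated tests/test_sync_manager.nim declares in the hunk below; the name exampleScenario is illustrative only and not part of the patch.

    # Illustrative sketch only -- mirrors the import list added to
    # tests/test_sync_manager.nim below, which is sufficient for Slot,
    # Opt and VerifierError to be in scope.
    import chronos, stew/base10, chronos/unittest2/asynctests
    import ../beacon_chain/networking/peer_scores
    import ../beacon_chain/gossip_processing/block_processor,
           ../beacon_chain/sync/sync_manager,
           ../beacon_chain/sync/sync_queue,
           ../beacon_chain/spec/forks

    let exampleScenario = [
      # slot range                 verdict for every block in that range
      (Slot(0)  .. Slot(31), Opt.none(VerifierError)),               # accepted
      (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),     # already known
      (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent))  # forces a rewind
    ]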
@@ -10,17 +10,23 @@ import std/[strutils, sequtils] import unittest2 -import chronos +import chronos, stew/base10, chronos/unittest2/asynctests +import ../beacon_chain/networking/peer_scores import ../beacon_chain/gossip_processing/block_processor, ../beacon_chain/sync/sync_manager, + ../beacon_chain/sync/sync_queue, ../beacon_chain/spec/forks type SomeTPeer = ref object + id: string score: int +func init(t: typedesc[SomeTPeer], id: string, score = 1000): SomeTPeer = + SomeTPeer(id: id, score: score) + func `$`(peer: SomeTPeer): string = - "SomeTPeer" + "peer#" & peer.id template shortLog(peer: SomeTPeer): string = $peer @@ -35,7 +41,7 @@ func getStats(peer: SomeTPeer, index: SyncResponseKind): uint64 = 0 func getStaticSlotCb(slot: Slot): GetSlotCallback = - proc getSlot(): Slot = + func getSlot(): Slot = slot getSlot @@ -44,1084 +50,1405 @@ type blck*: ForkedSignedBeaconBlock resfut*: Future[Result[void, VerifierError]] -func collector(queue: AsyncQueue[BlockEntry]): BlockVerifier = - # This sets up a fake block verifiation collector that simply puts the blocks - # in the async queue, similar to how BlockProcessor does it - as far as - # testing goes, this is risky because it might introduce differences between - # the BlockProcessor and this test - proc verify(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars], - maybeFinalized: bool): - Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} = - let fut = Future[Result[void, VerifierError]].Raising([CancelledError]).init() - try: queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut)) - except CatchableError as exc: raiseAssert exc.msg - return fut - - return verify - -suite "SyncManager test suite": - proc createChain(start, finish: Slot): seq[ref ForkedSignedBeaconBlock] = - doAssert(start <= finish) - let count = int(finish - start + 1'u64) - var res = newSeq[ref ForkedSignedBeaconBlock](count) - var curslot = start - for item in res.mitems(): - item = newClone ForkedSignedBeaconBlock(kind: ConsensusFork.Deneb) - item[].denebData.message.slot = curslot - curslot = curslot + 1'u64 - res - - func createBlobs( - blocks: var seq[ref ForkedSignedBeaconBlock], slots: seq[Slot] - ): seq[ref BlobSidecar] = - var res = newSeq[ref BlobSidecar](len(slots)) - for blck in blocks: - withBlck(blck[]): - when consensusFork >= ConsensusFork.Deneb: - template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments +func createChain(slots: Slice[Slot]): seq[ref ForkedSignedBeaconBlock] = + var res = newSeqOfCap[ref ForkedSignedBeaconBlock](len(slots)) + for slot in slots: + let item = newClone ForkedSignedBeaconBlock(kind: ConsensusFork.Deneb) + item[].denebData.message.slot = slot + res.add(item) + res + +proc createChain(srange: SyncRange): seq[ref ForkedSignedBeaconBlock] = + createChain(srange.slot .. 
(srange.slot + srange.count - 1)) + +func createBlobs( + blocks: var seq[ref ForkedSignedBeaconBlock], + slots: openArray[Slot] +): seq[ref BlobSidecar] = + var res = newSeq[ref BlobSidecar](len(slots)) + for blck in blocks: + withBlck(blck[]): + when consensusFork >= ConsensusFork.Deneb: + template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments + for i, slot in slots: + if slot == forkyBlck.message.slot: + doAssert kzgs.add default(KzgCommitment) + if kzgs.len > 0: + forkyBlck.root = hash_tree_root(forkyBlck.message) + var + kzg_proofs: KzgProofs + blobs: Blobs + for _ in kzgs: + doAssert kzg_proofs.add default(KzgProof) + doAssert blobs.add default(Blob) + let sidecars = forkyBlck.create_blob_sidecars(kzg_proofs, blobs) + var sidecarIdx = 0 for i, slot in slots: if slot == forkyBlck.message.slot: - doAssert kzgs.add default(KzgCommitment) - if kzgs.len > 0: - forkyBlck.root = hash_tree_root(forkyBlck.message) - var - kzg_proofs: KzgProofs - blobs: Blobs - for _ in kzgs: - doAssert kzg_proofs.add default(KzgProof) - doAssert blobs.add default(Blob) - let sidecars = forkyBlck.create_blob_sidecars(kzg_proofs, blobs) - var sidecarIdx = 0 - for i, slot in slots: - if slot == forkyBlck.message.slot: - res[i] = newClone sidecars[sidecarIdx] - inc sidecarIdx - res - - proc getSlice(chain: openArray[ref ForkedSignedBeaconBlock], startSlot: Slot, - request: SyncRequest[SomeTPeer]): seq[ref ForkedSignedBeaconBlock] = - let - startIndex = int(request.slot - startSlot) - finishIndex = int(request.slot - startSlot) + int(request.count) - 1 - var res = newSeq[ref ForkedSignedBeaconBlock](1 + finishIndex - startIndex) - for i in 0.. 0, "Empty scenarios are not allowed") + + var + scenario = @sc + aq = newAsyncQueue[BlockEntry]() template done(b: BlockEntry) = b.resfut.complete(Result[void, VerifierError].ok()) template fail(b: BlockEntry, e: untyped) = b.resfut.complete(Result[void, VerifierError].err(e)) - - template smokeTest(kkind: SyncQueueKind, start, finish: Slot, - chunkSize: uint64) = - let aq = newAsyncQueue[BlockEntry]() - - var counter = - case kkind - of SyncQueueKind.Forward: - int(start) - of SyncQueueKind.Backward: - int(finish) - - proc backwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - sblock.done() - else: - sblock.fail(VerifierError.Invalid) - dec(counter) - - proc forwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - inc(counter) - sblock.done() - else: - sblock.fail(VerifierError.Invalid) - - var - queue = - case kkind - of SyncQueueKind.Forward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, - start, finish, chunkSize, - getStaticSlotCb(start), collector(aq)) - of SyncQueueKind.Backward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Backward, - finish, start, chunkSize, - getStaticSlotCb(finish), collector(aq)) - chain = createChain(start, finish) - validatorFut = - case kkind + template verifyBlock(i, e, s, v: untyped): untyped = + let item = await queue.popFirst() + if item.blck.slot == s: + if e.code.isSome(): + item.fail(e.code.get()) + else: + item.done() + else: + raiseAssert "Verifier got block from incorrect slot, " & + "expected " & $s & ", got " & + $item.blck.slot & ", position [" & + $i & ", " & $s & "]" + inc(v) + + proc verifier(queue: AsyncQueue[BlockEntry]) {.async: (raises: []).} = + var slotsVerified = 0 + try: + for index, entry in scenario.pairs(): + case skind 
of SyncQueueKind.Forward: - forwardValidator(aq) + for slot in countup(entry.slots.a, entry.slots.b): + verifyBlock(index, entry, slot, slotsVerified) of SyncQueueKind.Backward: - backwardValidator(aq) - - let p1 = SomeTPeer() + for slot in countdown(entry.slots.b, entry.slots.a): + verifyBlock(index, entry, slot, slotsVerified) + except CancelledError: + raiseAssert "Scenario is not completed, " & + "number of slots passed " & $slotsVerified - proc runSmokeTest() {.async.} = - while true: - var request = queue.pop(finish, p1) - if request.isEmpty(): - break - await queue.push(request, getSlice(chain, start, request), - Opt.none(seq[BlobSidecars])) - await validatorFut.cancelAndWait() - - waitFor runSmokeTest() - case kkind - of SyncQueueKind.Forward: - check (counter - 1) == int(finish) - of SyncQueueKind.Backward: - check (counter + 1) == int(start) - - template unorderedAsyncTest(kkind: SyncQueueKind, startSlot: Slot) = - let - aq = newAsyncQueue[BlockEntry]() - chunkSize = 3'u64 - numberOfChunks = 3'u64 - finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64 - queueSize = 1 - - var counter = - case kkind - of SyncQueueKind.Forward: - int(startSlot) - of SyncQueueKind.Backward: - int(finishSlot) - - proc backwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - sblock.done() - else: - sblock.fail(VerifierError.Invalid) - dec(counter) - - proc forwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - inc(counter) - sblock.done() - else: - sblock.fail(VerifierError.Invalid) + (collector(aq), verifier(aq)) - var - chain = createChain(startSlot, finishSlot) - queue = - case kkind - of SyncQueueKind.Forward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, - startSlot, finishSlot, chunkSize, - getStaticSlotCb(startSlot), collector(aq), - queueSize) - of SyncQueueKind.Backward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Backward, - finishSlot, startSlot, chunkSize, - getStaticSlotCb(finishSlot), collector(aq), - queueSize) - validatorFut = - case kkind - of SyncQueueKind.Forward: - forwardValidator(aq) - of SyncQueueKind.Backward: - backwardValidator(aq) +suite "SyncManager test suite": + for kind in [SyncQueueKind.Forward, SyncQueueKind.Backward]: + asyncTest "[SyncQueue# & " & $kind & "] Smoke [single peer] test": + # Four ranges was distributed to single peer only. + let + scenario = [ + (Slot(0) .. 
Slot(127), Opt.none(VerifierError)) + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(127), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(127), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(127)), + verifier.collector) + peer = SomeTPeer.init("1") + r1 = sq.pop(Slot(127), peer) + r2 = sq.pop(Slot(127), peer) + r3 = sq.pop(Slot(127), peer) + d1 = createChain(r1.data) + d2 = createChain(r2.data) + d3 = createChain(r3.data) + + let + f1 = sq.push(r1, d1, Opt.none(seq[BlobSidecars])) + f2 = sq.push(r2, d2, Opt.none(seq[BlobSidecars])) + f3 = sq.push(r3, d3, Opt.none(seq[BlobSidecars])) - let - p1 = SomeTPeer() - p2 = SomeTPeer() - p3 = SomeTPeer() - - proc runTest(): Future[bool] {.async.} = - var r11 = queue.pop(finishSlot, p1) - var r12 = queue.pop(finishSlot, p2) - var r13 = queue.pop(finishSlot, p3) - - var f13 = queue.push(r13, chain.getSlice(startSlot, r13), - Opt.none(seq[BlobSidecars])) - await sleepAsync(100.milliseconds) check: - f13.finished == false - case kkind - of SyncQueueKind.Forward: counter == int(startSlot) - of SyncQueueKind.Backward: counter == int(finishSlot) - - var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) - await sleepAsync(100.milliseconds) - check: - case kkind - of SyncQueueKind.Forward: counter == int(startSlot + chunkSize) - of SyncQueueKind.Backward: counter == int(finishSlot - chunkSize) - f11.finished == true and f11.failed == false - f13.finished == false - - var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars])) - await allFutures(f11, f12, f13) - check: - f12.finished == true and f12.failed == false - f13.finished == true and f13.failed == false + f1.finished == false + f2.finished == false + f3.finished == false + + await noCancel f1 + check: - case kkind - of SyncQueueKind.Forward: counter == int(finishSlot) + 1 - of SyncQueueKind.Backward: counter == int(startSlot) - 1 - r11.item == p1 - r12.item == p2 - r13.item == p3 - await validatorFut.cancelAndWait() - return true - - check waitFor(runTest()) == true - - template partialGoodResponseTest(kkind: SyncQueueKind, start, finish: Slot, - chunkSize: uint64) = - let aq = newAsyncQueue[BlockEntry]() + f1.finished == true + f2.finished == false + f3.finished == false - var counter = - case kkind - of SyncQueueKind.Forward: - int(start) - of SyncQueueKind.Backward: - int(finish) - - proc backwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - dec(counter) - sblock.done() - elif sblock.blck.slot < Slot(counter): - # There was a gap, report missing parent - sblock.fail(VerifierError.MissingParent) - else: - sblock.fail(VerifierError.Duplicate) - - proc getBackwardSafeSlotCb(): Slot = - min((Slot(counter).epoch + 1).start_slot, finish) - - proc forwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - inc(counter) - sblock.done() - elif sblock.blck.slot > Slot(counter): - # There was a gap, report missing parent - sblock.fail(VerifierError.MissingParent) - else: - sblock.fail(VerifierError.Duplicate) - - proc getFowardSafeSlotCb(): Slot = 
- max(Slot(max(counter, 1) - 1).epoch.start_slot, start) + await noCancel f2 - var - queue = - case kkind - of SyncQueueKind.Forward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, - start, finish, chunkSize, - getFowardSafeSlotCb, collector(aq)) - of SyncQueueKind.Backward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Backward, - finish, start, chunkSize, - getBackwardSafeSlotCb, collector(aq)) - chain = createChain(start, finish) - validatorFut = - case kkind - of SyncQueueKind.Forward: - forwardValidator(aq) - of SyncQueueKind.Backward: - backwardValidator(aq) + check: + f1.finished == true + f2.finished == true + f3.finished == false - let p1 = SomeTPeer() + await noCancel f3 - var expectedScore = 0 - proc runTest() {.async.} = - while true: - var request = queue.pop(finish, p1) - if request.isEmpty(): - break - var response = getSlice(chain, start, request) - if response.len >= (SLOTS_PER_EPOCH + 3).int: - # Create gap close to end of response, to simulate behaviour where - # the remote peer is sending valid data but does not have it fully - # available (e.g., still doing backfill after checkpoint sync) - case kkind - of SyncQueueKind.Forward: - response.delete(response.len - 2) - of SyncQueueKind.Backward: - response.delete(1) - expectedScore += PeerScoreMissingValues - if response.len >= 1: - # Ensure requested values are past `safeSlot` - case kkind - of SyncQueueKind.Forward: - check response[0][].slot >= getFowardSafeSlotCb() - else: - check response[^1][].slot <= getBackwardSafeSlotCb() - await queue.push(request, response, Opt.none(seq[BlobSidecars])) - await validatorFut.cancelAndWait() - - waitFor runTest() - case kkind - of SyncQueueKind.Forward: - check (counter - 1) == int(finish) - of SyncQueueKind.Backward: - check (counter + 1) == int(start) - check p1.score >= expectedScore - - template outOfBandAdvancementTest(kkind: SyncQueueKind, start, finish: Slot, - chunkSize: uint64) = - let aq = newAsyncQueue[BlockEntry]() + check: + f1.finished == true + f2.finished == true + f3.finished == true - var counter = - case kkind - of SyncQueueKind.Forward: - int(start) - of SyncQueueKind.Backward: - int(finish) - - proc failingValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - sblock.fail(VerifierError.Invalid) - - proc getBackwardSafeSlotCb(): Slot = - let progress = (uint64(int(finish) - counter) div chunkSize) * chunkSize - finish - progress - - proc getFowardSafeSlotCb(): Slot = - let progress = (uint64(counter - int(start)) div chunkSize) * chunkSize - start + progress - - template advanceSafeSlot() = - case kkind - of SyncQueueKind.Forward: - counter += int(chunkSize) - if counter > int(finish) + 1: - counter = int(finish) + 1 - break - of SyncQueueKind.Backward: - counter -= int(chunkSize) - if counter < int(start) - 1: - counter = int(start) - 1 - break + let + r4 = sq.pop(Slot(127), peer) + d4 = createChain(r4.data) + f4 = sq.push(r4, d4, Opt.none(seq[BlobSidecars])) - var - queue = - case kkind - of SyncQueueKind.Forward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, - start, finish, chunkSize, - getFowardSafeSlotCb, collector(aq)) - of SyncQueueKind.Backward: - SyncQueue.init(SomeTPeer, SyncQueueKind.Backward, - finish, start, chunkSize, - getBackwardSafeSlotCb, collector(aq)) - chain = createChain(start, finish) - validatorFut = failingValidator(aq) + await noCancel f4 - let - p1 = SomeTPeer() - p2 = SomeTPeer() - - proc runTest() {.async.} = - while true: - var - request1 = queue.pop(finish, p1) - request2 = 
queue.pop(finish, p2) - if request1.isEmpty(): - break + check: + f1.finished == true + f2.finished == true + f3.finished == true + f4.finished == true + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue# & " & $kind & "] Smoke [3 peers] test": + # Three ranges was distributed between 3 peers, every range is going to + # be pushed by all peers. + let + scenario = [ + (Slot(0) .. Slot(127), Opt.none(VerifierError)) + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(127), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(127), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(127)), + verifier.collector) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + r11 = sq.pop(Slot(127), peer1) + r12 = sq.pop(Slot(127), peer2) + r13 = sq.pop(Slot(127), peer3) + d11 = createChain(r11.data) + d12 = createChain(r12.data) + d13 = createChain(r13.data) + r21 = sq.pop(Slot(127), peer1) + r22 = sq.pop(Slot(127), peer2) + r23 = sq.pop(Slot(127), peer3) + d21 = createChain(r21.data) + d22 = createChain(r22.data) + d23 = createChain(r23.data) + r31 = sq.pop(Slot(127), peer1) + r32 = sq.pop(Slot(127), peer2) + r33 = sq.pop(Slot(127), peer3) + d31 = createChain(r31.data) + d32 = createChain(r32.data) + d33 = createChain(r33.data) + + let + f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) + f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + + f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) + f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) + f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + + f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) + + await noCancel f11 + check: + f11.finished == true + # We do not check f12 and f13 here because their state is undefined + # at this time. + f21.finished == false + f22.finished == false + f23.finished == false + f31.finished == false + f32.finished == false + f33.finished == false + + await noCancel f22 + check: + f11.finished == true + f12.finished == true + f13.finished == true + f22.finished == true + # We do not check f21 and f23 here because their state is undefined + # at this time. + f31.finished == false + f32.finished == false + f33.finished == false + + await noCancel f33 + check: + f11.finished == true + f12.finished == true + f13.finished == true + f21.finished == true + f22.finished == true + f23.finished == true + f33.finished == true + # We do not check f31 and f32 here because their state is undefined + # at this time. - # Simulate failing request 2. - queue.push(request2) - check debtLen(queue) == request2.count - - # Advance `safeSlot` out of band. - advanceSafeSlot() - - # Handle request 1. Should be re-enqueued as it simulates `Invalid`. - let response1 = getSlice(chain, start, request1) - await queue.push(request1, response1, Opt.none(seq[BlobSidecars])) - check debtLen(queue) == request2.count + request1.count - - # Request 1 should be discarded as it is no longer relevant. - # Request 2 should be re-issued. 
- var request3 = queue.pop(finish, p1) - check: - request3 == request2 - debtLen(queue) == 0 - - # Handle request 3. Should be re-enqueued as it simulates `Invalid`. - let response3 = getSlice(chain, start, request3) - await queue.push(request3, response3, Opt.none(seq[BlobSidecars])) - check debtLen(queue) == request3.count - - # Request 2 should be re-issued. - var request4 = queue.pop(finish, p1) - check: - request4 == request2 - debtLen(queue) == 0 - - # Advance `safeSlot` out of band. - advanceSafeSlot() - - # Handle request 4. Should be re-enqueued as it simulates `Invalid`. - let response4 = getSlice(chain, start, request4) - await queue.push(request4, response4, Opt.none(seq[BlobSidecars])) - check debtLen(queue) == request4.count - - # Advance `safeSlot` out of band. - advanceSafeSlot() - - # Fetch a request. It should take into account the new `safeSlot`. - let request5 = queue.pop(finish, p1) - if request5.isEmpty(): - break - case kkind - of SyncQueueKind.Forward: - check request5.slot >= getFowardSafeSlotCb() - else: - check request5.lastSlot <= getBackwardSafeSlotCb() - queue.push(request5) - - await validatorFut.cancelAndWait() - - waitFor runTest() - case kkind - of SyncQueueKind.Forward: - check (counter - 1) == int(finish) - of SyncQueueKind.Backward: - check (counter + 1) == int(start) - - for k in {SyncQueueKind.Forward, SyncQueueKind.Backward}: - let prefix = "[SyncQueue#" & $k & "] " - - test prefix & "Start and finish slots equal": - startAndFinishSlotsEqual(k) - - test prefix & "Pass through established limits test": - passThroughLimitsTest(k) - - test prefix & "Two full requests success/fail": - twoFullRequests(k) - - test prefix & "Smoke test": - const SmokeTests = [ - (Slot(0), Slot(547), 61'u64), - (Slot(193), Slot(389), 79'u64), - (Slot(1181), Slot(1399), 41'u64) - ] - for item in SmokeTests: - smokeTest(k, item[0], item[1], item[2]) - - test prefix & "Async unordered push test": - const UnorderedTests = [ - Slot(0), Slot(100) - ] - for item in UnorderedTests: - unorderedAsyncTest(k, item) - - test prefix & "Good response with missing values towards end": - const PartialGoodResponseTests = [ - (Slot(0), Slot(200), (SLOTS_PER_EPOCH + 3).uint64) - ] - for item in PartialGoodResponseTests: - partialGoodResponseTest(k, item[0], item[1], item[2]) - - test prefix & "Handle out-of-band sync progress advancement": - const OutOfBandAdvancementTests = [ - (Slot(0), Slot(500), SLOTS_PER_EPOCH.uint64) - ] - for item in OutOfBandAdvancementTests: - outOfBandAdvancementTest(k, item[0], item[1], item[2]) - - test "[SyncQueue#Forward] Async unordered push with rewind test": - let - aq = newAsyncQueue[BlockEntry]() - startSlot = Slot(0) - chunkSize = SLOTS_PER_EPOCH - numberOfChunks = 4'u64 - finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64 - queueSize = 1 - - var counter = int(startSlot) - - proc forwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - if sblock.blck.slot == Slot(counter): - withBlck(sblock.blck): - if forkyBlck.message.proposer_index == 0xDEADBEAF'u64: - sblock.fail(VerifierError.MissingParent) - else: - inc(counter) - sblock.done() - else: - sblock.fail(VerifierError.Invalid) + let + r41 = sq.pop(Slot(127), peer1) + d41 = createChain(r41.data) - var - chain = createChain(startSlot, finishSlot) - queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, - startSlot, finishSlot, chunkSize, - getStaticSlotCb(startSlot), collector(aq), - queueSize) - validatorFut = forwardValidator(aq) + await noCancel 
sq.push(r41, d41, Opt.none(seq[BlobSidecars])) - let - p1 = SomeTPeer() - p2 = SomeTPeer() - p3 = SomeTPeer() - p4 = SomeTPeer() - p5 = SomeTPeer() - p6 = SomeTPeer() - p7 = SomeTPeer() - p8 = SomeTPeer() - - proc runTest(): Future[bool] {.async.} = - var r11 = queue.pop(finishSlot, p1) - var r12 = queue.pop(finishSlot, p2) - var r13 = queue.pop(finishSlot, p3) - var r14 = queue.pop(finishSlot, p4) - - var f14 = queue.push(r14, chain.getSlice(startSlot, r14), - Opt.none(seq[BlobSidecars])) - await sleepAsync(100.milliseconds) check: - f14.finished == false - counter == int(startSlot) - - var f12 = queue.push(r12, chain.getSlice(startSlot, r12), - Opt.none(seq[BlobSidecars])) - await sleepAsync(100.milliseconds) + f11.finished == true + f12.finished == true + f13.finished == true + f21.finished == true + f22.finished == true + f23.finished == true + f31.finished == true + f32.finished == true + f33.finished == true + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue# & " & $kind & "] Failure request push test": + let + scenario = + case kind + of SyncQueueKind.Forward: + [ + (Slot(0) .. Slot(31), Opt.none(VerifierError)), + (Slot(32) .. Slot(63), Opt.none(VerifierError)) + ] + of SyncQueueKind.Backward: + [ + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(0) .. Slot(31), Opt.none(VerifierError)) + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(63)), + verifier.collector) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + + block: + let + r11 = sq.pop(Slot(63), peer1) + r12 = sq.pop(Slot(63), peer2) + r13 = sq.pop(Slot(63), peer3) + + sq.push(r11) + sq.push(r12) + sq.push(r13) + # Next couple of calls should be detected as non relevant + sq.push(r11) + sq.push(r12) + sq.push(r13) + + block: + let + r11 = sq.pop(Slot(63), peer1) + r12 = sq.pop(Slot(63), peer2) + r13 = sq.pop(Slot(63), peer3) + d12 = createChain(r12.data) + + sq.push(r11) + await noCancel sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + sq.push(r13) + # Next couple of calls should be detected as non relevant + sq.push(r11) + sq.push(r12) + sq.push(r13) + + block: + let + r11 = sq.pop(Slot(63), peer1) + r12 = sq.pop(Slot(63), peer2) + r13 = sq.pop(Slot(63), peer3) + d13 = createChain(r13.data) + + sq.push(r11) + sq.push(r12) + await noCancel sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + # Next couple of calls should be detected as non relevant + sq.push(r11) + sq.push(r12) + sq.push(r13) + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue# & " & $kind & "] Invalid block [3 peers] test": + # This scenario performs test for 2 cases. + # 1. When first error encountered it just drops the the response and + # increases `failuresCounter`. + # 2. When another error encountered it will reset whole queue to the + # last known good/safe point (rewind process). + let + scenario = + case kind + of SyncQueueKind.Forward: + [ + (Slot(0) .. Slot(31), Opt.none(VerifierError)), + (Slot(32) .. Slot(40), Opt.none(VerifierError)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.Invalid)), + (Slot(32) .. 
Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.Invalid)), + (Slot(0) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.none(VerifierError)), + (Slot(42) .. Slot(63), Opt.none(VerifierError)) + ] + of SyncQueueKind.Backward: + [ + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(22) .. Slot(31), Opt.none(VerifierError)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.Invalid)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.Invalid)), + (Slot(32) .. Slot(63), Opt.some(VerifierError.Duplicate)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.none(VerifierError)), + (Slot(0) .. Slot(20), Opt.none(VerifierError)), + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(63)), + verifier.collector) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + r11 = sq.pop(Slot(63), peer1) + r12 = sq.pop(Slot(63), peer2) + r13 = sq.pop(Slot(63), peer3) + d11 = createChain(r11.data) + d12 = createChain(r12.data) + d13 = createChain(r13.data) + r21 = sq.pop(Slot(63), peer1) + r22 = sq.pop(Slot(63), peer2) + r23 = sq.pop(Slot(63), peer3) + d21 = createChain(r21.data) + d22 = createChain(r22.data) + d23 = createChain(r23.data) + + let + f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) + f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + + await noCancel f11 + check f11.finished == true + + let + f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) + f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) + f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + + await noCancel f21 check: - counter == int(startSlot) - f12.finished == false - f14.finished == false + f21.finished == true + f11.finished == true + f12.finished == true + f13.finished == true - var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) - await allFutures(f11, f12) + await noCancel f22 check: - counter == int(startSlot + chunkSize + chunkSize) - f11.finished == true and f11.failed == false - f12.finished == true and f12.failed == false - f14.finished == false - - var missingSlice = chain.getSlice(startSlot, r13) - withBlck(missingSlice[0][]): - forkyBlck.message.proposer_index = 0xDEADBEAF'u64 - var f13 = queue.push(r13, missingSlice, - Opt.none(seq[BlobSidecars])) - await allFutures(f13, f14) + f21.finished == true + f22.finished == true + f11.finished == true + f12.finished == true + f13.finished == true + + await noCancel f23 check: - f11.finished == true and f11.failed == false - f12.finished == true and f12.failed == false - f13.finished == true and f13.failed == false - f14.finished == true and f14.failed == false - queue.inpSlot == Slot(SLOTS_PER_EPOCH) - queue.outSlot == Slot(SLOTS_PER_EPOCH) - queue.debtLen == 0 - - # Recovery process - counter = int(SLOTS_PER_EPOCH) - - var r15 = queue.pop(finishSlot, p5) - var r16 = queue.pop(finishSlot, 
p6) - var r17 = queue.pop(finishSlot, p7) - var r18 = queue.pop(finishSlot, p8) - - check r18.isEmpty() == true - - var f17 = queue.push(r17, chain.getSlice(startSlot, r17), - Opt.none(seq[BlobSidecars])) - await sleepAsync(100.milliseconds) - check f17.finished == false - - var f16 = queue.push(r16, chain.getSlice(startSlot, r16), - Opt.none(seq[BlobSidecars])) - await sleepAsync(100.milliseconds) - check f16.finished == false - - var f15 = queue.push(r15, chain.getSlice(startSlot, r15), - Opt.none(seq[BlobSidecars])) - await allFutures(f15, f16, f17) + f21.finished == true + f22.finished == true + f23.finished == true + f11.finished == true + f12.finished == true + f13.finished == true + + let + r31 = sq.pop(Slot(63), peer1) + r32 = sq.pop(Slot(63), peer2) + r33 = sq.pop(Slot(63), peer3) + d31 = createChain(r31.data) + d32 = createChain(r32.data) + d33 = createChain(r33.data) + r41 = sq.pop(Slot(63), peer1) + r42 = sq.pop(Slot(63), peer2) + r43 = sq.pop(Slot(63), peer3) + d41 = createChain(r41.data) + d42 = createChain(r42.data) + d43 = createChain(r43.data) + + let + f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) + f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) + f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) + f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + + await noCancel f31 check: - f15.finished == true and f15.failed == false - f16.finished == true and f16.failed == false - f17.finished == true and f17.failed == false - counter == int(finishSlot) + 1 + f31.finished == true - await validatorFut.cancelAndWait() - return true + await noCancel f42 + check: + f31.finished == true + f32.finished == true + f33.finished == true + f42.finished == true - check waitFor(runTest()) == true + await noCancel f43 + check: + f31.finished == true + f32.finished == true + f33.finished == true + f41.finished == true + f42.finished == true + f43.finished == true + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue# & " & $kind & "] Unviable block [3 peers] test": + # This scenario performs test for 2 cases. + # 1. When first error encountered it just drops the the response and + # increases `failuresCounter`. + # 2. When another error encountered it will reset whole queue to the + # last known good/safe point (rewind process). + # Unviable fork blocks processed differently from invalid blocks, all + # this blocks should be added to quarantine, so blocks range is not get + # failed immediately. + let + scenario = + case kind + of SyncQueueKind.Forward: + [ + (Slot(0) .. Slot(31), Opt.none(VerifierError)), + (Slot(32) .. Slot(40), Opt.none(VerifierError)), + (Slot(41) .. Slot(63), Opt.some(VerifierError.UnviableFork)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(63), Opt.some(VerifierError.UnviableFork)), + (Slot(0) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(63), Opt.none(VerifierError)) + ] + of SyncQueueKind.Backward: + [ + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(22) .. Slot(31), Opt.none(VerifierError)), + (Slot(0) .. Slot(21), Opt.some(VerifierError.UnviableFork)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(0) .. Slot(21), Opt.some(VerifierError.UnviableFork)), + (Slot(32) .. Slot(63), Opt.some(VerifierError.Duplicate)), + (Slot(22) .. 
Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(0) .. Slot(21), Opt.none(VerifierError)) + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(63)), + verifier.collector) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + r11 = sq.pop(Slot(63), peer1) + r12 = sq.pop(Slot(63), peer2) + r13 = sq.pop(Slot(63), peer3) + d11 = createChain(r11.data) + d12 = createChain(r12.data) + d13 = createChain(r13.data) + r21 = sq.pop(Slot(63), peer1) + r22 = sq.pop(Slot(63), peer2) + r23 = sq.pop(Slot(63), peer3) + d21 = createChain(r21.data) + d22 = createChain(r22.data) + d23 = createChain(r23.data) + + let + f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) + f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + + await noCancel f11 + check f11.finished == true + + let + f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) + f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) + f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + + await noCancel f21 + check: + f21.finished == true + f11.finished == true + f12.finished == true + f13.finished == true - test "Process all unviable blocks": - let - aq = newAsyncQueue[BlockEntry]() - startSlot = Slot(0) - chunkSize = SLOTS_PER_EPOCH - numberOfChunks = 1'u64 - finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64 - queueSize = 1 - - var counter = int(startSlot) - - proc forwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} = - while true: - let sblock = await aq.popFirst() - withBlck(sblock.blck): - sblock.fail(VerifierError.UnviableFork) - inc(counter) + await noCancel f22 + check: + f21.finished == true + f22.finished == true + f11.finished == true + f12.finished == true + f13.finished == true - var - chain = createChain(startSlot, finishSlot) - queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward, - startSlot, finishSlot, chunkSize, - getStaticSlotCb(startSlot), collector(aq), - queueSize) - validatorFut = forwardValidator(aq) + await noCancel f23 + check: + f21.finished == true + f22.finished == true + f23.finished == true + f11.finished == true + f12.finished == true + f13.finished == true + + let + r31 = sq.pop(Slot(63), peer1) + r32 = sq.pop(Slot(63), peer2) + r33 = sq.pop(Slot(63), peer3) + + let + d31 = createChain(r31.data) + d32 = createChain(r32.data) + d33 = createChain(r33.data) + r41 = sq.pop(Slot(63), peer1) + r42 = sq.pop(Slot(63), peer2) + r43 = sq.pop(Slot(63), peer3) + d41 = createChain(r41.data) + d42 = createChain(r42.data) + d43 = createChain(r43.data) + + let + f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) + f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) + f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) + f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + + await noCancel f31 + check: + f31.finished == true - let - p1 = SomeTPeer() + await noCancel f42 + check: + f31.finished == true + f32.finished == true + f33.finished == true + f42.finished == true - proc runTest(): 
Future[bool] {.async.} = - var r11 = queue.pop(finishSlot, p1) + await noCancel f43 + check: + f31.finished == true + f32.finished == true + f33.finished == true + f41.finished == true + f42.finished == true + f43.finished == true + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue# & " & $kind & "] Combination of missing parent " & + "and good blocks [3 peers] test": + let + scenario = + case kind + of SyncQueueKind.Forward: + [ + (Slot(0) .. Slot(31), Opt.none(VerifierError)), + (Slot(32) .. Slot(40), Opt.none(VerifierError)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)), + (Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)), + (Slot(41) .. Slot(63), Opt.none(VerifierError)) + ] + of SyncQueueKind.Backward: + [ + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(22) .. Slot(31), Opt.none(VerifierError)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)), + (Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(0) .. 
Slot(21), Opt.none(VerifierError)), + ] + verifier = setupVerifier(kind, scenario) + sq = + case kind + of SyncQueueKind.Forward: + SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + of SyncQueueKind.Backward: + SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(63)), + verifier.collector) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + r11 = sq.pop(Slot(63), peer1) + r12 = sq.pop(Slot(63), peer2) + r13 = sq.pop(Slot(63), peer3) + d11 = createChain(r11.data) + d12 = createChain(r12.data) + d13 = createChain(r13.data) + r21 = sq.pop(Slot(63), peer1) + r22 = sq.pop(Slot(63), peer2) + r23 = sq.pop(Slot(63), peer3) + d21 = createChain(r21.data) + d22 = createChain(r22.data) + d23 = createChain(r23.data) + + let + f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) + f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + + await noCancel f11 + check f11.finished == true + + let + f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) + f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) + f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + + await noCancel f21 + check: + f21.finished == true + f11.finished == true + f12.finished == true + f13.finished == true - # Push a single request that will fail with all blocks being unviable - var f11 = queue.push(r11, chain.getSlice(startSlot, r11), - Opt.none(seq[BlobSidecars])) - discard await f11.withTimeout(1.seconds) + await noCancel f22 + check: + f21.finished == true + f22.finished == true + f11.finished == true + f12.finished == true + f13.finished == true + await noCancel f23 check: + f21.finished == true + f22.finished == true + f23.finished == true f11.finished == true - counter == int(startSlot + chunkSize) # should process all unviable blocks - debtLen(queue) == chunkSize # The range must be retried + f12.finished == true + f13.finished == true + + let + r31 = sq.pop(Slot(63), peer1) + r32 = sq.pop(Slot(63), peer2) + r33 = sq.pop(Slot(63), peer3) + d31 = createChain(r31.data) + d32 = createChain(r32.data) + d33 = createChain(r33.data) + f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) + f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + + await noCancel f31 + await noCancel f32 + await noCancel f33 + + let + r41 = sq.pop(Slot(63), peer1) + r42 = sq.pop(Slot(63), peer2) + r43 = sq.pop(Slot(63), peer3) + d41 = createChain(r41.data) + d42 = createChain(r42.data) + d43 = createChain(r43.data) + f42 = sq.push(r32, d42, Opt.none(seq[BlobSidecars])) + f41 = sq.push(r31, d41, Opt.none(seq[BlobSidecars])) + f43 = sq.push(r33, d43, Opt.none(seq[BlobSidecars])) + + await noCancel allFutures(f42, f41, f43) + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue#Forward] Missing parent and exponential rewind " & + "[3 peers] test": + let + scenario = + [ + (Slot(0) .. Slot(31), Opt.none(VerifierError)), + # .. 3 ranges are empty + (Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)), + (Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)), + # 1st rewind should be to (failed_slot - 1 * epoch) = 96 + (Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)), + (Slot(128) .. 
Slot(128), Opt.some(VerifierError.MissingParent)), + # 2nd rewind should be to (failed_slot - 2 * epoch) = 64 + (Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)), + (Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)), + # 3rd rewind should be to (failed_slot - 4 * epoch) = 0 + (Slot(0) .. Slot(31), Opt.some(VerifierError.Duplicate)), + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(64) .. Slot(95), Opt.none(VerifierError)), + (Slot(96) .. Slot(127), Opt.none(VerifierError)), + (Slot(128) .. Slot(159), Opt.none(VerifierError)), + ] + kind = SyncQueueKind.Forward + verifier = setupVerifier(kind, scenario) + sq = SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(159), + 32'u64, # 32 slots per request + 3, # 3 concurrent requests + 2, # 2 failures allowed + getStaticSlotCb(Slot(0)), + verifier.collector) + peer1 = SomeTPeer.init("1") + peer2 = SomeTPeer.init("2") + peer3 = SomeTPeer.init("3") + r11 = sq.pop(Slot(159), peer1) + r12 = sq.pop(Slot(159), peer2) + r13 = sq.pop(Slot(159), peer3) + d11 = createChain(r11.data) + d12 = createChain(r12.data) + d13 = createChain(r13.data) + f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars])) + f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars])) + f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars])) + + await noCancel f11 + await noCancel f12 + await noCancel f13 + + for i in 0 ..< 3: + let + re1 = sq.pop(Slot(159), peer1) + re2 = sq.pop(Slot(159), peer2) + re3 = sq.pop(Slot(159), peer3) + de1 = default(seq[ref ForkedSignedBeaconBlock]) + de2 = default(seq[ref ForkedSignedBeaconBlock]) + de3 = default(seq[ref ForkedSignedBeaconBlock]) + fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) + fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) + fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + + await noCancel fe1 + await noCancel fe2 + await noCancel fe3 - await validatorFut.cancelAndWait() - return true + let + r21 = sq.pop(Slot(159), peer1) + r22 = sq.pop(Slot(159), peer2) + r23 = sq.pop(Slot(159), peer3) + d21 = createChain(r21.data) + d22 = createChain(r22.data) + d23 = createChain(r23.data) + f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars])) + f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars])) + f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars])) + + await noCancel f21 + await noCancel f22 + await noCancel f23 + + for i in 0 ..< 1: + let + re1 = sq.pop(Slot(159), peer1) + re2 = sq.pop(Slot(159), peer2) + re3 = sq.pop(Slot(159), peer3) + de1 = default(seq[ref ForkedSignedBeaconBlock]) + de2 = default(seq[ref ForkedSignedBeaconBlock]) + de3 = default(seq[ref ForkedSignedBeaconBlock]) + fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) + fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) + fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + + await noCancel fe1 + await noCancel fe2 + await noCancel fe3 - check waitFor(runTest()) == true + let + r31 = sq.pop(Slot(159), peer1) + r32 = sq.pop(Slot(159), peer2) + r33 = sq.pop(Slot(159), peer3) + d31 = createChain(r31.data) + d32 = createChain(r32.data) + d33 = createChain(r33.data) + f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars])) + f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars])) + f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars])) + + await noCancel f31 + await noCancel f32 + await noCancel f33 + + for i in 0 ..< 2: + let + re1 = sq.pop(Slot(159), peer1) + re2 = sq.pop(Slot(159), peer2) + re3 = sq.pop(Slot(159), peer3) + de1 = default(seq[ref ForkedSignedBeaconBlock]) + de2 = default(seq[ref ForkedSignedBeaconBlock]) + de3 = default(seq[ref 
ForkedSignedBeaconBlock]) + fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars])) + fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars])) + fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars])) + + await noCancel fe1 + await noCancel fe2 + await noCancel fe3 - test "[SyncQueue#Backward] Async unordered push with rewind test": let - aq = newAsyncQueue[BlockEntry]() - startSlot = Slot(0) - chunkSize = SLOTS_PER_EPOCH - numberOfChunks = 4'u64 - finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64 - queueSize = 1 + r41 = sq.pop(Slot(159), peer1) + r42 = sq.pop(Slot(159), peer2) + r43 = sq.pop(Slot(159), peer3) + d41 = createChain(r41.data) + d42 = createChain(r42.data) + d43 = createChain(r43.data) + f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars])) + f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars])) + f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars])) + + await noCancel f41 + await noCancel f42 + await noCancel f43 + + for i in 0 ..< 5: + let + rf1 = sq.pop(Slot(159), peer1) + rf2 = sq.pop(Slot(159), peer2) + rf3 = sq.pop(Slot(159), peer3) + df1 = createChain(rf1.data) + df2 = createChain(rf2.data) + df3 = createChain(rf3.data) + ff1 = sq.push(rf1, df1, Opt.none(seq[BlobSidecars])) + ff2 = sq.push(rf2, df2, Opt.none(seq[BlobSidecars])) + ff3 = sq.push(rf3, df3, Opt.none(seq[BlobSidecars])) + + await noCancel ff1 + await noCancel ff2 + await noCancel ff3 + + await noCancel wait(verifier.verifier, 2.seconds) + + asyncTest "[SyncQueue#Backward] Missing parent and exponential rewind " & + "[3 peers] test": + let + scenario = + [ + (Slot(128) .. Slot(159), Opt.none(VerifierError)), + # .. 3 ranges are empty + (Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)), + (Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)), + (Slot(128) .. Slot(159), Opt.some(VerifierError.Duplicate)), + (Slot(96) .. Slot(127), Opt.none(VerifierError)), + # .. 2 ranges are empty + (Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)), + (Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)), + (Slot(128) .. Slot(159), Opt.some(VerifierError.Duplicate)), + (Slot(96) .. Slot(127), Opt.some(VerifierError.Duplicate)), + (Slot(64) .. Slot(95), Opt.none(VerifierError)), + # .. 1 range is empty + (Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)), + (Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)), + (Slot(128) .. Slot(159), Opt.some(VerifierError.Duplicate)), + (Slot(96) .. Slot(127), Opt.some(VerifierError.Duplicate)), + (Slot(64) .. Slot(95), Opt.some(VerifierError.Duplicate)), + (Slot(32) .. Slot(63), Opt.none(VerifierError)), + (Slot(0) .. 
Slot(31), Opt.none(VerifierError))
+      ]
+      kind = SyncQueueKind.Backward
+      verifier = setupVerifier(kind, scenario)
+      sq = SyncQueue.init(SomeTPeer, kind, Slot(159), Slot(0),
+                          32'u64, # 32 slots per request
+                          3, # 3 concurrent requests
+                          2, # 2 failures allowed
+                          getStaticSlotCb(Slot(159)),
+                          verifier.collector)
+      peer1 = SomeTPeer.init("1")
+      peer2 = SomeTPeer.init("2")
+      peer3 = SomeTPeer.init("3")
+      r11 = sq.pop(Slot(159), peer1)
+      r12 = sq.pop(Slot(159), peer2)
+      r13 = sq.pop(Slot(159), peer3)
+      d11 = createChain(r11.data)
+      d12 = createChain(r12.data)
+      d13 = createChain(r13.data)
+      f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
+      f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
+      f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
+
+    await noCancel f11
+    await noCancel f12
+    await noCancel f13
+
+    for i in 0 ..< 3:
+      let
+        re1 = sq.pop(Slot(159), peer1)
+        re2 = sq.pop(Slot(159), peer2)
+        re3 = sq.pop(Slot(159), peer3)
+        de1 = default(seq[ref ForkedSignedBeaconBlock])
+        de2 = default(seq[ref ForkedSignedBeaconBlock])
+        de3 = default(seq[ref ForkedSignedBeaconBlock])
+        fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
+        fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
+        fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
+
+      await noCancel fe1
+      await noCancel fe2
+      await noCancel fe3
-    var
-      lastSafeSlot = finishSlot
-      counter = int(finishSlot)
-
-    proc getSafeSlot(): Slot =
-      lastSafeSlot
-
-    proc backwardValidator(aq: AsyncQueue[BlockEntry]) {.async.} =
-      while true:
-        let sblock = await aq.popFirst()
-        if sblock.blck.slot == Slot(counter):
-          withBlck(sblock.blck):
-            if forkyBlck.message.proposer_index == 0xDEADBEAF'u64:
-              sblock.fail(VerifierError.MissingParent)
-            else:
-              lastSafeSlot = sblock.blck.slot
-              dec(counter)
-              sblock.done()
-        else:
-          sblock.fail(VerifierError.Invalid)
+    let
+      r21 = sq.pop(Slot(159), peer1)
+      r22 = sq.pop(Slot(159), peer2)
+      r23 = sq.pop(Slot(159), peer3)
+      d21 = createChain(r21.data)
+      d22 = createChain(r22.data)
+      d23 = createChain(r23.data)
+      f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
+      f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
+      f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
+
+    await noCancel f21
+    await noCancel f22
+    await noCancel f23
+
+    for i in 0 ..< 2:
+      let
+        r31 = sq.pop(Slot(159), peer1)
+        r32 = sq.pop(Slot(159), peer2)
+        r33 = sq.pop(Slot(159), peer3)
+        d31 = createChain(r31.data)
+        d32 = createChain(r32.data)
+        d33 = createChain(r33.data)
+        f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
+        f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
+        f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
+
+      await noCancel f31
+      await noCancel f32
+      await noCancel f33
+
+    for i in 0 ..< 2:
+      let
+        re1 = sq.pop(Slot(159), peer1)
+        re2 = sq.pop(Slot(159), peer2)
+        re3 = sq.pop(Slot(159), peer3)
+        de1 = default(seq[ref ForkedSignedBeaconBlock])
+        de2 = default(seq[ref ForkedSignedBeaconBlock])
+        de3 = default(seq[ref ForkedSignedBeaconBlock])
+        fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
+        fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
+        fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
+
+      await noCancel fe1
+      await noCancel fe2
+      await noCancel fe3
-    var
-      chain = createChain(startSlot, finishSlot)
-      queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Backward,
-                             finishSlot, startSlot, chunkSize,
-                             getSafeSlot, collector(aq), queueSize)
-      validatorFut = backwardValidator(aq)
+    let
+      r41 = sq.pop(Slot(159), peer1)
+      r42 = sq.pop(Slot(159), peer2)
+      r43 = sq.pop(Slot(159), peer3)
+      d41 = createChain(r41.data)
+      d42 = createChain(r42.data)
+      d43 = createChain(r43.data)
+      f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars]))
+      f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars]))
+      f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars]))
+
+    await noCancel f41
+    await noCancel f42
+    await noCancel f43
+
+    for i in 0 ..< 3:
+      let
+        r51 = sq.pop(Slot(159), peer1)
+        r52 = sq.pop(Slot(159), peer2)
+        r53 = sq.pop(Slot(159), peer3)
+        d51 = createChain(r51.data)
+        d52 = createChain(r52.data)
+        d53 = createChain(r53.data)
+        f51 = sq.push(r51, d51, Opt.none(seq[BlobSidecars]))
+        f52 = sq.push(r52, d52, Opt.none(seq[BlobSidecars]))
+        f53 = sq.push(r53, d53, Opt.none(seq[BlobSidecars]))
+
+      await noCancel f51
+      await noCancel f52
+      await noCancel f53
+
+    for i in 0 ..< 1:
+      let
+        re1 = sq.pop(Slot(159), peer1)
+        re2 = sq.pop(Slot(159), peer2)
+        re3 = sq.pop(Slot(159), peer3)
+        de1 = default(seq[ref ForkedSignedBeaconBlock])
+        de2 = default(seq[ref ForkedSignedBeaconBlock])
+        de3 = default(seq[ref ForkedSignedBeaconBlock])
+        fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
+        fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
+        fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
+
+      await noCancel fe1
+      await noCancel fe2
+      await noCancel fe3
     let
-      p1 = SomeTPeer()
-      p2 = SomeTPeer()
-      p3 = SomeTPeer()
-      p4 = SomeTPeer()
-      p5 = SomeTPeer()
-      p6 = SomeTPeer()
-      p7 = SomeTPeer()
-
-    proc runTest(): Future[bool] {.async.} =
-      var r11 = queue.pop(finishSlot, p1)
-      var r12 = queue.pop(finishSlot, p2)
-      var r13 = queue.pop(finishSlot, p3)
-      var r14 = queue.pop(finishSlot, p4)
-
-      var f14 = queue.push(r14, chain.getSlice(startSlot, r14),
-                           Opt.none(seq[BlobSidecars]))
-      await sleepAsync(100.milliseconds)
-      check:
-        f14.finished == false
-        counter == int(finishSlot)
+      r61 = sq.pop(Slot(159), peer1)
+      r62 = sq.pop(Slot(159), peer2)
+      r63 = sq.pop(Slot(159), peer3)
+      d61 = createChain(r61.data)
+      d62 = createChain(r62.data)
+      d63 = createChain(r63.data)
+      f61 = sq.push(r61, d61, Opt.none(seq[BlobSidecars]))
+      f62 = sq.push(r62, d62, Opt.none(seq[BlobSidecars]))
+      f63 = sq.push(r63, d63, Opt.none(seq[BlobSidecars]))
+
+    await noCancel f61
+    await noCancel f62
+    await noCancel f63
+
+    for i in 0 ..< 5:
+      let
+        r71 = sq.pop(Slot(159), peer1)
+        r72 = sq.pop(Slot(159), peer2)
+        r73 = sq.pop(Slot(159), peer3)
+        d71 = createChain(r71.data)
+        d72 = createChain(r72.data)
+        d73 = createChain(r73.data)
+        f71 = sq.push(r71, d71, Opt.none(seq[BlobSidecars]))
+        f72 = sq.push(r72, d72, Opt.none(seq[BlobSidecars]))
+        f73 = sq.push(r73, d73, Opt.none(seq[BlobSidecars]))
+
+      await noCancel f71
+      await noCancel f72
+      await noCancel f73
+
+    await noCancel wait(verifier.verifier, 2.seconds)
-      var f12 = queue.push(r12, chain.getSlice(startSlot, r12),
-                           Opt.none(seq[BlobSidecars]))
-      await sleepAsync(100.milliseconds)
-      check:
-        counter == int(finishSlot)
-        f12.finished == false
-        f14.finished == false
+  test "[SyncQueue#Forward] getRewindPoint() test":
+    let aq = newAsyncQueue[BlockEntry]()
+    block:
+      let
+        queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
+                               Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
+                               1'u64, 3, 2, getStaticSlotCb(Slot(0)),
+                               collector(aq))
+        finalizedSlot = start_slot(Epoch(0'u64))
+        epochStartSlot = start_slot(Epoch(0'u64)) + 1'u64
+        finishSlot = start_slot(Epoch(2'u64))
-      var f11 = queue.push(r11, chain.getSlice(startSlot, r11),
-                           Opt.none(seq[BlobSidecars]))
-      await allFutures(f11, f12)
-      check:
-        counter == int(finishSlot - chunkSize - chunkSize)
-        f11.finished == true and f11.failed == false
-        f12.finished == true and f12.failed == false
-        f14.finished == false
-
-      var missingSlice = chain.getSlice(startSlot, r13)
-      withBlck(missingSlice[0][]):
-        forkyBlck.message.proposer_index = 0xDEADBEAF'u64
-      var f13 = queue.push(r13, missingSlice, Opt.none(seq[BlobSidecars]))
-      await allFutures(f13, f14)
-      check:
-        f11.finished == true and f11.failed == false
-        f12.finished == true and f12.failed == false
-        f13.finished == true and f13.failed == false
-        f14.finished == true and f14.failed == false
+      for i in uint64(epochStartSlot) ..< uint64(finishSlot):
+        check queue.getRewindPoint(Slot(i), finalizedSlot) == finalizedSlot
-      # Recovery process
-      counter = int(SLOTS_PER_EPOCH) + 1
+    block:
+      let
+        queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
+                               Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
+                               1'u64, 3, 2, getStaticSlotCb(Slot(0)),
+                               collector(aq))
+        finalizedSlot = start_slot(Epoch(1'u64))
+        epochStartSlot = start_slot(Epoch(1'u64)) + 1'u64
+        finishSlot = start_slot(Epoch(3'u64))
-      var r15 = queue.pop(finishSlot, p5)
-      var r16 = queue.pop(finishSlot, p6)
-      var r17 = queue.pop(finishSlot, p7)
+      for i in uint64(epochStartSlot) ..< uint64(finishSlot) :
+        check queue.getRewindPoint(Slot(i), finalizedSlot) == finalizedSlot
-      check r17.isEmpty() == true
+    block:
+      let
+        queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
+                               Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
+                               1'u64, 3, 2, getStaticSlotCb(Slot(0)),
+                               collector(aq))
+        finalizedSlot = start_slot(Epoch(0'u64))
+        failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64)
+        failEpoch = epoch(failSlot)
-      var f16 = queue.push(r16, chain.getSlice(startSlot, r16),
-                           Opt.none(seq[BlobSidecars]))
-      await sleepAsync(100.milliseconds)
-      check f16.finished == false
+      var counter = 1'u64
+      for i in 0 ..< 64:
+        if counter >= failEpoch:
+          break
+        let rewindEpoch = failEpoch - counter
+        let rewindSlot = start_slot(rewindEpoch)
+        check queue.getRewindPoint(failSlot, finalizedSlot) == rewindSlot
+        counter = counter shl 1
-      var f15 = queue.push(r15, chain.getSlice(startSlot, r15),
-                           Opt.none(seq[BlobSidecars]))
-      await allFutures(f15, f16)
-      check:
-        f15.finished == true and f15.failed == false
-        f16.finished == true and f16.failed == false
-        counter == int(startSlot) - 1
+    block:
+      let
+        queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
+                               Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
+                               1'u64, 3, 2, getStaticSlotCb(Slot(0)),
+                               collector(aq))
+      let
+        finalizedSlot = start_slot(Epoch(1'u64))
+        failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64)
+        failEpoch = epoch(failSlot)
-      await validatorFut.cancelAndWait()
-      return true
+      var counter = 1'u64
+      for i in 0 ..< 64:
+        if counter >= failEpoch:
+          break
+        let
+          rewindEpoch = failEpoch - counter
+          rewindSlot = start_slot(rewindEpoch)
+        check queue.getRewindPoint(failSlot, finalizedSlot) == rewindSlot
+        counter = counter shl 1
-    check waitFor(runTest()) == true
+  test "[SyncQueue#Backward] getRewindPoint() test":
+    let aq = newAsyncQueue[BlockEntry]()
+    block:
+      let
+        getSafeSlot = getStaticSlotCb(Slot(1024))
+        queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Backward,
+                               Slot(1024), Slot(0),
+                               1'u64, 3, 2, getSafeSlot, collector(aq))
+        safeSlot = getSafeSlot()
+
+      for i in countdown(1023, 0):
+        check queue.getRewindPoint(Slot(i), safeSlot) == safeSlot
 
   test "[SyncQueue] hasEndGap() test":
-    let chain1 = createChain(Slot(1), Slot(1))
-    let chain2 = newSeq[ref ForkedSignedBeaconBlock]()
+    let
+      chain1 = createChain(Slot(1) .. Slot(1))
+      chain2 = newSeq[ref ForkedSignedBeaconBlock]()
 
     for counter in countdown(32'u64, 2'u64):
-      let req = SyncRequest[SomeTPeer](slot: Slot(1), count: counter)
-      let sr = SyncResult[SomeTPeer](request: req, data: chain1)
-      check sr.hasEndGap() == true
+      let
+        srange = SyncRange.init(Slot(1), counter)
+        req = SyncRequest[SomeTPeer](data: srange)
+      check req.hasEndGap(chain1) == true
 
-    let req = SyncRequest[SomeTPeer](slot: Slot(1), count: 1'u64)
-    let sr1 = SyncResult[SomeTPeer](request: req, data: chain1)
-    let sr2 = SyncResult[SomeTPeer](request: req, data: chain2)
+    let req = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(1), 1'u64))
     check:
-      sr1.hasEndGap() == false
-      sr2.hasEndGap() == true
-
-  test "[SyncQueue] getLastNonEmptySlot() test":
-    let chain1 = createChain(Slot(10), Slot(10))
-    let chain2 = newSeq[ref ForkedSignedBeaconBlock]()
+      req.hasEndGap(chain1) == false
+      req.hasEndGap(chain2) == true
 
-    for counter in countdown(32'u64, 2'u64):
-      let req = SyncRequest[SomeTPeer](slot: Slot(10), count: counter)
-      let sr = SyncResult[SomeTPeer](request: req, data: chain1)
-      check sr.getLastNonEmptySlot() == Slot(10)
-
-    let req = SyncRequest[SomeTPeer](slot: Slot(100), count: 1'u64)
-    let sr = SyncResult[SomeTPeer](request: req, data: chain2)
-    check sr.getLastNonEmptySlot() == Slot(100)
-
-  test "[SyncQueue] contains() test":
-    proc checkRange[T](req: SyncRequest[T]): bool =
-      var slot = req.slot
-      var counter = 0'u64
-      while counter < req.count:
-        if not(req.contains(slot)):
-          return false
-        slot = slot + 1
-        counter = counter + 1'u64
-      return true
-
-    var req1 = SyncRequest[SomeTPeer](slot: Slot(5), count: 10'u64)
+  test "[SyncQueue] checkResponse() test":
+    let
+      r1 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 1'u64))
+      r2 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 2'u64))
+      r3 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 3'u64))
     check:
-      req1.checkRange() == true
-
-      req1.contains(Slot(4)) == false
-      req1.contains(Slot(15)) == false
+      checkResponse(r1, [Slot(11)]).isOk() == true
+      checkResponse(r1, @[]).isOk() == true
+      checkResponse(r1, @[Slot(11), Slot(11)]).isOk() == false
+      checkResponse(r1, [Slot(10)]).isOk() == false
+      checkResponse(r1, [Slot(12)]).isOk() == false
+
+      checkResponse(r2, [Slot(11)]).isOk() == true
+      checkResponse(r2, [Slot(12)]).isOk() == true
+      checkResponse(r2, @[]).isOk() == true
+      checkResponse(r2, [Slot(11), Slot(12)]).isOk() == true
+      checkResponse(r2, [Slot(12)]).isOk() == true
+      checkResponse(r2, [Slot(11), Slot(12), Slot(13)]).isOk() == false
+      checkResponse(r2, [Slot(10), Slot(11)]).isOk() == false
+      checkResponse(r2, [Slot(10)]).isOk() == false
+      checkResponse(r2, [Slot(12), Slot(11)]).isOk() == false
+      checkResponse(r2, [Slot(12), Slot(13)]).isOk() == false
+      checkResponse(r2, [Slot(13)]).isOk() == false
+
+      checkResponse(r2, [Slot(11), Slot(11)]).isOk() == false
+      checkResponse(r2, [Slot(12), Slot(12)]).isOk() == false
+
+      checkResponse(r3, @[Slot(11)]).isOk() == true
+      checkResponse(r3, @[Slot(12)]).isOk() == true
+      checkResponse(r3, @[Slot(13)]).isOk() == true
+      checkResponse(r3, @[Slot(11), Slot(12)]).isOk() == true
+      checkResponse(r3, @[Slot(11), Slot(13)]).isOk() == true
+      checkResponse(r3, @[Slot(12), Slot(13)]).isOk() == true
+      checkResponse(r3, @[Slot(11), Slot(13), Slot(12)]).isOk() == false
+      checkResponse(r3, @[Slot(12), Slot(13), Slot(11)]).isOk() == false
+      checkResponse(r3, @[Slot(13), Slot(12), Slot(11)]).isOk() == false
+      checkResponse(r3, @[Slot(13), Slot(11)]).isOk() == false
+      checkResponse(r3, @[Slot(13), Slot(12)]).isOk() == false
+      checkResponse(r3, @[Slot(12), Slot(11)]).isOk() == false
+
+      checkResponse(r3, @[Slot(11), Slot(11), Slot(11)]).isOk() == false
+      checkResponse(r3, @[Slot(11), Slot(12), Slot(12)]).isOk() == false
+      checkResponse(r3, @[Slot(11), Slot(13), Slot(13)]).isOk() == false
+      checkResponse(r3, @[Slot(12), Slot(13), Slot(13)]).isOk() == false
+      checkResponse(r3, @[Slot(12), Slot(12), Slot(12)]).isOk() == false
+      checkResponse(r3, @[Slot(13), Slot(13), Slot(13)]).isOk() == false
+      checkResponse(r3, @[Slot(11), Slot(11)]).isOk() == false
+      checkResponse(r3, @[Slot(12), Slot(12)]).isOk() == false
+      checkResponse(r3, @[Slot(13), Slot(13)]).isOk() == false
+
+  test "[SyncQueue] checkBlobsResponse() test":
+    const maxBlobsPerBlockElectra = 9
+
+    proc checkBlobsResponse[T](
+        req: SyncRequest[T],
+        data: openArray[Slot]): Result[void, cstring] =
+      checkBlobsResponse(req, data, maxBlobsPerBlockElectra)
-  test "[SyncQueue] checkResponse() test":
     let
-      chain = createChain(Slot(10), Slot(20))
-      r1 = SyncRequest[SomeTPeer](slot: Slot(11), count: 1'u64)
-      r21 = SyncRequest[SomeTPeer](slot: Slot(11), count: 2'u64)
-      r3 = SyncRequest[SomeTPeer](slot: Slot(11), count: 3'u64)
+      r1 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 1'u64))
+      r2 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 2'u64))
+      r3 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 3'u64))
-    let slots = mapIt(chain, it[].slot)
+      d1 = Slot(11).repeat(maxBlobsPerBlockElectra)
+      d2 = Slot(12).repeat(maxBlobsPerBlockElectra)
+      d3 = Slot(13).repeat(maxBlobsPerBlockElectra)
     check:
-      checkResponse(r1, @[slots[1]]) == true
-      checkResponse(r1, @[]) == true
-      checkResponse(r1, @[slots[1], slots[1]]) == false
-      checkResponse(r1, @[slots[0]]) == false
-      checkResponse(r1, @[slots[2]]) == false
-
-      checkResponse(r21, @[slots[1]]) == true
-      checkResponse(r21, @[]) == true
-      checkResponse(r21, @[slots[1], slots[2]]) == true
-      checkResponse(r21, @[slots[2]]) == true
-      checkResponse(r21, @[slots[1], slots[2], slots[3]]) == false
-      checkResponse(r21, @[slots[0], slots[1]]) == false
-      checkResponse(r21, @[slots[0]]) == false
-      checkResponse(r21, @[slots[2], slots[1]]) == false
-      checkResponse(r21, @[slots[2], slots[1]]) == false
-      checkResponse(r21, @[slots[2], slots[3]]) == false
-      checkResponse(r21, @[slots[3]]) == false
-
-      checkResponse(r21, @[slots[1], slots[1]]) == false
-      checkResponse(r21, @[slots[2], slots[2]]) == false
-
-      checkResponse(r3, @[slots[1]]) == true
-      checkResponse(r3, @[slots[2]]) == true
-      checkResponse(r3, @[slots[3]]) == true
-      checkResponse(r3, @[slots[1], slots[2]]) == true
-      checkResponse(r3, @[slots[1], slots[3]]) == true
-      checkResponse(r3, @[slots[2], slots[3]]) == true
-      checkResponse(r3, @[slots[1], slots[3], slots[2]]) == false
-      checkResponse(r3, @[slots[2], slots[3], slots[1]]) == false
-      checkResponse(r3, @[slots[3], slots[2], slots[1]]) == false
-      checkResponse(r3, @[slots[3], slots[1]]) == false
-      checkResponse(r3, @[slots[3], slots[2]]) == false
-      checkResponse(r3, @[slots[2], slots[1]]) == false
-
-      checkResponse(r3, @[slots[1], slots[1], slots[1]]) == false
-      checkResponse(r3, @[slots[1], slots[2], slots[2]]) == false
-      checkResponse(r3, @[slots[1], slots[3], slots[3]]) == false
-      checkResponse(r3, @[slots[2], slots[3], slots[3]]) == false
-      checkResponse(r3, @[slots[1], slots[1], slots[1]]) == false
-      checkResponse(r3, @[slots[2], slots[2], slots[2]]) == false
-      checkResponse(r3, @[slots[3], slots[3], slots[3]]) == false
-      checkResponse(r3, @[slots[1], slots[1]]) == false
-      checkResponse(r3, @[slots[2], slots[2]]) == false
-      checkResponse(r3, @[slots[3], slots[3]]) == false
+      checkBlobsResponse(r1, [Slot(11)]).isOk() == true
+      checkBlobsResponse(r1, @[]).isOk() == true
+      checkBlobsResponse(r1, [Slot(11), Slot(11)]).isOk() == true
+      checkBlobsResponse(r1, [Slot(11), Slot(11), Slot(11)]).isOk() == true
+      checkBlobsResponse(r1, d1).isOk() == true
+      checkBlobsResponse(r1, d1 & @[Slot(11)]).isOk() == false
+      checkBlobsResponse(r1, [Slot(10)]).isOk() == false
+      checkBlobsResponse(r1, [Slot(12)]).isOk() == false
+
+      checkBlobsResponse(r2, [Slot(11)]).isOk() == true
+      checkBlobsResponse(r2, [Slot(12)]).isOk() == true
+      checkBlobsResponse(r2, @[]).isOk() == true
+      checkBlobsResponse(r2, [Slot(11), Slot(12)]).isOk() == true
+      checkBlobsResponse(r2, [Slot(11), Slot(11)]).isOk() == true
+      checkBlobsResponse(r2, [Slot(12), Slot(12)]).isOk() == true
+      checkBlobsResponse(r2, d1).isOk() == true
+      checkBlobsResponse(r2, d2).isOk() == true
+      checkBlobsResponse(r2, d1 & d2).isOk() == true
+      checkBlobsResponse(r2, [Slot(11), Slot(12), Slot(11)]).isOk() == false
+      checkBlobsResponse(r2, [Slot(12), Slot(11)]).isOk() == false
+      checkBlobsResponse(r2, d1 & @[Slot(11)]).isOk() == false
+      checkBlobsResponse(r2, d2 & @[Slot(12)]).isOk() == false
+      checkBlobsResponse(r2, @[Slot(11)] & d2 & @[Slot(12)]).isOk() == false
+      checkBlobsResponse(r2, d1 & d2 & @[Slot(12)]).isOk() == false
+      checkBlobsResponse(r2, d2 & d1).isOk() == false
+
+      checkBlobsResponse(r3, [Slot(11)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(12)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(13)]).isOk() == true
+      checkBlobsResponse(r3, @[]).isOk() == true
+      checkBlobsResponse(r3, [Slot(11), Slot(12)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(11), Slot(11)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(12), Slot(12)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(11), Slot(13)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(12), Slot(13)]).isOk() == true
+      checkBlobsResponse(r3, [Slot(13), Slot(13)]).isOk() == true
+      checkBlobsResponse(r3, d1).isOk() == true
+      checkBlobsResponse(r3, d2).isOk() == true
+      checkBlobsResponse(r3, d3).isOk() == true
+      checkBlobsResponse(r3, d1 & d2).isOk() == true
+      checkBlobsResponse(r3, d1 & d3).isOk() == true
+      checkBlobsResponse(r3, d2 & d3).isOk() == true
+      checkBlobsResponse(r3, [Slot(11), Slot(12), Slot(11)]).isOk() == false
+      checkBlobsResponse(r3, [Slot(11), Slot(13), Slot(12)]).isOk() == false
+      checkBlobsResponse(r3, [Slot(12), Slot(13), Slot(11)]).isOk() == false
+      checkBlobsResponse(r3, [Slot(12), Slot(11)]).isOk() == false
+      checkBlobsResponse(r3, [Slot(13), Slot(12)]).isOk() == false
+      checkBlobsResponse(r3, [Slot(13), Slot(11)]).isOk() == false
+      checkBlobsResponse(r3, d1 & @[Slot(11)]).isOk() == false
+      checkBlobsResponse(r3, d2 & @[Slot(12)]).isOk() == false
+      checkBlobsResponse(r3, d3 & @[Slot(13)]).isOk() == false
+      checkBlobsResponse(r3, @[Slot(11)] & d2 & @[Slot(12)]).isOk() == false
+      checkBlobsResponse(r3, @[Slot(12)] & d3 & @[Slot(13)]).isOk() == false
+      checkBlobsResponse(r3, @[Slot(11)] & d3 & @[Slot(13)]).isOk() == false
+      checkBlobsResponse(r2, d1 & d2 & @[Slot(12)]).isOk() == false
+      checkBlobsResponse(r2, d1 & d3 & @[Slot(13)]).isOk() == false
+      checkBlobsResponse(r2, d2 & d3 & @[Slot(13)]).isOk() == false
+      checkBlobsResponse(r2, d2 & d1).isOk() == false
+      checkBlobsResponse(r2, d3 & d2).isOk() == false
+      checkBlobsResponse(r2, d3 & d1).isOk() == false
 
   test "[SyncManager] groupBlobs() test":
     var
-      blocks = createChain(Slot(10), Slot(15))
+      blocks = createChain(Slot(10) .. Slot(15))
       blobs = createBlobs(blocks, @[Slot(11), Slot(11), Slot(12), Slot(14)])
 
     let groupedRes = groupBlobs(blocks, blobs)
-    check:
-      groupedRes.isOk()
+    check groupedRes.isOk()
 
     let grouped = groupedRes.get()
@@ -1164,77 +1491,3 @@ suite "SyncManager test suite":
     check:
       groupedRes3.isErr()
-
-
-
-  test "[SyncQueue#Forward] getRewindPoint() test":
-    let aq = newAsyncQueue[BlockEntry]()
-    block:
-      var queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
-                                 Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
-                                 1'u64, getStaticSlotCb(Slot(0)),
-                                 collector(aq), 2)
-      let finalizedSlot = start_slot(Epoch(0'u64))
-      let epochStartSlot = start_slot(Epoch(0'u64)) + 1'u64
-      let finishSlot = start_slot(Epoch(2'u64))
-
-      for i in uint64(epochStartSlot) ..< uint64(finishSlot):
-        check queue.getRewindPoint(Slot(i), finalizedSlot) == finalizedSlot
-
-    block:
-      var queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
-                                 Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
-                                 1'u64, getStaticSlotCb(Slot(0)),
-                                 collector(aq), 2)
-      let finalizedSlot = start_slot(Epoch(1'u64))
-      let epochStartSlot = start_slot(Epoch(1'u64)) + 1'u64
-      let finishSlot = start_slot(Epoch(3'u64))
-
-      for i in uint64(epochStartSlot) ..< uint64(finishSlot) :
-        check queue.getRewindPoint(Slot(i), finalizedSlot) == finalizedSlot
-
-    block:
-      var queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
-                                 Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
-                                 1'u64, getStaticSlotCb(Slot(0)),
-                                 collector(aq), 2)
-      let finalizedSlot = start_slot(Epoch(0'u64))
-      let failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64)
-      let failEpoch = epoch(failSlot)
-
-      var counter = 1'u64
-      for i in 0 ..< 64:
-        if counter >= failEpoch:
-          break
-        let rewindEpoch = failEpoch - counter
-        let rewindSlot = start_slot(rewindEpoch)
-        check queue.getRewindPoint(failSlot, finalizedSlot) == rewindSlot
-        counter = counter shl 1
-
-    block:
-      var queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
-                                 Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
-                                 1'u64, getStaticSlotCb(Slot(0)),
-                                 collector(aq), 2)
-      let finalizedSlot = start_slot(Epoch(1'u64))
-      let failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64)
-      let failEpoch = epoch(failSlot)
-      var counter = 1'u64
-      for i in 0 ..< 64:
-        if counter >= failEpoch:
-          break
-        let rewindEpoch = failEpoch - counter
-        let rewindSlot = start_slot(rewindEpoch)
-        check queue.getRewindPoint(failSlot, finalizedSlot) == rewindSlot
-        counter = counter shl 1
-
-  test "[SyncQueue#Backward] getRewindPoint() test":
-    let aq = newAsyncQueue[BlockEntry]()
-    block:
-      let getSafeSlot = getStaticSlotCb(Slot(1024))
-      var queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Backward,
-                                 Slot(1024), Slot(0),
-                                 1'u64, getSafeSlot, collector(aq), 2)
-      let safeSlot = getSafeSlot()
-      for i in countdown(1023, 0):
-        check queue.getRewindPoint(Slot(i), safeSlot) == safeSlot
diff --git a/tests/test_toblindedblock.nim b/tests/test_toblindedblock.nim
index 8e6b9e9156..b5e2a1bb80 100644
--- a/tests/test_toblindedblock.nim
+++ b/tests/test_toblindedblock.nim
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2024 Status Research & Development GmbH
+# Copyright (c) 2024-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -10,27 +10,24 @@ import
   # Beacon chain internals
-  ../beacon_chain/spec/helpers,
-  ../beacon_chain/spec/datatypes/[bellatrix, capella],
+  ../beacon_chain/spec/forks,
   ../beacon_chain/spec/mev/[bellatrix_mev, capella_mev, deneb_mev, electra_mev, fulu_mev],
   # Test utilities
   unittest2
 
-
 template do_check() =
   check:
     hash_tree_root(b.message) == hash_tree_root(
      b.toSignedBlindedBeaconBlock.message)
    b.signature == b.toSignedBlindedBeaconBlock.signature
 
-const
-  nondefaultEth1Data = Eth1Data(
-    deposit_root: Eth2Digest.fromHex(
-      "0x55aaf2ee893f67db190d617070bd10d1583b00194fbcfda03d89baa24626f5bb"),
-    deposit_count: 1,
-    block_hash: Eth2Digest.fromHex(
-      "0xe617d58db390a10741ab7d3de0ba9460b5df5e0772e9721fe33c0422a63b2677"))
+const nondefaultEth1Data = Eth1Data(
+  deposit_root: Eth2Digest.fromHex(
+    "0x55aaf2ee893f67db190d617070bd10d1583b00194fbcfda03d89baa24626f5bb"),
+  deposit_count: 1,
+  block_hash: Eth2Digest.fromHex(
+    "0xe617d58db390a10741ab7d3de0ba9460b5df5e0772e9721fe33c0422a63b2677"))
 
 let nondefaultValidatorSig = ValidatorSig.fromHex(
   "0xac08ca70066c6ea0525aa54dd867f82b86945818cb9305aae30f3bee13275dcf13d6d0680a47e889482ff2bb9a9f3cdb0588746f9e30c04645eda6d01bbd0ce6326ceb695294cb338ebace5b130c5b8f2e4f8efa63d63d5bb255c21a39da9c12")[]
@@ -54,14 +51,18 @@ template bellatrix_steps() =
     do_check
   check: b.message.body.proposer_slashings.add(default(ProposerSlashing))
   do_check
-  when false:
-    debugComment "both Electra attestations and attestation slashings need to be done iff Electra"
-    check:
-      b.message.body.attester_slashings.add(default(phase0.AttesterSlashing))
-    do_check
-  check: b.message.body.attestations.add(
-    phase0.Attestation(aggregation_bits: CommitteeValidatorsBits.init(1)))
-  do_check
+  check:
+    b.message.body.attester_slashings.setLen(
+      b.message.body.attester_slashings.len + 1)
+  do_check
+  check:
+    when typeof(b).kind >= ConsensusFork.Electra:
+      b.message.body.attestations.add(electra.Attestation(
+        aggregation_bits: ElectraCommitteeValidatorsBits.init(1)))
+    else:
+      b.message.body.attestations.add(phase0.Attestation(
+        aggregation_bits: CommitteeValidatorsBits.init(1)))
+  do_check
   check: b.message.body.deposits.add(default(Deposit))
   do_check
   check: b.message.body.voluntary_exits.add(default(SignedVoluntaryExit))
@@ -155,4 +156,6 @@ suite "Blinded block conversions":
         deneb_steps
       when consensusFork >= ConsensusFork.Electra:
         electra_steps
+      when consensusFork >= ConsensusFork.Fulu:
+        fulu_steps
   static: doAssert high(ConsensusFork) == ConsensusFork.Fulu
diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim
index 21108bf8fe..fb0a2830ac 100644
--- a/tests/testblockutil.nim
+++ b/tests/testblockutil.nim
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2024 Status Research & Development GmbH
+# Copyright (c) 2018-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -89,7 +89,7 @@ func signBlock(
 from eth/eip1559 import EIP1559_INITIAL_BASE_FEE, calcEip1599BaseFee
 from eth/common/eth_types import EMPTY_ROOT_HASH, GasInt
 
-proc build_empty_merge_execution_payload(state: bellatrix.BeaconState):
+func build_empty_merge_execution_payload(state: bellatrix.BeaconState):
     bellatrix.ExecutionPayloadForSigning =
   ## Assuming a pre-state of the same slot, build a valid ExecutionPayload
   ## without any transactions from a non-merged block.
@@ -112,13 +112,13 @@ proc build_empty_merge_execution_payload(state: bellatrix.BeaconState):
       timestamp: timestamp,
       base_fee_per_gas: EIP1559_INITIAL_BASE_FEE)
 
-  payload.block_hash = rlpHash blockToBlockHeader(bellatrix.BeaconBlock(body:
+  payload.block_hash = compute_execution_block_hash(bellatrix.BeaconBlock(body:
     bellatrix.BeaconBlockBody(execution_payload: payload)))
 
   bellatrix.ExecutionPayloadForSigning(executionPayload: payload,
                                        blockValue: Wei.zero)
 
-proc build_empty_execution_payload(
+func build_empty_execution_payload(
     state: bellatrix.BeaconState,
     feeRecipient: Eth1Address): bellatrix.ExecutionPayloadForSigning =
   ## Assuming a pre-state of the same slot, build a valid ExecutionPayload
@@ -221,6 +221,8 @@ proc addTestBlock*(
         electraAttestations
       elif consensusFork == ConsensusFork.Fulu:
         electraAttestations
+      elif consensusFork == ConsensusFork.Fulu:
+        electraAttestations
       else:
         attestations,
       deposits,
@@ -641,4 +643,4 @@ iterator makeTestBlocks*(
       deposits = deposits,
       sync_aggregate = sync_aggregate,
       graffiti = graffiti,
-      cfg = cfg)
+      cfg = cfg)
\ No newline at end of file
diff --git a/tests/testdbutil.nim b/tests/testdbutil.nim
index 7a7c39cf97..1dc590f980 100644
--- a/tests/testdbutil.nim
+++ b/tests/testdbutil.nim
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2024 Status Research & Development GmbH
+# Copyright (c) 2018-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -26,6 +26,7 @@ proc makeTestDB*(
     flags: UpdateFlags = {},
     cfg = defaultRuntimeConfig): BeaconChainDB =
   # Blob support requires DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH
+  # Data column support requires FULU_FORK_EPOCH != FAR_FUTURE_EPOCH
   var cfg = cfg
   if cfg.CAPELLA_FORK_EPOCH == FAR_FUTURE_EPOCH:
    cfg.CAPELLA_FORK_EPOCH = 90000.Epoch
@@ -36,7 +37,6 @@ proc makeTestDB*(
   if cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH:
    cfg.FULU_FORK_EPOCH = 120000.Epoch
 
-
   var
     genState = (ref ForkedHashedBeaconState)(
       kind: ConsensusFork.Phase0,
       phase0Data: initialize_hashed_beacon_state_from_eth1(
diff --git a/tests/teststateutil.nim b/tests/teststateutil.nim
index 6e93147fe3..907e6a0cd7 100644
--- a/tests/teststateutil.nim
+++ b/tests/teststateutil.nim
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2021-2024 Status Research & Development GmbH
+# Copyright (c) 2021-2025 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
@@ -123,4 +123,4 @@ proc checkPerValidatorBalanceCalc*(
       tmpState.balances.item(it) == get_validator_balance_after_epoch(
         defaultRuntimeConfig, state, cache, info, it.ValidatorIndex)) and
     get_expected_withdrawals(tmpState[]) == get_next_slot_expected_withdrawals(
-      defaultRuntimeConfig, state, cache, info)
+      defaultRuntimeConfig, state, cache, info)
\ No newline at end of file
diff --git a/tests/testutil.nim b/tests/testutil.nim
index f00a8873d5..a0533497d6 100644
--- a/tests/testutil.nim
+++ b/tests/testutil.nim
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2024 Status Research & Development GmbH
+# Copyright (c) 2018-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -58,8 +58,9 @@ proc summarizeLongTests*(name: string) =
     status.sort do (a: (string, OrderedTable[string, Status]),
                     b: (string, OrderedTable[string, Status])) -> int: cmp(a[0], b[0])
-    generateReport(name & "-" & const_preset, status, width=90)
+    generateReport(
+      name & "-" & const_preset, status, width=90, withTotals=false)
   except CatchableError as exc:
     raiseAssert exc.msg
 
-addOutputFormatter(new TimingCollector)
+addOutputFormatter(new TimingCollector)
\ No newline at end of file
diff --git a/vendor/NimYAML b/vendor/NimYAML
index 7721c955b5..9e43c0ab26 160000
--- a/vendor/NimYAML
+++ b/vendor/NimYAML
@@ -1 +1 @@
-Subproject commit 7721c955b522f4893265bb36a6de4f8edef8b54b
+Subproject commit 9e43c0ab26381889a3d932b1e811def60f7408e1
diff --git a/vendor/gnosis-chain-configs b/vendor/gnosis-chain-configs
index 0e085cb606..d447a94bbe 160000
--- a/vendor/gnosis-chain-configs
+++ b/vendor/gnosis-chain-configs
@@ -1 +1 @@
-Subproject commit 0e085cb606e78a495ce8014f9350931bc360e663
+Subproject commit d447a94bbe1bafbef4fe5a2fbb2e5469e7b494bb
diff --git a/vendor/holesky b/vendor/holesky
index 37eaaf8008..32a72e21c6 160000
--- a/vendor/holesky
+++ b/vendor/holesky
@@ -1 +1 @@
-Subproject commit 37eaaf80084489af9459836c03c6e24b9e431c2a
+Subproject commit 32a72e21c6e53c262f27d50dd540cb654517d03a
diff --git a/vendor/hoodi b/vendor/hoodi
new file mode 160000
index 0000000000..08dd242abd
--- /dev/null
+++ b/vendor/hoodi
@@ -0,0 +1 @@
+Subproject commit 08dd242abdb1f93026453bf8e63e6bba1c7b1bc1
diff --git a/vendor/mainnet b/vendor/mainnet
index f6b7882618..978f1794ea 160000
--- a/vendor/mainnet
+++ b/vendor/mainnet
@@ -1 +1 @@
-Subproject commit f6b7882618a5ad2c1d2731ae35e5d16a660d5bb7
+Subproject commit 978f1794eada6f85bee76e4d2d5959a5fb8e0cc5
diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl
index 953a795b96..c7683c5a62 160000
--- a/vendor/nim-bearssl
+++ b/vendor/nim-bearssl
@@ -1 +1 @@
-Subproject commit 953a795b96d73a7401e7a1041587da9ffe6d78f6
+Subproject commit c7683c5a6221605bbab31b53d01feb3a3161bb8b
diff --git a/vendor/nim-blscurve b/vendor/nim-blscurve
index de2d3c7926..52ae4332c7 160000
--- a/vendor/nim-blscurve
+++ b/vendor/nim-blscurve
@@ -1 +1 @@
-Subproject commit de2d3c79264bba18dbea469c8c5c4b3bb3c8bc55
+Subproject commit 52ae4332c749d89fa05226f5493decae568f682c
diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles
index 81a4a7a360..a8fb38a10b 160000
--- a/vendor/nim-chronicles
+++ b/vendor/nim-chronicles
@@ -1 +1 @@
-Subproject commit 81a4a7a360c78be9c80c8f735c76b6d4a1517304
+Subproject commit a8fb38a10bcb548df78e9a70bd77b26bb50abd12
diff --git a/vendor/nim-chronos b/vendor/nim-chronos
index c04576d829..0646c444fc 160000
--- a/vendor/nim-chronos
+++ b/vendor/nim-chronos
@@ -1 +1 @@
-Subproject commit c04576d829b8a0a1b12baaa8bc92037501b3a4a0
+Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655
diff --git a/vendor/nim-confutils b/vendor/nim-confutils
index cb858a27f4..e214b3992a 160000
--- a/vendor/nim-confutils
+++ b/vendor/nim-confutils
@@ -1 +1 @@
-Subproject commit cb858a27f4347be949d10ed74b58713d687936d2
+Subproject commit e214b3992a31acece6a9aada7d0a1ad37c928f3b
diff --git a/vendor/nim-eth b/vendor/nim-eth
index 719c0dfd56..57fa21263a 160000
--- a/vendor/nim-eth
+++ b/vendor/nim-eth
@@ -1 +1 @@
-Subproject commit 719c0dfd56c14814379d2f1c400df8dcfb7d5199
+Subproject commit 57fa21263a50a325e80636280f976e8077f6627c
diff --git a/vendor/nim-eth2-scenarios b/vendor/nim-eth2-scenarios
index 93996ec086..5d3b872650 160000
--- a/vendor/nim-eth2-scenarios
+++ b/vendor/nim-eth2-scenarios
@@ -1 +1 @@
-Subproject commit 93996ec0865765d94b1f72825bd98a5aef215a12
+Subproject commit 5d3b8726503b591e9132cabf5d6da3cb8f819d88
diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams
index cf8d4d2263..2b08c774af 160000
--- a/vendor/nim-faststreams
+++ b/vendor/nim-faststreams
@@ -1 +1 @@
-Subproject commit cf8d4d22636b8e514caf17e49f9c786ac56b0e85
+Subproject commit 2b08c774afaafd600cf4c6f994cf78b8aa090c0c
diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils
index 8bb1acbaa4..79cbab1460 160000
--- a/vendor/nim-http-utils
+++ b/vendor/nim-http-utils
@@ -1 +1 @@
-Subproject commit 8bb1acbaa4b86eb866145b0d468eff64a57d1897
+Subproject commit 79cbab1460f4c0cdde2084589d017c43a3d7b4f1
diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc
index 31af0f2bda..274372132d 160000
--- a/vendor/nim-json-rpc
+++ b/vendor/nim-json-rpc
@@ -1 +1 @@
-Subproject commit 31af0f2bda1486ffb7326c5df1dc47dc63d73fff
+Subproject commit 274372132de497e6b7b793c9d5d5474b71bf80a2
diff --git a/vendor/nim-kzg4844 b/vendor/nim-kzg4844
index abef457e17..2163a77cb6 160000
--- a/vendor/nim-kzg4844
+++ b/vendor/nim-kzg4844
@@ -1 +1 @@
-Subproject commit abef457e17c56901fd038bb1a7e11dfd5a4f919a
+Subproject commit 2163a77cb66b1b0faf032a735a751d0ea1e83499
diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace
index 0c1a0edaf2..65f9ed0a3e 160000
--- a/vendor/nim-libbacktrace
+++ b/vendor/nim-libbacktrace
@@ -1 +1 @@
-Subproject commit 0c1a0edaf2e599d4fa74bc5813ee76e87f3a7a30
+Subproject commit 65f9ed0a3e8aa4c860ccb659ae20d5795aed8207
diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p
index ed5670408b..e67744bf2a 160000
--- a/vendor/nim-libp2p
+++ b/vendor/nim-libp2p
@@ -1 +1 @@
-Subproject commit ed5670408b0902d60f9d91a550ff39ffd33173ed
+Subproject commit e67744bf2a3cbc14f554e63eda6c26e82701c0cd
diff --git a/vendor/nim-metrics b/vendor/nim-metrics
index cacfdc1245..25ffd054fd 160000
--- a/vendor/nim-metrics
+++ b/vendor/nim-metrics
@@ -1 +1 @@
-Subproject commit cacfdc12454a0804c65112b9f4f50d1375208dcd
+Subproject commit 25ffd054fd774f8cf7935e75d6cad542306d7802
diff --git a/vendor/nim-minilru b/vendor/nim-minilru
index c35304151e..0c4b2bce95 160000
--- a/vendor/nim-minilru
+++ b/vendor/nim-minilru
@@ -1 +1 @@
-Subproject commit c35304151ea39077330f225e3837450990d55e48
+Subproject commit 0c4b2bce959591f0a862e9b541ba43c6d0cf3476
diff --git a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal
index 5e4059746e..dae59ddfd5 160000
--- a/vendor/nim-nat-traversal
+++ b/vendor/nim-nat-traversal
@@ -1 +1 @@
-Subproject commit 5e4059746e9095e1731b02eeaecd62a70fbe664d
+Subproject commit dae59ddfd514260bb8586b700ec20f58c4ea30ff
diff --git a/vendor/nim-presto b/vendor/nim-presto
index bd410a26e0..92b1c7ff14 160000
--- a/vendor/nim-presto
+++ b/vendor/nim-presto
@@ -1 +1 @@
-Subproject commit bd410a26e0da62fdcf56d2be17bd53d84868a457
+Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be
diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1
index 0706e2c350..62e16b4dff 160000
--- a/vendor/nim-secp256k1
+++ b/vendor/nim-secp256k1
@@ -1 +1 @@
-Subproject commit 0706e2c350266017dcd663ff2727eb2b03d2fa44
+Subproject commit 62e16b4dff513f1eea7148a8cbba8a8c547b9546
diff --git a/vendor/nim-snappy b/vendor/nim-snappy
index 0c308d3424..8291337351 160000
--- a/vendor/nim-snappy
+++ b/vendor/nim-snappy
@@ -1 +1 @@
-Subproject commit 0c308d34241c9f0764f6d111a0288428ded173bc
+Subproject commit 829133735113951b219e3b108a6bd2146209300b
diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi
index acd3c32743..3108a5f48f 160000
--- a/vendor/nim-sqlite3-abi
+++ b/vendor/nim-sqlite3-abi
@@ -1 +1 @@
-Subproject commit acd3c327433784226b412757bdb5455b5be04c55
+Subproject commit 3108a5f48f4a8b1f4e7ffbac6f1c8ad9d6680441
diff --git a/vendor/nim-ssz-serialization b/vendor/nim-ssz-serialization
index 3397088499..55ac17ca1f 160000
--- a/vendor/nim-ssz-serialization
+++ b/vendor/nim-ssz-serialization
@@ -1 +1 @@
-Subproject commit 3397088499cefc2e7aa5e73a56ad2d5b0f091735
+Subproject commit 55ac17ca1f42afa35db9a06dd50c4d79a17c5d28
diff --git a/vendor/nim-stew b/vendor/nim-stew
index a6e1981320..79e4fa5a9d 160000
--- a/vendor/nim-stew
+++ b/vendor/nim-stew
@@ -1 +1 @@
-Subproject commit a6e198132097fb544d04959aeb3b839e1408f942
+Subproject commit 79e4fa5a9d3374db17ed63622714d3e1094c7f34
diff --git a/vendor/nim-stint b/vendor/nim-stint
index 3236fa6839..1a2c661e3f 160000
--- a/vendor/nim-stint
+++ b/vendor/nim-stint
@@ -1 +1 @@
-Subproject commit 3236fa68394f1e3a06e2bc34218aacdd2d675923
+Subproject commit 1a2c661e3f50ff696b0b6692fab0d7bb2abf10cc
diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools
index 66585e2e96..7b74a716a4 160000
--- a/vendor/nim-taskpools
+++ b/vendor/nim-taskpools
@@ -1 +1 @@
-Subproject commit 66585e2e960b7695e48ea60377fb3aeac96406e8
+Subproject commit 7b74a716a40249720fd7da428113147942b9642d
diff --git a/vendor/nim-testutils b/vendor/nim-testutils
index 4d37244f9f..94d68e796c 160000
--- a/vendor/nim-testutils
+++ b/vendor/nim-testutils
@@ -1 +1 @@
-Subproject commit 4d37244f9f5e1acd8592a4ceb5c3fc47bc160181
+Subproject commit 94d68e796c045d5b37cabc6be32d7bfa168f8857
diff --git a/vendor/nim-unicodedb b/vendor/nim-unicodedb
index 15c5e25e2a..66f2458710 160000
--- a/vendor/nim-unicodedb
+++ b/vendor/nim-unicodedb
@@ -1 +1 @@
-Subproject commit 15c5e25e2a49a924bc97647481ff50125bba2c76
+Subproject commit 66f2458710dc641dd4640368f9483c8a0ec70561
diff --git a/vendor/nim-web3 b/vendor/nim-web3
index 523fa0d3d3..d8a91d0409 160000
--- a/vendor/nim-web3
+++ b/vendor/nim-web3
@@ -1 +1 @@
-Subproject commit 523fa0d3d39994c85b75900139fc16d27277f6ad
+Subproject commit d8a91d040975cd3dd2a10c26456fab2d7523e8dd
diff --git a/vendor/nim-websock b/vendor/nim-websock
index 53c2ba2696..ebe308a79a 160000
--- a/vendor/nim-websock
+++ b/vendor/nim-websock
@@ -1 +1 @@
-Subproject commit 53c2ba2696ac5ad72a70b36d0711a0837f0b1c8f
+Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508
diff --git a/vendor/nim-zlib b/vendor/nim-zlib
index 91cf360b1a..3f79980952 160000
--- a/vendor/nim-zlib
+++ b/vendor/nim-zlib
@@ -1 +1 @@
-Subproject commit 91cf360b1aeb2e0c753ff8bac6de22a41c5ed8cd
+Subproject commit 3f7998095264d262a8d99e2be89045e6d9301537
diff --git a/vendor/nim-zxcvbn b/vendor/nim-zxcvbn
index f31e4bd8fe..d827fdc996 160000
--- a/vendor/nim-zxcvbn
+++ b/vendor/nim-zxcvbn
@@ -1 +1 @@
-Subproject commit f31e4bd8fef867799bc42e47b073925a281349f3
+Subproject commit d827fdc9968a4f06bba587dc38df2b20399f8cf5
diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system
index 8fafcd0bac..4c6ff070c1 160000
--- a/vendor/nimbus-build-system
+++ b/vendor/nimbus-build-system
@@ -1 +1 @@
-Subproject commit 8fafcd0bac9f409091b7bcaee62ab6330f57441e
+Subproject commit 4c6ff070c116450bb2c285691724ac9e6202cb28
diff --git a/vendor/sepolia b/vendor/sepolia
index f2c219a93c..562d9938f0 160000
--- a/vendor/sepolia
+++ b/vendor/sepolia
@@ -1 +1 @@
-Subproject commit f2c219a93c4491cee3d90c18f2f8e82aed850eab
+Subproject commit 562d9938f08675e9ba490a1dfba21fb05843f39f
diff --git a/wasm/index_ncli.html b/wasm/index_ncli.html
index ef58bd5784..aca8586a02 100644
--- a/wasm/index_ncli.html
+++ b/wasm/index_ncli.html
@@ -5,7 +5,7 @@
-    Nimbus ncli/title>
+    <title>Nimbus ncli