diff --git a/.github/workflows/generate_vectors.yml b/.github/workflows/generate_vectors.yml index c9ae2564b6..31ffa847b0 100644 --- a/.github/workflows/generate_vectors.yml +++ b/.github/workflows/generate_vectors.yml @@ -34,20 +34,15 @@ jobs: - name: Generate tests run: | cd consensus-specs - make -j$(nproc) gen_all 2>&1 | tee ../consensustestgen.log + make reftests verbose=true 2>&1 | tee ../consensustestgen.log cp -r presets/ ../consensus-spec-tests/presets cp -r configs/ ../consensus-spec-tests/configs - find . -type d -empty -delete - name: Check for errors run: | if grep -q "\[ERROR\]" consensustestgen.log; then echo "There is an error in the log" exit 1 fi - if find . -type f -name "INCOMPLETE" | grep -q "INCOMPLETE"; then - echo "There is an INCOMPLETE file" - exit 1 - fi - name: Archive configurations run: | cd consensus-spec-tests diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml index 6c458a886b..4fa36fe033 100644 --- a/.github/workflows/nightly-tests.yml +++ b/.github/workflows/nightly-tests.yml @@ -30,8 +30,8 @@ jobs: - deneb - electra - fulu - - eip7441 - eip7732 + - eip7805 steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 2d32aff4c4..a8c2946f1d 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -50,8 +50,8 @@ jobs: - deneb - electra - fulu - - eip7441 - eip7732 + - eip7805 steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -62,23 +62,3 @@ jobs: cache: 'pip' - name: Run pyspec tests for ${{ matrix.fork }} run: make test preset=minimal fork=${{ matrix.fork }} - - modcheck: - needs: [lint, whitespace] - runs-on: [self-hosted-ghr-custom, size-s-x64, profile-consensusSpecs] - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: '3.12' - cache: 'pip' - - name: Run generators with --modcheck - run: make gen_all modcheck=true 2>&1 | tee consensustestgen.log - - name: Check for errors - run: | - if grep -q "\[ERROR\]" consensustestgen.log; then - echo "There is an error in the log" - exit 1 - fi diff --git a/Makefile b/Makefile index 057b54b76b..f81657d86e 100644 --- a/Makefile +++ b/Makefile @@ -13,19 +13,17 @@ ALL_EXECUTABLE_SPEC_NAMES = \ eip6800 \ eip7441 \ eip7732 \ - eip7805 + eip7805 # A list of fake targets. .PHONY: \ clean \ coverage \ - detect_errors \ - gen_all \ - gen_list \ help \ kzg_setups \ lint \ pyspec \ + reftests \ serve_docs \ test @@ -38,17 +36,14 @@ NORM = $(shell tput sgr0) # Print target descriptions. 
help: - @echo "make $(BOLD)clean$(NORM) -- delete all untracked files" - @echo "make $(BOLD)coverage$(NORM) -- run pyspec tests with coverage" - @echo "make $(BOLD)detect_errors$(NORM) -- detect generator errors" - @echo "make $(BOLD)gen_$(NORM) -- run a single generator" - @echo "make $(BOLD)gen_all$(NORM) -- run all generators" - @echo "make $(BOLD)gen_list$(NORM) -- list all generator targets" - @echo "make $(BOLD)kzg_setups$(NORM) -- generate trusted setups" - @echo "make $(BOLD)lint$(NORM) -- run the linters" - @echo "make $(BOLD)pyspec$(NORM) -- generate python specifications" - @echo "make $(BOLD)serve_docs$(NORM) -- start a local docs web server" - @echo "make $(BOLD)test$(NORM) -- run pyspec tests" + @echo "make $(BOLD)clean$(NORM) -- delete all untracked files" + @echo "make $(BOLD)coverage$(NORM) -- run pyspec tests with coverage" + @echo "make $(BOLD)kzg_setups$(NORM) -- generate trusted setups" + @echo "make $(BOLD)lint$(NORM) -- run the linters" + @echo "make $(BOLD)pyspec$(NORM) -- build python specifications" + @echo "make $(BOLD)reftests$(NORM) -- generate reference tests" + @echo "make $(BOLD)serve_docs$(NORM) -- start a local docs web server" + @echo "make $(BOLD)test$(NORM) -- run pyspec tests" ############################################################################### # Virtual Environment @@ -64,7 +59,7 @@ MDFORMAT_VENV = $(VENV)/bin/mdformat $(VENV): @echo "Creating virtual environment" @python3 -m venv $(VENV) - @$(PIP_VENV) install --quiet uv==0.5.24 + @$(PIP_VENV) install --quiet --upgrade uv ############################################################################### # Specification @@ -189,9 +184,10 @@ MARKDOWN_FILES = $(CURDIR)/README.md \ # Check for mistakes. lint: pyspec - @$(MDFORMAT_VENV) --number $(MARKDOWN_FILES) + @$(MDFORMAT_VENV) --number --wrap=80 $(MARKDOWN_FILES) @$(CODESPELL_VENV) . --skip "./.git,$(VENV),$(PYSPEC_DIR)/.mypy_cache" -I .codespell-whitelist - @$(PYTHON_VENV) -m black $(CURDIR)/tests + @$(PYTHON_VENV) -m isort --quiet $(CURDIR)/tests + @$(PYTHON_VENV) -m black --quiet $(CURDIR)/tests @$(PYTHON_VENV) -m pylint --rcfile $(PYLINT_CONFIG) $(PYLINT_SCOPE) @$(PYTHON_VENV) -m mypy --config-file $(MYPY_CONFIG) $(MYPY_SCOPE) @@ -199,72 +195,47 @@ lint: pyspec # Generators ############################################################################### -TEST_VECTOR_DIR = $(CURDIR)/../consensus-spec-tests/tests -GENERATOR_DIR = $(CURDIR)/tests/generators -SCRIPTS_DIR = $(CURDIR)/scripts -GENERATOR_ERROR_LOG_FILE = $(TEST_VECTOR_DIR)/testgen_error_log.txt -GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.))) -GENERATOR_TARGETS = $(patsubst $(GENERATOR_DIR)/%/, gen_%, $(GENERATORS)) COMMA:= , +TEST_VECTOR_DIR = $(CURDIR)/../consensus-spec-tests/tests -# List available generators. -gen_list: - @for target in $(shell echo $(GENERATOR_TARGETS) | tr ' ' '\n' | sort -n); do \ - echo $$target; \ - done - -# Run one generator. +# Generate reference tests. # This will forcibly rebuild pyspec just in case. 
-# To print more details, append verbose=true, eg: -# make gen_bls verbose=true -# To check modules for a generator, append modcheck=true, eg: -# make gen_genesis modcheck=true -# To run the generator for a specific test, append k=, eg: -# make gen_operations k=invalid_committee_index -# To run the generator for a specific fork, append fork=, eg: -# make gen_operations fork=fulu -# To run the generator for a specific preset, append preset=, eg: -# make gen_operations preset=mainnet -# To run the generator for a list of tests, forks, and/or presets, append them as comma-separated lists, eg: -# make gen_operations k=invalid_committee_index,invalid_too_many_committee_bits +# To generate reference tests for a single runner, append runner=, eg: +# make reftests runner=bls +# To generate reference tests with more details, append verbose=true, eg: +# make reftests runner=bls verbose=true +# To generate reference tests with fewer threads, append threads=N, eg: +# make reftests runner=bls threads=1 +# To generate reference tests for a specific test, append k=, eg: +# make reftests runner=operations k=invalid_committee_index +# To generate reference tests for a specific fork, append fork=, eg: +# make reftests runner=operations fork=fulu +# To generate reference tests for a specific preset, append preset=, eg: +# make reftests runner=operations preset=mainnet +# To generate reference tests for a list of tests, forks, and/or presets, append them as comma-separated lists, eg: +# make reftests runner=operations k=invalid_committee_index,invalid_too_many_committee_bits # Or all at the same time, eg: -# make gen_operations preset=mainnet fork=fulu k=invalid_committee_index -gen_%: MAYBE_VERBOSE := $(if $(filter true,$(verbose)),--verbose) -gen_%: MAYBE_MODCHECK := $(if $(filter true,$(modcheck)),--modcheck) -gen_%: MAYBE_TESTS := $(if $(k),--case-list $(subst ${COMMA}, ,$(k))) -gen_%: MAYBE_FORKS := $(if $(fork),--fork-list $(subst ${COMMA}, ,$(fork))) -gen_%: MAYBE_PRESETS := $(if $(preset),--preset-list $(subst ${COMMA}, ,$(preset))) -gen_%: pyspec - @mkdir -p $(TEST_VECTOR_DIR) - @$(PYTHON_VENV) $(GENERATOR_DIR)/$*/main.py \ +# make reftests runner=operations preset=mainnet fork=fulu k=invalid_committee_index +reftests: MAYBE_VERBOSE := $(if $(filter true,$(verbose)),--verbose) +reftests: MAYBE_THREADS := $(if $(threads),--threads=$(threads)) +reftests: MAYBE_RUNNERS := $(if $(runner),--runners $(subst ${COMMA}, ,$(runner))) +reftests: MAYBE_TESTS := $(if $(k),--cases $(subst ${COMMA}, ,$(k))) +reftests: MAYBE_FORKS := $(if $(fork),--forks $(subst ${COMMA}, ,$(fork))) +reftests: MAYBE_PRESETS := $(if $(preset),--presets $(subst ${COMMA}, ,$(preset))) +reftests: pyspec + @$(PYTHON_VENV) -m tests.generators.main \ --output $(TEST_VECTOR_DIR) \ $(MAYBE_VERBOSE) \ - $(MAYBE_MODCHECK) \ + $(MAYBE_THREADS) \ + $(MAYBE_RUNNERS) \ $(MAYBE_TESTS) \ $(MAYBE_FORKS) \ $(MAYBE_PRESETS) -# Run all generators then check for errors. -gen_all: $(GENERATOR_TARGETS) - @$(MAKE) detect_errors - -# Detect errors in generators. -detect_errors: $(TEST_VECTOR_DIR) - @incomplete_files=$$(find $(TEST_VECTOR_DIR) -name "INCOMPLETE"); \ - if [ -n "$$incomplete_files" ]; then \ - echo "[ERROR] incomplete detected"; \ - exit 1; \ - fi - @if [ -f $(GENERATOR_ERROR_LOG_FILE) ]; then \ - echo "[ERROR] $(GENERATOR_ERROR_LOG_FILE) file exists"; \ - exit 1; \ - fi - @echo "[PASSED] no errors detected" - # Generate KZG trusted setups for testing. 
kzg_setups: pyspec @for preset in minimal mainnet; do \ - $(PYTHON_VENV) $(SCRIPTS_DIR)/gen_kzg_trusted_setups.py \ + $(PYTHON_VENV) $(CURDIR)/scripts/gen_kzg_trusted_setups.py \ --secret=1337 \ --g1-length=4096 \ --g2-length=65 \ diff --git a/README.md b/README.md index f00e3db260..5b53e6b18c 100644 --- a/README.md +++ b/README.md @@ -4,15 +4,17 @@ [![testgen](https://github.com/ethereum/consensus-specs/actions/workflows/generate_vectors.yml/badge.svg?branch=dev&event=schedule)](https://github.com/ethereum/consensus-specs/actions/workflows/generate_vectors.yml) This repository hosts the current Ethereum -[proof-of-stake](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/) specifications. -Discussions about design rationale and proposed changes can be brought up and discussed as issues. -Solidified, agreed-upon changes to the specifications can be made through pull requests. +[proof-of-stake](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/) +specifications. Discussions about design rationale and proposed changes can be +brought up and discussed as issues. Solidified, agreed-upon changes to the +specifications can be made through pull requests. ## Specifications -Core specifications for Ethereum proof-of-stake clients can be found in [specs](specs). These are -divided into features. Features are researched and developed in parallel, and then consolidated into -sequential upgrades when ready. +Core specifications for Ethereum proof-of-stake clients can be found in +[specs](specs). These are divided into features. Features are researched and +developed in parallel, and then consolidated into sequential upgrades when +ready. ### Stable Specifications @@ -39,8 +41,8 @@ sequential upgrades when ready. ### External specifications -Additional specifications and standards outside of requisite client functionality can be found in -the following repositories: +Additional specifications and standards outside of requisite client +functionality can be found in the following repositories: - [Beacon APIs](https://github.com/ethereum/beacon-apis) - [Engine APIs](https://github.com/ethereum/execution-apis/tree/main/src/engine) @@ -49,11 +51,11 @@ the following repositories: ### Reference tests -Reference tests built from the executable Python spec are available in the [Ethereum Proof-of-Stake -Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repository. Compressed -tarballs are available for each release -[here](https://github.com/ethereum/consensus-spec-tests/releases). Nightly reference tests are -available +Reference tests built from the executable Python spec are available in the +[Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) +repository. Compressed tarballs are available for each release +[here](https://github.com/ethereum/consensus-spec-tests/releases). Nightly +reference tests are available [here](https://github.com/ethereum/consensus-specs/actions/workflows/generate_vectors.yml). ## Contributors @@ -80,12 +82,16 @@ make help ### Design goals -The following are the broad design goals for the Ethereum proof-of-stake consensus specifications: +The following are the broad design goals for the Ethereum proof-of-stake +consensus specifications: - Minimize complexity, even at the cost of some losses in efficiency. -- Remain live through major network partitions and when very large portions of nodes go offline. 
-- Select components that are quantum secure or easily swappable for quantum-secure alternatives. -- Utilize crypto and design techniques that allow for a large participation of validators. +- Remain live through major network partitions and when very large portions of + nodes go offline. +- Select components that are quantum secure or easily swappable for + quantum-secure alternatives. +- Utilize crypto and design techniques that allow for a large participation of + validators. - Minimize hardware requirements such that a consumer laptop can participate. ### Useful resources diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 3437d68441..3f46765475 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -172,7 +172,6 @@ SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 VALIDATOR_CUSTODY_REQUIREMENT: 8 BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 -MAX_BLOBS_PER_BLOCK_FULU: 12 MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # EIP7441 @@ -191,3 +190,14 @@ VIEW_FREEZE_DEADLINE: 9 MAX_REQUEST_INCLUSION_LIST: 16 # 2**13 (= 8192) MAX_BYTES_PER_INCLUSION_LIST: 8192 + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + # Deneb + - EPOCH: 269568 + MAX_BLOBS_PER_BLOCK: 6 + # Electra + - EPOCH: 364032 + MAX_BLOBS_PER_BLOCK: 9 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 500122461d..acbe53e84c 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -169,7 +169,6 @@ SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 VALIDATOR_CUSTODY_REQUIREMENT: 8 BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 -MAX_BLOBS_PER_BLOCK_FULU: 12 MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # EIP7441 @@ -188,3 +187,14 @@ VIEW_FREEZE_DEADLINE: 3 MAX_REQUEST_INCLUSION_LIST: 16 # 2**13 (= 8192) MAX_BYTES_PER_INCLUSION_LIST: 8192 + +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + # Deneb + - EPOCH: 18446744073709551615 + MAX_BLOBS_PER_BLOCK: 6 + # Electra + - EPOCH: 18446744073709551615 + MAX_BLOBS_PER_BLOCK: 9 diff --git a/docs/docs/release.md b/docs/docs/release.md index 2528321f2a..1d0e2b3aa5 100644 --- a/docs/docs/release.md +++ b/docs/docs/release.md @@ -125,7 +125,7 @@ cp -r presets ../consensus-spec-tests cp -r configs ../consensus-spec-tests ``` -Next, use `make gen_all` to generate all the reference tests. The following command will run all +Next, use `make reftests` to generate all the reference tests. The following command will run all generators in parallel for maximum speed. The console output is saved to a file so we can check for errors afterwards. @@ -143,7 +143,7 @@ errors afterwards. > to do this. Note that the "Bundle Reference Tests" section can be skipped if this route is taken. ```bash -make --jobs gen_all 2>&1 | tee ../consensustestgen.log +make reftests verbose=true 2>&1 | tee ../consensustestgen.log ``` Next, check for errors by searching for "ERROR" in test logfile. 
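The `BLOB_SCHEDULE` entries added above pair an activation epoch with a blob limit, and the pyspec build renders them as a tuple of `frozendict`s (see the `pysetup` changes further down). Purely as an illustration of how such a schedule can be resolved at runtime — the helper name and default below are assumptions, not the spec's own function — a lookup might look like this:

```python
from frozendict import frozendict

# Shaped like the new BLOB_SCHEDULE config entries (mainnet values shown).
BLOB_SCHEDULE = (
    frozendict({"EPOCH": 269568, "MAX_BLOBS_PER_BLOCK": 6}),  # Deneb
    frozendict({"EPOCH": 364032, "MAX_BLOBS_PER_BLOCK": 9}),  # Electra
)


def max_blobs_at_epoch(epoch: int, default: int = 0) -> int:
    """Return the blob limit of the latest schedule entry active at `epoch`."""
    active = [entry for entry in BLOB_SCHEDULE if entry["EPOCH"] <= epoch]
    if not active:
        return default
    return max(active, key=lambda entry: entry["EPOCH"])["MAX_BLOBS_PER_BLOCK"]


assert max_blobs_at_epoch(300_000) == 6  # after Deneb, before Electra
assert max_blobs_at_epoch(400_000) == 9  # after Electra
```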
diff --git a/presets/mainnet/fulu.yaml b/presets/mainnet/fulu.yaml index b87e004d2a..82ab37ce25 100644 --- a/presets/mainnet/fulu.yaml +++ b/presets/mainnet/fulu.yaml @@ -7,7 +7,7 @@ KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 # Blob # --------------------------------------------------------------- -# 2**6` (= 64) +# 2**6 (= 64) FIELD_ELEMENTS_PER_CELL: 64 -# 2**0 * FIELD_ELEMENTS_PER_BLOB (= 8,192) +# 2**1 * FIELD_ELEMENTS_PER_BLOB (= 8,192) FIELD_ELEMENTS_PER_EXT_BLOB: 8192 diff --git a/presets/minimal/fulu.yaml b/presets/minimal/fulu.yaml index e94d499b8b..48b4d7a072 100644 --- a/presets/minimal/fulu.yaml +++ b/presets/minimal/fulu.yaml @@ -7,7 +7,7 @@ KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 # Blob # --------------------------------------------------------------- -# 2**6` (= 64) +# 2**6 (= 64) FIELD_ELEMENTS_PER_CELL: 64 -# 2**0 * FIELD_ELEMENTS_PER_BLOB (= 8,192) +# 2**1 * FIELD_ELEMENTS_PER_BLOB (= 8,192) FIELD_ELEMENTS_PER_EXT_BLOB: 8192 diff --git a/pyproject.toml b/pyproject.toml index 05b3393e77..69987c2519 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "marko==2.1.3", "ruamel.yaml==0.18.10", - "setuptools==80.3.1", + "setuptools==80.4.0", "wheel==0.45.1", ] @@ -17,6 +17,7 @@ dependencies = [ "curdleproofs==0.1.2", "eth-typing==5.2.1", "eth-utils==5.3.0", + "frozendict==2.4.6", "lru-dict==1.3.0", "marko==2.1.3", "milagro_bls_binding==1.9.0", @@ -25,7 +26,7 @@ dependencies = [ "pycryptodome==3.22.0", "remerkleable==0.1.28", "ruamel.yaml==0.18.10", - "setuptools==80.3.1", + "setuptools==80.4.0", "trie==3.1.0", ] @@ -38,6 +39,7 @@ test = [ lint = [ "black==25.1.0", "codespell==2.4.1", + "isort==6.0.1", "mdformat-gfm-alerts==1.0.1", "mdformat-gfm==0.4.1", "mdformat-toc==0.3.0", @@ -50,13 +52,25 @@ generator = [ "pathos==0.3.4", "pytest==8.3.5", "python-snappy==0.7.3", + "rich==14.0.0", ] docs = [ "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.10.1", - "mkdocs-material==9.6.12", + "mkdocs-material==9.6.13", "mkdocs==1.6.1", ] [tool.black] line-length = 100 + +[tool.isort] +profile = "black" +line_length = 100 +combine_as_imports = true +known_first_party = ["eth2spec"] +order_by_type = false +skip_glob = [ + "tests/core/pyspec/eth2spec/*/mainnet.py", + "tests/core/pyspec/eth2spec/*/minimal.py", +] diff --git a/pysetup/helpers.py b/pysetup/helpers.py index 914dcef534..3295361019 100644 --- a/pysetup/helpers.py +++ b/pysetup/helpers.py @@ -1,5 +1,5 @@ import re -from typing import TypeVar, Dict +from typing import TypeVar, Dict, Union, List import textwrap from functools import reduce @@ -85,12 +85,25 @@ def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values()) # Access global dict of config vars for runtime configurables + # Ignore variable between quotes and doubles quotes for name in spec_object.config_vars.keys(): - functions_spec = re.sub(r"\b%s\b" % name, 'config.' + name, functions_spec) - ordered_class_objects_spec = re.sub(r"\b%s\b" % name, 'config.' + name, ordered_class_objects_spec) - - def format_config_var(name: str, vardef: VariableDefinition) -> str: - if vardef.type_name is None: + functions_spec = re.sub(r"(? str: + if isinstance(vardef, list): + # A special case for list of records. 
+ indent = " " * 4 + lines = [f"{name}=("] + for d in vardef: + line = indent*2 + "frozendict({\n" + for k, v in d.items(): + line += indent * 3 + f'"{k}": {v},\n' + line += indent*2 + "})," + lines.append(line) + lines.append(indent + "),") + return "\n".join(lines) + elif vardef.type_name is None: out = f'{name}={vardef.value},' else: out = f'{name}={vardef.type_name}({vardef.value}),' @@ -98,10 +111,16 @@ def format_config_var(name: str, vardef: VariableDefinition) -> str: out += f' # {vardef.comment}' return out + def format_config_var_param(value): + if isinstance(value, list): + # A special case for list of records. + return "tuple[frozendict[str, Any], ...]" + elif isinstance(value, VariableDefinition): + return value.type_name if value.type_name is not None else "int" + config_spec = 'class Configuration(NamedTuple):\n' config_spec += ' PRESET_BASE: str\n' - config_spec += '\n'.join(f' {k}: {v.type_name if v.type_name is not None else "int"}' - for k, v in spec_object.config_vars.items()) + config_spec += '\n'.join(f' {k}: {format_config_var_param(v)}' for k, v in spec_object.config_vars.items()) config_spec += '\n\n\nconfig = Configuration(\n' config_spec += f' PRESET_BASE="{preset_name}",\n' config_spec += '\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items()) @@ -267,13 +286,16 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject: ) -def parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]: +def parse_config_vars(conf: Dict[str, str]) -> Dict[str, Union[str, List[Dict[str, str]]]]: """ Parses a dict of basic str/int/list types into a dict for insertion into the spec code. """ - out: Dict[str, str] = dict() + out: Dict[str, Union[str, List[Dict[str, str]]]] = dict() for k, v in conf.items(): - if isinstance(v, str) and (v.startswith("0x") or k == 'PRESET_BASE' or k == 'CONFIG_NAME'): + if isinstance(v, list): + # A special case for list of records + out[k] = v + elif isinstance(v, str) and (v.startswith("0x") or k == "PRESET_BASE" or k == "CONFIG_NAME"): # Represent byte data with string, to avoid misinterpretation as big-endian int. # Everything except PRESET_BASE and CONFIG_NAME is either byte data or an integer. 
out[k] = f"'{v}'" diff --git a/pysetup/spec_builders/altair.py b/pysetup/spec_builders/altair.py index 830f396a9d..779d1df606 100644 --- a/pysetup/spec_builders/altair.py +++ b/pysetup/spec_builders/altair.py @@ -21,7 +21,7 @@ def imports(cls, preset_name: str) -> str: def preparations(cls): return ''' SSZVariableName = str -GeneralizedIndex = NewType('GeneralizedIndex', int) +GeneralizedIndex = int ''' @classmethod diff --git a/pysetup/spec_builders/fulu.py b/pysetup/spec_builders/fulu.py index 39befcb41e..f5cf0af329 100644 --- a/pysetup/spec_builders/fulu.py +++ b/pysetup/spec_builders/fulu.py @@ -10,6 +10,7 @@ class FuluSpecBuilder(BaseSpecBuilder): @classmethod def imports(cls, preset_name: str): return f''' +from frozendict import frozendict from eth2spec.electra import {preset_name} as electra ''' diff --git a/setup.py b/setup.py index 66cf6bbdb3..3a3b27be75 100644 --- a/setup.py +++ b/setup.py @@ -3,6 +3,7 @@ import json import logging import os +import re import string import sys import warnings @@ -238,6 +239,8 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr current_name = None should_skip = False + list_of_records = None + list_of_records_name = None for child in document.children: if isinstance(child, BlankLine): continue @@ -278,6 +281,62 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr ssz_objects[current_name] = "\n".join(line.rstrip() for line in source.splitlines()) else: raise Exception("unrecognized python code element: " + source) + elif isinstance(child, Table) and list_of_records is not None: + list_of_records_header = None + for i, row in enumerate(child.children): + # This will start as an empty list when there is a comment, + # which indicates that the next table is a list-of-records. After we're done parsing + # the table, we will reset this to None. 
+ if list_of_records is not None: + if i == 0: + # Save the table header, this will be used for field names + # Skip the last item, which is the description + list_of_records_header = [ + # Convert the titles to SNAKE_CASE + re.sub(r'\s+', '_', value.children[0].children.upper()) + for value in row.children[:-1] + ] + else: + # Add the row entry to our list of records + list_of_records.append({ + list_of_records_header[i]: value.children[0].children + for i, value in enumerate(row.children[:-1]) + }) + + # Make a type map from the spec definition + # We'll apply this to the file config (ie mainnet.yaml) + type_map: dict[str,str] = {} + pattern = re.compile(r'^(\w+)\(.*\)$') + for entry in list_of_records: + for k, v in entry.items(): + m = pattern.match(v) + if m: + type_map[k] = m.group(1) + + # Apply the types to the file config + list_of_records_config: list[dict[str,str]] = [] + for entry in config[list_of_records_name]: + new_entry: dict[str,str] = {} + for k, v in entry.items(): + ctor = type_map.get(k) + if ctor: + new_entry[k] = f"{ctor}({v})" + else: + new_entry[k] = v + list_of_records_config.append(new_entry) + + # For mainnet, check that the spec config & file config are the same + # For minimal, we expect this to be different; just use the file config + if preset_name == "mainnet": + assert list_of_records == list_of_records_config, \ + f"list of records mismatch: {list_of_records} vs {list_of_records_config}" + elif preset_name == "minimal": + list_of_records = list_of_records_config + + # Set the config variable and reset the global variable + config_vars[list_of_records_name] = list_of_records + list_of_records = None + elif isinstance(child, Table): for row in child.children: cells = row.children @@ -342,6 +401,14 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr elif isinstance(child, HTMLBlock): if child.body.strip() == "": should_skip = True + # Handle list-of-records tables + match = re.match(r"", child.body.strip()) + if match: + # Initialize list-of-records, in the next iteration this will indicate that the + # table is a list-of-records and must be parsed differently. + list_of_records = [] + # Use regex to extract the desired configuration list name + list_of_records_name = match.group(1).upper() # Load KZG trusted setup from files if any('KZG_SETUP' in name for name in constant_vars): diff --git a/specs/_deprecated/custody_game/beacon-chain.md b/specs/_deprecated/custody_game/beacon-chain.md index 48d915db9e..4f5d66c206 100644 --- a/specs/_deprecated/custody_game/beacon-chain.md +++ b/specs/_deprecated/custody_game/beacon-chain.md @@ -52,8 +52,9 @@ ## Introduction -This document details the beacon chain additions and changes of to support the shard data custody game, -building upon the [Sharding](../sharding/beacon-chain.md) specification. +This document details the beacon chain additions and changes of to support the +shard data custody game, building upon the +[Sharding](../sharding/beacon-chain.md) specification. ## Constants @@ -219,7 +220,9 @@ class CustodyKeyReveal(Container): #### `EarlyDerivedSecretReveal` -Represents an early (punishable) reveal of one of the derived secrets, where derived secrets are RANDAO reveals and custody reveals (both are part of the same domain). +Represents an early (punishable) reveal of one of the derived secrets, where +derived secrets are RANDAO reveals and custody reveals (both are part of the +same domain). 
```python class EarlyDerivedSecretReveal(Container): @@ -251,7 +254,9 @@ def replace_empty_or_append(l: List, new_element: Any) -> int: ### `legendre_bit` -Returns the Legendre symbol `(a/q)` normalizes as a bit (i.e. `((a/q) + 1) // 2`). In a production implementation, a well-optimized library (e.g. GMP) should be used for this. +Returns the Legendre symbol `(a/q)` normalizes as a bit (i.e. +`((a/q) + 1) // 2`). In a production implementation, a well-optimized library +(e.g. GMP) should be used for this. ```python def legendre_bit(a: int, q: int) -> int: @@ -280,7 +285,8 @@ def legendre_bit(a: int, q: int) -> int: ### `get_custody_atoms` -Given one set of data, return the custody atoms: each atom will be combined with one legendre bit. +Given one set of data, return the custody atoms: each atom will be combined with +one legendre bit. ```python def get_custody_atoms(bytez: bytes) -> Sequence[bytes]: diff --git a/specs/_deprecated/custody_game/validator.md b/specs/_deprecated/custody_game/validator.md index 4d499d28c2..43ec0e4ed2 100644 --- a/specs/_deprecated/custody_game/validator.md +++ b/specs/_deprecated/custody_game/validator.md @@ -19,47 +19,77 @@ ## Introduction -This is an accompanying document to [Custody Game -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" -participating in the shard data Custody Game. +This is an accompanying document to +[Custody Game -- The Beacon Chain](./beacon-chain.md), which describes the +expected actions of a "validator" participating in the shard data Custody Game. ## Prerequisites -This document is an extension of the [Sharding -- Validator](../sharding/validator.md). All behaviors and definitions defined in the Sharding doc carry over unless explicitly noted or overridden. +This document is an extension of the +[Sharding -- Validator](../sharding/validator.md). All behaviors and definitions +defined in the Sharding doc carry over unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the [Custody Game -- The Beacon Chain](./beacon-chain.md) -docs are requisite for this document and used throughout. Please see the Custody Game docs before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +[Custody Game -- The Beacon Chain](./beacon-chain.md) docs are requisite for +this document and used throughout. Please see the Custody Game docs before +continuing and use them as a reference throughout. ## Becoming a validator -Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#becoming-a-validator) for details. +Becoming a validator in Custody Game is unchanged from Phase 0. See the +[Phase 0 validator guide](../../phase0/validator.md#becoming-a-validator) for +details. ## Beacon chain validator assignments -Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#validator-assignments) for details. +Beacon chain validator assignments to beacon committees and beacon block +proposal are unchanged from Phase 0. See the +[Phase 0 validator guide](../../phase0/validator.md#validator-assignments) for +details. ##### Custody slashings -Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./beacon-chain.md#custodyslashing) objects can be included in the `block`. 
The custody slashings must satisfy the verification conditions found in [custody slashings processing](beacon-chain.md#custody-slashings). The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE). +Up to `MAX_CUSTODY_SLASHINGS`, +[`CustodySlashing`](./beacon-chain.md#custodyslashing) objects can be included +in the `block`. The custody slashings must satisfy the verification conditions +found in [custody slashings processing](beacon-chain.md#custody-slashings). The +validator receives a small "whistleblower" reward for each custody slashing +included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE). ##### Custody key reveals -Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./beacon-chain.md#custodykeyreveal) objects can be included in the `block`. The custody key reveals must satisfy the verification conditions found in [custody key reveal processing](beacon-chain.md#custody-key-reveals). The validator receives a small reward for each custody key reveal included. +Up to `MAX_CUSTODY_KEY_REVEALS`, +[`CustodyKeyReveal`](./beacon-chain.md#custodykeyreveal) objects can be included +in the `block`. The custody key reveals must satisfy the verification conditions +found in [custody key reveal processing](beacon-chain.md#custody-key-reveals). +The validator receives a small reward for each custody key reveal included. ##### Early derived secret reveals -Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./beacon-chain.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](beacon-chain.md#custody-key-reveals). The validator receives a small "whistleblower" reward for each early derived secret reveal included. +Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, +[`EarlyDerivedSecretReveal`](./beacon-chain.md#earlyderivedsecretreveal) objects +can be included in the `block`. The early derived secret reveals must satisfy +the verification conditions found in +[early derived secret reveal processing](beacon-chain.md#custody-key-reveals). +The validator receives a small "whistleblower" reward for each early derived +secret reveal included. #### Construct attestation -`attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` are unchanged from Phase 0. But safety/validity in signing the message is premised upon calculation of the "custody bit" [TODO]. +`attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` +are unchanged from Phase 0. But safety/validity in signing the message is +premised upon calculation of the "custody bit" [TODO]. ## How to avoid slashing -Proposer and Attester slashings described in Phase 0 remain in place with the addition of the following. +Proposer and Attester slashings described in Phase 0 remain in place with the +addition of the following. ### Custody slashing -To avoid custody slashings, the attester must never sign any shard transition for which the custody bit is one. The custody bit is computed using the custody secret: +To avoid custody slashings, the attester must never sign any shard transition +for which the custody bit is one. 
The custody bit is computed using the custody +secret: ```python def get_custody_secret(state: BeaconState, @@ -75,5 +105,7 @@ def get_custody_secret(state: BeaconState, return bls.Sign(privkey, signing_root) ``` -Note that the valid custody secret is always the one for the **attestation target epoch**, not to be confused with the epoch in which the shard block was generated. -While they are the same most of the time, getting this wrong at custody epoch boundaries would result in a custody slashing. +Note that the valid custody secret is always the one for the **attestation +target epoch**, not to be confused with the epoch in which the shard block was +generated. While they are the same most of the time, getting this wrong at +custody epoch boundaries would result in a custody slashing. diff --git a/specs/_deprecated/das/das-core.md b/specs/_deprecated/das/das-core.md index fa0778bebe..8aec3b8dc0 100644 --- a/specs/_deprecated/das/das-core.md +++ b/specs/_deprecated/das/das-core.md @@ -91,8 +91,9 @@ def das_fft_extension(data: Sequence[Point]) -> Sequence[Point]: ### Data recovery -See [Reed-Solomon erasure code recovery in `n*log^2(n)` time with FFTs](https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039) for theory. -Implementations: +See +[Reed-Solomon erasure code recovery in `n*log^2(n)` time with FFTs](https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039) +for theory. Implementations: - [Original Python](https://github.com/ethereum/research/blob/master/mimc_stark/recovery.py) - [New optimized approach in python](https://github.com/ethereum/research/tree/master/polynomial_reconstruction) diff --git a/specs/_deprecated/das/fork-choice.md b/specs/_deprecated/das/fork-choice.md index b091d85e74..8cbcfa3746 100644 --- a/specs/_deprecated/das/fork-choice.md +++ b/specs/_deprecated/das/fork-choice.md @@ -11,9 +11,13 @@ ## Introduction -This document is the beacon chain fork choice spec for Data Availability Sampling. The only change that we add from phase 0 is that we add a concept of "data dependencies"; -a block is only eligible for consideration in the fork choice after a data availability test has been successfully completed for all dependencies. -The "root" of a shard block for data dependency purposes is considered to be a `DataCommitment` object, which is a pair of a Kate commitment and a length. +This document is the beacon chain fork choice spec for Data Availability +Sampling. The only change that we add from phase 0 is that we add a concept of +"data dependencies"; a block is only eligible for consideration in the fork +choice after a data availability test has been successfully completed for all +dependencies. The "root" of a shard block for data dependency purposes is +considered to be a `DataCommitment` object, which is a pair of a Kate commitment +and a length. ## Dependency calculation diff --git a/specs/_deprecated/das/p2p-interface.md b/specs/_deprecated/das/p2p-interface.md index 3af2220439..4a8bdb9a05 100644 --- a/specs/_deprecated/das/p2p-interface.md +++ b/specs/_deprecated/das/p2p-interface.md @@ -25,8 +25,9 @@ ## Introduction -For an introduction about DAS itself, see [the DAS participation spec](sampling.md#data-availability-sampling). -This is not a pre-requisite for the network layer, but will give you valuable context. +For an introduction about DAS itself, see +[the DAS participation spec](sampling.md#data-availability-sampling). 
This is +not a pre-requisite for the network layer, but will give you valuable context. For sampling, all nodes need to query for `k` random samples each slot. @@ -34,161 +35,209 @@ For sampling, all nodes need to query for `k` random samples each slot. This is a lot of work, and ideally happens at a low latency. -To achieve quick querying, the query model is changed to *push* the samples to listeners instead, using GossipSub. -The listeners then randomly rotate their subscriptions to keep queries unpredictable. -Except for a small subset of subscriptions, which will function as a backbone to keep topics more stable and allow for efficient peer discovery. +To achieve quick querying, the query model is changed to *push* the samples to +listeners instead, using GossipSub. The listeners then randomly rotate their +subscriptions to keep queries unpredictable. Except for a small subset of +subscriptions, which will function as a backbone to keep topics more stable and +allow for efficient peer discovery. -Publishing can utilize the fan-out functionality in GossipSub, and is easier to split between nodes: -nodes on the horizontal networks can help by producing the same samples and fan-out publishing to their own peers. +Publishing can utilize the fan-out functionality in GossipSub, and is easier to +split between nodes: nodes on the horizontal networks can help by producing the +same samples and fan-out publishing to their own peers. -This push model also helps to obfuscate the original source of a message: -the listeners do not have to make individual queries to some identified source. +This push model also helps to obfuscate the original source of a message: the +listeners do not have to make individual queries to some identified source. -The push model does not aim to serve "historical" queries (anything older than the most recent). -Historical queries are still required for the unhappy case, where messages are not pushed quick enough, -and missing samples are not reconstructed by other nodes on the horizontal subnet quick enough. +The push model does not aim to serve "historical" queries (anything older than +the most recent). Historical queries are still required for the unhappy case, +where messages are not pushed quick enough, and missing samples are not +reconstructed by other nodes on the horizontal subnet quick enough. -The main challenge in supporting historical queries is to target the right nodes, -without concentrating too many requests on a single node, or breaking the network/consensus identity separation. +The main challenge in supporting historical queries is to target the right +nodes, without concentrating too many requests on a single node, or breaking the +network/consensus identity separation. ## DAS Subnets On a high level, the push-model roles are divided into: -- Sources: create blobs of shard block data, and transformed into many tiny samples. +- Sources: create blobs of shard block data, and transformed into many tiny + samples. - Sinks: continuously look for samples At full operation, the network has one proposer, per shard, per slot. In the push-model, there are: -- *Vertical subnets*: Sinks can subscribe to indices of samples: there is a sample to subnet mapping. -- *Horizontal subnets*: Sources need to distribute samples to all vertical networks: they participate in a fan-out layer. +- *Vertical subnets*: Sinks can subscribe to indices of samples: there is a + sample to subnet mapping. 
+- *Horizontal subnets*: Sources need to distribute samples to all vertical + networks: they participate in a fan-out layer. ### Horizontal subnets -The shift of the distribution responsibility to a proposer can only be achieved with amplification: -a regular proposer cannot reach every vertical subnet. +The shift of the distribution responsibility to a proposer can only be achieved +with amplification: a regular proposer cannot reach every vertical subnet. #### Publishing -To publish their work, proposers propagate the shard block as a whole on a shard-block subnet. +To publish their work, proposers propagate the shard block as a whole on a +shard-block subnet. -The proposer can fan-out their work more aggressively, by using the fan-out functionality of GossipSub: -it may publish to all its peers on the subnet, instead of just those in its mesh. +The proposer can fan-out their work more aggressively, by using the fan-out +functionality of GossipSub: it may publish to all its peers on the subnet, +instead of just those in its mesh. #### Horizontal propagation -Peers on the horizontal subnet are expected to at least perform regular propagation of shard blocks, like participation in any other topic. +Peers on the horizontal subnet are expected to at least perform regular +propagation of shard blocks, like participation in any other topic. -*Although this may be sufficient for testnets, expect parameter changes in the spec here.* +*Although this may be sufficient for testnets, expect parameter changes in the +spec here.* #### Horizontal to vertical -Nodes on this same subnet can replicate the sampling efficiently (including a proof for each sample), -and distribute it to any vertical networks that are available to them. +Nodes on this same subnet can replicate the sampling efficiently (including a +proof for each sample), and distribute it to any vertical networks that are +available to them. -Since the messages are content-addressed (instead of origin-stamped), -multiple publishers of the same samples on a vertical subnet do not hurt performance, -but actually improve it by shortcutting regular propagation on the vertical subnet, and thus lowering the latency to a sample. +Since the messages are content-addressed (instead of origin-stamped), multiple +publishers of the same samples on a vertical subnet do not hurt performance, but +actually improve it by shortcutting regular propagation on the vertical subnet, +and thus lowering the latency to a sample. ### Vertical subnets -Vertical subnets propagate the samples to every peer that is interested. -These interests are randomly sampled and rotate quickly: although not perfect, +Vertical subnets propagate the samples to every peer that is interested. These +interests are randomly sampled and rotate quickly: although not perfect, sufficient to avoid any significant amount of nodes from being 100% predictable. -As soon as a sample is missing after the expected propagation time window, -nodes can divert to the pull-model, or ultimately flag it as unavailable data. +As soon as a sample is missing after the expected propagation time window, nodes +can divert to the pull-model, or ultimately flag it as unavailable data. -Note that the vertical subnets are shared between the different shards, -and a simple hash function `(shard, slot, sample_index) -> subnet_index` defines which samples go where. -This is to evenly distribute samples to subnets, even when one shard has more activity than the other. 
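The `(shard, slot, sample_index) -> subnet_index` mapping is still marked TODO in the spec. Purely as a hypothetical sketch of the kind of uniform mapping intended — the subnet count and hash choice here are assumptions, not spec definitions:

```python
from hashlib import sha256

SAMPLE_SUBNET_COUNT = 512  # hypothetical value, not defined by this spec


def sample_subnet_index(shard: int, slot: int, sample_index: int) -> int:
    """One possible uniform mapping of a sample key onto a vertical subnet."""
    preimage = (
        shard.to_bytes(8, "little")
        + slot.to_bytes(8, "little")
        + sample_index.to_bytes(8, "little")
    )
    return int.from_bytes(sha256(preimage).digest()[:8], "little") % SAMPLE_SUBNET_COUNT
```

Because the digest is uniformly distributed, samples spread evenly across subnets regardless of which shard produced them, which is the property the paragraph above asks for.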
+Note that the vertical subnets are shared between the different shards, and a +simple hash function `(shard, slot, sample_index) -> subnet_index` defines which +samples go where. This is to evenly distribute samples to subnets, even when one +shard has more activity than the other. TODO: define `(shard, slot, sample_index) -> subnet_index` hash function. #### Slow rotation: Backbone -To allow for subscriptions to rotate quickly and randomly, a backbone is formed to help onboard peers into other topics. +To allow for subscriptions to rotate quickly and randomly, a backbone is formed +to help onboard peers into other topics. This backbone is based on a pure function of the *node* identity and time: -- Nodes can be found *without additional discovery overhead*: - peers on a vertical topic can be found by searching the local peerstore for identities that hash to the desired topic(s), - assuming the peerstore already has a large enough variety of peers. -- Nodes can be held accountable for contributing to the backbone: - peers that participate in DAS but are not active on the appropriate backbone topics can be scored down. - *Note*: This is experimental, DAS should be light enough for all participants to run, but scoring needs to undergo testing. +- Nodes can be found *without additional discovery overhead*: peers on a + vertical topic can be found by searching the local peerstore for identities + that hash to the desired topic(s), assuming the peerstore already has a large + enough variety of peers. +- Nodes can be held accountable for contributing to the backbone: peers that + participate in DAS but are not active on the appropriate backbone topics can + be scored down. *Note*: This is experimental, DAS should be light enough for + all participants to run, but scoring needs to undergo testing. -A node should anticipate backbone topics to subscribe to based their own identity. -These subscriptions rotate slowly, and with different offsets per node identity to avoid sudden network-wide rotations. +A node should anticipate backbone topics to subscribe to based their own +identity. These subscriptions rotate slowly, and with different offsets per node +identity to avoid sudden network-wide rotations. ```python # TODO hash function: (node, time)->subnets ``` -Backbone subscription work is outlined in the [DAS participation spec](sampling.md#slow-rotation-backbone) +Backbone subscription work is outlined in the +[DAS participation spec](sampling.md#slow-rotation-backbone) #### Quick Rotation: Sampling -A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS participation spec](sampling.md#quick-rotation-sampling). -If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore and, if necessary, in the DHT for peers in the topic backbone. +A node MUST maintain `k` random subscriptions to topics, and rotate these +according to the [DAS participation spec](sampling.md#quick-rotation-sampling). +If the node does not already have connected peers on the topic it needs to +sample, it can search its peerstore and, if necessary, in the DHT for peers in +the topic backbone. 
## DAS in the Gossip domain: Push ### Topics and messages -Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are: +Following the same scheme as the +[Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names +and payload types are: | Name | Message Type | | --------------------------- | ------------ | | `das_sample_{subnet_index}` | `DASSample` | -Also see the [Sharding general networking spec](../sharding/p2p-interface.md) for important topics such as that of the shard-blobs and shard-headers. +Also see the [Sharding general networking spec](../sharding/p2p-interface.md) +for important topics such as that of the shard-blobs and shard-headers. #### Horizontal subnets: `shard_blob_{shard}` -Extending the regular `shard_blob_{shard}` as [defined in the Sharding networking specification](../sharding/p2p-interface.md#shard-blobs-shard_blob_shard) +Extending the regular `shard_blob_{shard}` as +[defined in the Sharding networking specification](../sharding/p2p-interface.md#shard-blobs-shard_blob_shard) -If participating in DAS, upon receiving a `signed_blob` for the first time with a `slot` not older than `MAX_RESAMPLE_TIME`, -a subscriber of a `shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical subnets. -Take `blob = signed_blob.blob`: +If participating in DAS, upon receiving a `signed_blob` for the first time with +a `slot` not older than `MAX_RESAMPLE_TIME`, a subscriber of a +`shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical +subnets. Take `blob = signed_blob.blob`: 1. Extend the data: `extended_data = extend_data(blob.data)` -2. Create samples with proofs: `samples = sample_data(blob.slot, blob.shard, extended_data)` -3. Fanout-publish the samples to the vertical subnets of its peers (not all vertical subnets may be reached). +2. Create samples with proofs: + `samples = sample_data(blob.slot, blob.shard, extended_data)` +3. Fanout-publish the samples to the vertical subnets of its peers (not all + vertical subnets may be reached). -The [DAS participation spec](sampling.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets. +The [DAS participation spec](sampling.md#horizontal-subnets) outlines when and +where to participate in DAS on horizontal subnets. #### Vertical subnets: `das_sample_{subnet_index}` -Shard blob samples can be verified with just a 48 byte KZG proof (commitment quotient polynomial), -against the commitment to blob polynomial, specific to that `(shard, slot)` key. - -The following validations MUST pass before forwarding the `sample` on the vertical subnet. - -- _[IGNORE]_ The commitment for the (`sample.shard`, `sample.slot`, `sample.index`) tuple must be known. - If not known, the client MAY queue the sample if it passes formatting conditions. -- _[REJECT]_ `sample.shard`, `sample.slot` and `sample.index` are hashed into a `sbunet_index` (TODO: define hash) which MUST match the topic `{subnet_index}` parameter. -- _[REJECT]_ `sample.shard` must be within valid range: `0 <= sample.shard < get_active_shard_count(state, compute_epoch_at_slot(sample.slot))`. -- _[REJECT]_ `sample.index` must be within valid range: `0 <= sample.index < sample_count`, where: +Shard blob samples can be verified with just a 48 byte KZG proof (commitment +quotient polynomial), against the commitment to blob polynomial, specific to +that `(shard, slot)` key. 
+ +The following validations MUST pass before forwarding the `sample` on the +vertical subnet. + +- _[IGNORE]_ The commitment for the (`sample.shard`, `sample.slot`, + `sample.index`) tuple must be known. If not known, the client MAY queue the + sample if it passes formatting conditions. +- _[REJECT]_ `sample.shard`, `sample.slot` and `sample.index` are hashed into a + `sbunet_index` (TODO: define hash) which MUST match the topic `{subnet_index}` + parameter. +- _[REJECT]_ `sample.shard` must be within valid range: + `0 <= sample.shard < get_active_shard_count(state, compute_epoch_at_slot(sample.slot))`. +- _[REJECT]_ `sample.index` must be within valid range: + `0 <= sample.index < sample_count`, where: - `sample_count = (points_count + POINTS_PER_SAMPLE - 1) // POINTS_PER_SAMPLE` - - `points_count` is the length as claimed along with the commitment, which must be smaller than `MAX_SAMPLES_PER_BLOCK`. -- _[IGNORE]_ The `sample` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `sample.slot <= current_slot`. A client MAY queue future samples for processing at the appropriate slot if it passed formatting conditions. -- _[IGNORE]_ This is the first received sample with the (`sample.shard`, `sample.slot`, `sample.index`) key tuple. -- _[REJECT]_ As already limited by the SSZ list-limit, it is important the sample data is well-formatted and not too large. -- _[REJECT]_ The `sample.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. -- _[REJECT]_ The `sample.proof` MUST be valid: `verify_sample(sample, sample_count, commitment)` - -Upon receiving a valid sample, it SHOULD be retained for a buffer period if the local node is part of the backbone that covers this sample. -This is to serve other peers that may have missed it. + - `points_count` is the length as claimed along with the commitment, which + must be smaller than `MAX_SAMPLES_PER_BLOCK`. +- _[IGNORE]_ The `sample` is not from a future slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + `sample.slot <= current_slot`. A client MAY queue future samples for + processing at the appropriate slot if it passed formatting conditions. +- _[IGNORE]_ This is the first received sample with the (`sample.shard`, + `sample.slot`, `sample.index`) key tuple. +- _[REJECT]_ As already limited by the SSZ list-limit, it is important the + sample data is well-formatted and not too large. +- _[REJECT]_ The `sample.data` MUST NOT contain any point `p >= MODULUS`. + Although it is a `uint256`, not the full 256 bit range is valid. +- _[REJECT]_ The `sample.proof` MUST be valid: + `verify_sample(sample, sample_count, commitment)` + +Upon receiving a valid sample, it SHOULD be retained for a buffer period if the +local node is part of the backbone that covers this sample. This is to serve +other peers that may have missed it. ## DAS in the Req-Resp domain: Pull -To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain. +To pull samples from nodes, in case of network instability when samples are +unavailable, a new query method is added to the Req-Resp domain. -This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../../phase0/p2p-interface.md). 
+This builds on top of the protocol identification and encoding spec which was +introduced in [the Phase0 network spec](../../phase0/p2p-interface.md). Note that DAS networking uses a different protocol prefix: `/eth2/das/req` @@ -217,7 +266,11 @@ Response Content: When the sample is: - Available: respond with a `Success` result code, and the encoded sample. -- Expected to be available, but not: respond with a `ResourceUnavailable` result code. -- Not available, but never of interest to the node: respond with an `InvalidRequest` result code. - -When the node is part of the backbone and expected to have the sample, the validity of the quest MUST be recognized with `Success` or `ResourceUnavailable`. +- Expected to be available, but not: respond with a `ResourceUnavailable` result + code. +- Not available, but never of interest to the node: respond with an + `InvalidRequest` result code. + +When the node is part of the backbone and expected to have the sample, the +validity of the quest MUST be recognized with `Success` or +`ResourceUnavailable`. diff --git a/specs/_deprecated/das/sampling.md b/specs/_deprecated/das/sampling.md index b00d146d05..f7d6a47a97 100644 --- a/specs/_deprecated/das/sampling.md +++ b/specs/_deprecated/das/sampling.md @@ -41,38 +41,49 @@ TODO ### DAS during network instability -The GossipSub based retrieval of samples may not always work. -In such event, a node can move through below stages until it recovers data availability. +The GossipSub based retrieval of samples may not always work. In such event, a +node can move through below stages until it recovers data availability. #### Stage 0: Waiting on missing samples -Wait for the sample to re-broadcast. Someone may be slow with publishing, or someone else is able to do the work. +Wait for the sample to re-broadcast. Someone may be slow with publishing, or +someone else is able to do the work. Any node can do the following work to keep the network healthy: -- Common: Listen on a horizontal subnet, chunkify the block data in samples, and propagate the samples to vertical subnets. -- Extreme: Listen on enough vertical subnets, reconstruct the missing samples by recovery, and propagate the recovered samples. +- Common: Listen on a horizontal subnet, chunkify the block data in samples, and + propagate the samples to vertical subnets. +- Extreme: Listen on enough vertical subnets, reconstruct the missing samples by + recovery, and propagate the recovered samples. -This is not a requirement, but should improve the network stability with little resources, and without any central party. +This is not a requirement, but should improve the network stability with little +resources, and without any central party. #### Stage 1: Pulling missing samples from known peers -The more realistic option, to execute when a sample is missing, is to query any node that is known to hold it. -Since *consensus identity is disconnected from network identity*, there is no direct way to contact custody holders -without explicitly asking for the data. +The more realistic option, to execute when a sample is missing, is to query any +node that is known to hold it. Since *consensus identity is disconnected from +network identity*, there is no direct way to contact custody holders without +explicitly asking for the data. -However, *network identities* are still used to build a backbone for each vertical subnet. -These nodes should have received the samples, and can serve a buffer of them on demand. 
-Although serving these is not directly incentivised, it is little work: +However, *network identities* are still used to build a backbone for each +vertical subnet. These nodes should have received the samples, and can serve a +buffer of them on demand. Although serving these is not directly incentivised, +it is little work: -1. Buffer any message you see on the backbone vertical subnets, for a buffer of up to two weeks. -2. Serve the samples on request. An individual sample is just expected to be `~ 0.5 KB`, and does not require any pre-processing to serve. +1. Buffer any message you see on the backbone vertical subnets, for a buffer of + up to two weeks. +2. Serve the samples on request. An individual sample is just expected to be + `~ 0.5 KB`, and does not require any pre-processing to serve. -A validator SHOULD make a `DASQuery` request to random peers, until failing more than the configured failure-rate. +A validator SHOULD make a `DASQuery` request to random peers, until failing more +than the configured failure-rate. -TODO: detailed failure-mode spec. Stop after trying e.g. 3 peers for any sample in a configured time window (after the gossip period). +TODO: detailed failure-mode spec. Stop after trying e.g. 3 peers for any sample +in a configured time window (after the gossip period). #### Stage 2: Pulling missing data from validators with custody. -Pulling samples directly from nodes with validators that have a custody responsibility, -without revealing their identity to the network, is an open problem. +Pulling samples directly from nodes with validators that have a custody +responsibility, without revealing their identity to the network, is an open +problem. diff --git a/specs/_deprecated/sharding/beacon-chain.md b/specs/_deprecated/sharding/beacon-chain.md index 07df11fd98..b7304119a2 100644 --- a/specs/_deprecated/sharding/beacon-chain.md +++ b/specs/_deprecated/sharding/beacon-chain.md @@ -42,18 +42,23 @@ ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, -based on the ideas [here](https://notes.ethereum.org/@dankrad/new_sharding) and more broadly [here](https://arxiv.org/abs/1809.09044), -using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. +This document describes the extensions made to the Phase 0 design of The Beacon +Chain to support data sharding, based on the ideas +[here](https://notes.ethereum.org/@dankrad/new_sharding) and more broadly +[here](https://arxiv.org/abs/1809.09044), using KZG10 commitments to commit to +data to remove any need for fraud proofs (and hence, safety-critical synchrony +assumptions) in the design. ### Glossary - **Data**: A list of KZG points, to translate a byte string into -- **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions. +- **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 + transactions. ## Constants -The following values are (non-configurable) constants used throughout the specification. +The following values are (non-configurable) constants used throughout the +specification. ### Misc @@ -79,7 +84,8 @@ The following values are (non-configurable) constants used throughout the specif ### Time parameters -With the introduction of builder blocks the number of slots per epoch is doubled (it counts beacon blocks and builder blocks). 
+With the introduction of builder blocks the number of slots per epoch is doubled +(it counts beacon blocks and builder blocks). | Name | Value | Unit | Duration | | ----------------- | --------------------- | :---: | :----------: | @@ -93,8 +99,9 @@ With the introduction of builder blocks the number of slots per epoch is doubled ## Configuration -*Note*: Some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable. -E.g. `ACTIVE_SHARDS` and `SAMPLES_PER_BLOB`. +*Note*: Some preset variables may become run-time configurable for testnets, but +default to a preset while the spec is unstable. E.g. `ACTIVE_SHARDS` and +`SAMPLES_PER_BLOB`. ### Time parameters diff --git a/specs/_deprecated/sharding/p2p-interface.md b/specs/_deprecated/sharding/p2p-interface.md index 5999e0ecdf..1439818a04 100644 --- a/specs/_deprecated/sharding/p2p-interface.md +++ b/specs/_deprecated/sharding/p2p-interface.md @@ -18,8 +18,9 @@ ## Introduction -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. -The adjustments and additions for Shards are outlined in this document. +The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. The +adjustments and additions for Shards are outlined in this document. ## Constants @@ -34,7 +35,9 @@ The adjustments and additions for Shards are outlined in this document. ### Topics and messages -Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are: +Following the same scheme as the +[Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names +and payload types are: | Name | Message Type | | -------------------------- | ------------------- | @@ -48,38 +51,52 @@ The [DAS network specification](../das/das-core.md) defines additional topics. ##### `builder_block_bid` -- _[IGNORE]_ The `bid` is published 1 slot early or later (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `bid.slot <= current_slot + 1` - (a client MAY queue future samples for propagation at the appropriate slot). -- _[IGNORE]_ The `bid` is for the current or next block - i.e. validate that `bid.slot >= current_slot` -- _[IGNORE]_ The `bid` is the first `bid` valid bid for `bid.slot`, or the bid is at least 1% higher than the previous known `bid` -- _[REJECT]_ The validator defined by `bid.validator_index` exists and is slashable. -- _[REJECT]_ The bid signature, which is an Eth1 signature, needs to be valid and the address needs to contain enough Ether to cover the bid and the data gas base fee. +- _[IGNORE]_ The `bid` is published 1 slot early or later (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + `bid.slot <= current_slot + 1` (a client MAY queue future samples for + propagation at the appropriate slot). +- _[IGNORE]_ The `bid` is for the current or next block i.e. validate that + `bid.slot >= current_slot` +- _[IGNORE]_ The `bid` is the first `bid` valid bid for `bid.slot`, or the bid + is at least 1% higher than the previous known `bid` +- _[REJECT]_ The validator defined by `bid.validator_index` exists and is + slashable. +- _[REJECT]_ The bid signature, which is an Eth1 signature, needs to be valid + and the address needs to contain enough Ether to cover the bid and the data + gas base fee. 
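+
+For illustration only, the bid replacement rule above could be implemented as in
+the following non-normative sketch. The helper name `is_acceptable_bid` and the
+integer reading of "at least 1% higher" are assumptions of this example, not
+part of the specification.
+
+```python
+from typing import Optional
+
+
+def is_acceptable_bid(new_bid_value: int, best_known_bid_value: Optional[int]) -> bool:
+    # Accept the first valid bid seen for this slot.
+    if best_known_bid_value is None:
+        return True
+    # Otherwise require the new bid to be at least 1% higher than the best
+    # known bid, using integer arithmetic only.
+    return new_bid_value * 100 >= best_known_bid_value * 101
+```
+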
#### Shard sample subnets -Shard sample (row/column) subnets are used by builders to make their samples available as part of their intermediate block release after selection by beacon block proposers. +Shard sample (row/column) subnets are used by builders to make their samples +available as part of their intermediate block release after selection by beacon +block proposers. ##### `shard_row_{subnet_id}` -Shard sample data, in the form of a `SignedShardSample` is published to the `shard_row_{subnet_id}` and `shard_column_{subnet_id}` subnets. +Shard sample data, in the form of a `SignedShardSample` is published to the +`shard_row_{subnet_id}` and `shard_column_{subnet_id}` subnets. The following validations MUST pass before forwarding the `sample`. -- _[IGNORE]_ The `sample` is published 1 slot early or later (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `sample.slot <= current_slot + 1` - (a client MAY queue future samples for propagation at the appropriate slot). -- _[IGNORE]_ The `sample` is new enough to still be processed -- - i.e. validate that `compute_epoch_at_slot(sample.slot) >= get_previous_epoch(state)` -- _[REJECT]_ The shard sample is for the correct subnet -- - i.e. `sample.row == subnet_id` for `shard_row_{subnet_id}` and `sample.column == subnet_id` for `shard_column_{subnet_id}` -- _[IGNORE]_ The sample is the first sample with valid signature received for the `(sample.builder, sample.slot, sample.row, sample.column)` combination. -- _[REJECT]_ The `sample.data` MUST NOT contain any point `x >= BLS_MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. +- _[IGNORE]_ The `sample` is published 1 slot early or later (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + `sample.slot <= current_slot + 1` (a client MAY queue future samples for + propagation at the appropriate slot). +- _[IGNORE]_ The `sample` is new enough to still be processed -- i.e. validate + that `compute_epoch_at_slot(sample.slot) >= get_previous_epoch(state)` +- _[REJECT]_ The shard sample is for the correct subnet -- i.e. + `sample.row == subnet_id` for `shard_row_{subnet_id}` and + `sample.column == subnet_id` for `shard_column_{subnet_id}` +- _[IGNORE]_ The sample is the first sample with valid signature received for + the `(sample.builder, sample.slot, sample.row, sample.column)` combination. +- _[REJECT]_ The `sample.data` MUST NOT contain any point `x >= BLS_MODULUS`. + Although it is a `uint256`, not the full 256 bit range is valid. - _[REJECT]_ The validator defined by `sample.builder` exists and is slashable. -- _[REJECT]_ The sample is proposed by the expected `builder` for the sample's `slot`. - i.e., the beacon block at `sample.slot - 1` according to the node's fork choice contains an `IntermediateBlockBid` - with `intermediate_block_bid.validator_index == sample.builder` -- _[REJECT]_ The sample signature, `sample.signature`, is valid for the builder -- - i.e. `bls.Verify(builder_pubkey, sample_signing_root, sample.signature)` OR `sample.signature == Bytes96(b"\0" * 96)` AND - the sample verification `verify_sample` passes +- _[REJECT]_ The sample is proposed by the expected `builder` for the sample's + `slot`. i.e., the beacon block at `sample.slot - 1` according to the node's + fork choice contains an `IntermediateBlockBid` with + `intermediate_block_bid.validator_index == sample.builder` +- _[REJECT]_ The sample signature, `sample.signature`, is valid for the builder + -- i.e. 
`bls.Verify(builder_pubkey, sample_signing_root, sample.signature)` OR + `sample.signature == Bytes96(b"\0" * 96)` AND the sample verification + `verify_sample` passes diff --git a/specs/_deprecated/sharding/polynomial-commitments.md b/specs/_deprecated/sharding/polynomial-commitments.md index b62189a569..d4e2d5b385 100644 --- a/specs/_deprecated/sharding/polynomial-commitments.md +++ b/specs/_deprecated/sharding/polynomial-commitments.md @@ -41,7 +41,11 @@ ## Introduction -This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the sharding specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations, and hints what the best known algorithms for these implementations are included below. +This document specifies basic polynomial operations and KZG polynomial +commitment operations as they are needed for the sharding specification. The +implementations are not optimized for performance, but readability. All +practical implementations should optimize the polynomial operations, and hints +what the best known algorithms for these implementations are included below. ## Constants @@ -304,7 +308,8 @@ def evaluate_polynomial_in_evaluation_form(poly: BLSPolynomialByEvaluations, x: ## KZG Operations -We are using the KZG10 polynomial commitment scheme (Kate, Zaverucha and Goldberg, 2010: https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf). +We are using the KZG10 polynomial commitment scheme (Kate, Zaverucha and +Goldberg, 2010: https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf). ### Elliptic curve helper functions diff --git a/specs/_deprecated/sharding/validator.md b/specs/_deprecated/sharding/validator.md index 2becda8928..3f0dd0b99b 100644 --- a/specs/_deprecated/sharding/validator.md +++ b/specs/_deprecated/sharding/validator.md @@ -22,15 +22,20 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement executable beacon chain proposal. +This document represents the changes to be made in the code of an "honest +validator" to implement executable beacon chain proposal. ## Prerequisites -This document is an extension of the [Bellatrix -- Honest Validator](../../bellatrix/validator.md) guide. -All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Bellatrix -- Honest Validator](../../bellatrix/validator.md) guide. All +behaviors and definitions defined in this document, and documents it extends, +carry over unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Sharding](./beacon-chain.md) are requisite for this document and used throughout. -Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +updated Beacon Chain doc of [Sharding](./beacon-chain.md) are requisite for this +document and used throughout. Please see related Beacon Chain doc before +continuing and use them as a reference throughout. ## Constants @@ -45,7 +50,9 @@ Please see related Beacon Chain doc before continuing and use them as a referenc ### `get_validator_row_subnets` -TODO: Currently the subnets are public (i.e. anyone can derive them.) 
This is good for a proof of custody with public verifiability, but bad for validator privacy. +TODO: Currently the subnets are public (i.e. anyone can derive them.) This is +good for a proof of custody with public verifiability, but bad for validator +privacy. ```python def get_validator_row_subnets(validator: Validator, epoch: Epoch) -> List[uint64]: @@ -100,35 +107,71 @@ def verify_sample(state: BeaconState, block: BeaconBlock, sample: SignedShardSam ### Attesting -Every attester is assigned `VALIDATOR_SAMPLE_ROW_COUNT` rows and `VALIDATOR_SAMPLE_COLUMN_COUNT` columns of shard samples. As part of their validator duties, they should subscribe to the subnets given by `get_validator_row_subnets` and `get_validator_column_subnets`, for the whole epoch. +Every attester is assigned `VALIDATOR_SAMPLE_ROW_COUNT` rows and +`VALIDATOR_SAMPLE_COLUMN_COUNT` columns of shard samples. As part of their +validator duties, they should subscribe to the subnets given by +`get_validator_row_subnets` and `get_validator_column_subnets`, for the whole +epoch. -A row or column is *available* for a `slot` if at least half of the total number of samples were received on the subnet and passed `verify_sample`. Otherwise it is called unavailable. +A row or column is *available* for a `slot` if at least half of the total number +of samples were received on the subnet and passed `verify_sample`. Otherwise it +is called unavailable. -If a validator is assigned to an attestation at slot `attestation_slot` and had his previous attestation duty at `previous_attestation_slot`, then they should only attest under the following conditions: +If a validator is assigned to an attestation at slot `attestation_slot` and had +his previous attestation duty at `previous_attestation_slot`, then they should +only attest under the following conditions: -- For all intermediate blocks `block` with `previous_attestation_slot < block.slot <= attestation_slot`: All sample rows and columns assigned to the validator were available. +- For all intermediate blocks `block` with + `previous_attestation_slot < block.slot <= attestation_slot`: All sample rows + and columns assigned to the validator were available. -If this condition is not fulfilled, then the validator should instead attest to the last block for which the condition holds. +If this condition is not fulfilled, then the validator should instead attest to +the last block for which the condition holds. -This leads to the security property that a chain that is not fully available cannot have more than 1/16th of all validators voting for it. TODO: This claim is for an "infinite number" of validators. Compute the concrete security due to sampling bias. +This leads to the security property that a chain that is not fully available +cannot have more than 1/16th of all validators voting for it. TODO: This claim +is for an "infinite number" of validators. Compute the concrete security due to +sampling bias. # Sample reconstruction -A validator that has received enough samples of a row or column to mark it as available, should reconstruct all samples in that row/column (if they aren't all available already.) The function `reconstruct_polynomial` gives an example implementation for this. +A validator that has received enough samples of a row or column to mark it as +available, should reconstruct all samples in that row/column (if they aren't all +available already.) The function `reconstruct_polynomial` gives an example +implementation for this. 
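+
+As a non-normative illustration of the two rules above (the one-half
+availability threshold and the reconstruction trigger), assuming a helper that
+counts the verified samples received for a given row or column:
+
+```python
+def is_available(received_sample_count: int, total_sample_count: int) -> bool:
+    # A row/column is *available* once at least half of its samples were
+    # received on the subnet and passed `verify_sample`.
+    return 2 * received_sample_count >= total_sample_count
+
+
+def should_reconstruct(received_sample_count: int, total_sample_count: int) -> bool:
+    # Reconstruct only when the row/column is available but some of its
+    # samples are still missing locally.
+    return (
+        is_available(received_sample_count, total_sample_count)
+        and received_sample_count < total_sample_count
+    )
+```
+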
-Once they have run the reconstruction function, they should distribute the samples that they reconstructed on all pubsub that -the local node is subscribed to, if they have not already received that sample on that pubsub. As an example: +Once they have run the reconstruction function, they should distribute the +samples that they reconstructed on all pubsub that the local node is subscribed +to, if they have not already received that sample on that pubsub. As an example: - The validator is subscribed to row `2` and column `5` - The sample `(row, column) = (2, 5)` is missing in the column `5` pubsub -- After they have reconstruction of row `2`, the validator should send the sample `(2, 5)` on to the row `2` pubsub (if it was missing) as well as the column `5` pubsub. +- After they have reconstruction of row `2`, the validator should send the + sample `(2, 5)` on to the row `2` pubsub (if it was missing) as well as the + column `5` pubsub. -TODO: We need to verify the total complexity of doing this and make sure this does not cause too much load on a validator +TODO: We need to verify the total complexity of doing this and make sure this +does not cause too much load on a validator ## Minimum online validator requirement -The data availability construction guarantees that reconstruction is possible if 75% of all samples are available. In this case, at least 50% of all rows and 50% of all columns are independently available. In practice, it is likely that some supernodes will centrally collect all samples and fill in any gaps. However, we want to build a system that reliably reconstructs even absent all supernodes. Any row or column with 50% of samples will easily be reconstructed even with only 100s of validators online; so the only question is how we get to 50% of samples for all rows and columns, when some of them might be completely unseeded. - -Each validator will transfer 4 samples between rows and columns where there is overlap. Without loss of generality, look at row 0. Each validator has 1/128 chance of having a sample in this row, and we need 256 samples to reconstruct it. So we expect that we need ~256 * 128 = 32,768 validators to have a fair chance of reconstructing it if it was completely unseeded. - -A more elaborate estimate [here](https://notes.ethereum.org/@dankrad/minimum-reconstruction-validators) needs about 55,000 validators to be online for high safety that each row and column will be reconstructed. +The data availability construction guarantees that reconstruction is possible if +75% of all samples are available. In this case, at least 50% of all rows and 50% +of all columns are independently available. In practice, it is likely that some +supernodes will centrally collect all samples and fill in any gaps. However, we +want to build a system that reliably reconstructs even absent all supernodes. +Any row or column with 50% of samples will easily be reconstructed even with +only 100s of validators online; so the only question is how we get to 50% of +samples for all rows and columns, when some of them might be completely +unseeded. + +Each validator will transfer 4 samples between rows and columns where there is +overlap. Without loss of generality, look at row 0. Each validator has 1/128 +chance of having a sample in this row, and we need 256 samples to reconstruct +it. So we expect that we need ~256 * 128 = 32,768 validators to have a fair +chance of reconstructing it if it was completely unseeded. 
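+
+The back-of-the-envelope expectation above can be checked directly. The
+constants below are the ones quoted in this section; everything else is
+illustrative:
+
+```python
+SAMPLES_NEEDED_PER_ROW = 256   # samples required to reconstruct one row
+ROW_HIT_PROBABILITY = 1 / 128  # chance a given validator holds a sample of it
+
+# Expected number of online validators so that, on average, enough samples of a
+# completely unseeded row are transferred from columns: 256 * 128 = 32,768.
+expected_validators = SAMPLES_NEEDED_PER_ROW / ROW_HIT_PROBABILITY
+assert expected_validators == 32768
+
+# Being exactly at the expectation only gives roughly even odds, which is why a
+# safety margin on top of this number is needed in practice.
+```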
+ +A more elaborate estimate +[here](https://notes.ethereum.org/@dankrad/minimum-reconstruction-validators) +needs about 55,000 validators to be online for high safety that each row and +column will be reconstructed. diff --git a/specs/_features/eip6800/beacon-chain.md b/specs/_features/eip6800/beacon-chain.md index b08f198a09..f4888d99c6 100644 --- a/specs/_features/eip6800/beacon-chain.md +++ b/specs/_features/eip6800/beacon-chain.md @@ -28,7 +28,8 @@ ## Introduction -This upgrade adds transaction execution to the beacon chain as part of the eip6800 upgrade. +This upgrade adds transaction execution to the beacon chain as part of the +eip6800 upgrade. ## Custom types diff --git a/specs/_features/eip6800/fork.md b/specs/_features/eip6800/fork.md index 28e4772a09..390ebe34e2 100644 --- a/specs/_features/eip6800/fork.md +++ b/specs/_features/eip6800/fork.md @@ -58,16 +58,24 @@ def compute_fork_version(epoch: Epoch) -> Version: The fork is triggered at epoch `EIP6800_FORK_EPOCH`. -Note that for the pure eip6800 networks, we don't apply `upgrade_to_eip6800` since it starts with the eip6800 version logic. +Note that for the pure eip6800 networks, we don't apply `upgrade_to_eip6800` +since it starts with the eip6800 version logic. ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6800_FORK_EPOCH`, -an irregular state change is made to upgrade to eip6800. - -The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `EIP6800_FORK_EPOCH * SLOTS_PER_EPOCH`. -Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document. -In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead, the logic must be within `process_slots`. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == EIP6800_FORK_EPOCH`, an irregular state +change is made to upgrade to eip6800. + +The upgrade occurs after the completion of the inner loop of `process_slots` +that sets `state.slot` equal to `EIP6800_FORK_EPOCH * SLOTS_PER_EPOCH`. Care +must be taken when transitioning through the fork boundary as implementations +will need a modified +[state transition function](../../phase0/beacon-chain.md#beacon-chain-state-transition-function) +that deviates from the Phase 0 document. In particular, the outer +`state_transition` function defined in the Phase 0 document will not expose the +precise fork slot to execute the upgrade in the presence of skipped slots at the +fork boundary. Instead, the logic must be within `process_slots`. ```python def upgrade_to_eip6800(pre: deneb.BeaconState) -> BeaconState: diff --git a/specs/_features/eip6914/beacon-chain.md b/specs/_features/eip6914/beacon-chain.md index b4f9376bc3..8738855fc7 100644 --- a/specs/_features/eip6914/beacon-chain.md +++ b/specs/_features/eip6914/beacon-chain.md @@ -18,9 +18,12 @@ ## Introduction -This is the beacon chain specification to assign new deposits to existing validator records. Refers to [EIP-6914](https://github.com/ethereum/EIPs/pull/6914). +This is the beacon chain specification to assign new deposits to existing +validator records. Refers to +[EIP-6914](https://github.com/ethereum/EIPs/pull/6914). 
-*Note*: This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development. +*Note*: This specification is built upon +[Capella](../../capella/beacon-chain.md) and is under active development. ## Preset diff --git a/specs/_features/eip6914/fork-choice.md b/specs/_features/eip6914/fork-choice.md index 157c97f998..ebe9fa6819 100644 --- a/specs/_features/eip6914/fork-choice.md +++ b/specs/_features/eip6914/fork-choice.md @@ -19,9 +19,14 @@ This is the modification of the fork choice according to EIP-6914. A new handler is added with this upgrade: -- `on_reused_index(store, index)` whenever a validator index `index: ValidatorIndex` is reused. That is, [`get_index_for_new_validator()`](./beacon-chain.md#get_index_for_new_validator) provides an index due to a return value of `True` from [`is_reusable_validator()`](./beacon-chain.md#is_reusable_validator). - -This new handler is used to update the list of equivocating indices to be synchronized with the canonical chain. +- `on_reused_index(store, index)` whenever a validator index + `index: ValidatorIndex` is reused. That is, + [`get_index_for_new_validator()`](./beacon-chain.md#get_index_for_new_validator) + provides an index due to a return value of `True` from + [`is_reusable_validator()`](./beacon-chain.md#is_reusable_validator). + +This new handler is used to update the list of equivocating indices to be +synchronized with the canonical chain. ### Handlers diff --git a/specs/_features/eip7441/beacon-chain.md b/specs/_features/eip7441/beacon-chain.md index 3cf7412ba5..cd2eb4e674 100644 --- a/specs/_features/eip7441/beacon-chain.md +++ b/specs/_features/eip7441/beacon-chain.md @@ -26,9 +26,11 @@ ## Introduction -This document details the beacon chain additions and changes of to support the EIP-7441 (Whisk SSLE). +This document details the beacon chain additions and changes of to support the +EIP-7441 (Whisk SSLE). -*Note*: This specification is built upon [capella](../../capella/beacon-chain.md) and is under active development. +*Note*: This specification is built upon +[capella](../../capella/beacon-chain.md) and is under active development. ## Constants @@ -69,7 +71,8 @@ This document details the beacon chain additions and changes of to support the E | `WhiskShuffleProof` | `ByteList[MAX_SHUFFLE_PROOF_SIZE]` | Serialized shuffle proof | | `WhiskTrackerProof` | `ByteList[MAX_OPENING_PROOF_SIZE]` | Serialized tracker proof | -*Note*: A subgroup check MUST be performed when deserializing a `BLSG1Point` for use in any of the functions below. +*Note*: A subgroup check MUST be performed when deserializing a `BLSG1Point` for +use in any of the functions below. ```python def BLSG1ScalarMultiply(scalar: BLSFieldElement, point: BLSG1Point) -> BLSG1Point: @@ -94,7 +97,10 @@ def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement: ### Curdleproofs and opening proofs -Note that Curdleproofs (Whisk Shuffle Proofs), the tracker opening proofs and all related data structures and verifier code (along with tests) is specified in [curdleproofs.pie](https://github.com/nalinbhardwaj/curdleproofs.pie/tree/dev) repository. +Note that Curdleproofs (Whisk Shuffle Proofs), the tracker opening proofs and +all related data structures and verifier code (along with tests) is specified in +[curdleproofs.pie](https://github.com/nalinbhardwaj/curdleproofs.pie/tree/dev) +repository. 
```python def IsValidWhiskShuffleProof(pre_shuffle_trackers: Sequence[WhiskTracker], @@ -244,7 +250,8 @@ def process_whisk_opening_proof(state: BeaconState, block: BeaconBlock) -> None: assert IsValidWhiskOpeningProof(tracker, k_commitment, block.body.whisk_opening_proof) ``` -Removed `assert block.proposer_index == get_beacon_proposer_index(state)` check in Whisk. +Removed `assert block.proposer_index == get_beacon_proposer_index(state)` check +in Whisk. ```python def process_block_header(state: BeaconState, block: BeaconBlock) -> None: diff --git a/specs/_features/eip7441/fork.md b/specs/_features/eip7441/fork.md index e951c8cf82..7905061a36 100644 --- a/specs/_features/eip7441/fork.md +++ b/specs/_features/eip7441/fork.md @@ -40,11 +40,17 @@ Warning: this configuration is not definitive. ## Fork to EIP-7441 -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP7441_FORK_EPOCH`, an irregular state change is made to upgrade to Whisk. `EIP7441_FORK_EPOCH` must be a multiple of `RUN_DURATION_IN_EPOCHS`. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == EIP7441_FORK_EPOCH`, an irregular state +change is made to upgrade to Whisk. `EIP7441_FORK_EPOCH` must be a multiple of +`RUN_DURATION_IN_EPOCHS`. -The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `EIP7441_FORK_EPOCH * SLOTS_PER_EPOCH`. +The upgrade occurs after the completion of the inner loop of `process_slots` +that sets `state.slot` equal to `EIP7441_FORK_EPOCH * SLOTS_PER_EPOCH`. -This ensures that we drop right into the beginning of the shuffling phase but without `process_whisk_epoch()` triggering for this Whisk run. Hence we handle all the setup ourselves in `upgrade_to_whisk()` below. +This ensures that we drop right into the beginning of the shuffling phase but +without `process_whisk_epoch()` triggering for this Whisk run. Hence we handle +all the setup ourselves in `upgrade_to_whisk()` below. ```python def upgrade_to_eip7441(pre: capella.BeaconState) -> BeaconState: diff --git a/specs/_features/eip7732/beacon-chain.md b/specs/_features/eip7732/beacon-chain.md index e444b23696..cb0926bc4d 100644 --- a/specs/_features/eip7732/beacon-chain.md +++ b/specs/_features/eip7732/beacon-chain.md @@ -60,17 +60,33 @@ ## Introduction -This is the beacon chain specification of the enshrined proposer builder separation feature. - -*Note*: This specification is built upon [Electra](../../electra/beacon-chain.md) and is under active development. - -This feature adds new staked consensus participants called *Builders* and new honest validators duties called *payload timeliness attestations*. The slot is divided in **four** intervals. Honest validators gather *signed bids* (a `SignedExecutionPayloadHeader`) from builders and submit their consensus blocks (a `SignedBeaconBlock`) including these bids at the beginning of the slot. At the start of the second interval, honest validators submit attestations just as they do previous to this feature). At the start of the third interval, aggregators aggregate these attestations and the builder broadcasts either a full payload or a message indicating that they are withholding the payload (a `SignedExecutionPayloadEnvelope`). At the start of the fourth interval, some validators selected to be members of the new **Payload Timeliness Committee** (PTC) attest to the presence and timeliness of the builder's payload. 
+This is the beacon chain specification of the enshrined proposer builder
+separation feature.
+
+*Note*: This specification is built upon
+[Electra](../../electra/beacon-chain.md) and is under active development.
+
+This feature adds new staked consensus participants called *Builders* and new
+honest validator duties called *payload timeliness attestations*. The slot is
+divided into **four** intervals. Honest validators gather *signed bids* (a
+`SignedExecutionPayloadHeader`) from builders and submit their consensus blocks
+(a `SignedBeaconBlock`) including these bids at the beginning of the slot. At
+the start of the second interval, honest validators submit attestations just as
+they do prior to this feature. At the start of the third interval, aggregators
+aggregate these attestations and the builder broadcasts either a full payload
+or a message indicating that they are withholding the payload (a
+`SignedExecutionPayloadEnvelope`). At the start of the fourth interval, some
+validators selected to be members of the new **Payload Timeliness Committee**
+(PTC) attest to the presence and timeliness of the builder's payload.
 
 At any given slot, the status of the blockchain's head may be either
 
-- A block from a previous slot (e.g. the current slot's proposer did not submit its block).
-- An *empty* block from the current slot (e.g. the proposer submitted a timely block, but the builder did not reveal the payload on time).
-- A full block for the current slot (both the proposer and the builder revealed on time).
+- A block from a previous slot (e.g. the current slot's proposer did not submit
+  its block).
+- An *empty* block from the current slot (e.g. the proposer submitted a timely
+  block, but the builder did not reveal the payload on time).
+- A full block for the current slot (both the proposer and the builder revealed
+  on time).
 
 ## Constants
 
@@ -177,7 +193,11 @@ class SignedExecutionPayloadEnvelope(Container):
 
 #### `BeaconBlockBody`
 
-*Note*: The Beacon Block body is modified to contain a `Signed ExecutionPayloadHeader`. The containers `BeaconBlock` and `SignedBeaconBlock` are modified indirectly. The field `execution_requests` is removed from the beacon block body and moved into the signed execution payload envelope.
+*Note*: The Beacon Block body is modified to contain a
+`SignedExecutionPayloadHeader`. The containers `BeaconBlock` and
+`SignedBeaconBlock` are modified indirectly. The field `execution_requests` is
+removed from the beacon block body and moved into the signed execution payload
+envelope.
 
 ```python
 class BeaconBlockBody(Container):
@@ -203,7 +223,9 @@ class BeaconBlockBody(Container):
 
 #### `ExecutionPayloadHeader`
 
-*Note*: The `ExecutionPayloadHeader` is modified to only contain the block hash of the committed `ExecutionPayload` in addition to the builder's payment information, gas limit and KZG commitments root to verify the inclusion proofs.
+*Note*: The `ExecutionPayloadHeader` is modified to only contain the block hash
+of the committed `ExecutionPayload` in addition to the builder's payment
+information, gas limit and KZG commitments root to verify the inclusion proofs.
 
 ```python
 class ExecutionPayloadHeader(Container):
@@ -219,7 +241,12 @@ class ExecutionPayloadHeader(Container):
 
 #### `BeaconState`
 
-*Note*: The `BeaconState` is modified to track the last withdrawals honored in the CL. The `latest_execution_payload_header` is modified semantically to refer not to a past committed `ExecutionPayload` but instead it corresponds to the state's slot builder's bid.
Another addition is to track the last committed block hash and the last slot that was full, that is in which there were both consensus and execution blocks included. +*Note*: The `BeaconState` is modified to track the last withdrawals honored in +the CL. The `latest_execution_payload_header` is modified semantically to refer +not to a past committed `ExecutionPayload` but instead it corresponds to the +state's slot builder's bid. Another addition is to track the last committed +block hash and the last slot that was full, that is in which there were both +consensus and execution blocks included. ```python class BeaconState(Container): @@ -335,7 +362,10 @@ def is_valid_indexed_payload_attestation( #### `is_parent_block_full` -This function returns true if the last committed payload header was fulfilled with a payload, this can only happen when both beacon block and payload were present. This function must be called on a beacon state before processing the execution payload header in the block. +This function returns true if the last committed payload header was fulfilled +with a payload, this can only happen when both beacon block and payload were +present. This function must be called on a beacon state before processing the +execution payload header in the block. ```python def is_parent_block_full(state: BeaconState) -> bool: @@ -418,11 +448,22 @@ def get_indexed_payload_attestation(state: BeaconState, slot: Slot, ## Beacon chain state transition function -*Note*: state transition is fundamentally modified in EIP-7732. The full state transition is broken in two parts, first importing a signed block and then importing an execution payload. +*Note*: state transition is fundamentally modified in EIP-7732. The full state +transition is broken in two parts, first importing a signed block and then +importing an execution payload. -The post-state corresponding to a pre-state `state` and a signed beacon block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid. +The post-state corresponding to a pre-state `state` and a signed beacon block +`signed_block` is defined as `state_transition(state, signed_block)`. State +transitions that trigger an unhandled exception (e.g. a failed `assert` or an +out-of-range list access) are considered invalid. State transitions that cause a +`uint64` overflow or underflow are also considered invalid. -The post-state corresponding to a pre-state `state` and a signed execution payload envelope `signed_envelope` is defined as `process_execution_payload(state, signed_envelope)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause an `uint64` overflow or underflow are also considered invalid. +The post-state corresponding to a pre-state `state` and a signed execution +payload envelope `signed_envelope` is defined as +`process_execution_payload(state, signed_envelope)`. State transitions that +trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list +access) are considered invalid. State transitions that cause an `uint64` +overflow or underflow are also considered invalid. 
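+
+As a non-normative sketch, the two parts compose as follows when a node imports
+a slot for which both the consensus block and the builder's revealed payload are
+known. The wrapper name `import_block_and_payload` is illustrative only and not
+part of the specification.
+
+```python
+def import_block_and_payload(state: BeaconState,
+                             signed_block: SignedBeaconBlock,
+                             signed_envelope: SignedExecutionPayloadEnvelope) -> None:
+    # Part 1: import the signed beacon block, which commits to the builder's
+    # bid (a `SignedExecutionPayloadHeader`) but carries no execution payload.
+    state_transition(state, signed_block)
+    # Part 2: independently import the builder's signed execution payload
+    # envelope once it is revealed.
+    process_execution_payload(state, signed_envelope)
+```
+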
### Block processing @@ -442,7 +483,12 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ##### Modified `process_withdrawals` -*Note*: This is modified to take only the `state` as parameter. Withdrawals are deterministic given the beacon state, any execution payload that has the corresponding block as parent beacon block is required to honor these withdrawals in the execution layer. This function must be called before `process_execution_payload_header` as this latter function affects validator balances. +*Note*: This is modified to take only the `state` as parameter. Withdrawals are +deterministic given the beacon state, any execution payload that has the +corresponding block as parent beacon block is required to honor these +withdrawals in the execution layer. This function must be called before +`process_execution_payload_header` as this latter function affects validator +balances. ```python def process_withdrawals(state: BeaconState) -> None: @@ -601,7 +647,8 @@ def process_payload_attestation(state: BeaconState, payload_attestation: Payload #### Modified `is_merge_transition_complete` -`is_merge_transition_complete` is modified only for testing purposes to add the blob kzg commitments root for an empty list +`is_merge_transition_complete` is modified only for testing purposes to add the +blob kzg commitments root for an empty list ```python def is_merge_transition_complete(state: BeaconState) -> bool: @@ -614,7 +661,8 @@ def is_merge_transition_complete(state: BeaconState) -> bool: #### Modified `validate_merge_block` -`validate_merge_block` is modified to use the new `signed_execution_payload_header` message in the Beacon Block Body +`validate_merge_block` is modified to use the new +`signed_execution_payload_header` message in the Beacon Block Body ```python def validate_merge_block(block: BeaconBlock) -> None: @@ -656,7 +704,9 @@ def verify_execution_payload_envelope_signature( #### New `process_execution_payload` -*Note*: `process_execution_payload` is now an independent check in state transition. It is called when importing a signed execution payload proposed by the builder of the current slot. +*Note*: `process_execution_payload` is now an independent check in state +transition. It is called when importing a signed execution payload proposed by +the builder of the current slot. ```python def process_execution_payload(state: BeaconState, @@ -731,7 +781,8 @@ def process_execution_payload(state: BeaconState, ### Modified `is_merge_transition_complete` -The function `is_merge_transition_complete` is modified for test purposes only to include the hash tree root of the empty KZG commitment list +The function `is_merge_transition_complete` is modified for test purposes only +to include the hash tree root of the empty KZG commitment list ```python def is_merge_transition_complete(state: BeaconState) -> bool: diff --git a/specs/_features/eip7732/builder.md b/specs/_features/eip7732/builder.md index d74e51a81f..c54ca6aa66 100644 --- a/specs/_features/eip7732/builder.md +++ b/specs/_features/eip7732/builder.md @@ -15,31 +15,58 @@ ## Introduction -This is an accompanying document which describes the expected actions of a "builder" participating in the Ethereum proof-of-stake protocol. +This is an accompanying document which describes the expected actions of a +"builder" participating in the Ethereum proof-of-stake protocol. -With the EIP-7732 Fork, the protocol includes new staked participants of the protocol called *Builders*. 
While Builders are a subset of the validator set, they have extra attributions that are optional. Validators may opt to not be builders and as such we collect the set of guidelines for those validators that want to act as builders in this document. +With the EIP-7732 Fork, the protocol includes new staked participants of the +protocol called *Builders*. While Builders are a subset of the validator set, +they have extra attributions that are optional. Validators may opt to not be +builders and as such we collect the set of guidelines for those validators that +want to act as builders in this document. ## Builders attributions -Builders can submit bids to produce execution payloads. They can broadcast these bids in the form of `SignedExecutionPayloadHeader` objects, these objects encode a commitment to reveal an execution payload in exchange for a payment. When their bids are chosen by the corresponding proposer, builders are expected to broadcast an accompanying `SignedExecutionPayloadEnvelope` object honoring the commitment. +Builders can submit bids to produce execution payloads. They can broadcast these +bids in the form of `SignedExecutionPayloadHeader` objects, these objects encode +a commitment to reveal an execution payload in exchange for a payment. When +their bids are chosen by the corresponding proposer, builders are expected to +broadcast an accompanying `SignedExecutionPayloadEnvelope` object honoring the +commitment. -Thus, builders tasks are divided in two, submitting bids, and submitting payloads. +Thus, builders tasks are divided in two, submitting bids, and submitting +payloads. ### Constructing the payload bid -Builders can broadcast a payload bid for the current or the next slot's proposer to include. They produce a `SignedExecutionPayloadHeader` as follows. - -1. Set `header.parent_block_hash` to the current head of the execution chain (this can be obtained from the beacon state as `state.last_block_hash`). -2. Set `header.parent_block_root` to be the head of the consensus chain (this can be obtained from the beacon state as `hash_tree_root(state.latest_block_header)`. The `parent_block_root` and `parent_block_hash` must be compatible, in the sense that they both should come from the same `state` by the method described in this and the previous point. -3. Construct an execution payload. This can be performed with an external execution engine with a call to `engine_getPayloadV4`. -4. Set `header.block_hash` to be the block hash of the constructed payload, that is `payload.block_hash`. -5. Set `header.gas_limit` to be the gas limit of the constructed payload, that is `payload.gas_limit`. -6. Set `header.builder_index` to be the validator index of the builder performing these actions. -7. Set `header.slot` to be the slot for which this bid is aimed. This slot **MUST** be either the current slot or the next slot. -8. Set `header.value` to be the value that the builder will pay the proposer if the bid is accepted. The builder **MUST** have balance enough to fulfill this bid. -9. Set `header.kzg_commitments_root` to be the `hash_tree_root` of the `blobsbundle.commitments` field returned by `engine_getPayloadV4`. - -After building the `header`, the builder obtains a `signature` of the header by using +Builders can broadcast a payload bid for the current or the next slot's proposer +to include. They produce a `SignedExecutionPayloadHeader` as follows. + +1. 
Set `header.parent_block_hash` to the current head of the execution chain + (this can be obtained from the beacon state as `state.last_block_hash`). +2. Set `header.parent_block_root` to be the head of the consensus chain (this + can be obtained from the beacon state as + `hash_tree_root(state.latest_block_header)`. The `parent_block_root` and + `parent_block_hash` must be compatible, in the sense that they both should + come from the same `state` by the method described in this and the previous + point. +3. Construct an execution payload. This can be performed with an external + execution engine with a call to `engine_getPayloadV4`. +4. Set `header.block_hash` to be the block hash of the constructed payload, that + is `payload.block_hash`. +5. Set `header.gas_limit` to be the gas limit of the constructed payload, that + is `payload.gas_limit`. +6. Set `header.builder_index` to be the validator index of the builder + performing these actions. +7. Set `header.slot` to be the slot for which this bid is aimed. This slot + **MUST** be either the current slot or the next slot. +8. Set `header.value` to be the value that the builder will pay the proposer if + the bid is accepted. The builder **MUST** have balance enough to fulfill this + bid. +9. Set `header.kzg_commitments_root` to be the `hash_tree_root` of the + `blobsbundle.commitments` field returned by `engine_getPayloadV4`. + +After building the `header`, the builder obtains a `signature` of the header by +using ```python def get_execution_payload_header_signature( @@ -49,13 +76,19 @@ def get_execution_payload_header_signature( return bls.Sign(privkey, signing_root) ``` -The builder assembles then `signed_execution_payload_header = SignedExecutionPayloadHeader(message=header, signature=signature)` and broadcasts it on the `execution_payload_header` global gossip topic. +The builder assembles then +`signed_execution_payload_header = SignedExecutionPayloadHeader(message=header, signature=signature)` +and broadcasts it on the `execution_payload_header` global gossip topic. ### Constructing the `BlobSidecar`s [Modified in EIP-7732] -The `BlobSidecar` container is modified indirectly because the constant `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified. The function `get_blob_sidecars` is modified because the KZG commitments are no longer included in the beacon block but rather in the `ExecutionPayloadEnvelope`, the builder has to send the commitments as parameters to this function. +The `BlobSidecar` container is modified indirectly because the constant +`KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified. The function +`get_blob_sidecars` is modified because the KZG commitments are no longer +included in the beacon block but rather in the `ExecutionPayloadEnvelope`, the +builder has to send the commitments as parameters to this function. ```python def get_blob_sidecars(signed_block: SignedBeaconBlock, @@ -101,17 +134,30 @@ def get_blob_sidecars(signed_block: SignedBeaconBlock, ### Constructing the execution payload envelope -When the proposer publishes a valid `SignedBeaconBlock` containing a signed commitment by the builder, the builder is later expected to broadcast the corresponding `SignedExecutionPayloadEnvelope` that fulfills this commitment. See below for a special case of an *honestly withheld payload*. - -To construct the `execution_payload_envelope` the builder must perform the following steps, we alias `header` to be the committed `ExecutionPayloadHeader` in the beacon block. - -1. 
Set the `payload` field to be the `ExecutionPayload` constructed when creating the corresponding bid. This payload **MUST** have the same block hash as `header.block_hash`. -2. Set the `builder_index` field to be the validator index of the builder performing these steps. This field **MUST** be `header.builder_index`. -3. Set `beacon_block_root` to be the `hash_tree_root` of the corresponding beacon block. -4. Set `blob_kzg_commitments` to be the `commitments` field of the blobs bundle constructed when constructing the bid. This field **MUST** have a `hash_tree_root` equal to `header.blob_kzg_commitments_root`. +When the proposer publishes a valid `SignedBeaconBlock` containing a signed +commitment by the builder, the builder is later expected to broadcast the +corresponding `SignedExecutionPayloadEnvelope` that fulfills this commitment. +See below for a special case of an *honestly withheld payload*. + +To construct the `execution_payload_envelope` the builder must perform the +following steps, we alias `header` to be the committed `ExecutionPayloadHeader` +in the beacon block. + +1. Set the `payload` field to be the `ExecutionPayload` constructed when + creating the corresponding bid. This payload **MUST** have the same block + hash as `header.block_hash`. +2. Set the `builder_index` field to be the validator index of the builder + performing these steps. This field **MUST** be `header.builder_index`. +3. Set `beacon_block_root` to be the `hash_tree_root` of the corresponding + beacon block. +4. Set `blob_kzg_commitments` to be the `commitments` field of the blobs bundle + constructed when constructing the bid. This field **MUST** have a + `hash_tree_root` equal to `header.blob_kzg_commitments_root`. 5. Set `payload_withheld` to `False`. -After setting these parameters, the builder should run `process_execution_payload(state, signed_envelope, verify=False)` and this function should not trigger an exception. +After setting these parameters, the builder should run +`process_execution_payload(state, signed_envelope, verify=False)` and this +function should not trigger an exception. 6. Set `state_root` to `hash_tree_root(state)`. @@ -125,8 +171,19 @@ def get_execution_payload_envelope_signature( return bls.Sign(privkey, signing_root) ``` -The builder assembles then `signed_execution_payload_envelope = SignedExecutionPayloadEnvelope(message=envelope, signature=signature)` and broadcasts it on the `execution_payload` global gossip topic. +The builder assembles then +`signed_execution_payload_envelope = SignedExecutionPayloadEnvelope(message=envelope, signature=signature)` +and broadcasts it on the `execution_payload` global gossip topic. ### Honest payload withheld messages -An honest builder that has seen a `SignedBeaconBlock` referencing his signed bid, but that block was not timely and thus it is not the head of the builder's chain, may choose to withhold their execution payload. For this the builder should simply act as if it were building an empty payload, without any transactions, withdrawals, etc. The `payload.block_hash` may not be equal to `header.block_hash`. The builder may then sets `payload_withheld` to `True`. If the PTC sees this message and votes for it, validators will attribute a *withholding boost* to the builder, which would increase the forkchoice weight of the parent block, favoring it and preventing the builder from being charged for the bid by not revealing. 
+An honest builder that has seen a `SignedBeaconBlock` referencing his signed +bid, but that block was not timely and thus it is not the head of the builder's +chain, may choose to withhold their execution payload. For this the builder +should simply act as if it were building an empty payload, without any +transactions, withdrawals, etc. The `payload.block_hash` may not be equal to +`header.block_hash`. The builder may then sets `payload_withheld` to `True`. If +the PTC sees this message and votes for it, validators will attribute a +*withholding boost* to the builder, which would increase the forkchoice weight +of the parent block, favoring it and preventing the builder from being charged +for the bid by not revealing. diff --git a/specs/_features/eip7732/fork-choice.md b/specs/_features/eip7732/fork-choice.md index 4d60c84990..d0e485517e 100644 --- a/specs/_features/eip7732/fork-choice.md +++ b/specs/_features/eip7732/fork-choice.md @@ -77,7 +77,11 @@ class LatestMessage(object): ### Modified `update_latest_messages` -*Note*: the function `update_latest_messages` is updated to use the attestation slot instead of target. Notice that this function is only called on validated attestations and validators cannot attest twice in the same epoch without equivocating. Notice also that target epoch number and slot number are validated on `validate_on_attestation`. +*Note*: the function `update_latest_messages` is updated to use the attestation +slot instead of target. Notice that this function is only called on validated +attestations and validators cannot attest twice in the same epoch without +equivocating. Notice also that target epoch number and slot number are validated +on `validate_on_attestation`. ```python def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None: @@ -91,7 +95,9 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn ### Modified `Store` -*Note*: `Store` is modified to track the intermediate states of "empty" consensus blocks, that is, those consensus blocks for which the corresponding execution payload has not been revealed or has not been included on chain. +*Note*: `Store` is modified to track the intermediate states of "empty" +consensus blocks, that is, those consensus blocks for which the corresponding +execution payload has not been revealed or has not been included on chain. ```python @dataclass @@ -197,7 +203,8 @@ def is_parent_node_full(store: Store, block: BeaconBlock) -> bool: ### Modified `get_ancestor` -*Note*: `get_ancestor` is modified to return whether the chain is based on an *empty* or *full* block. +*Note*: `get_ancestor` is modified to return whether the chain is based on an +*empty* or *full* block. ```python def get_ancestor(store: Store, root: Root, slot: Slot) -> ChildNode: @@ -250,7 +257,11 @@ def is_supporting_vote(store: Store, node: ChildNode, message: LatestMessage) -> ### New `compute_proposer_boost` -This is a helper to compute the proposer boost. It applies the proposer boost to any ancestor of the proposer boost root taking into account the payload presence. There is one exception: if the requested node has the same root and slot as the block with the proposer boost root, then the proposer boost is applied to both empty and full versions of the node. +This is a helper to compute the proposer boost. It applies the proposer boost to +any ancestor of the proposer boost root taking into account the payload +presence. 
There is one exception: if the requested node has the same root and +slot as the block with the proposer boost root, then the proposer boost is +applied to both empty and full versions of the node. ```python def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei: @@ -271,7 +282,8 @@ def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) -> ### New `compute_withhold_boost` -This is a similar helper that applies for the withhold boost. In this case this always takes into account the reveal status. +This is a similar helper that applies for the withhold boost. In this case this +always takes into account the reveal status. ```python def compute_withhold_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei: @@ -291,7 +303,9 @@ def compute_withhold_boost(store: Store, state: BeaconState, node: ChildNode) -> ### New `compute_reveal_boost` -This is a similar helper to the last two, the only difference is that the reveal boost is only applied to the full version of the node when querying for the same slot as the revealed payload. +This is a similar helper to the last two, the only difference is that the reveal +boost is only applied to the full version of the node when querying for the same +slot as the revealed payload. ```python def compute_reveal_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei: @@ -310,7 +324,10 @@ def compute_reveal_boost(store: Store, state: BeaconState, node: ChildNode) -> G ### Modified `get_weight` -*Note*: `get_weight` is modified to only count votes for descending chains that support the status of a triple `Root, Slot, bool`, where the `bool` indicates if the block was full or not. `Slot` is needed for a correct implementation of `(Block, Slot)` voting. +*Note*: `get_weight` is modified to only count votes for descending chains that +support the status of a triple `Root, Slot, bool`, where the `bool` indicates if +the block was full or not. `Slot` is needed for a correct implementation of +`(Block, Slot)` voting. ```python def get_weight(store: Store, node: ChildNode) -> Gwei: @@ -336,7 +353,8 @@ def get_weight(store: Store, node: ChildNode) -> Gwei: ### Modified `get_head` -*Note*: `get_head` is a modified to use the new `get_weight` function. It returns the `ChildNode` object corresponding to the head block. +*Note*: `get_head` is a modified to use the new `get_weight` function. It +returns the `ChildNode` object corresponding to the head block. ```python def get_head(store: Store) -> ChildNode: @@ -384,7 +402,10 @@ def get_head(store: Store) -> ChildNode: ### Modified `on_block` -*Note*: The handler `on_block` is modified to consider the pre `state` of the given consensus beacon block depending not only on the parent block root, but also on the parent blockhash. In addition we delay the checking of blob data availability until the processing of the execution payload. +*Note*: The handler `on_block` is modified to consider the pre `state` of the +given consensus beacon block depending not only on the parent block root, but +also on the parent blockhash. In addition we delay the checking of blob data +availability until the processing of the execution payload. ```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: @@ -457,7 +478,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: ### New `on_execution_payload` -The handler `on_execution_payload` is called when the node receives a `SignedExecutionPayloadEnvelope` to sync. 
+The handler `on_execution_payload` is called when the node receives a +`SignedExecutionPayloadEnvelope` to sync. ```python def on_execution_payload(store: Store, signed_envelope: SignedExecutionPayloadEnvelope) -> None: diff --git a/specs/_features/eip7732/fork.md b/specs/_features/eip7732/fork.md index 1aaacf8fe4..caee77b662 100644 --- a/specs/_features/eip7732/fork.md +++ b/specs/_features/eip7732/fork.md @@ -58,12 +58,14 @@ def compute_fork_version(epoch: Epoch) -> Version: ### Fork trigger -The fork is triggered at epoch `EIP7732_FORK_EPOCH`. The EIP may be combined with other consensus-layer upgrade. +The fork is triggered at epoch `EIP7732_FORK_EPOCH`. The EIP may be combined +with other consensus-layer upgrade. ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP7732_FORK_EPOCH`, -an irregular state change is made to upgrade to EIP-7732. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == EIP7732_FORK_EPOCH`, an irregular state +change is made to upgrade to EIP-7732. ```python def upgrade_to_eip7732(pre: electra.BeaconState) -> BeaconState: diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index 1f3128915a..149eb70f26 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -33,7 +33,8 @@ This document contains the consensus-layer networking specification for EIP7732. -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. +The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. ## Modification in EIP-7732 @@ -57,7 +58,8 @@ The specification of these changes continues in the same format as the network s #### `BlobSidecar` -The `BlobSidecar` container is modified indirectly because the constant `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified. +The `BlobSidecar` container is modified indirectly because the constant +`KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified. ```python class BlobSidecar(Container): @@ -73,7 +75,9 @@ class BlobSidecar(Container): ##### Modified `verify_blob_sidecar_inclusion_proof` -`verify_blob_sidecar_inclusion_proof` is modified in EIP-7732 to account for the fact that the KZG commitments are included in the `ExecutionPayloadEnvelope` and no longer in the beacon block body. +`verify_blob_sidecar_inclusion_proof` is modified in EIP-7732 to account for the +fact that the KZG commitments are included in the `ExecutionPayloadEnvelope` and +no longer in the beacon block body. ```python def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool: @@ -100,7 +104,8 @@ def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool: ### The gossip domain: gossipsub -Some gossip meshes are upgraded in the fork of EIP-7732 to support upgraded types. +Some gossip meshes are upgraded in the fork of EIP-7732 to support upgraded +types. 
#### Topics and messages @@ -112,7 +117,8 @@ The `beacon_block` topic is updated to support the modified type | -------------- | ------------------------------------------ | | `beacon_block` | `SignedBeaconBlock` [modified in EIP-7732] | -The new topics along with the type of the `data` field of a gossipsub message are given in this table: +The new topics along with the type of the `data` field of a gossipsub message +are given in this table: | Name | Message Type | | ----------------------------- | -------------------------------------------------- | @@ -122,77 +128,125 @@ The new topics along with the type of the `data` field of a gossipsub message ar ##### Global topics -EIP-7732 introduces new global topics for execution header, execution payload and payload attestation. +EIP-7732 introduces new global topics for execution header, execution payload +and payload attestation. ###### `beacon_block` [Modified in EIP-7732] -The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in [the Beacon Chain changes](./beacon-chain.md). - -There are no new validations for this topic. However, all validations with regards to the `ExecutionPayload` are removed: - -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` -- _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot - -- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. -- If `execution_payload` verification of block's parent by an execution node is *not* complete: - - [REJECT] The block's parent (defined by `block.parent_root`) passes all validation (excluding execution node verification of the `block.body.execution_payload`). +The *type* of the payload of this topic changes to the (modified) +`SignedBeaconBlock` found in [the Beacon Chain changes](./beacon-chain.md). + +There are no new validations for this topic. However, all validations with +regards to the `ExecutionPayload` are removed: + +- _[REJECT]_ The length of KZG commitments is less than or equal to the + limitation defined in Consensus Layer -- i.e. validate that + `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` +- _[REJECT]_ The block's execution payload timestamp is correct with respect to + the slot -- i.e. + `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. +- If `execution_payload` verification of block's parent by an execution node is + *not* complete: + - [REJECT] The block's parent (defined by `block.parent_root`) passes all + validation (excluding execution node verification of the + `block.body.execution_payload`). - otherwise: - - [IGNORE] The block's parent (defined by `block.parent_root`) passes all validation (including execution node verification of the `block.body.execution_payload`). -- [REJECT] The block's parent (defined by `block.parent_root`) passes validation. - -And instead the following validations are set in place with the alias `header = signed_execution_payload_header.message`: - -- If `execution_payload` verification of block's execution payload parent by an execution node **is complete**: - - [REJECT] The block's execution payload parent (defined by `header.parent_block_hash`) passes all validation. -- [REJECT] The block's parent (defined by `block.parent_root`) passes validation. 
+ - [IGNORE] The block's parent (defined by `block.parent_root`) passes all + validation (including execution node verification of the + `block.body.execution_payload`). +- [REJECT] The block's parent (defined by `block.parent_root`) passes + validation. + +And instead the following validations are set in place with the alias +`header = signed_execution_payload_header.message`: + +- If `execution_payload` verification of block's execution payload parent by an + execution node **is complete**: + - [REJECT] The block's execution payload parent (defined by + `header.parent_block_hash`) passes all validation. +- [REJECT] The block's parent (defined by `block.parent_root`) passes + validation. ###### `execution_payload` -This topic is used to propagate execution payload messages as `SignedExecutionPayloadEnvelope`. +This topic is used to propagate execution payload messages as +`SignedExecutionPayloadEnvelope`. -The following validations MUST pass before forwarding the `signed_execution_payload_envelope` on the network, assuming the alias `envelope = signed_execution_payload_envelope.message`, `payload = payload_envelope.payload`: +The following validations MUST pass before forwarding the +`signed_execution_payload_envelope` on the network, assuming the alias +`envelope = signed_execution_payload_envelope.message`, +`payload = payload_envelope.payload`: -- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). -- _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder. +- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via + gossip or non-gossip sources) (a client MAY queue payload for processing once + the block is retrieved). +- _[IGNORE]_ The node has not seen another valid + `SignedExecutionPayloadEnvelope` for this block root from this builder. -Let `block` be the block with `envelope.beacon_block_root`. -Let `header` alias `block.body.signed_execution_payload_header.message` (notice that this can be obtained from the `state.signed_execution_payload_header`) +Let `block` be the block with `envelope.beacon_block_root`. Let `header` alias +`block.body.signed_execution_payload_header.message` (notice that this can be +obtained from the `state.signed_execution_payload_header`) - _[REJECT]_ `block` passes validation. - _[REJECT]_ `envelope.builder_index == header.builder_index` - if `envelope.payload_withheld == False` then - _[REJECT]_ `payload.block_hash == header.block_hash` -- _[REJECT]_ The builder signature, `signed_execution_payload_envelope.signature`, is valid with respect to the builder's public key. +- _[REJECT]_ The builder signature, + `signed_execution_payload_envelope.signature`, is valid with respect to the + builder's public key. ###### `payload_attestation_message` This topic is used to propagate signed payload attestation message. -The following validations MUST pass before forwarding the `payload_attestation_message` on the network, assuming the alias `data = payload_attestation_message.data`: - -- _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`. -- _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`. 
-- _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validate_index`. -- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). +The following validations MUST pass before forwarding the +`payload_attestation_message` on the network, assuming the alias +`data = payload_attestation_message.data`: + +- _[IGNORE]_ The message's slot is for the current slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`. +- _[REJECT]_ The message's payload status is a valid status, i.e. + `data.payload_status < PAYLOAD_INVALID_STATUS`. +- _[IGNORE]_ The `payload_attestation_message` is the first valid message + received from the validator with index + `payload_attestation_message.validate_index`. +- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via + gossip or non-gossip sources) (a client MAY queue attestation for processing + once the block is retrieved. Note a client might want to request payload + after). - _[REJECT]_ The message's block `data.beacon_block_root` passes validation. -- _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice. -- _[REJECT]_ The message's signature of `payload_attestation_message.signature` is valid with respect to the validator index. +- _[REJECT]_ The message's validator index is within the payload committee in + `get_ptc(state, data.slot)`. The `state` is the head state corresponding to + processing the block up to the current slot as determined by the fork choice. +- _[REJECT]_ The message's signature of `payload_attestation_message.signature` + is valid with respect to the validator index. ###### `execution_payload_header` This topic is used to propagate signed bids as `SignedExecutionPayloadHeader`. -The following validations MUST pass before forwarding the `signed_execution_payload_header` on the network, assuming the alias `header = signed_execution_payload_header.message`: - -- _[IGNORE]_ this is the first signed bid seen with a valid signature from the given builder for this slot. -- _[IGNORE]_ this bid is the highest value bid seen for the pair of the corresponding slot and the given parent block hash. -- _[REJECT]_ The signed builder bid, `header.builder_index` is a valid, active, and non-slashed builder index in state. -- _[IGNORE]_ The signed builder bid value, `header.value`, is less or equal than the builder's balance in state. i.e. `MIN_BUILDER_BALANCE + header.value < state.builder_balances[header.builder_index]`. -- _[IGNORE]_ `header.parent_block_hash` is the block hash of a known execution payload in fork choice. - \_ _[IGNORE]_ `header.parent_block_root` is the hash tree root of a known beacon block in fork choice. +The following validations MUST pass before forwarding the +`signed_execution_payload_header` on the network, assuming the alias +`header = signed_execution_payload_header.message`: + +- _[IGNORE]_ this is the first signed bid seen with a valid signature from the + given builder for this slot. +- _[IGNORE]_ this bid is the highest value bid seen for the pair of the + corresponding slot and the given parent block hash. 
+- _[REJECT]_ The signed builder bid, `header.builder_index`, is a valid,
+  active, and non-slashed builder index in state.
+- _[IGNORE]_ The signed builder bid value, `header.value`, is less than or
+  equal to the builder's balance in state, i.e.
+  `MIN_BUILDER_BALANCE + header.value < state.builder_balances[header.builder_index]`.
+- _[IGNORE]_ `header.parent_block_hash` is the block hash of a known execution
+  payload in fork choice.
+- _[IGNORE]_ `header.parent_block_root` is the hash tree root of a known beacon
+  block in fork choice.
 - _[IGNORE]_ `header.slot` is the current slot or the next slot.
-- _[REJECT]_ The builder signature, `signed_execution_payload_header_envelope.signature`, is valid with respect to the `header_envelope.builder_index`.
+- _[REJECT]_ The builder signature,
+  `signed_execution_payload_header_envelope.signature`, is valid with respect to
+  the `header_envelope.builder_index`.
 
 ### The Req/Resp domain
 
@@ -243,7 +297,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
 ##### ExecutionPayloadEnvelopesByRange v1
 
-**Protocol ID:** `/eth2/beacon_chain/req/execution_payload_envelopes_by_range/1/`
+**Protocol ID:**
+`/eth2/beacon_chain/req/execution_payload_envelopes_by_range/1/`
 
 *[New in EIP-7732]*
 
@@ -264,9 +319,13 @@ Response Content:
 )
 ```
 
-Specifications of req\\response methods are equivalent to [BeaconBlocksByRange v2](#beaconblocksbyrange-v2), with the only difference being the response content type.
+Specifications of req\\response methods are equivalent to
+[BeaconBlocksByRange v2](#beaconblocksbyrange-v2), with the only difference
+being the response content type.
 
-For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(signed_execution_payload_envelop.message.slot))` is used to select the fork namespace of the Response type.
+For each `response_chunk`, a `ForkDigest`-context based on
+`compute_fork_version(compute_epoch_at_slot(signed_execution_payload_envelope.message.slot))`
+is used to select the fork namespace of the Response type.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -280,7 +339,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
 **Protocol ID:** `/eth2/beacon_chain/req/execution_payload_envelopes_by_root/1/`
 
-The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+The `` field is calculated as
+`context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
 
@@ -304,16 +364,25 @@ Response Content:
 )
 ```
 
-Requests execution payload envelopes by `signed_execution_payload_envelope.message.block_root`. The response is a list of `SignedExecutionPayloadEnvelope` whose length is less than or equal to the number of requested execution payload envelopes. It may be less in the case that the responding peer is missing payload envelopes.
+Requests execution payload envelopes by
+`signed_execution_payload_envelope.message.block_root`. The response is a list
+of `SignedExecutionPayloadEnvelope` whose length is less than or equal to the
+number of requested execution payload envelopes. It may be less in the case that
+the responding peer is missing payload envelopes.
 
 No more than `MAX_REQUEST_PAYLOADS` may be requested at a time.
 
-ExecutionPayloadEnvelopesByRoot is primarily used to recover recent execution payload envelopes (e.g. when receiving a payload attestation with revealed status as true but never received a payload). 
+ExecutionPayloadEnvelopesByRoot is primarily used to recover recent execution +payload envelopes (e.g. when receiving a payload attestation with revealed +status as true but never received a payload). The request MUST be encoded as an SSZ-field. -The response MUST consist of zero or more `response_chunk`. Each successful `response_chunk` MUST contain a single `SignedExecutionPayloadEnvelope` payload. +The response MUST consist of zero or more `response_chunk`. Each successful +`response_chunk` MUST contain a single `SignedExecutionPayloadEnvelope` payload. -Clients MUST support requesting payload envelopes since the latest finalized epoch. +Clients MUST support requesting payload envelopes since the latest finalized +epoch. -Clients MUST respond with at least one payload envelope, if they have it. Clients MAY limit the number of payload envelopes in the response. +Clients MUST respond with at least one payload envelope, if they have it. +Clients MAY limit the number of payload envelopes in the response. diff --git a/specs/_features/eip7732/validator.md b/specs/_features/eip7732/validator.md index f35dc5cd30..356fcdfc4a 100644 --- a/specs/_features/eip7732/validator.md +++ b/specs/_features/eip7732/validator.md @@ -21,13 +21,17 @@ ## Introduction -This document represents the changes and additions to the Honest validator guide included in the EIP-7732 fork. +This document represents the changes and additions to the Honest validator guide +included in the EIP-7732 fork. ## Validator assignment -A validator may be a member of the new Payload Timeliness Committee (PTC) for a given slot. To check for PTC assignments the validator uses the helper `get_ptc_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`. +A validator may be a member of the new Payload Timeliness Committee (PTC) for a +given slot. To check for PTC assignments the validator uses the helper +`get_ptc_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`. -PTC committee selection is only stable within the context of the current and next epoch. +PTC committee selection is only stable within the context of the current and +next epoch. ```python def get_ptc_assignment( @@ -52,80 +56,131 @@ def get_ptc_assignment( [New in EIP-7732] -`get_ptc_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting their assigned PTC slot. +`get_ptc_assignment` should be called at the start of each epoch to get the +assignment for the next epoch (`current_epoch + 1`). A validator should plan for +future assignments by noting their assigned PTC slot. ## Beacon chain responsibilities All validator responsibilities remain unchanged other than the following: -- Proposers are no longer required to broadcast `BlobSidecar` objects, as this becomes a builder's duty. -- Some validators are selected per slot to become PTC members, these validators must broadcast `PayloadAttestationMessage` objects during the assigned slot before the deadline of `3 * SECONDS_PER_SLOT // INTERVALS_PER_SLOT` seconds into the slot. +- Proposers are no longer required to broadcast `BlobSidecar` objects, as this + becomes a builder's duty. +- Some validators are selected per slot to become PTC members, these validators + must broadcast `PayloadAttestationMessage` objects during the assigned slot + before the deadline of `3 * SECONDS_PER_SLOT // INTERVALS_PER_SLOT` seconds + into the slot. 
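+
+As an illustration only (not part of the spec), the hypothetical helper below
+sketches the PTC lookahead and broadcast deadline described above;
+`get_ptc_assignment` is assumed to return the assigned `Slot` or `None` when
+there is no assignment:
+
+```python
+def plan_ptc_duty(state: BeaconState, validator_index: ValidatorIndex) -> Optional[Tuple[Slot, uint64]]:
+    # At the start of each epoch, note the next epoch's PTC assignment.
+    next_epoch = Epoch(get_current_epoch(state) + 1)
+    assigned_slot = get_ptc_assignment(state, next_epoch, validator_index)
+    if assigned_slot is None:
+        return None
+    # Deadline, in seconds into the assigned slot, for broadcasting the
+    # PayloadAttestationMessage.
+    deadline = uint64(3 * SECONDS_PER_SLOT // INTERVALS_PER_SLOT)
+    return assigned_slot, deadline
+```
+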
### Attestation -Attestation duties are not changed for validators, however the attestation deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`. +Attestation duties are not changed for validators, however the attestation +deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`. ### Sync Committee participations -Sync committee duties are not changed for validators, however the submission deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`. +Sync committee duties are not changed for validators, however the submission +deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`. ### Block proposal -Validators are still expected to propose `SignedBeaconBlock` at the beginning of any slot during which `is_proposer(state, validator_index)` returns `true`. The mechanism to prepare this beacon block and related sidecars differs from previous forks as follows +Validators are still expected to propose `SignedBeaconBlock` at the beginning of +any slot during which `is_proposer(state, validator_index)` returns `true`. The +mechanism to prepare this beacon block and related sidecars differs from +previous forks as follows #### Constructing the new `signed_execution_payload_header` field in `BeaconBlockBody` -To obtain `signed_execution_payload_header`, a block proposer building a block on top of a `state` must take the following actions: +To obtain `signed_execution_payload_header`, a block proposer building a block +on top of a `state` must take the following actions: -- Listen to the `execution_payload_header` gossip global topic and save an accepted `signed_execution_payload_header` from a builder. Proposer MAY obtain these signed messages by other off-protocol means. -- The `signed_execution_payload_header` must satisfy the verification conditions found in `process_execution_payload_header`, that is +- Listen to the `execution_payload_header` gossip global topic and save an + accepted `signed_execution_payload_header` from a builder. Proposer MAY obtain + these signed messages by other off-protocol means. +- The `signed_execution_payload_header` must satisfy the verification conditions + found in `process_execution_payload_header`, that is - The header signature must be valid - The builder balance can cover the header value - The header slot is for the proposal block slot - The header parent block hash equals the state's `latest_block_hash`. - The header parent block root equals the current block's `parent_root`. -- Select one bid and set `body.signed_execution_payload_header = signed_execution_payload_header` +- Select one bid and set + `body.signed_execution_payload_header = signed_execution_payload_header` #### Constructing the new `payload_attestations` field in `BeaconBlockBody` -Up to `MAX_PAYLOAD_ATTESTATIONS`, aggregate payload attestations can be included in the block. The validator will have to +Up to `MAX_PAYLOAD_ATTESTATIONS`, aggregate payload attestations can be included +in the block. The validator will have to - Listen to the `payload_attestation_message` gossip global topic -- The payload attestations added must satisfy the verification conditions found in payload attestation gossip validation and payload attestation processing. This means +- The payload attestations added must satisfy the verification conditions found + in payload attestation gossip validation and payload attestation processing. + This means - The `data.beacon_block_root` corresponds to `block.parent_root`. 
- The slot of the parent block is exactly one slot before the proposing slot. - The signature of the payload attestation data message verifies correctly. -- The proposer needs to aggregate all payload attestations with the same data into a given `PayloadAttestation` object. For this it needs to fill the `aggregation_bits` field by using the relative position of the validator indices with respect to the PTC that is obtained from `get_ptc(state, block_slot - 1)`. -- The proposer should only include payload attestations that are consistent with the current block they are proposing. That is, if the previous block had a payload, they should only include attestations with `payload_status = PAYLOAD_PRESENT`. Proposers are penalized for attestations that are not-consistent with their view. +- The proposer needs to aggregate all payload attestations with the same data + into a given `PayloadAttestation` object. For this it needs to fill the + `aggregation_bits` field by using the relative position of the validator + indices with respect to the PTC that is obtained from + `get_ptc(state, block_slot - 1)`. +- The proposer should only include payload attestations that are consistent with + the current block they are proposing. That is, if the previous block had a + payload, they should only include attestations with + `payload_status = PAYLOAD_PRESENT`. Proposers are penalized for attestations + that are not-consistent with their view. #### Blob sidecars -The blob sidecars are no longer broadcast by the validator, and thus their construction is not necessary. This deprecates the corresponding sections from the honest validator guide in the Electra fork, moving them, albeit with some modifications, to the [honest Builder guide](./builder.md) +The blob sidecars are no longer broadcast by the validator, and thus their +construction is not necessary. This deprecates the corresponding sections from +the honest validator guide in the Electra fork, moving them, albeit with some +modifications, to the [honest Builder guide](./builder.md) ### Payload timeliness attestation -Some validators are selected to submit payload timeliness attestations. Validators should call `get_ptc_assignment` at the beginning of an epoch to be prepared to submit their PTC attestations during the next epoch. +Some validators are selected to submit payload timeliness attestations. +Validators should call `get_ptc_assignment` at the beginning of an epoch to be +prepared to submit their PTC attestations during the next epoch. -A validator should create and broadcast the `payload_attestation_message` to the global execution attestation subnet not after `SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds since the start of `slot` +A validator should create and broadcast the `payload_attestation_message` to the +global execution attestation subnet not after +`SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds since the start of `slot` #### Constructing a payload attestation -If a validator is in the payload attestation committee for the current slot (as obtained from `get_ptc_assignment` above) then the validator should prepare a `PayloadAttestationMessage` for the current slot, -according to the logic in `get_payload_attestation_message` below and broadcast it not after `SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds since the start of the slot, to the global `payload_attestation_message` pubsub topic. 
+If a validator is in the payload attestation committee for the current slot (as
+obtained from `get_ptc_assignment` above), then the validator should prepare a
+`PayloadAttestationMessage` for the current slot, according to the logic in
+`get_payload_attestation_message` below, and broadcast it no later than
+`SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds after the start of the
+slot, to the global `payload_attestation_message` pubsub topic.
 
 The validator creates `payload_attestation_message` as follows:
 
-- If the validator has not seen any beacon block for the assigned slot, do not submit a payload attestation. It will be ignored anyway.
-- Set `data.beacon_block_root` be the HTR of the beacon block seen for the assigned slot
+- If the validator has not seen any beacon block for the assigned slot, do not
+  submit a payload attestation. It will be ignored anyway.
+- Set `data.beacon_block_root` to be the HTR of the beacon block seen for the
+  assigned slot.
 - Set `data.slot` to be the assigned slot.
 - Set `data.payload_status` as follows
-  - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` and the envelope has `payload_withheld = False`, set to `PAYLOAD_PRESENT`.
-  - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` and the envelope has `payload_withheld = True`, set to `PAYLOAD_WITHHELD`.
-  - If no `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` set to `PAYLOAD_ABSENT`.
-- Set `payload_attestation_message.validator_index = validator_index` where `validator_index` is the validator chosen to submit. The private key mapping to `state.validators[validator_index].pubkey` is used to sign the payload timeliness attestation.
-- Sign the `payload_attestation_message.data` using the helper `get_payload_attestation_message_signature`.
-
-Notice that the attester only signs the `PayloadAttestationData` and not the `validator_index` field in the message. Proposers need to aggregate these attestations as described above.
+  - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block
+    `data.beacon_block_root` and the envelope has `payload_withheld = False`,
+    set to `PAYLOAD_PRESENT`.
+  - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block
+    `data.beacon_block_root` and the envelope has `payload_withheld = True`, set
+    to `PAYLOAD_WITHHELD`.
+  - If no `SignedExecutionPayloadEnvelope` has been seen referencing the block
+    `data.beacon_block_root`, set to `PAYLOAD_ABSENT`.
+- Set `payload_attestation_message.validator_index = validator_index` where
+  `validator_index` is the validator chosen to submit. The private key mapping
+  to `state.validators[validator_index].pubkey` is used to sign the payload
+  timeliness attestation.
+- Sign the `payload_attestation_message.data` using the helper
+  `get_payload_attestation_message_signature`.
+
+Notice that the attester only signs the `PayloadAttestationData` and not the
+`validator_index` field in the message. Proposers need to aggregate these
+attestations as described above.
 
 ```python
 def get_payload_attestation_message_signature(
@@ -135,4 +190,7 @@ def get_payload_attestation_message_signature(
     return bls.Sign(privkey, signing_root)
 ```
 
-**Remark** Validators do not need to check the full validity of the `ExecutionPayload` contained in within the envelope, but the checks in the [P2P guide](./p2p-interface.md) should pass for the `SignedExecutionPayloadEnvelope`. 
+**Remark** Validators do not need to check the full validity of the +`ExecutionPayload` contained in within the envelope, but the checks in the +[P2P guide](./p2p-interface.md) should pass for the +`SignedExecutionPayloadEnvelope`. diff --git a/specs/_features/eip7805/beacon-chain.md b/specs/_features/eip7805/beacon-chain.md index fced49767f..2bfa3b244a 100644 --- a/specs/_features/eip7805/beacon-chain.md +++ b/specs/_features/eip7805/beacon-chain.md @@ -29,11 +29,14 @@ ## Introduction -This is the beacon chain specification to add EIP-7805 / fork-choice enforced, committee-based inclusion list (FOCIL) mechanism to allow forced transaction inclusion. Refers to the following posts: +This is the beacon chain specification to add EIP-7805 / fork-choice enforced, +committee-based inclusion list (FOCIL) mechanism to allow forced transaction +inclusion. Refers to the following posts: - [Fork-Choice enforced Inclusion Lists (FOCIL): A simple committee-based inclusion list proposal](https://ethresear.ch/t/fork-choice-enforced-inclusion-lists-focil-a-simple-committee-based-inclusion-list-proposal/19870/1) - [FOCIL CL & EL workflow](https://ethresear.ch/t/focil-cl-el-workflow/20526) - *Note*: This specification is built upon [Electra](../../electra/beacon-chain.md) and is under active development. + *Note*: This specification is built upon + [Electra](../../electra/beacon-chain.md) and is under active development. ## Constants @@ -132,7 +135,8 @@ class NewPayloadRequest(object): ##### Modified `is_valid_block_hash` -*Note*: The function `is_valid_block_hash` is modified to include the additional `inclusion_list_transactions`. +*Note*: The function `is_valid_block_hash` is modified to include the additional +`inclusion_list_transactions`. ```python def is_valid_block_hash(self: ExecutionEngine, @@ -148,7 +152,8 @@ def is_valid_block_hash(self: ExecutionEngine, ##### Modified `notify_new_payload` -*Note*: The function `notify_new_payload` is modified to include the additional `inclusion_list_transactions`. +*Note*: The function `notify_new_payload` is modified to include the additional +`inclusion_list_transactions`. ```python def notify_new_payload(self: ExecutionEngine, @@ -168,8 +173,9 @@ def notify_new_payload(self: ExecutionEngine, ##### Modified `verify_and_notify_new_payload` -*Note*: The function `verify_and_notify_new_payload` is modified to pass the additional parameter -`inclusion_list_transactions` when calling `notify_new_payload` in EIP-7805. +*Note*: The function `verify_and_notify_new_payload` is modified to pass the +additional parameter `inclusion_list_transactions` when calling +`notify_new_payload` in EIP-7805. ```python def verify_and_notify_new_payload(self: ExecutionEngine, diff --git a/specs/_features/eip7805/fork-choice.md b/specs/_features/eip7805/fork-choice.md index 09f1870b12..951c04c8c9 100644 --- a/specs/_features/eip7805/fork-choice.md +++ b/specs/_features/eip7805/fork-choice.md @@ -8,6 +8,7 @@ - [Fork choice](#fork-choice) - [Helpers](#helpers) - [Modified `Store`](#modified-store) + - [Modified `get_forkchoice_store`](#modified-get_forkchoice_store) - [New `validate_inclusion_lists`](#new-validate_inclusion_lists) - [New `get_attester_head`](#new-get_attester_head) - [Modified `get_proposer_head`](#modified-get_proposer_head) @@ -33,7 +34,8 @@ This is the modification of the fork choice accompanying the EIP-7805 upgrade. #### Modified `Store` -*Note*: `Store` is modified to track the seen inclusion lists and inclusion list equivocators. 
+*Note*: `Store` is modified to track the seen inclusion lists and inclusion list +equivocators. ```python @dataclass @@ -58,6 +60,34 @@ class Store(object): unsatisfied_inclusion_list_blocks: Set[Root] = field(default_factory=Set) ``` +### Modified `get_forkchoice_store` + +```python +def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store: + assert anchor_block.state_root == hash_tree_root(anchor_state) + anchor_root = hash_tree_root(anchor_block) + anchor_epoch = get_current_epoch(anchor_state) + justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) + finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) + proposer_boost_root = Root() + return Store( + time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot), + genesis_time=anchor_state.genesis_time, + justified_checkpoint=justified_checkpoint, + finalized_checkpoint=finalized_checkpoint, + unrealized_justified_checkpoint=justified_checkpoint, + unrealized_finalized_checkpoint=finalized_checkpoint, + proposer_boost_root=proposer_boost_root, + equivocating_indices=set(), + blocks={anchor_root: copy(anchor_block)}, + block_states={anchor_root: copy(anchor_state)}, + checkpoint_states={justified_checkpoint: copy(anchor_state)}, + unrealized_justifications={anchor_root: justified_checkpoint}, + # [New in EIP-7805] + unsatisfied_inclusion_list_blocks=set(), + ) +``` + #### New `validate_inclusion_lists` ```python @@ -93,7 +123,8 @@ def get_attester_head(store: Store, head_root: Root) -> Root: ##### Modified `get_proposer_head` -The implementation of `get_proposer_head` is modified to also account for `store.unsatisfied_inclusion_list_blocks`. +The implementation of `get_proposer_head` is modified to also account for +`store.unsatisfied_inclusion_list_blocks`. ```python def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root: @@ -142,7 +173,8 @@ def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root: #### New `on_inclusion_list` -`on_inclusion_list` is called to import `signed_inclusion_list` to the fork choice store. +`on_inclusion_list` is called to import `signed_inclusion_list` to the fork +choice store. ```python def on_inclusion_list( diff --git a/specs/_features/eip7805/fork.md b/specs/_features/eip7805/fork.md index 894a313f06..874c9a9402 100644 --- a/specs/_features/eip7805/fork.md +++ b/specs/_features/eip7805/fork.md @@ -57,8 +57,9 @@ def compute_fork_version(epoch: Epoch) -> Version: ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP7805_FORK_EPOCH`, -an irregular state change is made to upgrade to EIP-7805. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == EIP7805_FORK_EPOCH`, an irregular state +change is made to upgrade to EIP-7805. ```python def upgrade_to_eip7805(pre: electra.BeaconState) -> BeaconState: diff --git a/specs/_features/eip7805/p2p-interface.md b/specs/_features/eip7805/p2p-interface.md index 2e196fa63a..19c274419f 100644 --- a/specs/_features/eip7805/p2p-interface.md +++ b/specs/_features/eip7805/p2p-interface.md @@ -1,6 +1,7 @@ # EIP-7805 -- Networking -This document contains the consensus-layer networking specification for EIP-7805. +This document contains the consensus-layer networking specification for +EIP-7805. 
@@ -33,7 +34,8 @@ This document contains the consensus-layer networking specification for EIP-7805 #### Topics and messages -The new topics along with the type of the `data` field of a gossipsub message are given in this table: +The new topics along with the type of the `data` field of a gossipsub message +are given in this table: | Name | Message Type | | ---------------- | --------------------- | @@ -46,15 +48,26 @@ EIP-7805 introduces a new global topic for inclusion lists. ###### `inclusion_list` This topic is used to propagate signed inclusion list as `SignedInclusionList`. -The following validations MUST pass before forwarding the `inclusion_list` on the network, assuming the alias `message = signed_inclusion_list.message`: +The following validations MUST pass before forwarding the `inclusion_list` on +the network, assuming the alias `message = signed_inclusion_list.message`: -- _[REJECT]_ The size of `message.transactions` is within upperbound `MAX_BYTES_PER_INCLUSION_LIST`. +- _[REJECT]_ The size of `message.transactions` is within upperbound + `MAX_BYTES_PER_INCLUSION_LIST`. - _[REJECT]_ The slot `message.slot` is equal to the previous or current slot. -- _[IGNORE]_ The slot `message.slot` is equal to the current slot, or it is equal to the previous slot and the current time is less than `ATTESTATION_DEADLINE` seconds into the slot. -- _[IGNORE]_ The `inclusion_list_committee` for slot `message.slot` on the current branch corresponds to `message.inclusion_list_committee_root`, as determined by `hash_tree_root(inclusion_list_committee) == message.inclusion_list_committee_root`. -- _[REJECT]_ The validator index `message.validator_index` is within the `inclusion_list_committee` corresponding to `message.inclusion_list_committee_root`. -- _[IGNORE]_ The `message` is either the first or second valid message received from the validator with index `message.validator_index`. -- _[REJECT]_ The signature of `inclusion_list.signature` is valid with respect to the validator index. +- _[IGNORE]_ The slot `message.slot` is equal to the current slot, or it is + equal to the previous slot and the current time is less than + `ATTESTATION_DEADLINE` seconds into the slot. +- _[IGNORE]_ The `inclusion_list_committee` for slot `message.slot` on the + current branch corresponds to `message.inclusion_list_committee_root`, as + determined by + `hash_tree_root(inclusion_list_committee) == message.inclusion_list_committee_root`. +- _[REJECT]_ The validator index `message.validator_index` is within the + `inclusion_list_committee` corresponding to + `message.inclusion_list_committee_root`. +- _[IGNORE]_ The `message` is either the first or second valid message received + from the validator with index `message.validator_index`. +- _[REJECT]_ The signature of `inclusion_list.signature` is valid with respect + to the validator index. 
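+
+As an illustration only (not part of the spec), the per-validator limit in
+the list above (at most two valid messages per committee member, so that an
+equivocation can still be detected and propagated) could be tracked with a
+simple counter. The hypothetical sketch below collapses the `[REJECT]` and
+`[IGNORE]` outcomes into a single accept/drop decision and omits the size,
+slot, committee root, and signature checks; `InclusionList` is the container
+from the [beacon chain changes](./beacon-chain.md):
+
+```python
+def accept_inclusion_list_message(seen_count: Dict[ValidatorIndex, int],
+                                  message: InclusionList,
+                                  inclusion_list_committee: Sequence[ValidatorIndex]) -> bool:
+    # The sender must be a member of the committee the message commits to.
+    if message.validator_index not in inclusion_list_committee:
+        return False
+    # Keep at most the first and second valid messages from this validator,
+    # so that an equivocation (two differing lists) can still be detected.
+    count = seen_count.get(message.validator_index, 0)
+    if count >= 2:
+        return False
+    seen_count[message.validator_index] = count + 1
+    return True
+```
+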
### The Req/Resp domain @@ -64,7 +77,8 @@ The following validations MUST pass before forwarding the `inclusion_list` on th **Protocol ID:** `/eth2/beacon_chain/req/inclusion_list_by_committee_indices/1/` -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: +The `` field is calculated as +`context = compute_fork_digest(fork_version, genesis_validators_root)`: diff --git a/specs/_features/eip7805/validator.md b/specs/_features/eip7805/validator.md index 6c8fe2c5d1..3718933725 100644 --- a/specs/_features/eip7805/validator.md +++ b/specs/_features/eip7805/validator.md @@ -24,15 +24,20 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement EIP-7805. +This document represents the changes to be made in the code of an "honest +validator" to implement EIP-7805. ## Prerequisites -This document is an extension of the [Electra -- Honest Validator](../../electra/validator.md) guide. -All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Electra -- Honest Validator](../../electra/validator.md) guide. All behaviors +and definitions defined in this document, and documents it extends, carry over +unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-7805](./beacon-chain.md) are requisite for this document and used throughout. -Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +updated Beacon Chain doc of [EIP-7805](./beacon-chain.md) are requisite for this +document and used throughout. Please see related Beacon Chain doc before +continuing and use them as a reference throughout. ## Configuration @@ -46,15 +51,21 @@ Please see related Beacon Chain doc before continuing and use them as a referenc ### `ExecutionEngine` -*Note*: `engine_getInclusionListV1` and `engine_updateBlockWithInclusionListV1` functions are added to the `ExecutionEngine` protocol for use as a validator. +*Note*: `engine_getInclusionListV1` and `engine_updateBlockWithInclusionListV1` +functions are added to the `ExecutionEngine` protocol for use as a validator. -The body of these function is implementation dependent. The Engine API may be used to implement it with an external execution engine. +The body of these function is implementation dependent. The Engine API may be +used to implement it with an external execution engine. ## New inclusion list committee assignment -A validator may be a member of the new Inclusion List Committee (ILC) for a given slot. To check for ILC assignments the validator uses the helper `get_inclusion_committee_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`. +A validator may be a member of the new Inclusion List Committee (ILC) for a +given slot. To check for ILC assignments the validator uses the helper +`get_inclusion_committee_assignment(state, epoch, validator_index)` where +`epoch <= next_epoch`. -Inclusion list committee selection is only stable within the context of the current and next epoch. +Inclusion list committee selection is only stable within the context of the +current and next epoch. 
 ```python
 def get_inclusion_committee_assignment(
@@ -77,34 +88,50 @@ def get_inclusion_committee_assignment(
 
 ### Lookahead
 
-`get_inclusion_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting their assigned ILC slot.
+`get_inclusion_committee_assignment` should be called at the start of each epoch
+to get the assignment for the next epoch (`current_epoch + 1`). A validator
+should plan for future assignments by noting their assigned ILC slot.
 
 ## New proposer duty
 
 ### Block proposal
 
-Proposers are still expected to propose `SignedBeaconBlock` at the beginning of any slot during which `is_proposer(state, validator_index)` returns true. The mechanism to prepare this beacon block and related sidecars differs from previous forks as follows:
+Proposers are still expected to propose `SignedBeaconBlock` at the beginning of
+any slot during which `is_proposer(state, validator_index)` returns true. The
+mechanism to prepare this beacon block and related sidecars differs from
+previous forks as follows:
 
 #### Update execution client with inclusion lists
 
-The proposer should call `engine_updateInclusionListV1` at `PROPOSER_INCLUSION_LIST_CUT_OFF` into the slot with the list of the inclusion lists that gathered up to `PROPOSER_INCLUSION_LIST_CUT_OFF`.
+The proposer should call `engine_updateBlockWithInclusionListV1` at
+`PROPOSER_INCLUSION_LIST_CUT_OFF` into the slot with the list of the inclusion
+lists that were gathered up to `PROPOSER_INCLUSION_LIST_CUT_OFF`.
 
 ## New inclusion list committee duty
 
-Some validators are selected to submit signed inclusion list. Validators should call `get_inclusion_committee_assignment` at the beginning of an epoch to be prepared to submit their inclusion list during the next epoch.
+Some validators are selected to submit a signed inclusion list. Validators
+should call `get_inclusion_committee_assignment` at the beginning of an epoch
+to be prepared to submit their inclusion list during the next epoch.
 
-A validator should create and broadcast the `signed_inclusion_list` to the global `inclusion_list` subnet by `PROPOSER_INCLUSION_LIST_CUT_OFF` seconds into the slot, unless a block for the current slot has been processed and is the head of the chain and broadcast to the network.
+A validator should create and broadcast the `signed_inclusion_list` to the
+global `inclusion_list` subnet by `PROPOSER_INCLUSION_LIST_CUT_OFF` seconds into
+the slot, unless a block for the current slot has already been processed, is
+the head of the chain, and has been broadcast to the network.
 
 #### Constructing a signed inclusion list
 
 The validator creates the `signed_inclusion_list` as follows:
 
 - First, the validator creates the `inclusion_list`.
-- Set `inclusion_list.slot` to the assigned slot returned by `get_inclusion_committee_assignment`.
+- Set `inclusion_list.slot` to the assigned slot returned by
+  `get_inclusion_committee_assignment`.
 - Set `inclusion_list.validator_index` to the validator's index.
-- Set `inclusion_list.inclusion_list_committee_root` to the hash tree root of the committee that the validator is a member of.
-- Set `inclusion_list.transactions` using the response from `engine_getInclusionListV1` from the execution layer client.
-- Sign the `inclusion_list` using the helper `get_inclusion_list_signature` and obtain the `signature`.
+- Set `inclusion_list.inclusion_list_committee_root` to the hash tree root of
+  the committee that the validator is a member of. 
+- Set `inclusion_list.transactions` using the response from + `engine_getInclusionListV1` from the execution layer client. +- Sign the `inclusion_list` using the helper `get_inclusion_list_signature` and + obtain the `signature`. - Set `signed_inclusion_list.message` to `inclusion_list`. - Set `signed_inclusion_list.signature` to `signature`. diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 4edfbeb763..31db21b316 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -108,9 +108,11 @@ Altair is the first beacon chain hard fork. Its main features are: ### Rewards and penalties -This patch updates a few configuration values to move penalty parameters closer to their final, maximum security values. +This patch updates a few configuration values to move penalty parameters closer +to their final, maximum security values. -*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout. +*Note*: The spec does *not* override previous configuration values but instead +creates new values and replaces usage throughout. | Name | Value | | ----------------------------------------- | ---------------------------------- | @@ -216,9 +218,11 @@ class SyncCommittee(Container): ### Crypto -Refer to the definitions in the [phase 0 document regarding BLS signatures](../phase0/beacon-chain.md#bls-signatures) -and the extensions defined in the [Altair BLS document](./bls.md). This specification assumes knowledge of -the functionality described in those documents. +Refer to the definitions in the +[phase 0 document regarding BLS signatures](../phase0/beacon-chain.md#bls-signatures) +and the extensions defined in the [Altair BLS document](./bls.md). This +specification assumes knowledge of the functionality described in those +documents. ### Misc @@ -291,7 +295,9 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd #### `get_next_sync_committee` -*Note*: The function `get_next_sync_committee` should only be called at sync committee period boundaries and when [upgrading state to Altair](./fork.md#upgrading-the-state). +*Note*: The function `get_next_sync_committee` should only be called at sync +committee period boundaries and when +[upgrading state to Altair](./fork.md#upgrading-the-state). ```python def get_next_sync_committee(state: BeaconState) -> SyncCommittee: @@ -313,9 +319,11 @@ def get_base_reward_per_increment(state: BeaconState) -> Gwei: #### `get_base_reward` -*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH` and the use of increment based accounting. +*Note*: The function `get_base_reward` is modified with the removal of +`BASE_REWARDS_PER_EPOCH` and the use of increment based accounting. -*Note*: On average an optimally performing validator earns one base reward per epoch. +*Note*: On average an optimally performing validator earns one base reward per +epoch. ```python def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: @@ -423,8 +431,9 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S #### Modified `slash_validator` -*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` -and use `PROPOSER_WEIGHT` when calculating the proposer reward. +*Note*: The function `slash_validator` is modified to use +`MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` and use `PROPOSER_WEIGHT` when +calculating the proposer reward. 
```python def slash_validator(state: BeaconState, @@ -464,7 +473,8 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: #### Modified `process_attestation` -*Note*: The function `process_attestation` is modified to do incentive accounting with epoch participation flags. +*Note*: The function `process_attestation` is modified to do incentive +accounting with epoch participation flags. ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: @@ -504,7 +514,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: #### Modified `add_validator_to_registry` -*Note*: The function `add_validator_to_registry` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`. +*Note*: The function `add_validator_to_registry` is modified to initialize +`inactivity_scores`, `previous_epoch_participation`, and +`current_epoch_participation`. ```python def add_validator_to_registry(state: BeaconState, @@ -573,7 +585,8 @@ def process_epoch(state: BeaconState) -> None: #### Justification and finalization -*Note*: The function `process_justification_and_finalization` is modified to adapt to the new participation records. +*Note*: The function `process_justification_and_finalization` is modified to +adapt to the new participation records. ```python def process_justification_and_finalization(state: BeaconState) -> None: @@ -612,7 +625,8 @@ def process_inactivity_updates(state: BeaconState) -> None: #### Rewards and penalties -*Note*: The function `process_rewards_and_penalties` is modified to support the incentive accounting reforms. +*Note*: The function `process_rewards_and_penalties` is modified to support the +incentive accounting reforms. ```python def process_rewards_and_penalties(state: BeaconState) -> None: @@ -630,7 +644,8 @@ def process_rewards_and_penalties(state: BeaconState) -> None: #### Slashings -*Note*: The function `process_slashings` is modified to use `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR`. +*Note*: The function `process_slashings` is modified to use +`PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR`. ```python def process_slashings(state: BeaconState) -> None: diff --git a/specs/altair/bls.md b/specs/altair/bls.md index 3abc26056b..c677cef23d 100644 --- a/specs/altair/bls.md +++ b/specs/altair/bls.md @@ -12,9 +12,11 @@ ## Introduction -A number of extensions are defined to handle BLS signatures in the Altair upgrade. +A number of extensions are defined to handle BLS signatures in the Altair +upgrade. -Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed, including type definitions. +Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed, +including type definitions. ## Constants diff --git a/specs/altair/fork.md b/specs/altair/fork.md index 6781788fbe..4da17083f8 100644 --- a/specs/altair/fork.md +++ b/specs/altair/fork.md @@ -15,7 +15,8 @@ ## Introduction -This document describes the process of the first upgrade of the beacon chain: the Altair hard fork, introducing light client support and other improvements. +This document describes the process of the first upgrade of the beacon chain: +the Altair hard fork, introducing light client support and other improvements. ## Configuration @@ -46,15 +47,24 @@ def compute_fork_version(epoch: Epoch) -> Version: The fork is triggered at epoch `ALTAIR_FORK_EPOCH`. -Note that for the pure Altair networks, we don't apply `upgrade_to_altair` since it starts with Altair version logic. 
+Note that for the pure Altair networks, we don't apply `upgrade_to_altair` since +it starts with Altair version logic. ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair. - -The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH`. -Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document. -In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state +change is made to upgrade to Altair. + +The upgrade occurs after the completion of the inner loop of `process_slots` +that sets `state.slot` equal to `ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH`. Care must +be taken when transitioning through the fork boundary as implementations will +need a modified +[state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) +that deviates from the Phase 0 document. In particular, the outer +`state_transition` function defined in the Phase 0 document will not expose the +precise fork slot to execute the upgrade in the presence of skipped slots at the +fork boundary. Instead the logic must be within `process_slots`. ```python def translate_participation(state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]) -> None: diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md index c5cb40fd39..cb4f716d40 100644 --- a/specs/altair/light-client/full-node.md +++ b/specs/altair/light-client/full-node.md @@ -16,13 +16,16 @@ ## Introduction -This document provides helper functions to enable full nodes to serve light client data. Full nodes SHOULD implement the described functionality to enable light clients to sync with the network. +This document provides helper functions to enable full nodes to serve light +client data. Full nodes SHOULD implement the described functionality to enable +light clients to sync with the network. ## Helper functions ### `compute_merkle_proof` -This function return the Merkle proof of the given SSZ object `object` at generalized index `index`. +This function return the Merkle proof of the given SSZ object `object` at +generalized index `index`. ```python def compute_merkle_proof(object: SSZObject, @@ -47,7 +50,8 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: ## Deriving light client data -Full nodes are expected to derive light client data from historic blocks and states and provide it to other clients. +Full nodes are expected to derive light client data from historic blocks and +states and provide it to other clients. 
### `create_light_client_bootstrap` @@ -74,21 +78,32 @@ def create_light_client_bootstrap(state: BeaconState, ) ``` -Full nodes SHOULD provide `LightClientBootstrap` for all finalized epoch boundary blocks in the epoch range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientBootstrap` for other blocks. +Full nodes SHOULD provide `LightClientBootstrap` for all finalized epoch +boundary blocks in the epoch range +`[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` +where `current_epoch` is defined by the current wall-clock time. Full nodes MAY +also provide `LightClientBootstrap` for other blocks. -Blocks are considered to be epoch boundary blocks if their block root can occur as part of a valid `Checkpoint`, i.e., if their slot is the initial slot of an epoch, or if all following slots through the initial slot of the next epoch are empty (no block proposed / orphaned). +Blocks are considered to be epoch boundary blocks if their block root can occur +as part of a valid `Checkpoint`, i.e., if their slot is the initial slot of an +epoch, or if all following slots through the initial slot of the next epoch are +empty (no block proposed / orphaned). -`LightClientBootstrap` is computed from the block's immediate post state (without applying empty slots). +`LightClientBootstrap` is computed from the block's immediate post state +(without applying empty slots). ### `create_light_client_update` -To form a `LightClientUpdate`, the following historical states and blocks are needed: +To form a `LightClientUpdate`, the following historical states and blocks are +needed: - `state`: the post state of any block with a post-Altair parent block - `block`: the corresponding block - `attested_state`: the post state of `attested_block` - `attested_block`: the block referred to by `block.parent_root` -- `finalized_block`: the block referred to by `attested_state.finalized_checkpoint.root`, if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally) +- `finalized_block`: the block referred to by + `attested_state.finalized_checkpoint.root`, if locally available (may be + unavailable, e.g., when using checkpoint sync, or if it was pruned locally) ```python def create_light_client_update(state: BeaconState, @@ -137,11 +152,20 @@ def create_light_client_update(state: BeaconState, return update ``` -Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to `is_better_update`) for each sync committee period covering any epochs in range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientUpdate` for other sync committee periods. - -- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot` -- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)` -- Only `LightClientUpdate` with `sync_aggregate` from blocks on the canonical chain as selected by fork choice are considered, regardless of ranking by `is_better_update`. `LightClientUpdate` referring to orphaned blocks SHOULD NOT be provided. 
+Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to +`is_better_update`) for each sync committee period covering any epochs in range +`[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` +where `current_epoch` is defined by the current wall-clock time. Full nodes MAY +also provide `LightClientUpdate` for other sync committee periods. + +- `LightClientUpdate` are assigned to sync committee periods based on their + `attested_header.beacon.slot` +- `LightClientUpdate` are only considered if + `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)` +- Only `LightClientUpdate` with `sync_aggregate` from blocks on the canonical + chain as selected by fork choice are considered, regardless of ranking by + `is_better_update`. `LightClientUpdate` referring to orphaned blocks SHOULD + NOT be provided. ### `create_light_client_finality_update` @@ -156,7 +180,13 @@ def create_light_client_finality_update(update: LightClientUpdate) -> LightClien ) ``` -Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes. If that `LightClientFinalityUpdate` does not have supermajority (> 2/3) sync committee participation, a second `LightClientFinalityUpdate` SHOULD be delivered for the same `finalized_header` once supermajority participation is obtained. +Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest +`attested_header.beacon.slot` (if multiple, highest `signature_slot`) as +selected by fork choice, and SHOULD support a push mechanism to deliver new +`LightClientFinalityUpdate` whenever `finalized_header` changes. If that +`LightClientFinalityUpdate` does not have supermajority (> 2/3) sync committee +participation, a second `LightClientFinalityUpdate` SHOULD be delivered for the +same `finalized_header` once supermajority participation is obtained. ### `create_light_client_optimistic_update` @@ -169,4 +199,7 @@ def create_light_client_optimistic_update(update: LightClientUpdate) -> LightCli ) ``` -Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientOptimisticUpdate` whenever `attested_header` changes. +Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest +`attested_header.beacon.slot` (if multiple, highest `signature_slot`) as +selected by fork choice, and SHOULD support a push mechanism to deliver new +`LightClientOptimisticUpdate` whenever `attested_header` changes. diff --git a/specs/altair/light-client/light-client.md b/specs/altair/light-client/light-client.md index d2f83773d6..9db58c7594 100644 --- a/specs/altair/light-client/light-client.md +++ b/specs/altair/light-client/light-client.md @@ -9,15 +9,49 @@ ## Introduction -This document explains how light clients MAY obtain light client data to sync with the network. +This document explains how light clients MAY obtain light client data to sync +with the network. ## Light client sync process -1. 
The light client MUST be configured out-of-band with a spec/preset (including fork schedule), with `genesis_state` (including `genesis_time` and `genesis_validators_root`), and with a trusted block root. The trusted block SHOULD be within the weak subjectivity period, and its root SHOULD be from a finalized `Checkpoint`. -2. The local clock is initialized based on the configured `genesis_time`, and the current fork digest is determined to browse for and connect to relevant light client data providers. -3. The light client fetches a [`LightClientBootstrap`](./sync-protocol.md#lightclientbootstrap) object for the configured trusted block root. The `bootstrap` object is passed to [`initialize_light_client_store`](./sync-protocol.md#initialize_light_client_store) to obtain a local [`LightClientStore`](./sync-protocol.md#lightclientstore). -4. The light client tracks the sync committee periods `finalized_period` from `store.finalized_header.beacon.slot`, `optimistic_period` from `store.optimistic_header.beacon.slot`, and `current_period` from `current_slot` based on the local clock. - 1. When `finalized_period == optimistic_period` and [`is_next_sync_committee_known`](./sync-protocol.md#is_next_sync_committee_known) indicates `False`, the light client fetches a [`LightClientUpdate`](./sync-protocol.md#lightclientupdate) for `finalized_period`. If `finalized_period == current_period`, this fetch SHOULD be scheduled at a random time before `current_period` advances. - 2. When `finalized_period + 1 < current_period`, the light client fetches a `LightClientUpdate` for each sync committee period in range `[finalized_period + 1, current_period)` (current period excluded) - 3. When `finalized_period + 1 >= current_period`, the light client keeps observing [`LightClientFinalityUpdate`](./sync-protocol.md#lightclientfinalityupdate) and [`LightClientOptimisticUpdate`](./sync-protocol.md#lightclientoptimisticupdate). Received objects are passed to [`process_light_client_finality_update`](./sync-protocol.md#process_light_client_finality_update) and [`process_light_client_optimistic_update`](./sync-protocol.md#process_light_client_optimistic_update). This ensures that `finalized_header` and `optimistic_header` reflect the latest blocks. -5. [`process_light_client_store_force_update`](./sync-protocol.md#process_light_client_store_force_update) MAY be called based on use case dependent heuristics if light client sync appears stuck. If available, falling back to an alternative syncing mechanism to cover the affected sync committee period is preferred. +1. The light client MUST be configured out-of-band with a spec/preset (including + fork schedule), with `genesis_state` (including `genesis_time` and + `genesis_validators_root`), and with a trusted block root. The trusted block + SHOULD be within the weak subjectivity period, and its root SHOULD be from a + finalized `Checkpoint`. +2. The local clock is initialized based on the configured `genesis_time`, and + the current fork digest is determined to browse for and connect to relevant + light client data providers. +3. The light client fetches a + [`LightClientBootstrap`](./sync-protocol.md#lightclientbootstrap) object for + the configured trusted block root. The `bootstrap` object is passed to + [`initialize_light_client_store`](./sync-protocol.md#initialize_light_client_store) + to obtain a local [`LightClientStore`](./sync-protocol.md#lightclientstore). +4. 
The light client tracks the sync committee periods `finalized_period` from + `store.finalized_header.beacon.slot`, `optimistic_period` from + `store.optimistic_header.beacon.slot`, and `current_period` from + `current_slot` based on the local clock. + 1. When `finalized_period == optimistic_period` and + [`is_next_sync_committee_known`](./sync-protocol.md#is_next_sync_committee_known) + indicates `False`, the light client fetches a + [`LightClientUpdate`](./sync-protocol.md#lightclientupdate) for + `finalized_period`. If `finalized_period == current_period`, this fetch + SHOULD be scheduled at a random time before `current_period` advances. + 2. When `finalized_period + 1 < current_period`, the light client fetches a + `LightClientUpdate` for each sync committee period in range + `[finalized_period + 1, current_period)` (current period excluded) + 3. When `finalized_period + 1 >= current_period`, the light client keeps + observing + [`LightClientFinalityUpdate`](./sync-protocol.md#lightclientfinalityupdate) + and + [`LightClientOptimisticUpdate`](./sync-protocol.md#lightclientoptimisticupdate). + Received objects are passed to + [`process_light_client_finality_update`](./sync-protocol.md#process_light_client_finality_update) + and + [`process_light_client_optimistic_update`](./sync-protocol.md#process_light_client_optimistic_update). + This ensures that `finalized_header` and `optimistic_header` reflect the + latest blocks. +5. [`process_light_client_store_force_update`](./sync-protocol.md#process_light_client_store_force_update) + MAY be called based on use case dependent heuristics if light client sync + appears stuck. If available, falling back to an alternative syncing mechanism + to cover the affected sync committee period is preferred. diff --git a/specs/altair/light-client/p2p-interface.md b/specs/altair/light-client/p2p-interface.md index 460ab2ebea..1ce7e5d358 100644 --- a/specs/altair/light-client/p2p-interface.md +++ b/specs/altair/light-client/p2p-interface.md @@ -24,7 +24,9 @@ ## Networking -This section extends the [networking specification for Altair](../p2p-interface.md) with additional messages, topics and data to the Req-Resp and Gossip domains. +This section extends the +[networking specification for Altair](../p2p-interface.md) with additional +messages, topics and data to the Req-Resp and Gossip domains. ### Configuration @@ -49,25 +51,44 @@ New global topics are added to provide light clients with the latest updates. ###### `light_client_finality_update` -This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`. +This topic is used to propagate the latest `LightClientFinalityUpdate` to light +clients, allowing them to keep track of the latest `finalized_header`. -The following validations MUST pass before forwarding the `finality_update` on the network. +The following validations MUST pass before forwarding the `finality_update` on +the network. -- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s, or it matches the highest previously forwarded slot and also has a `sync_aggregate` indicating supermajority (> 2/3) sync committee participation while the previously forwarded `finality_update` for that slot did not indicate supermajority -- _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. 
validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) +- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all + previously forwarded `finality_update`s, or it matches the highest previously + forwarded slot and also has a `sync_aggregate` indicating supermajority (> + 2/3) sync committee participation while the previously forwarded + `finality_update` for that slot did not indicate supermajority +- _[IGNORE]_ The `finality_update` is received after the block at + `signature_slot` was given enough time to propagate through the network -- + i.e. validate that one-third of `finality_update.signature_slot` has + transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of + the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network. +For full nodes, the following validations MUST additionally pass before +forwarding the `finality_update` on the network. -- _[IGNORE]_ The received `finality_update` matches the locally computed one exactly (as defined in [`create_light_client_finality_update`](./full-node.md#create_light_client_finality_update)) +- _[IGNORE]_ The received `finality_update` matches the locally computed one + exactly (as defined in + [`create_light_client_finality_update`](./full-node.md#create_light_client_finality_update)) -For light clients, the following validations MUST additionally pass before forwarding the `finality_update` on the network. +For light clients, the following validations MUST additionally pass before +forwarding the `finality_update` on the network. -- _[REJECT]_ The `finality_update` is valid -- i.e. validate that `process_light_client_finality_update` does not indicate errors -- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local `LightClientStore` -- i.e. validate that processing `finality_update` increases `store.finalized_header.beacon.slot` +- _[REJECT]_ The `finality_update` is valid -- i.e. validate that + `process_light_client_finality_update` does not indicate errors +- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local + `LightClientStore` -- i.e. validate that processing `finality_update` + increases `store.finalized_header.beacon.slot` -Light clients SHOULD call `process_light_client_finality_update` even if the message is ignored. +Light clients SHOULD call `process_light_client_finality_update` even if the +message is ignored. -The gossip `ForkDigestValue` is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))`. +The gossip `ForkDigestValue` is determined based on +`compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))`. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -80,25 +101,42 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: ###### `light_client_optimistic_update` -This topic is used to propagate the latest `LightClientOptimisticUpdate` to light clients, allowing them to keep track of the latest `optimistic_header`. +This topic is used to propagate the latest `LightClientOptimisticUpdate` to +light clients, allowing them to keep track of the latest `optimistic_header`. 
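The timing rule above (and its counterpart for this topic below) gates forwarding on one-third of the signature slot having transpired. A minimal sketch of that check, not part of this diff, assuming `SECONDS_PER_SLOT`, `INTERVALS_PER_SLOT`, and `MAXIMUM_GOSSIP_CLOCK_DISPARITY` (taken here to be in milliseconds) from the configuration and an externally supplied wall-clock time:

```python
# Illustrative sketch only. `MAXIMUM_GOSSIP_CLOCK_DISPARITY` is assumed to be
# expressed in milliseconds; `genesis_time` and `current_time_ms` come from
# the caller's local clock handling.
def is_past_one_third_of_slot(signature_slot: Slot,
                              genesis_time: uint64,
                              current_time_ms: uint64) -> bool:
    slot_start_s = genesis_time + signature_slot * SECONDS_PER_SLOT
    one_third_ms = (SECONDS_PER_SLOT * 1000) // INTERVALS_PER_SLOT
    # Accept messages up to the clock-disparity allowance early.
    threshold_ms = slot_start_s * 1000 + one_third_ms - MAXIMUM_GOSSIP_CLOCK_DISPARITY
    return current_time_ms >= threshold_ms
```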
-The following validations MUST pass before forwarding the `optimistic_update` on the network. +The following validations MUST pass before forwarding the `optimistic_update` on +the network. -- _[IGNORE]_ The `attested_header.beacon.slot` is greater than that of all previously forwarded `optimistic_update`s -- _[IGNORE]_ The `optimistic_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `optimistic_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) +- _[IGNORE]_ The `attested_header.beacon.slot` is greater than that of all + previously forwarded `optimistic_update`s +- _[IGNORE]_ The `optimistic_update` is received after the block at + `signature_slot` was given enough time to propagate through the network -- + i.e. validate that one-third of `optimistic_update.signature_slot` has + transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of + the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -For full nodes, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network. +For full nodes, the following validations MUST additionally pass before +forwarding the `optimistic_update` on the network. -- _[IGNORE]_ The received `optimistic_update` matches the locally computed one exactly (as defined in [`create_light_client_optimistic_update`](./full-node.md#create_light_client_optimistic_update)) +- _[IGNORE]_ The received `optimistic_update` matches the locally computed one + exactly (as defined in + [`create_light_client_optimistic_update`](./full-node.md#create_light_client_optimistic_update)) -For light clients, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network. +For light clients, the following validations MUST additionally pass before +forwarding the `optimistic_update` on the network. -- _[REJECT]_ The `optimistic_update` is valid -- i.e. validate that `process_light_client_optimistic_update` does not indicate errors -- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the most recently forwarded `LightClientFinalityUpdate` (if any), or it advances the `optimistic_header` of the local `LightClientStore` -- i.e. validate that processing `optimistic_update` increases `store.optimistic_header.beacon.slot` +- _[REJECT]_ The `optimistic_update` is valid -- i.e. validate that + `process_light_client_optimistic_update` does not indicate errors +- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the + most recently forwarded `LightClientFinalityUpdate` (if any), or it advances + the `optimistic_header` of the local `LightClientStore` -- i.e. validate that + processing `optimistic_update` increases `store.optimistic_header.beacon.slot` -Light clients SHOULD call `process_light_client_optimistic_update` even if the message is ignored. +Light clients SHOULD call `process_light_client_optimistic_update` even if the +message is ignored. -The gossip `ForkDigestValue` is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))`. +The gossip `ForkDigestValue` is determined based on +`compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))`. 
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -133,15 +171,21 @@ Response Content: ) ``` -Requests the `LightClientBootstrap` structure corresponding to a given post-Altair beacon block root. +Requests the `LightClientBootstrap` structure corresponding to a given +post-Altair beacon block root. The request MUST be encoded as an SSZ-field. -Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). To fulfill a request, the requested block and its post state need to be known. +Peers SHOULD provide results as defined in +[`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). +To fulfill a request, the requested block and its post state need to be known. -When a `LightClientBootstrap` instance cannot be produced for a given block root, peers SHOULD respond with error code `3: ResourceUnavailable`. +When a `LightClientBootstrap` instance cannot be produced for a given block +root, peers SHOULD respond with error code `3: ResourceUnavailable`. -A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(bootstrap.header.beacon.slot))` is used to select the fork namespace of the Response type. +A `ForkDigest`-context based on +`compute_fork_version(compute_epoch_at_slot(bootstrap.header.beacon.slot))` is +used to select the fork namespace of the Response type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -173,15 +217,26 @@ Response Content: ) ``` -Requests the `LightClientUpdate` instances in the sync committee period range `[start_period, start_period + count)`, leading up to the current head sync committee period as selected by fork choice. +Requests the `LightClientUpdate` instances in the sync committee period range +`[start_period, start_period + count)`, leading up to the current head sync +committee period as selected by fork choice. The request MUST be encoded as an SSZ-container. -The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `LightClientUpdate` payload. +The response MUST consist of zero or more `response_chunk`. Each _successful_ +`response_chunk` MUST contain a single `LightClientUpdate` payload. -Peers SHOULD provide results as defined in [`create_light_client_update`](./full-node.md#create_light_client_update). They MUST respond with at least the earliest known result within the requested range, and MUST send results in consecutive order (by period). The response MUST NOT contain more than `min(MAX_REQUEST_LIGHT_CLIENT_UPDATES, count)` results. +Peers SHOULD provide results as defined in +[`create_light_client_update`](./full-node.md#create_light_client_update). They +MUST respond with at least the earliest known result within the requested range, +and MUST send results in consecutive order (by period). The response MUST NOT +contain more than `min(MAX_REQUEST_LIGHT_CLIENT_UPDATES, count)` results. -For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `update.sync_aggregate`, which is based on `update.signature_slot`. +For each `response_chunk`, a `ForkDigest`-context based on +`compute_fork_version(compute_epoch_at_slot(update.attested_header.beacon.slot))` +is used to select the fork namespace of the Response type. 
Note that this +`fork_version` may be different from the one used to verify the +`update.sync_aggregate`, which is based on `update.signature_slot`. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -208,11 +263,18 @@ Response Content: Requests the latest `LightClientFinalityUpdate` known by a peer. -Peers SHOULD provide results as defined in [`create_light_client_finality_update`](./full-node.md#create_light_client_finality_update). +Peers SHOULD provide results as defined in +[`create_light_client_finality_update`](./full-node.md#create_light_client_finality_update). -When no `LightClientFinalityUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`. +When no `LightClientFinalityUpdate` is available, peers SHOULD respond with +error code `3: ResourceUnavailable`. -A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `finality_update.sync_aggregate`, which is based on `finality_update.signature_slot`. +A `ForkDigest`-context based on +`compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))` +is used to select the fork namespace of the Response type. Note that this +`fork_version` may be different from the one used to verify the +`finality_update.sync_aggregate`, which is based on +`finality_update.signature_slot`. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -239,11 +301,18 @@ Response Content: Requests the latest `LightClientOptimisticUpdate` known by a peer. -Peers SHOULD provide results as defined in [`create_light_client_optimistic_update`](./full-node.md#create_light_client_optimistic_update). +Peers SHOULD provide results as defined in +[`create_light_client_optimistic_update`](./full-node.md#create_light_client_optimistic_update). -When no `LightClientOptimisticUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`. +When no `LightClientOptimisticUpdate` is available, peers SHOULD respond with +error code `3: ResourceUnavailable`. -A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `optimistic_update.sync_aggregate`, which is based on `optimistic_update.signature_slot`. +A `ForkDigest`-context based on +`compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))` +is used to select the fork namespace of the Response type. Note that this +`fork_version` may be different from the one used to verify the +`optimistic_update.sync_aggregate`, which is based on +`optimistic_update.signature_slot`. 
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -256,25 +325,55 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: ## Light clients -Light clients using libp2p to stay in sync with the network SHOULD subscribe to the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics and validate all received messages while the [light client sync process](./light-client.md#light-client-sync-process) supports processing `LightClientFinalityUpdate` and `LightClientOptimisticUpdate` structures. - -Light clients MAY also collect historic light client data and make it available to other peers. If they do, they SHOULD advertise supported message endpoints in [the Req/Resp domain](#the-reqresp-domain), and MAY also update the contents of their [`Status`](../../phase0/p2p-interface.md#status) message to reflect the locally available light client data. - -If only limited light client data is locally available, the light client SHOULD use data based on `genesis_block` and `GENESIS_SLOT` in its `Status` message. Hybrid peers that also implement full node functionality MUST only incorporate data based on their full node sync progress into their `Status` message. +Light clients using libp2p to stay in sync with the network SHOULD subscribe to +the [`light_client_finality_update`](#light_client_finality_update) and +[`light_client_optimistic_update`](#light_client_optimistic_update) pubsub +topics and validate all received messages while the +[light client sync process](./light-client.md#light-client-sync-process) +supports processing `LightClientFinalityUpdate` and +`LightClientOptimisticUpdate` structures. + +Light clients MAY also collect historic light client data and make it available +to other peers. If they do, they SHOULD advertise supported message endpoints in +[the Req/Resp domain](#the-reqresp-domain), and MAY also update the contents of +their [`Status`](../../phase0/p2p-interface.md#status) message to reflect the +locally available light client data. + +If only limited light client data is locally available, the light client SHOULD +use data based on `genesis_block` and `GENESIS_SLOT` in its `Status` message. +Hybrid peers that also implement full node functionality MUST only incorporate +data based on their full node sync progress into their `Status` message. ## Validator assignments -This section extends the [honest validator specification](../validator.md) with additional responsibilities to enable light clients to sync with the network. +This section extends the [honest validator specification](../validator.md) with +additional responsibilities to enable light clients to sync with the network. ### Beacon chain responsibilities -All full nodes SHOULD subscribe to and provide stability on the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics by validating all received messages. +All full nodes SHOULD subscribe to and provide stability on the +[`light_client_finality_update`](#light_client_finality_update) and +[`light_client_optimistic_update`](#light_client_optimistic_update) pubsub +topics by validating all received messages. 
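The subscription requirements above resolve to concrete gossipsub topic strings. As an illustration (not part of this diff), assuming the `/eth2/ForkDigestValue/Name/Encoding` convention and `ssz_snappy` encoding from the phase 0 networking document, with the digest rendered as lowercase hex:

```python
# Illustrative sketch only. The topic-string layout and hex rendering follow
# the phase 0 networking document; `Sequence` is from `typing`.
def light_client_gossip_topics(current_slot: Slot,
                               genesis_validators_root: Root) -> Sequence[str]:
    fork_version = compute_fork_version(compute_epoch_at_slot(current_slot))
    fork_digest = compute_fork_digest(fork_version, genesis_validators_root)
    digest_hex = bytes(fork_digest).hex()  # lowercase hex, no 0x prefix
    return [
        f"/eth2/{digest_hex}/light_client_finality_update/ssz_snappy",
        f"/eth2/{digest_hex}/light_client_optimistic_update/ssz_snappy",
    ]
```

Note that when validating a received update, the digest is instead derived from `compute_epoch_at_slot(update.attested_header.beacon.slot)` as described above.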
### Sync committee -Whenever fork choice selects a new head block with a sync aggregate participation `>= MIN_SYNC_COMMITTEE_PARTICIPANTS` and a post-Altair parent block, full nodes with at least one validator assigned to the current sync committee at the block's `slot` SHOULD broadcast derived light client data as follows: - -- If `finalized_header.beacon.slot` increased, a `LightClientFinalityUpdate` SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no matching message has not yet been forwarded as part of gossip validation. -- If `attested_header.beacon.slot` increased, a `LightClientOptimisticUpdate` SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if no matching message has not yet been forwarded as part of gossip validation. - -These messages SHOULD be broadcasted after one-third of `slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot). To ensure that the corresponding block was given enough time to propagate through the network, they SHOULD NOT be sent earlier. Note that this is different from how other messages are handled, e.g., attestations, which may be sent early. +Whenever fork choice selects a new head block with a sync aggregate +participation `>= MIN_SYNC_COMMITTEE_PARTICIPANTS` and a post-Altair parent +block, full nodes with at least one validator assigned to the current sync +committee at the block's `slot` SHOULD broadcast derived light client data as +follows: + +- If `finalized_header.beacon.slot` increased, a `LightClientFinalityUpdate` + SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no + matching message has not yet been forwarded as part of gossip validation. +- If `attested_header.beacon.slot` increased, a `LightClientOptimisticUpdate` + SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if + no matching message has not yet been forwarded as part of gossip validation. + +These messages SHOULD be broadcasted after one-third of `slot` has transpired +(`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot). +To ensure that the corresponding block was given enough time to propagate +through the network, they SHOULD NOT be sent earlier. Note that this is +different from how other messages are handled, e.g., attestations, which may be +sent early. diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md index 3c754a4849..3feddcbf02 100644 --- a/specs/altair/light-client/sync-protocol.md +++ b/specs/altair/light-client/sync-protocol.md @@ -41,13 +41,15 @@ ## Introduction -The beacon chain is designed to be light client friendly for constrained environments to -access Ethereum with reasonable safety and liveness. -Such environments include resource-constrained devices (e.g. phones for trust-minimized wallets) -and metered VMs (e.g. blockchain VMs for cross-chain bridges). +The beacon chain is designed to be light client friendly for constrained +environments to access Ethereum with reasonable safety and liveness. Such +environments include resource-constrained devices (e.g. phones for +trust-minimized wallets) and metered VMs (e.g. blockchain VMs for cross-chain +bridges). This document suggests a minimal light client design for the beacon chain that -uses sync committees introduced in [this beacon chain extension](../beacon-chain.md). +uses sync committees introduced in +[this beacon chain extension](../beacon-chain.md). 
Additional documents describe how the light client sync protocol can be used: @@ -90,7 +92,9 @@ class LightClientHeader(Container): beacon: BeaconBlockHeader ``` -Future upgrades may introduce additional fields to this structure, and validate them by extending [`is_valid_light_client_header`](#is_valid_light_client_header). +Future upgrades may introduce additional fields to this structure, and validate +them by extending +[`is_valid_light_client_header`](#is_valid_light_client_header). ### `LightClientBootstrap` @@ -322,7 +326,10 @@ def compute_sync_committee_period_at_slot(slot: Slot) -> uint64: ## Light client initialization -A light client maintains its state in a `store` object of type `LightClientStore`. `initialize_light_client_store` initializes a new `store` with a received `LightClientBootstrap` derived from a given `trusted_block_root`. +A light client maintains its state in a `store` object of type +`LightClientStore`. `initialize_light_client_store` initializes a new `store` +with a received `LightClientBootstrap` derived from a given +`trusted_block_root`. ### `initialize_light_client_store` @@ -352,11 +359,19 @@ def initialize_light_client_store(trusted_block_root: Root, ## Light client state updates -- A light client receives objects of type `LightClientUpdate`, `LightClientFinalityUpdate` and `LightClientOptimisticUpdate`: - - **`update: LightClientUpdate`**: Every `update` triggers `process_light_client_update(store, update, current_slot, genesis_validators_root)` where `current_slot` is the current slot based on a local clock. - - **`finality_update: LightClientFinalityUpdate`**: Every `finality_update` triggers `process_light_client_finality_update(store, finality_update, current_slot, genesis_validators_root)`. - - **`optimistic_update: LightClientOptimisticUpdate`**: Every `optimistic_update` triggers `process_light_client_optimistic_update(store, optimistic_update, current_slot, genesis_validators_root)`. -- `process_light_client_store_force_update` MAY be called based on use case dependent heuristics if light client sync appears stuck. +- A light client receives objects of type `LightClientUpdate`, + `LightClientFinalityUpdate` and `LightClientOptimisticUpdate`: + - **`update: LightClientUpdate`**: Every `update` triggers + `process_light_client_update(store, update, current_slot, genesis_validators_root)` + where `current_slot` is the current slot based on a local clock. + - **`finality_update: LightClientFinalityUpdate`**: Every `finality_update` + triggers + `process_light_client_finality_update(store, finality_update, current_slot, genesis_validators_root)`. + - **`optimistic_update: LightClientOptimisticUpdate`**: Every + `optimistic_update` triggers + `process_light_client_optimistic_update(store, optimistic_update, current_slot, genesis_validators_root)`. +- `process_light_client_store_force_update` MAY be called based on use case + dependent heuristics if light client sync appears stuck. ### `validate_light_client_update` diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 82972e3003..35a0bc8890 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -30,17 +30,21 @@ ## Introduction -This document contains the networking specification for Altair. -This document should be viewed as additive to the [document from Phase 0](../phase0/p2p-interface.md) and will be referred to as the "Phase 0 document" hereafter. 
-Readers should understand the Phase 0 document and use it as a basis to understand the changes outlined in this document. +This document contains the networking specification for Altair. This document +should be viewed as additive to the +[document from Phase 0](../phase0/p2p-interface.md) and will be referred to as +the "Phase 0 document" hereafter. Readers should understand the Phase 0 document +and use it as a basis to understand the changes outlined in this document. -Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery domain. Some Phase 0 features will be deprecated, but not removed immediately. +Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery +domain. Some Phase 0 features will be deprecated, but not removed immediately. ## Modifications in Altair ### MetaData -The `MetaData` stored locally by clients is updated with an additional field to communicate the sync committee subnet subscriptions: +The `MetaData` stored locally by clients is updated with an additional field to +communicate the sync committee subnet subscriptions: ``` ( @@ -52,40 +56,54 @@ The `MetaData` stored locally by clients is updated with an additional field to Where -- `seq_number` and `attnets` have the same meaning defined in the Phase 0 document. -- `syncnets` is a `Bitvector` representing the node's sync committee subnet subscriptions. This field should mirror the data in the node's ENR as outlined in the [validator guide](./validator.md#sync-committee-subnet-stability). +- `seq_number` and `attnets` have the same meaning defined in the Phase 0 + document. +- `syncnets` is a `Bitvector` representing the node's sync committee subnet + subscriptions. This field should mirror the data in the node's ENR as outlined + in the [validator guide](./validator.md#sync-committee-subnet-stability). ### The gossip domain: gossipsub -Gossip meshes are added in Altair to support the consensus activities of the sync committees. -Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors. +Gossip meshes are added in Altair to support the consensus activities of the +sync committees. Validators use an aggregation scheme to balance the processing +and networking load across all of the relevant actors. #### Topics and messages -Topics follow the same specification as in the Phase 0 document. -New topics are added in Altair to support the sync committees and the beacon block topic is updated with the modified type. - -The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document. - -The derivation of the `message-id` has changed starting with Altair to incorporate the message `topic` along with the message `data`. These are fields of the `Message` Protobuf, and interpreted as empty byte strings if missing. -The `message-id` MUST be the following 20 byte value computed from the message: - -- If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of - the concatenation of the following data: `MESSAGE_DOMAIN_VALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), - the topic byte string, and the snappy decompressed message data: - i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data))[:20]`. 
-- Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of - the concatenation of the following data: `MESSAGE_DOMAIN_INVALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), - the topic byte string, and the raw message data: - i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20]`. - -Implementations may need to carefully handle the function that computes the `message-id`. In particular, messages on topics with the Phase 0 -fork digest should use the `message-id` procedure specified in the Phase 0 document. -Messages on topics with the Altair fork digest should use the `message-id` procedure defined here. -If an implementation only supports a single `message-id` function, it can define a switch inline; -for example, `if topic in phase0_topics: return phase0_msg_id_fn(message) else return altair_msg_id_fn(message)`. - -The new topics along with the type of the `data` field of a gossipsub message are given in this table: +Topics follow the same specification as in the Phase 0 document. New topics are +added in Altair to support the sync committees and the beacon block topic is +updated with the modified type. + +The specification around the creation, validation, and dissemination of messages +has not changed from the Phase 0 document. + +The derivation of the `message-id` has changed starting with Altair to +incorporate the message `topic` along with the message `data`. These are fields +of the `Message` Protobuf, and interpreted as empty byte strings if missing. The +`message-id` MUST be the following 20 byte value computed from the message: + +- If `message.data` has a valid snappy decompression, set `message-id` to the + first 20 bytes of the `SHA256` hash of the concatenation of the following + data: `MESSAGE_DOMAIN_VALID_SNAPPY`, the length of the topic byte string + (encoded as little-endian `uint64`), the topic byte string, and the snappy + decompressed message data: i.e. + `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data))[:20]`. +- Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of the + concatenation of the following data: `MESSAGE_DOMAIN_INVALID_SNAPPY`, the + length of the topic byte string (encoded as little-endian `uint64`), the topic + byte string, and the raw message data: i.e. + `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20]`. + +Implementations may need to carefully handle the function that computes the +`message-id`. In particular, messages on topics with the Phase 0 fork digest +should use the `message-id` procedure specified in the Phase 0 document. +Messages on topics with the Altair fork digest should use the `message-id` +procedure defined here. If an implementation only supports a single `message-id` +function, it can define a switch inline; for example, +`if topic in phase0_topics: return phase0_msg_id_fn(message) else return altair_msg_id_fn(message)`. 
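A direct transcription of the two `message-id` rules above, as an illustrative sketch (not part of this diff). `hash` is taken to be the SHA256 helper from the phase 0 spec, `uint_to_bytes` is the little-endian phase 0 helper, and `snappy_decompress` stands in for any snappy implementation (e.g. python-snappy's `decompress`):

```python
# Illustrative sketch only. `MESSAGE_DOMAIN_VALID_SNAPPY` and
# `MESSAGE_DOMAIN_INVALID_SNAPPY` are the 4-byte domains from the Altair
# networking configuration; `hash` is SHA256.
def compute_altair_message_id(message_topic: bytes, message_data: bytes) -> bytes:
    topic_len = uint_to_bytes(uint64(len(message_topic)))
    try:
        # Valid snappy decompression: use the "valid" domain and the
        # decompressed payload.
        decompressed = snappy_decompress(message_data)
        return hash(MESSAGE_DOMAIN_VALID_SNAPPY + topic_len + message_topic + decompressed)[:20]
    except Exception:
        # Otherwise: use the "invalid" domain and the raw payload.
        return hash(MESSAGE_DOMAIN_INVALID_SNAPPY + topic_len + message_topic + message_data)[:20]
```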
+ +The new topics along with the type of the `data` field of a gossipsub message +are given in this table: | Name | Message Type | | --------------------------------------- | ------------------------------ | @@ -93,27 +111,38 @@ The new topics along with the type of the `data` field of a gossipsub message ar | `sync_committee_contribution_and_proof` | `SignedContributionAndProof` | | `sync_committee_{subnet_id}` | `SyncCommitteeMessage` | -Definitions of these new types can be found in the [Altair validator guide](./validator.md#containers). +Definitions of these new types can be found in the +[Altair validator guide](./validator.md#containers). -Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics. +Note that the `ForkDigestValue` path segment of the topic separates the old and +the new `beacon_block` topics. ##### Global topics -Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee messages to all potential proposers of beacon blocks. +Altair changes the type of the global beacon block topic and adds one global +topic to propagate partially aggregated sync committee messages to all potential +proposers of beacon blocks. ###### `beacon_block` -The existing specification for this topic does not change from the Phase 0 document, -but the type of the payload does change to the (modified) `SignedBeaconBlock`. -This type changes due to the inclusion of the inner `BeaconBlockBody` that is modified in Altair. +The existing specification for this topic does not change from the Phase 0 +document, but the type of the payload does change to the (modified) +`SignedBeaconBlock`. This type changes due to the inclusion of the inner +`BeaconBlockBody` that is modified in Altair. -See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details. +See the [state transition document](./beacon-chain.md#beaconblockbody) for +Altair for further details. ###### `sync_committee_contribution_and_proof` -This topic is used to propagate partially aggregated sync committee messages to be included in future blocks. +This topic is used to propagate partially aggregated sync committee messages to +be included in future blocks. -The following validations MUST pass before forwarding the `signed_contribution_and_proof` on the network; define `contribution_and_proof = signed_contribution_and_proof.message`, `contribution = contribution_and_proof.contribution`, and the following function `get_sync_subcommittee_pubkeys` for convenience: +The following validations MUST pass before forwarding the +`signed_contribution_and_proof` on the network; define +`contribution_and_proof = signed_contribution_and_proof.message`, +`contribution = contribution_and_proof.contribution`, and the following function +`get_sync_subcommittee_pubkeys` for convenience: ```python def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]: @@ -131,100 +160,147 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 return sync_committee.pubkeys[i:i + sync_subcommittee_size] ``` -- _[IGNORE]_ The contribution's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `contribution.slot == current_slot`. -- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. 
-- _[REJECT]_ The contribution has participants -- - that is, `any(contribution.aggregation_bits)`. -- _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns `True`. -- _[REJECT]_ The aggregator's validator index is in the declared subcommittee of the current sync committee -- - i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`. -- _[IGNORE]_ A valid sync committee contribution with equal `slot`, `beacon_block_root` and `subcommittee_index` whose `aggregation_bits` is non-strict superset has _not_ already been seen. -- _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` - for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index` - (this requires maintaining a cache of size `SYNC_COMMITTEE_SIZE` for this topic that can be flushed after each slot). -- _[REJECT]_ The `contribution_and_proof.selection_proof` is a valid signature of the `SyncAggregatorSelectionData` derived from the `contribution` by the validator with index `contribution_and_proof.aggregator_index`. -- _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid. -- _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `contribution.subcommittee_index`. +- _[IGNORE]_ The contribution's slot is for the current slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. + `contribution.slot == current_slot`. +- _[REJECT]_ The subcommittee index is in the allowed range, i.e. + `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. +- _[REJECT]_ The contribution has participants -- that is, + `any(contribution.aggregation_bits)`. +- _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as + an aggregator for the slot -- i.e. + `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns + `True`. +- _[REJECT]_ The aggregator's validator index is in the declared subcommittee of + the current sync committee -- i.e. + `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`. +- _[IGNORE]_ A valid sync committee contribution with equal `slot`, + `beacon_block_root` and `subcommittee_index` whose `aggregation_bits` is + non-strict superset has _not_ already been seen. +- _[IGNORE]_ The sync committee contribution is the first valid contribution + received for the aggregator with index + `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and + subcommittee index `contribution.subcommittee_index` (this requires + maintaining a cache of size `SYNC_COMMITTEE_SIZE` for this topic that can be + flushed after each slot). +- _[REJECT]_ The `contribution_and_proof.selection_proof` is a valid signature + of the `SyncAggregatorSelectionData` derived from the `contribution` by the + validator with index `contribution_and_proof.aggregator_index`. +- _[REJECT]_ The aggregator signature, + `signed_contribution_and_proof.signature`, is valid. 
+- _[REJECT]_ The aggregate signature is valid for the message + `beacon_block_root` and aggregate pubkey derived from the participation info + in `aggregation_bits` for the subcommittee specified by the + `contribution.subcommittee_index`. ##### Sync committee subnets -Sync committee subnets are used to propagate unaggregated sync committee messages to subsections of the network. +Sync committee subnets are used to propagate unaggregated sync committee +messages to subsections of the network. ###### `sync_committee_{subnet_id}` -The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee messages to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic. - -The following validations MUST pass before forwarding the `sync_committee_message` on the network: - -- _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `sync_committee_message.slot == current_slot`. -- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`. - Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee. -- _[IGNORE]_ There has been no other valid sync committee message for the declared `slot` for the validator referenced by `sync_committee_message.validator_index` - (this requires maintaining a cache of size `SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT` for each subnet that can be flushed after each slot). - Note this validation is _per topic_ so that for a given `slot`, multiple messages could be forwarded with the same `validator_index` as long as the `subnet_id`s are distinct. -- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`. +The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync +committee messages to the subnet `subnet_id` to be aggregated before being +gossiped to the global `sync_committee_contribution_and_proof` topic. + +The following validations MUST pass before forwarding the +`sync_committee_message` on the network: + +- _[IGNORE]_ The message's slot is for the current slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. + `sync_committee_message.slot == current_slot`. +- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. + `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`. + Note this validation implies the validator is part of the broader current sync + committee along with the correct subcommittee. +- _[IGNORE]_ There has been no other valid sync committee message for the + declared `slot` for the validator referenced by + `sync_committee_message.validator_index` (this requires maintaining a cache of + size `SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT` for each subnet that + can be flushed after each slot). Note this validation is _per topic_ so that + for a given `slot`, multiple messages could be forwarded with the same + `validator_index` as long as the `subnet_id`s are distinct. +- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for + the validator referenced by `validator_index`. ##### Sync committees and aggregation -The aggregation scheme closely follows the design of the attestation aggregation scheme. 
-Sync committee messages are broadcast into "subnets" defined by a topic. -The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair validator guide](./validator.md#constants). -Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee. -Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees. +The aggregation scheme closely follows the design of the attestation aggregation +scheme. Sync committee messages are broadcast into "subnets" defined by a topic. +The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the +[Altair validator guide](./validator.md#constants). Sync committee members are +divided into "subcommittees" which are then assigned to a subnet for the +duration of tenure in the sync committee. Individual validators can be +duplicated in the broader sync committee such that they are included multiple +times in a given subcommittee or across multiple subcommittees. -Unaggregated messages (along with metadata) are sent as `SyncCommitteeMessage`s on the `sync_committee_{subnet_id}` topics. +Unaggregated messages (along with metadata) are sent as `SyncCommitteeMessage`s +on the `sync_committee_{subnet_id}` topics. -Aggregated sync committee messages are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic. +Aggregated sync committee messages are packaged into (signed) +`SyncCommitteeContribution` along with proofs and gossiped to the +`sync_committee_contribution_and_proof` topic. #### Transitioning the gossip -With any fork, the fork version, and thus the `ForkDigestValue`, change. -Message types are unique per topic, and so for a smooth transition a node must temporarily subscribe to both the old and new topics. +With any fork, the fork version, and thus the `ForkDigestValue`, change. Message +types are unique per topic, and so for a smooth transition a node must +temporarily subscribe to both the old and new topics. -The topics that are not removed in a fork are updated with a new `ForkDigestValue`. In advance of the fork, a node SHOULD subscribe to the post-fork variants of the topics. +The topics that are not removed in a fork are updated with a new +`ForkDigestValue`. In advance of the fork, a node SHOULD subscribe to the +post-fork variants of the topics. -Subscriptions are expected to be well-received, all updated nodes should subscribe as well. -Topic-meshes can be grafted quickly as the nodes are already connected and exchanging gossip control messages. +Subscriptions are expected to be well-received, all updated nodes should +subscribe as well. Topic-meshes can be grafted quickly as the nodes are already +connected and exchanging gossip control messages. -Messages SHOULD NOT be re-broadcast from one fork to the other. -A node's behavior before the fork and after the fork are as follows: +Messages SHOULD NOT be re-broadcast from one fork to the other. A node's +behavior before the fork and after the fork are as follows: Pre-fork: -- Peers who propagate messages on the post-fork topics MAY be scored negatively proportionally to time till fork, - to account for clock discrepancy. -- Messages can be IGNORED on the post-fork topics, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` margin. 
+- Peers who propagate messages on the post-fork topics MAY be scored negatively + proportionally to time till fork, to account for clock discrepancy. +- Messages can be IGNORED on the post-fork topics, with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` margin. Post-fork: -- Peers who propagate messages on the pre-fork topics MUST NOT be scored negatively. Lagging IWANT may force them to. -- Messages on pre and post-fork variants of topics share application-level caches. - E.g. an attestation on the both the old and new topic is ignored like any duplicate. -- Two epochs after the fork, pre-fork topics SHOULD be unsubscribed from. This is well after the configured `seen_ttl`. +- Peers who propagate messages on the pre-fork topics MUST NOT be scored + negatively. Lagging IWANT may force them to. +- Messages on pre and post-fork variants of topics share application-level + caches. E.g. an attestation on both the old and new topic is ignored like + any duplicate. +- Two epochs after the fork, pre-fork topics SHOULD be unsubscribed from. This + is well after the configured `seen_ttl`. ### The Req/Resp domain #### Req-Resp interaction -An additional `<context-bytes>` field is introduced to the `response_chunk` as defined in the Phase 0 document: +An additional `<context-bytes>` field is introduced to the `response_chunk` as +defined in the Phase 0 document: ``` response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload> ``` -All Phase 0 methods are compatible: `<context-bytes>` is empty by default. -On a non-zero `<result>` with `ErrorMessage` payload, the `<context-bytes>` is also empty. +All Phase 0 methods are compatible: `<context-bytes>` is empty by default. On a +non-zero `<result>` with `ErrorMessage` payload, the `<context-bytes>` is also +empty. In Altair and later forks, `<context-bytes>` functions as a short meta-data, defined per req-resp method, and can parametrize the payload decoder. ##### `ForkDigest`-context -Starting with Altair, and in future forks, SSZ type definitions may change. -For this common case, we define the `ForkDigest`-context: +Starting with Altair, and in future forks, SSZ type definitions may change. For +this common case, we define the `ForkDigest`-context: -A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the chunk: -`compute_fork_digest(fork_version, genesis_validators_root)`. +A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the +chunk: `compute_fork_digest(fork_version, genesis_validators_root)`. #### Messages @@ -232,7 +308,8 @@ A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the chu **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` -Request and Response remain unchanged. A `ForkDigest`-context is used to select the fork namespace of the Response type. +Request and Response remain unchanged. A `ForkDigest`-context is used to select +the fork namespace of the Response type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -247,7 +324,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` -Request and Response remain unchanged. A `ForkDigest`-context is used to select the fork namespace of the Response type. +Request and Response remain unchanged. A `ForkDigest`-context is used to select +the fork namespace of the Response type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -278,15 +356,16 @@ protocol are unchanged from the phase 0 p2p networking document. #### Transitioning from v1 to v2 -In advance of the fork, implementations can opt in to both run the v1 and v2 for a smooth transition.
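As an illustration of the `ForkDigest`-context above, a client might select the SSZ type used to decode a v2 response chunk roughly as follows. This is only a sketch: it assumes the usual config names `GENESIS_FORK_VERSION` and `ALTAIR_FORK_VERSION` are in scope, and the spec's per-method context tables remain authoritative.

```python
def response_type_for_context(context_bytes, genesis_validators_root):
    # Map the 4-byte context (a fork digest) to the fork namespace of the payload type.
    fork_digest_to_type = {
        compute_fork_digest(GENESIS_FORK_VERSION, genesis_validators_root): phase0.SignedBeaconBlock,
        compute_fork_digest(ALTAIR_FORK_VERSION, genesis_validators_root): altair.SignedBeaconBlock,
    }
    # An unknown context means the peer is speaking a fork this node does not support.
    return fork_digest_to_type[context_bytes]
```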
-This is non-breaking, and is recommended as soon as the fork specification is stable. +In advance of the fork, implementations can opt in to both run the v1 and v2 for +a smooth transition. This is non-breaking, and is recommended as soon as the +fork specification is stable. -The v1 variants will be deprecated, and implementations should use v2 when available -(as negotiated with peers via LibP2P multistream-select). +The v1 variants will be deprecated, and implementations should use v2 when +available (as negotiated with peers via LibP2P multistream-select). -The v1 method MAY be unregistered at the fork boundary. -In the event of a request on v1 for an Altair specific payload, -the responder MUST return the **InvalidRequest** response code. +The v1 method MAY be unregistered at the fork boundary. In the event of a +request on v1 for an Altair specific payload, the responder MUST return the +**InvalidRequest** response code. ### The discovery domain: discv5 @@ -294,12 +373,16 @@ the responder MUST return the **InvalidRequest** response code. ##### Sync committee bitfield -An additional bitfield is added to the ENR under the key `syncnets` to facilitate sync committee subnet discovery. -The length of this bitfield is `SYNC_COMMITTEE_SUBNET_COUNT` where each bit corresponds to a distinct `subnet_id` for a specific sync committee subnet. -The `i`th bit is set in this bitfield if the validator is currently subscribed to the `sync_committee_{i}` topic. +An additional bitfield is added to the ENR under the key `syncnets` to +facilitate sync committee subnet discovery. The length of this bitfield is +`SYNC_COMMITTEE_SUBNET_COUNT` where each bit corresponds to a distinct +`subnet_id` for a specific sync committee subnet. The `i`th bit is set in this +bitfield if the validator is currently subscribed to the `sync_committee_{i}` +topic. | Key | Value | | :--------- | :------------------------------------------- | | `syncnets` | SSZ `Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]` | -See the [validator document](./validator.md#sync-committee-subnet-stability) for further details on how the new bits are used. +See the [validator document](./validator.md#sync-committee-subnet-stability) for +further details on how the new bits are used. diff --git a/specs/altair/validator.md b/specs/altair/validator.md index d1c9027ce9..b7b3ecacd1 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -1,6 +1,8 @@ # Altair -- Honest Validator -This is an accompanying document to [Altair -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum proof-of-stake protocol. +This is an accompanying document to +[Altair -- The Beacon Chain](./beacon-chain.md), which describes the expected +actions of a "validator" participating in the Ethereum proof-of-stake protocol. @@ -43,19 +45,28 @@ This is an accompanying document to [Altair -- The Beacon Chain](./beacon-chain. ## Introduction -This document represents the expected behavior of an "honest validator" with respect to the Altair upgrade of the Ethereum proof-of-stake protocol. -It builds on the [previous document for the behavior of an "honest validator" from Phase 0](../phase0/validator.md) of the Ethereum proof-of-stake protocol. -This previous document is referred to below as the "Phase 0 document". - -Altair introduces a new type of committee: the sync committee. 
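Relating back to the `syncnets` ENR entry above, the bitfield could be assembled along these lines. A sketch only, assuming the pyspec `Bitvector` type and `compute_subnets_for_sync_committee` are in scope and that `validator_indices` are the validators attached to this node.

```python
def compute_syncnets_bitfield(state, validator_indices):
    # Bit i is set iff some local validator is subscribed to sync_committee_{i}.
    syncnets = Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]()
    for validator_index in validator_indices:
        for subnet_id in compute_subnets_for_sync_committee(state, validator_index):
            syncnets[subnet_id] = True
    return syncnets  # SSZ-encoded and advertised under the `syncnets` ENR key
```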
Sync committees are responsible for signing each block of the canonical chain and there exists an efficient algorithm for light clients to sync the chain using the output of the sync committees. -See the [sync protocol](./light-client/sync-protocol.md) for further details on the light client sync. -Under this network upgrade, validators track their participation in this new committee type and produce the relevant signatures as required. -Block proposers incorporate the (aggregated) sync committee signatures into each block they produce. +This document represents the expected behavior of an "honest validator" with +respect to the Altair upgrade of the Ethereum proof-of-stake protocol. It builds +on the +[previous document for the behavior of an "honest validator" from Phase 0](../phase0/validator.md) +of the Ethereum proof-of-stake protocol. This previous document is referred to +below as the "Phase 0 document". + +Altair introduces a new type of committee: the sync committee. Sync committees +are responsible for signing each block of the canonical chain and there exists +an efficient algorithm for light clients to sync the chain using the output of +the sync committees. See the [sync protocol](./light-client/sync-protocol.md) +for further details on the light client sync. Under this network upgrade, +validators track their participation in this new committee type and produce the +relevant signatures as required. Block proposers incorporate the (aggregated) +sync committee signatures into each block they produce. ## Prerequisites -All terminology, constants, functions, and protocol mechanics defined in the [Altair -- The Beacon Chain](./beacon-chain.md) doc are requisite for this document and used throughout. -Please see this document before continuing and use as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +[Altair -- The Beacon Chain](./beacon-chain.md) doc are requisite for this +document and used throughout. Please see this document before continuing and use +as a reference throughout. ## Constants @@ -126,17 +137,26 @@ class SyncAggregatorSelectionData(Container): ## Validator assignments -A validator determines beacon committee assignments and beacon block proposal duties as defined in the Phase 0 document. +A validator determines beacon committee assignments and beacon block proposal +duties as defined in the Phase 0 document. ### Sync Committee -To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period. -This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period. - -*Note*: Being assigned to a sync committee for a given `slot` means that the validator produces and broadcasts signatures for `slot - 1` for inclusion in `slot`. -This means that when assigned to an `epoch` sync committee signatures must be produced and broadcast for slots on range `[compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)` -rather than for the range `[compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)`. -To reduce complexity during the Altair fork, sync committees are not expected to produce signatures for `compute_start_slot_at_epoch(ALTAIR_FORK_EPOCH) - 1`. 
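Concretely, the half-open slot range described in the note above can be written as the following sketch, assuming the phase 0 helpers `compute_start_slot_at_epoch` and `SLOTS_PER_EPOCH` are in scope.

```python
def sync_committee_signing_slots(epoch: Epoch):
    # Shifted back by one slot so that each signature can be included in the next block.
    start = compute_start_slot_at_epoch(epoch)
    return [Slot(slot) for slot in range(start - 1, start + SLOTS_PER_EPOCH - 1)]
```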
+To determine sync committee assignments, a validator can run the following +function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where +`epoch` is an epoch number within the current or next sync committee period. +This function is a predicate indicating the presence or absence of the validator +in the corresponding sync committee for the queried sync committee period. + +*Note*: Being assigned to a sync committee for a given `slot` means that the +validator produces and broadcasts signatures for `slot - 1` for inclusion in +`slot`. This means that when assigned to an `epoch` sync committee signatures +must be produced and broadcast for slots on range +`[compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)` +rather than for the range +`[compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)`. +To reduce complexity during the Altair fork, sync committees are not expected to +produce signatures for `compute_start_slot_at_epoch(ALTAIR_FORK_EPOCH) - 1`. ```python def compute_sync_committee_period(epoch: Epoch) -> uint64: @@ -162,57 +182,89 @@ def is_assigned_to_sync_committee(state: BeaconState, ### Lookahead -The sync committee shufflings give validators 1 sync committee period of lookahead which amounts to `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs. -At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and the next `SyncCommittee`. -Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored. - -*Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries. -For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code. - -A validator should plan for future sync committee assignments by noting which sync committee periods they are selected for participation. -Specifically, a validator should: - -- Upon (re)syncing the chain and upon sync committee period boundaries, check for assignments in the current and next sync committee periods. -- If the validator is in the current sync committee period, then they perform the responsibilities below for sync committee rewards. -- If the validator is in the next sync committee period, they should wait until the next `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary and then perform the responsibilities throughout that period. +The sync committee shufflings give validators 1 sync committee period of +lookahead which amounts to `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs. At any +given `epoch`, the `BeaconState` contains the current `SyncCommittee` and the +next `SyncCommittee`. Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the +next `SyncCommittee` becomes the current `SyncCommittee` and the next committee +is computed and stored. + +*Note*: The data required to compute a given committee is not cached in the +`BeaconState` after committees are calculated at the period boundaries. For this +reason, *always* get committee assignments via the fields of the `BeaconState` +(`current_sync_committee` and `next_sync_committee`) or use the above reference +code. + +A validator should plan for future sync committee assignments by noting which +sync committee periods they are selected for participation. 
Specifically, a +validator should: + +- Upon (re)syncing the chain and upon sync committee period boundaries, check + for assignments in the current and next sync committee periods. +- If the validator is in the current sync committee period, then they perform + the responsibilities below for sync committee rewards. +- If the validator is in the next sync committee period, they should wait until + the next `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary and then perform the + responsibilities throughout that period. ## Beacon chain responsibilities A validator maintains the responsibilities given in the Phase 0 document. -Block proposals are modified to incorporate the sync committee signatures as detailed below. +Block proposals are modified to incorporate the sync committee signatures as +detailed below. -When assigned to a sync committee, validators have a new responsibility to sign and broadcast beacon block roots during each slot of the sync committee period. -These signatures are aggregated and routed to the proposer over gossip for inclusion into a beacon block. -Assignments to a particular sync committee are infrequent at normal validator counts; however, an action every slot is required when in the current active sync committee. +When assigned to a sync committee, validators have a new responsibility to sign +and broadcast beacon block roots during each slot of the sync committee period. +These signatures are aggregated and routed to the proposer over gossip for +inclusion into a beacon block. Assignments to a particular sync committee are +infrequent at normal validator counts; however, an action every slot is required +when in the current active sync committee. ### Block proposal -Refer to the phase 0 document for the majority of the [block proposal responsibility](../phase0/validator.md#block-proposal). -The validator should follow those instructions to prepare a `SignedBeaconBlock` for inclusion into the chain. All changes are additive to phase 0 and noted below. +Refer to the phase 0 document for the majority of the +[block proposal responsibility](../phase0/validator.md#block-proposal). The +validator should follow those instructions to prepare a `SignedBeaconBlock` for +inclusion into the chain. All changes are additive to phase 0 and noted below. #### Preparing a `BeaconBlock` -No change to [Preparing for a `BeaconBlock`](../phase0/validator.md#preparing-for-a-beaconblock). +No change to +[Preparing for a `BeaconBlock`](../phase0/validator.md#preparing-for-a-beaconblock). #### Constructing the `BeaconBlockBody` -Each section of [Constructing the `BeaconBlockBody`](../phase0/validator.md#constructing-the-beaconblockbody) should be followed. -After constructing the `BeaconBlockBody` as per that section, the proposer has an additional task to include the sync committee signatures: +Each section of +[Constructing the `BeaconBlockBody`](../phase0/validator.md#constructing-the-beaconblockbody) +should be followed. After constructing the `BeaconBlockBody` as per that +section, the proposer has an additional task to include the sync committee +signatures: ##### Sync committee -The proposer receives a number of `SyncCommitteeContribution`s (wrapped in `SignedContributionAndProof`s on the wire) from validators in the sync committee who are selected to partially aggregate signatures from independent subcommittees formed by breaking the full sync committee into `SYNC_COMMITTEE_SUBNET_COUNT` pieces (see below for details). 
- -The proposer collects the contributions that match their local view of the chain (i.e. `contribution.beacon_block_root == block.parent_root`) for further aggregation when preparing a block. -Of these contributions, proposers should select the best contribution seen across all aggregators for each subnet/subcommittee. -A contribution with more valid signatures is better than a contribution with fewer signatures. - -Recall `block.body.sync_aggregate.sync_committee_bits` is a `Bitvector` where the `i`th bit is `True` if the corresponding validator in the sync committee has produced a valid signature, -and that `block.body.sync_aggregate.sync_committee_signature` is the aggregate BLS signature combining all of the valid signatures. - -Given a collection of the best seen `contributions` (with no repeating `subcommittee_index` values) and the `BeaconBlock` under construction, -the proposer processes them as follows: +The proposer receives a number of `SyncCommitteeContribution`s (wrapped in +`SignedContributionAndProof`s on the wire) from validators in the sync committee +who are selected to partially aggregate signatures from independent +subcommittees formed by breaking the full sync committee into +`SYNC_COMMITTEE_SUBNET_COUNT` pieces (see below for details). + +The proposer collects the contributions that match their local view of the chain +(i.e. `contribution.beacon_block_root == block.parent_root`) for further +aggregation when preparing a block. Of these contributions, proposers should +select the best contribution seen across all aggregators for each +subnet/subcommittee. A contribution with more valid signatures is better than a +contribution with fewer signatures. + +Recall `block.body.sync_aggregate.sync_committee_bits` is a `Bitvector` where +the `i`th bit is `True` if the corresponding validator in the sync committee has +produced a valid signature, and that +`block.body.sync_aggregate.sync_committee_signature` is the aggregate BLS +signature combining all of the valid signatures. + +Given a collection of the best seen `contributions` (with no repeating +`subcommittee_index` values) and the `BeaconBlock` under construction, the +proposer processes them as follows: ```python def process_sync_committee_contributions(block: BeaconBlock, @@ -234,34 +286,60 @@ def process_sync_committee_contributions(block: BeaconBlock, block.body.sync_aggregate = sync_aggregate ``` -*Note*: The resulting block must pass the validations for the `SyncAggregate` defined in `process_sync_aggregate` defined in the [state transition document](./beacon-chain.md#sync-aggregate-processing). -In particular, this means `SyncCommitteeContribution`s received from gossip must have a `beacon_block_root` that matches the proposer's local view of the chain. +*Note*: The resulting block must pass the validations for the `SyncAggregate` +defined in `process_sync_aggregate` defined in the +[state transition document](./beacon-chain.md#sync-aggregate-processing). In +particular, this means `SyncCommitteeContribution`s received from gossip must +have a `beacon_block_root` that matches the proposer's local view of the chain. #### Packaging into a `SignedBeaconBlock` -No change to [Packaging into a `SignedBeaconBlock`](../phase0/validator.md#packaging-into-a-signedbeaconblock). +No change to +[Packaging into a `SignedBeaconBlock`](../phase0/validator.md#packaging-into-a-signedbeaconblock). 
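Returning to the contribution selection rule above (a contribution with more valid participation bits is better), a proposer-side sketch might look as follows; it assumes the `contributions` have already passed gossip validation.

```python
def select_best_contributions(contributions, parent_root):
    # Keep, per subcommittee, the matching contribution with the most participants.
    best = {}  # subcommittee_index -> SyncCommitteeContribution
    for contribution in contributions:
        if contribution.beacon_block_root != parent_root:
            continue  # does not match the proposer's local view of the chain
        index = contribution.subcommittee_index
        participants = sum(contribution.aggregation_bits)
        if index not in best or participants > sum(best[index].aggregation_bits):
            best[index] = contribution
    return list(best.values())
```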
### Attesting and attestation aggregation -Refer to the phase 0 document for the [attesting](../phase0/validator.md#attesting) and [attestation aggregation](../phase0/validator.md#attestation-aggregation) responsibilities. -There is no change compared to the phase 0 document. +Refer to the phase 0 document for the +[attesting](../phase0/validator.md#attesting) and +[attestation aggregation](../phase0/validator.md#attestation-aggregation) +responsibilities. There is no change compared to the phase 0 document. ### Sync committees -Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot. -Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeMessage`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers. -This process occurs each slot. +Sync committee members employ an aggregation scheme to reduce load on the global +proposer channel that is monitored by all potential proposers to be able to +include the full output of the sync committee every slot. Sync committee members +produce individual signatures on subnets (similar to the attestation subnets) +via `SyncCommitteeMessage`s which are then collected by aggregators sampled from +the sync subcommittees to produce a `SyncCommitteeContribution` which is +gossiped to proposers. This process occurs each slot. #### Sync committee messages ##### Prepare sync committee message -If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`. This means that when assigned to `slot` a `SyncCommitteeMessage` is prepared and broadcast in `slot-1 ` instead of `slot`. - -This logic is triggered upon the same conditions as when producing an attestation. -Meaning, a sync committee member should produce and broadcast a `SyncCommitteeMessage` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot) -- whichever comes first. - -`get_sync_committee_message(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. +If a validator is in the current sync committee (i.e. +`is_assigned_to_sync_committee()` above returns `True`), then for every `slot` +in the current sync committee period, the validator should prepare a +`SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic +in `get_sync_committee_message` as soon as they have determined the head block +of `slot - 1`. 
This means that when assigned to `slot` a `SyncCommitteeMessage` +is prepared and broadcast in `slot-1 ` instead of `slot`. + +This logic is triggered upon the same conditions as when producing an +attestation. Meaning, a sync committee member should produce and broadcast a +`SyncCommitteeMessage` either when (a) the validator has received a valid block +from the expected block proposer for the current `slot` or (b) one-third of the +slot has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the +start of the slot) -- whichever comes first. + +`get_sync_committee_message(state, block_root, validator_index, privkey)` +assumes the parameter `state` is the head state corresponding to processing the +block up to the current slot as determined by the fork choice (including any +empty slots up to the current slot processed with `process_slots` on top of the +latest block), `block_root` is the root of the head block, `validator_index` is +the index of the validator in the registry `state.validators` controlled by +`privkey`, and `privkey` is the BLS private key for the validator. ```python def get_sync_committee_message(state: BeaconState, @@ -283,12 +361,17 @@ def get_sync_committee_message(state: BeaconState, ##### Broadcast sync committee message -The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic. +The validator broadcasts the assembled signature to the assigned subnet, the +`sync_committee_{subnet_id}` pubsub topic. -The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees". -`subnet_id` can be computed via `compute_subnets_for_sync_committee(state, validator_index)` where `state` is a `BeaconState` during the matching sync committee period. +The `subnet_id` is derived from the position in the sync committee such that the +sync committee is divided into "subcommittees". `subnet_id` can be computed via +`compute_subnets_for_sync_committee(state, validator_index)` where `state` is a +`BeaconState` during the matching sync committee period. -*Note*: This function returns multiple deduplicated subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees. +*Note*: This function returns multiple deduplicated subnets if a given validator +index is included multiple times in a given sync committee across multiple +subcommittees. ```python def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[SubnetID]: @@ -306,18 +389,28 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali ]) ``` -*Note*: Subnet assignment does not change during the duration of a validator's assignment to a given sync committee. +*Note*: Subnet assignment does not change during the duration of a validator's +assignment to a given sync committee. -*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_message` on each of the distinct subnets. +*Note*: If a validator has multiple `subnet_id` results from +`compute_subnets_for_sync_committee`, the validator should broadcast a copy of +the `sync_committee_message` on each of the distinct subnets. 
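A minimal broadcast sketch for the note above, assuming a hypothetical `publish(topic, message)` gossipsub call that handles the full topic prefix; the deduplicated set returned by `compute_subnets_for_sync_committee` means one copy is sent per distinct subnet.

```python
def broadcast_sync_committee_message(state, sync_committee_message, validator_index, publish):
    for subnet_id in compute_subnets_for_sync_committee(state, validator_index):
        publish(f"sync_committee_{subnet_id}", sync_committee_message)
```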
#### Sync committee contributions -Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeMessage`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block. +Each slot, some sync committee members in each subcommittee are selected to +aggregate the `SyncCommitteeMessage`s into a `SyncCommitteeContribution` which +is broadcast on a global channel for inclusion into the next block. ##### Aggregation selection -A validator is selected to aggregate based on the value returned by `is_sync_committee_aggregator()` where `signature` is the BLS signature returned by `get_sync_committee_selection_proof()`. -The signature function takes a `BeaconState` with the relevant sync committees for the queried `slot` (i.e. `state.slot` is within the span covered by the current or next sync committee period), the `subcommittee_index` equal to the `subnet_id`, and the `privkey` is the BLS private key associated with the validator. +A validator is selected to aggregate based on the value returned by +`is_sync_committee_aggregator()` where `signature` is the BLS signature returned +by `get_sync_committee_selection_proof()`. The signature function takes a +`BeaconState` with the relevant sync committees for the queried `slot` (i.e. +`state.slot` is within the span covered by the current or next sync committee +period), the `subcommittee_index` equal to the `subnet_id`, and the `privkey` is +the BLS private key associated with the validator. ```python def get_sync_committee_selection_proof(state: BeaconState, @@ -339,50 +432,85 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool: return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0 ``` -*Note*: The set of aggregators generally changes every slot; however, the assignments can be computed ahead of time as soon as the committee is known. +*Note*: The set of aggregators generally changes every slot; however, the +assignments can be computed ahead of time as soon as the committee is known. ##### Construct sync committee contribution -If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`. +If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on +a subnet during a given `slot`, they construct an aggregated +`SyncCommitteeContribution`. -Collect all of the (valid) `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator. If `len(sync_committee_messages) > 0`, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: +Collect all of the (valid) `sync_committee_messages: Set[SyncCommitteeMessage]` +from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an +equivalent `beacon_block_root` to that of the aggregator. If +`len(sync_committee_messages) > 0`, the aggregator creates a +`contribution: SyncCommitteeContribution` with the following fields: ###### Slot -Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the slot in question. +Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the +slot in question. ###### Beacon block root -Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_messages`. 
+Set `contribution.beacon_block_root = beacon_block_root` from the +`beacon_block_root` found in the `sync_committee_messages`. ###### Subcommittee index -Set `contribution.subcommittee_index` to the index for the subcommittee index corresponding to the subcommittee assigned to this subnet. This index matches the `subnet_id` used to derive the topic name. +Set `contribution.subcommittee_index` to the subcommittee index corresponding +to the subcommittee assigned to this subnet. This index matches the `subnet_id` +used to derive the topic name. ###### Aggregation bits -Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee. -An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_message.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`. +Let `contribution.aggregation_bits` be a +`Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the +`index`th bit is set in the `Bitvector` for each corresponding validator +included in this aggregate from the corresponding subcommittee. An aggregator +finds the index in the sync committee (as determined by a reverse pubkey lookup +on `state.current_sync_committee.pubkeys`) for a given validator referenced by +`sync_committee_message.validator_index` and maps the sync committee index to an +index in the subcommittee (along with the prior `subcommittee_index`). This +index within the subcommittee is set in `contribution.aggregation_bits`. -For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. +For example, if a validator with index `2044` is pseudo-randomly sampled to sync +committee index `135`, then this sync committee index maps to +`subcommittee_index` `1` with position `7` in the `Bitvector` for the +contribution. -*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeMessage`. +*Note*: A validator **could be included multiple times** in a given subcommittee +such that multiple bits are set for a single `SyncCommitteeMessage`. ###### Signature -Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. +Set `contribution.signature = aggregate_signature` where `aggregate_signature` +is obtained by assembling the appropriate collection of `BLSSignature`s from the +set of `sync_committee_messages` and using the `bls.Aggregate()` function to +produce an aggregate `BLSSignature`. -The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee.
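The aggregation-bits mapping described above reduces to simple arithmetic. A sketch, assuming the mainnet values `SYNC_COMMITTEE_SIZE = 512` and `SYNC_COMMITTEE_SUBNET_COUNT = 4`:

```python
def subcommittee_position(sync_committee_index):
    # Map an index within the full sync committee to (subcommittee_index, position
    # within that subcommittee's aggregation bits).
    subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
    return sync_committee_index // subcommittee_size, sync_committee_index % subcommittee_size

# With the assumed values, sync committee index 135 maps to subcommittee_index 1
# and position 7, matching the worked example above (135 // 128 == 1, 135 % 128 == 7).
```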
+The collection of input signatures should include one signature per validator +who had a bit set in the `aggregation_bits` bitfield, with repeated signatures +if one validator maps to multiple indices within the subcommittee. ##### Broadcast sync committee contribution -If the validator is selected to aggregate (`is_sync_committee_aggregator()`), then they broadcast their best aggregate as a `SignedContributionAndProof` to the global aggregate channel (`sync_committee_contribution_and_proof` topic) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / INTERVALS_PER_SLOT` seconds after the start of `slot`. +If the validator is selected to aggregate (`is_sync_committee_aggregator()`), +then they broadcast their best aggregate as a `SignedContributionAndProof` to +the global aggregate channel (`sync_committee_contribution_and_proof` topic) +two-thirds of the way through the `slot`-that is, +`SECONDS_PER_SLOT * 2 / INTERVALS_PER_SLOT` seconds after the start of `slot`. -Selection proofs are provided in `ContributionAndProof` to prove to the gossip channel that the validator has been selected as an aggregator. +Selection proofs are provided in `ContributionAndProof` to prove to the gossip +channel that the validator has been selected as an aggregator. -`ContributionAndProof` messages are signed by the aggregator and broadcast inside of `SignedContributionAndProof` objects to prevent a class of DoS attacks and message forgeries. +`ContributionAndProof` messages are signed by the aggregator and broadcast +inside of `SignedContributionAndProof` objects to prevent a class of DoS attacks +and message forgeries. -First, `contribution_and_proof = get_contribution_and_proof(state, validator_index, contribution, privkey)` is constructed. +First, +`contribution_and_proof = get_contribution_and_proof(state, validator_index, contribution, privkey)` +is constructed. ```python def get_contribution_and_proof(state: BeaconState, @@ -402,7 +530,9 @@ def get_contribution_and_proof(state: BeaconState, ) ``` -Then `signed_contribution_and_proof = SignedContributionAndProof(message=contribution_and_proof, signature=signature)` is constructed and broadcast. Where `signature` is obtained from: +Then +`signed_contribution_and_proof = SignedContributionAndProof(message=contribution_and_proof, signature=signature)` +is constructed and broadcast. Where `signature` is obtained from: ```python def get_contribution_and_proof_signature(state: BeaconState, @@ -416,19 +546,33 @@ def get_contribution_and_proof_signature(state: BeaconState, ## Sync committee subnet stability -The sync committee subnets need special care to ensure stability given the relatively low number of validators involved in the sync committee at any particular time. -To provide this stability, a validator must do the following: - -- Maintain advertisement of the subnet the validator in the sync committee is assigned to in their node's ENR as soon as they have joined the subnet. - Subnet assignments are known `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs in advance and can be computed with `compute_subnets_for_sync_committee` defined above. - ENR advertisement is indicated by setting the appropriate bit(s) of the bitfield found under the `syncnets` key in the ENR corresponding to the derived `subnet_id`(s). - Any bits modified for the sync committee responsibilities are unset in the ENR once the node no longer has any validators in the subcommittee. 
- - *Note*: The first sync committee from phase 0 to the Altair fork will not be known until the fork happens, which implies subnet assignments are not known until then. - Early sync committee members should listen for topic subscriptions from peers and employ discovery via the ENR advertisements near the fork boundary to form initial subnets. - Some early sync committee rewards may be missed while the initial subnets form. - -- To join a sync committee subnet, select a random number of epochs before the end of the current sync committee period between 1 and `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive. - Validators should join their member subnet at the beginning of the epoch they have randomly selected. - For example, if the next sync committee period starts at epoch `853,248` and the validator randomly selects an offset of `3`, they should join the subnet at the beginning of epoch `853,245`. - Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period. +The sync committee subnets need special care to ensure stability given the +relatively low number of validators involved in the sync committee at any +particular time. To provide this stability, a validator must do the following: + +- Maintain advertisement of the subnet the validator in the sync committee is + assigned to in their node's ENR as soon as they have joined the subnet. Subnet + assignments are known `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs in advance and + can be computed with `compute_subnets_for_sync_committee` defined above. ENR + advertisement is indicated by setting the appropriate bit(s) of the bitfield + found under the `syncnets` key in the ENR corresponding to the derived + `subnet_id`(s). Any bits modified for the sync committee responsibilities are + unset in the ENR once the node no longer has any validators in the + subcommittee. + + *Note*: The first sync committee from phase 0 to the Altair fork will not be + known until the fork happens, which implies subnet assignments are not known + until then. Early sync committee members should listen for topic subscriptions + from peers and employ discovery via the ENR advertisements near the fork + boundary to form initial subnets. Some early sync committee rewards may be + missed while the initial subnets form. + +- To join a sync committee subnet, select a random number of epochs before the + end of the current sync committee period between 1 and + `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive. Validators should join their member + subnet at the beginning of the epoch they have randomly selected. For example, + if the next sync committee period starts at epoch `853,248` and the validator + randomly selects an offset of `3`, they should join the subnet at the + beginning of epoch `853,245`. Validators should leverage the lookahead period + on sync committee assignments so that they can join the appropriate subnets + ahead of their assigned sync committee period. diff --git a/specs/bellatrix/beacon-chain.md b/specs/bellatrix/beacon-chain.md index 8ea9deceef..a93d802ab0 100644 --- a/specs/bellatrix/beacon-chain.md +++ b/specs/bellatrix/beacon-chain.md @@ -45,7 +45,8 @@ ## Introduction -This upgrade adds transaction execution to the beacon chain as part of Bellatrix upgrade. +This upgrade adds transaction execution to the beacon chain as part of Bellatrix +upgrade. 
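To illustrate the join-timing rule from the sync committee subnet stability section above, a sketch that picks the random offset; `next_period_start_epoch` is assumed to have been derived from the validator's lookahead.

```python
import random

def sync_subnet_join_epoch(next_period_start_epoch):
    # Join between 1 and SYNC_COMMITTEE_SUBNET_COUNT epochs (inclusive) before the
    # next sync committee period starts, e.g. 853248 with offset 3 -> 853245.
    offset = random.randint(1, SYNC_COMMITTEE_SUBNET_COUNT)
    return next_period_start_epoch - offset
```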
Additionally, this upgrade introduces the following minor changes: @@ -64,9 +65,11 @@ Additionally, this upgrade introduces the following minor changes: ### Rewards and penalties -Bellatrix updates a few configuration values to move penalty parameters to their final, maximum security values. +Bellatrix updates a few configuration values to move penalty parameters to their +final, maximum security values. -*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout. +*Note*: The spec does *not* override previous configuration values but instead +creates new values and replaces usage throughout. | Name | Value | | -------------------------------------------- | ------------------------------ | @@ -244,7 +247,8 @@ def compute_timestamp_at_slot(state: BeaconState, slot: Slot) -> uint64: #### Modified `get_inactivity_penalty_deltas` -*Note*: The function `get_inactivity_penalty_deltas` is modified to use `INACTIVITY_PENALTY_QUOTIENT_BELLATRIX`. +*Note*: The function `get_inactivity_penalty_deltas` is modified to use +`INACTIVITY_PENALTY_QUOTIENT_BELLATRIX`. ```python def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: @@ -268,7 +272,8 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S #### Modified `slash_validator` -*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX`. +*Note*: The function `slash_validator` is modified to use +`MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX`. ```python def slash_validator(state: BeaconState, @@ -312,17 +317,21 @@ class NewPayloadRequest(object): #### Engine APIs -The implementation-dependent `ExecutionEngine` protocol encapsulates the execution sub-system logic via: +The implementation-dependent `ExecutionEngine` protocol encapsulates the +execution sub-system logic via: - a state object `self.execution_state` of type `ExecutionState` -- a notification function `self.notify_new_payload` which may apply changes to the `self.execution_state` +- a notification function `self.notify_new_payload` which may apply changes to + the `self.execution_state` -The body of these functions are implementation dependent. -The Engine API may be used to implement this and similarly defined functions via an external execution engine. +The body of these functions are implementation dependent. The Engine API may be +used to implement this and similarly defined functions via an external execution +engine. #### `notify_new_payload` -`notify_new_payload` is a function accessed through the `EXECUTION_ENGINE` module which instantiates the `ExecutionEngine` protocol. +`notify_new_payload` is a function accessed through the `EXECUTION_ENGINE` +module which instantiates the `ExecutionEngine` protocol. ```python def notify_new_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool: @@ -366,7 +375,9 @@ def verify_and_notify_new_payload(self: ExecutionEngine, ### Block processing -*Note*: The call to the `process_execution_payload` must happen before the call to the `process_randao` as the former depends on the `randao_mix` computed with the reveal of the previous block. +*Note*: The call to the `process_execution_payload` must happen before the call +to the `process_randao` as the former depends on the `randao_mix` computed with +the reveal of the previous block. 
```python def process_block(state: BeaconState, block: BeaconBlock) -> None: @@ -419,7 +430,8 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi #### Slashings -*Note*: The function `process_slashings` is modified to use `PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX`. +*Note*: The function `process_slashings` is modified to use +`PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX`. ```python def process_slashings(state: BeaconState) -> None: diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index ba3011ca33..6c2da6427e 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -22,9 +22,11 @@ ## Introduction -This is the modification of the fork choice according to the executable beacon chain proposal. +This is the modification of the fork choice according to the executable beacon +chain proposal. -*Note*: It introduces the process of transition from the last PoW block to the first PoS block. +*Note*: It introduces the process of transition from the last PoW block to the +first PoS block. ## Custom types @@ -36,22 +38,27 @@ This is the modification of the fork choice according to the executable beacon c ### `ExecutionEngine` -*Note*: The `notify_forkchoice_updated` function is added to the `ExecutionEngine` protocol to signal the fork choice updates. +*Note*: The `notify_forkchoice_updated` function is added to the +`ExecutionEngine` protocol to signal the fork choice updates. -The body of this function is implementation dependent. -The Engine API may be used to implement it with an external execution engine. +The body of this function is implementation dependent. The Engine API may be +used to implement it with an external execution engine. #### `notify_forkchoice_updated` This function performs three actions *atomically*: -- Re-organizes the execution payload chain and corresponding state to make `head_block_hash` the head. -- Updates safe block hash with the value provided by `safe_block_hash` parameter. -- Applies finality to the execution state: it irreversibly persists the chain of all execution payloads - and corresponding state, up to and including `finalized_block_hash`. +- Re-organizes the execution payload chain and corresponding state to make + `head_block_hash` the head. +- Updates safe block hash with the value provided by `safe_block_hash` + parameter. +- Applies finality to the execution state: it irreversibly persists the chain of + all execution payloads and corresponding state, up to and including + `finalized_block_hash`. -Additionally, if `payload_attributes` is provided, this function sets in motion a payload build process on top of -`head_block_hash` and returns an identifier of initiated process. +Additionally, if `payload_attributes` is provided, this function sets in motion +a payload build process on top of `head_block_hash` and returns an identifier of +initiated process. ```python def notify_forkchoice_updated(self: ExecutionEngine, @@ -62,33 +69,45 @@ def notify_forkchoice_updated(self: ExecutionEngine, ... ``` -*Note*: The `(head_block_hash, finalized_block_hash)` values of the `notify_forkchoice_updated` function call maps on the `POS_FORKCHOICE_UPDATED` event defined in the [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#definitions). -As per EIP-3675, before a post-transition block is finalized, `notify_forkchoice_updated` MUST be called with `finalized_block_hash = Hash32()`. 
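A hypothetical wrapper showing how the three hashes passed to `notify_forkchoice_updated` above might be derived from the fork choice `Store`. The name `send_fork_choice_update` is illustrative, the sketch glosses over pre-Bellatrix blocks (which have no `execution_payload` at all), and the zeroed finalized hash before the first post-transition finalization falls out of the default `ExecutionPayload` of pre-transition blocks.

```python
def send_fork_choice_update(store, execution_engine, payload_attributes=None):
    head_root = get_head(store)
    head_hash = store.blocks[head_root].body.execution_payload.block_hash
    safe_hash = get_safe_execution_block_hash(store)
    finalized_block = store.blocks[store.finalized_checkpoint.root]
    finalized_hash = finalized_block.body.execution_payload.block_hash  # Hash32() pre-transition
    return execution_engine.notify_forkchoice_updated(
        head_hash, safe_hash, finalized_hash, payload_attributes
    )
```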
+*Note*: The `(head_block_hash, finalized_block_hash)` values of the +`notify_forkchoice_updated` function call maps on the `POS_FORKCHOICE_UPDATED` +event defined in the +[EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#definitions). As per +EIP-3675, before a post-transition block is finalized, +`notify_forkchoice_updated` MUST be called with +`finalized_block_hash = Hash32()`. -*Note*: Client software MUST NOT call this function until the transition conditions are met on the PoW network, i.e. there exists a block for which `is_valid_terminal_pow_block` function returns `True`. +*Note*: Client software MUST NOT call this function until the transition +conditions are met on the PoW network, i.e. there exists a block for which +`is_valid_terminal_pow_block` function returns `True`. -*Note*: Client software MUST call this function to initiate the payload build process to produce the merge transition block; the `head_block_hash` parameter MUST be set to the hash of a terminal PoW block in this case. +*Note*: Client software MUST call this function to initiate the payload build +process to produce the merge transition block; the `head_block_hash` parameter +MUST be set to the hash of a terminal PoW block in this case. ##### `safe_block_hash` The `safe_block_hash` parameter MUST be set to return value of -[`get_safe_execution_block_hash(store: Store)`](../../fork_choice/safe-block.md#get_safe_execution_block_hash) function. +[`get_safe_execution_block_hash(store: Store)`](../../fork_choice/safe-block.md#get_safe_execution_block_hash) +function. ##### `should_override_forkchoice_update` -If proposer boost re-orgs are implemented and enabled (see `get_proposer_head`) then additional care -must be taken to ensure that the proposer is able to build an execution payload. +If proposer boost re-orgs are implemented and enabled (see `get_proposer_head`) +then additional care must be taken to ensure that the proposer is able to build +an execution payload. If a beacon node knows it will propose the next block then it SHOULD NOT call -`notify_forkchoice_updated` if it detects the current head to be weak and potentially capable of -being re-orged. Complete information for evaluating `get_proposer_head` _will not_ be available -immediately after the receipt of a new block, so an approximation of those conditions should be -used when deciding whether to send or suppress a fork choice notification. The exact conditions +`notify_forkchoice_updated` if it detects the current head to be weak and +potentially capable of being re-orged. Complete information for evaluating +`get_proposer_head` _will not_ be available immediately after the receipt of a +new block, so an approximation of those conditions should be used when deciding +whether to send or suppress a fork choice notification. The exact conditions used may be implementation-specific, a suggested implementation is below. -Let `validator_is_connected(validator_index: ValidatorIndex) -> bool` be a function that indicates -whether the validator with `validator_index` is connected to the node (e.g. has sent an unexpired -proposer preparation message). +Let `validator_is_connected(validator_index: ValidatorIndex) -> bool` be a +function that indicates whether the validator with `validator_index` is +connected to the node (e.g. has sent an unexpired proposer preparation message). 
```python def should_override_forkchoice_update(store: Store, head_root: Root) -> bool: @@ -140,25 +159,28 @@ def should_override_forkchoice_update(store: Store, head_root: Root) -> bool: head_weak, parent_strong]) ``` -*Note*: The ordering of conditions is a suggestion only. Implementations are free to -optimize by re-ordering the conditions from least to most expensive and by returning early if -any of the early conditions are `False`. +*Note*: The ordering of conditions is a suggestion only. Implementations are +free to optimize by re-ordering the conditions from least to most expensive and +by returning early if any of the early conditions are `False`. -In case `should_override_forkchoice_update` returns `True`, a node SHOULD instead call -`notify_forkchoice_updated` with parameters appropriate for building upon the parent block. Care -must be taken to compute the correct `payload_attributes`, as they may change depending on the slot -of the block to be proposed (due to withdrawals). +In case `should_override_forkchoice_update` returns `True`, a node SHOULD +instead call `notify_forkchoice_updated` with parameters appropriate for +building upon the parent block. Care must be taken to compute the correct +`payload_attributes`, as they may change depending on the slot of the block to +be proposed (due to withdrawals). -If `should_override_forkchoice_update` returns `True` but `get_proposer_head` later chooses the -canonical head rather than its parent, then this is a misprediction that will cause the node -to construct a payload with less notice. The result of `get_proposer_head` MUST be preferred over -the result of `should_override_forkchoice_update` (when proposer reorgs are enabled). +If `should_override_forkchoice_update` returns `True` but `get_proposer_head` +later chooses the canonical head rather than its parent, then this is a +misprediction that will cause the node to construct a payload with less notice. +The result of `get_proposer_head` MUST be preferred over the result of +`should_override_forkchoice_update` (when proposer reorgs are enabled). ## Helpers ### `PayloadAttributes` -Used to signal to initiate the payload build process via `notify_forkchoice_updated`. +Used to signal to initiate the payload build process via +`notify_forkchoice_updated`. ```python @dataclass @@ -179,10 +201,12 @@ class PowBlock(Container): ### `get_pow_block` -Let `get_pow_block(block_hash: Hash32) -> Optional[PowBlock]` be the function that given the hash of the PoW block returns its data. -It may result in `None` if the requested block is not yet available. +Let `get_pow_block(block_hash: Hash32) -> Optional[PowBlock]` be the function +that given the hash of the PoW block returns its data. It may result in `None` +if the requested block is not yet available. -*Note*: The `eth_getBlockByHash` JSON-RPC method may be used to pull this information from an execution client. +*Note*: The `eth_getBlockByHash` JSON-RPC method may be used to pull this +information from an execution client. ### `is_valid_terminal_pow_block` @@ -226,7 +250,8 @@ def validate_merge_block(block: BeaconBlock) -> None: ### `on_block` -*Note*: The only modification is the addition of the verification of transition block conditions. +*Note*: The only modification is the addition of the verification of transition +block conditions. 
```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: diff --git a/specs/bellatrix/fork.md b/specs/bellatrix/fork.md index 2c4ca327ca..9deda31906 100644 --- a/specs/bellatrix/fork.md +++ b/specs/bellatrix/fork.md @@ -46,20 +46,30 @@ def compute_fork_version(epoch: Epoch) -> Version: ### Fork trigger -TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at epoch `BELLATRIX_FORK_EPOCH`. +TBD. Social consensus, along with state conditions such as epoch boundary, +finality, deposits, active validator count, etc. may be part of the decision +process to trigger the fork. For now we assume the condition will be triggered +at epoch `BELLATRIX_FORK_EPOCH`. -Note that for the pure Bellatrix networks, we don't apply `upgrade_to_bellatrix` since it starts with Bellatrix version logic. +Note that for the pure Bellatrix networks, we don't apply `upgrade_to_bellatrix` +since it starts with Bellatrix version logic. ### Upgrading the state -As with the Phase0-to-Altair upgrade, the `state_transition` is modified to upgrade the `BeaconState`. -The `BeaconState` upgrade runs as part of `process_slots`, slots with missing block proposals do not affect the upgrade time. +As with the Phase0-to-Altair upgrade, the `state_transition` is modified to +upgrade the `BeaconState`. The `BeaconState` upgrade runs as part of +`process_slots`, slots with missing block proposals do not affect the upgrade +time. -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == BELLATRIX_FORK_EPOCH`, an irregular state change is made to upgrade to Bellatrix. -The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH`. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == BELLATRIX_FORK_EPOCH`, an irregular state +change is made to upgrade to Bellatrix. The upgrade occurs after the completion +of the inner loop of `process_slots` that sets `state.slot` equal to +`BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH`. -When multiple upgrades are scheduled for the same epoch (common for test-networks), -all the upgrades run in sequence before resuming the regular state transition. +When multiple upgrades are scheduled for the same epoch (common for +test-networks), all the upgrades run in sequence before resuming the regular +state transition. ```python def upgrade_to_bellatrix(pre: altair.BeaconState) -> BeaconState: diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index de9e71f3c8..c2ba3d6239 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -25,9 +25,14 @@ This document contains the networking specification for Bellatrix. -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. This document should be viewed as additive to the documents from [Phase 0](../phase0/p2p-interface.md) and from [Altair](../altair/p2p-interface.md) -and will be referred to as the "Phase 0 document" and "Altair document" respectively, hereafter. -Readers should understand the Phase 0 and Altair documents and use them as a basis to understand the changes outlined in this document. 
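A sketch of where the irregular Bellatrix state upgrade described above fires, written as a hypothetical wrapper around the end of `process_slots`:

```python
def maybe_upgrade_to_bellatrix(state: altair.BeaconState) -> BeaconState:
    # The upgrade runs only on the first slot of BELLATRIX_FORK_EPOCH, after the
    # inner loop of process_slots has advanced state.slot to that slot.
    if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == BELLATRIX_FORK_EPOCH:
        return upgrade_to_bellatrix(state)
    return state
```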
+The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. This +document should be viewed as additive to the documents from +[Phase 0](../phase0/p2p-interface.md) and from +[Altair](../altair/p2p-interface.md) and will be referred to as the "Phase 0 +document" and "Altair document" respectively, hereafter. Readers should +understand the Phase 0 and Altair documents and use them as a basis to +understand the changes outlined in this document. ## Modifications in Bellatrix @@ -37,20 +42,24 @@ Some gossip meshes are upgraded in Bellatrix to support upgraded types. #### Topics and messages -Topics follow the same specification as in prior upgrades. -All topics remain stable except the beacon block topic which is updated with the modified type. +Topics follow the same specification as in prior upgrades. All topics remain +stable except the beacon block topic which is updated with the modified type. -The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 and Altair documents unless explicitly noted here. +The specification around the creation, validation, and dissemination of messages +has not changed from the Phase 0 and Altair documents unless explicitly noted +here. The derivation of the `message-id` remains stable. -The new topics along with the type of the `data` field of a gossipsub message are given in this table: +The new topics along with the type of the `data` field of a gossipsub message +are given in this table: | Name | Message Type | | -------------- | ------------------------------ | | `beacon_block` | `SignedBeaconBlock` (modified) | -Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics. +Note that the `ForkDigestValue` path segment of the topic separates the old and +the new `beacon_block` topics. ##### Global topics @@ -58,48 +67,58 @@ Bellatrix changes the type of the global beacon block topic. ###### `beacon_block` -The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Bellatrix. -Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`. -See Bellatrix [state transition document](./beacon-chain.md#beaconblockbody) for further details. +The *type* of the payload of this topic changes to the (modified) +`SignedBeaconBlock` found in Bellatrix. Specifically, this type changes with the +addition of `execution_payload` to the inner `BeaconBlockBody`. See Bellatrix +[state transition document](./beacon-chain.md#beaconblockbody) for further +details. Blocks with execution enabled will be permitted to propagate regardless of the validity of the execution payload. This prevents network segregation between [optimistic](/sync/optimistic.md) and non-optimistic nodes. In addition to the gossip validations for this topic from prior specifications, -the following validations MUST pass before forwarding the `signed_beacon_block` on the network. -Alias `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`. - -If the execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)` -then validate the following: - -- _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot - -- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. 
-- If `execution_payload` verification of block's parent by an execution node is *not* complete: +the following validations MUST pass before forwarding the `signed_beacon_block` +on the network. Alias `block = signed_beacon_block.message`, +`execution_payload = block.body.execution_payload`. + +If the execution is enabled for the block -- i.e. +`is_execution_enabled(state, block.body)` then validate the following: + +- _[REJECT]_ The block's execution payload timestamp is correct with respect to + the slot -- i.e. + `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. +- If `execution_payload` verification of block's parent by an execution node is + *not* complete: - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes all - validation (excluding execution node verification of the `block.body.execution_payload`). + validation (excluding execution node verification of the + `block.body.execution_payload`). - Otherwise: - _[IGNORE]_ The block's parent (defined by `block.parent_root`) passes all - validation (including execution node verification of the `block.body.execution_payload`). + validation (including execution node verification of the + `block.body.execution_payload`). -The following gossip validation from prior specifications MUST NOT be applied if the execution is -enabled for the block -- i.e. `is_execution_enabled(state, block.body)`: +The following gossip validation from prior specifications MUST NOT be applied if +the execution is enabled for the block -- i.e. +`is_execution_enabled(state, block.body)`: -- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. +- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes + validation. #### Transitioning the gossip -See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for +See gossip transition details found in the +[Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for details on how to handle transitioning gossip topics. ### The Req/Resp domain -Non-faulty, [optimistic](/sync/optimistic.md) nodes may send blocks which -result in an INVALID response from an execution engine. To prevent network -segregation between optimistic and non-optimistic nodes, transmission of an -INVALID execution payload via the Req/Resp domain SHOULD NOT cause a node to be -down-scored or disconnected. Transmission of a block which is invalid due to -any consensus layer rules (i.e., *not* execution layer rules) MAY result in +Non-faulty, [optimistic](/sync/optimistic.md) nodes may send blocks which result +in an INVALID response from an execution engine. To prevent network segregation +between optimistic and non-optimistic nodes, transmission of an INVALID +execution payload via the Req/Resp domain SHOULD NOT cause a node to be +down-scored or disconnected. Transmission of a block which is invalid due to any +consensus layer rules (i.e., *not* execution layer rules) MAY result in down-scoring or disconnection. #### Messages @@ -110,7 +129,8 @@ down-scoring or disconnection. Request and Response remain unchanged unless explicitly noted here. -Bellatrix fork-digest is introduced to the `context` enum to specify Bellatrix block type. +Bellatrix fork-digest is introduced to the `context` enum to specify Bellatrix +block type. 
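As a non-normative illustration of how a responder might dispatch on the fork-digest `context` when decoding chunks (the table that follows gives the normative mapping; `genesis_validators_root` and the per-fork `SignedBeaconBlock` classes are assumed to be in scope):

```python
# Sketch only: select the SSZ block type from the fork digest carried in `context`.
def signed_block_type_for_context(context: ForkDigest):
    if context == compute_fork_digest(BELLATRIX_FORK_VERSION, genesis_validators_root):
        return bellatrix.SignedBeaconBlock
    if context == compute_fork_digest(ALTAIR_FORK_VERSION, genesis_validators_root):
        return altair.SignedBeaconBlock
    if context == compute_fork_digest(GENESIS_FORK_VERSION, genesis_validators_root):
        return phase0.SignedBeaconBlock
    raise ValueError("unrecognized fork digest")
```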
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -126,8 +146,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` -Request and Response remain unchanged. -Bellatrix fork-digest is introduced to the `context` enum to specify Bellatrix block type. +Request and Response remain unchanged. Bellatrix fork-digest is introduced to +the `context` enum to specify Bellatrix block type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -146,18 +166,18 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: #### Why was the max gossip message size increased at Bellatrix? With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic -field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in -place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 MiB on the network. -At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction -filled entirely with data at a cost of 16 gas per byte can create a valid -`ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for -current mainnet conditions. +field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit +(1 MiB) put in place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 MiB on +the network. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a +single transaction filled entirely with data at a cost of 16 gas per byte can +create a valid `ExecutionPayload` of ~2 MiB. Thus we need a size limit to at +least account for current mainnet conditions. Note, that due to additional size induced by the `BeaconBlock` contents (e.g. -proposer signature, operations lists, etc) this does reduce the -theoretical max valid `ExecutionPayload` (and `transactions` list) size as -slightly lower than 10 MiB. Considering that `BeaconBlock` max size is on the -order of 128 KiB in the worst case and the current gas limit (~30M) bounds max blocksize to less +proposer signature, operations lists, etc) this does reduce the theoretical max +valid `ExecutionPayload` (and `transactions` list) size as slightly lower than +10 MiB. Considering that `BeaconBlock` max size is on the order of 128 KiB in +the worst case and the current gas limit (~30M) bounds max blocksize to less than 2 MiB today, this marginal difference in theoretical bounds will have zero impact on network functionality and security. @@ -177,8 +197,8 @@ always by simultaneously respected. #### Why allow invalid payloads on the P2P network? -The specification allows blocks with invalid execution payloads to propagate across -gossip and via RPC calls. The reasoning for this is as follows: +The specification allows blocks with invalid execution payloads to propagate +across gossip and via RPC calls. The reasoning for this is as follows: 1. Optimistic nodes must listen to block gossip to obtain a view of the head of the chain. @@ -189,11 +209,11 @@ gossip and via RPC calls. The reasoning for this is as follows: 4. Therefore, optimistic nodes must send optimistic blocks via RPC. So, to prevent network segregation from optimistic nodes inadvertently sending -invalid execution payloads, nodes should never downscore/disconnect nodes due to such invalid -payloads. 
This does open the network to some DoS attacks from invalid execution -payloads, but the scope of actors is limited to validators who can put those -payloads in valid (and slashable) beacon blocks. Therefore, it is argued that -the DoS risk introduced in tolerable. +invalid execution payloads, nodes should never downscore/disconnect nodes due to +such invalid payloads. This does open the network to some DoS attacks from +invalid execution payloads, but the scope of actors is limited to validators who +can put those payloads in valid (and slashable) beacon blocks. Therefore, it is +argued that the DoS risk introduced is tolerable. More complicated schemes are possible that could restrict invalid payloads from RPC. However, it's not clear that complexity is warranted. diff --git a/specs/bellatrix/validator.md b/specs/bellatrix/validator.md index eb647433c7..e49cf3896e 100644 --- a/specs/bellatrix/validator.md +++ b/specs/bellatrix/validator.md @@ -20,15 +20,20 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement executable beacon chain proposal. +This document represents the changes to be made in the code of an "honest +validator" to implement executable beacon chain proposal. ## Prerequisites -This document is an extension of the [Altair -- Honest Validator](../altair/validator.md) guide. -All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Altair -- Honest Validator](../altair/validator.md) guide. All behaviors and +definitions defined in this document, and documents it extends, carry over +unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Bellatrix](./beacon-chain.md) are requisite for this document and used throughout. -Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +updated Beacon Chain doc of [Bellatrix](./beacon-chain.md) are requisite for +this document and used throughout. Please see related Beacon Chain doc before +continuing and use them as a reference throughout. ## Helpers @@ -80,15 +85,17 @@ avoid requiring simple serialize hashing capabilities in the Execution Layer. ### `ExecutionEngine` -*Note*: `get_payload` function is added to the `ExecutionEngine` protocol for use as a validator. +*Note*: `get_payload` function is added to the `ExecutionEngine` protocol for +use as a validator. -The body of this function is implementation dependent. -The Engine API may be used to implement it with an external execution engine. +The body of this function is implementation dependent. The Engine API may be +used to implement it with an external execution engine. #### `get_payload` -Given the `payload_id`, `get_payload` returns `GetPayloadResponse` with the most recent version of -the execution payload that has been built since the corresponding call to `notify_forkchoice_updated` method. +Given the `payload_id`, `get_payload` returns `GetPayloadResponse` with the most +recent version of the execution payload that has been built since the +corresponding call to `notify_forkchoice_updated` method.
```python def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: @@ -100,9 +107,13 @@ def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadRespo ## Beacon chain responsibilities -All validator responsibilities remain unchanged other than those noted below. Namely, the transition block handling and the addition of `ExecutionPayload`. +All validator responsibilities remain unchanged other than those noted below. +Namely, the transition block handling and the addition of `ExecutionPayload`. -*Note*: A validator must not propose on or attest to a block that isn't deemed valid, i.e. hasn't yet passed the beacon chain state transition and execution validations. In future upgrades, an "execution Proof-of-Custody" will be integrated to prevent outsourcing of execution payload validations. +*Note*: A validator must not propose on or attest to a block that isn't deemed +valid, i.e. hasn't yet passed the beacon chain state transition and execution +validations. In future upgrades, an "execution Proof-of-Custody" will be +integrated to prevent outsourcing of execution payload validations. ### Block proposal @@ -110,14 +121,23 @@ All validator responsibilities remain unchanged other than those noted below. Na ##### ExecutionPayload -To obtain an execution payload, a block proposer building a block on top of a `state` must take the following actions: - -1. Set `payload_id = prepare_execution_payload(state, pow_chain, safe_block_hash, finalized_block_hash, suggested_fee_recipient, execution_engine)`, where: - - `state` is the state object after applying `process_slots(state, slot)` transition to the resulting state of the parent block processing - - `pow_chain` is a `Dict[Hash32, PowBlock]` dictionary that abstractly represents all blocks in the PoW chain with block hash as the dictionary key - - `safe_block_hash` is the return value of the `get_safe_execution_block_hash(store: Store)` function call - - `finalized_block_hash` is the block hash of the latest finalized execution payload (`Hash32()` if none yet finalized) - - `suggested_fee_recipient` is the value suggested to be used for the `fee_recipient` field of the execution payload +To obtain an execution payload, a block proposer building a block on top of a +`state` must take the following actions: + +1. Set + `payload_id = prepare_execution_payload(state, pow_chain, safe_block_hash, finalized_block_hash, suggested_fee_recipient, execution_engine)`, + where: + - `state` is the state object after applying `process_slots(state, slot)` + transition to the resulting state of the parent block processing + - `pow_chain` is a `Dict[Hash32, PowBlock]` dictionary that abstractly + represents all blocks in the PoW chain with block hash as the dictionary + key + - `safe_block_hash` is the return value of the + `get_safe_execution_block_hash(store: Store)` function call + - `finalized_block_hash` is the block hash of the latest finalized execution + payload (`Hash32()` if none yet finalized) + - `suggested_fee_recipient` is the value suggested to be used for the + `fee_recipient` field of the execution payload ```python def prepare_execution_payload(state: BeaconState, @@ -158,7 +178,9 @@ def prepare_execution_payload(state: BeaconState, ) ``` -2. Set `block.body.execution_payload = get_execution_payload(payload_id, execution_engine)`, where: +2. 
Set + `block.body.execution_payload = get_execution_payload(payload_id, execution_engine)`, + where: ```python def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: ExecutionEngine) -> ExecutionPayload: @@ -169,5 +191,6 @@ def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: Exe return execution_engine.get_payload(payload_id).execution_payload ``` -*Note*: It is recommended for a validator to call `prepare_execution_payload` as soon as input parameters become known, -and make subsequent calls to this function when any of these parameters gets updated. +*Note*: It is recommended for a validator to call `prepare_execution_payload` as +soon as input parameters become known, and make subsequent calls to this +function when any of these parameters gets updated. diff --git a/specs/capella/beacon-chain.md b/specs/capella/beacon-chain.md index 2df33835af..9a121d0ead 100644 --- a/specs/capella/beacon-chain.md +++ b/specs/capella/beacon-chain.md @@ -39,19 +39,21 @@ ## Introduction -Capella is a consensus-layer upgrade containing a number of features related -to validator withdrawals. Including: +Capella is a consensus-layer upgrade containing a number of features related to +validator withdrawals. Including: - Automatic withdrawals of `withdrawable` validators. -- Partial withdrawals sweep for validators with 0x01 withdrawal - credentials and balances in excess of `MAX_EFFECTIVE_BALANCE`. +- Partial withdrawals sweep for validators with 0x01 withdrawal credentials and + balances in excess of `MAX_EFFECTIVE_BALANCE`. - Operation to change from `BLS_WITHDRAWAL_PREFIX` to - `ETH1_ADDRESS_WITHDRAWAL_PREFIX` versioned withdrawal credentials to enable withdrawals for a validator. + `ETH1_ADDRESS_WITHDRAWAL_PREFIX` versioned withdrawal credentials to enable + withdrawals for a validator. -Another new feature is the new independent state and block historical accumulators -that replace the original singular historical roots. With these accumulators, it becomes possible to validate -the entire block history that led up to that particular state without any additional information -beyond the state and the blocks. +Another new feature is the new independent state and block historical +accumulators that replace the original singular historical roots. With these +accumulators, it becomes possible to validate the entire block history that led +up to that particular state without any additional information beyond the state +and the blocks. ## Custom types @@ -289,7 +291,8 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> ### Epoch processing -*Note*: The function `process_historical_summaries_update` replaces `process_historical_roots_update` in Capella. +*Note*: The function `process_historical_summaries_update` replaces +`process_historical_roots_update` in Capella. ```python def process_epoch(state: BeaconState) -> None: @@ -398,8 +401,9 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None: #### Modified `process_execution_payload` -*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type -and removed the `is_merge_transition_complete` check. +*Note*: The function `process_execution_payload` is modified to use the new +`ExecutionPayloadHeader` type and removed the `is_merge_transition_complete` +check. 
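For context, the dropped check is the Bellatrix helper that tests whether the pre-state already records a non-default payload header; a sketch consistent with its Bellatrix definition (shown for reference only, it is not part of Capella's `process_execution_payload`):

```python
# Bellatrix-era helper that Capella's process_execution_payload no longer calls.
def is_merge_transition_complete(state: BeaconState) -> bool:
    return state.latest_execution_payload_header != ExecutionPayloadHeader()
```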
```python def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None: @@ -435,7 +439,8 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi #### Modified `process_operations` -*Note*: The function `process_operations` is modified to process `BLSToExecutionChange` operations included in the block. +*Note*: The function `process_operations` is modified to process +`BLSToExecutionChange` operations included in the block. ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: diff --git a/specs/capella/fork-choice.md b/specs/capella/fork-choice.md index 7a72eabd70..0625bb4a87 100644 --- a/specs/capella/fork-choice.md +++ b/specs/capella/fork-choice.md @@ -18,7 +18,8 @@ This is the modification of the fork choice according to the Capella upgrade. -Unless stated explicitly, all prior functionality from [Bellatrix](../bellatrix/fork-choice.md) is inherited. +Unless stated explicitly, all prior functionality from +[Bellatrix](../bellatrix/fork-choice.md) is inherited. ## Custom types @@ -26,12 +27,14 @@ Unless stated explicitly, all prior functionality from [Bellatrix](../bellatrix/ ### `ExecutionEngine` -*Note*: The `notify_forkchoice_updated` function is modified in the `ExecutionEngine` protocol at the Capella upgrade. +*Note*: The `notify_forkchoice_updated` function is modified in the +`ExecutionEngine` protocol at the Capella upgrade. #### `notify_forkchoice_updated` -The only change made is to the `PayloadAttributes` container through the addition of `withdrawals`. -Otherwise, `notify_forkchoice_updated` inherits all prior functionality. +The only change made is to the `PayloadAttributes` container through the +addition of `withdrawals`. Otherwise, `notify_forkchoice_updated` inherits all +prior functionality. ```python def notify_forkchoice_updated(self: ExecutionEngine, @@ -61,7 +64,8 @@ class PayloadAttributes(object): ### `on_block` -*Note*: The only modification is the deletion of the verification of merge transition block conditions. +*Note*: The only modification is the deletion of the verification of merge +transition block conditions. ```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: diff --git a/specs/capella/fork.md b/specs/capella/fork.md index a2a15ef465..b27e171ca8 100644 --- a/specs/capella/fork.md +++ b/specs/capella/fork.md @@ -50,16 +50,24 @@ def compute_fork_version(epoch: Epoch) -> Version: The fork is triggered at epoch `CAPELLA_FORK_EPOCH`. -Note that for the pure Capella networks, we don't apply `upgrade_to_capella` since it starts with Capella version logic. +Note that for the pure Capella networks, we don't apply `upgrade_to_capella` +since it starts with Capella version logic. ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == CAPELLA_FORK_EPOCH`, -an irregular state change is made to upgrade to Capella. - -The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `CAPELLA_FORK_EPOCH * SLOTS_PER_EPOCH`. -Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document. 
-In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead, the logic must be within `process_slots`. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == CAPELLA_FORK_EPOCH`, an irregular state +change is made to upgrade to Capella. + +The upgrade occurs after the completion of the inner loop of `process_slots` +that sets `state.slot` equal to `CAPELLA_FORK_EPOCH * SLOTS_PER_EPOCH`. Care +must be taken when transitioning through the fork boundary as implementations +will need a modified +[state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) +that deviates from the Phase 0 document. In particular, the outer +`state_transition` function defined in the Phase 0 document will not expose the +precise fork slot to execute the upgrade in the presence of skipped slots at the +fork boundary. Instead, the logic must be within `process_slots`. ```python def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState: diff --git a/specs/capella/light-client/fork.md b/specs/capella/light-client/fork.md index e03905788d..9b7db239a3 100644 --- a/specs/capella/light-client/fork.md +++ b/specs/capella/light-client/fork.md @@ -10,11 +10,17 @@ ## Introduction -This document describes how to upgrade existing light client objects based on the [Altair specification](../../altair/light-client/sync-protocol.md) to Capella. This is necessary when processing pre-Capella data with a post-Capella `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. +This document describes how to upgrade existing light client objects based on +the [Altair specification](../../altair/light-client/sync-protocol.md) to +Capella. This is necessary when processing pre-Capella data with a post-Capella +`LightClientStore`. Note that the data being exchanged over the network +protocols uses the original format. ## Upgrading light client data -A Capella `LightClientStore` can still process earlier light client data. In order to do so, that pre-Capella data needs to be locally upgraded to Capella before processing. +A Capella `LightClientStore` can still process earlier light client data. In +order to do so, that pre-Capella data needs to be locally upgraded to Capella +before processing. ```python def upgrade_lc_header_to_capella(pre: bellatrix.LightClientHeader) -> LightClientHeader: @@ -67,7 +73,9 @@ def upgrade_lc_optimistic_update_to_capella(pre: bellatrix.LightClientOptimistic ## Upgrading the store -Existing `LightClientStore` objects based on Altair MUST be upgraded to Capella before Capella based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `CAPELLA_FORK_EPOCH`. +Existing `LightClientStore` objects based on Altair MUST be upgraded to Capella +before Capella based light client data can be processed. The `LightClientStore` +upgrade MAY be performed before `CAPELLA_FORK_EPOCH`. 
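A minimal usage sketch (hypothetical driver code around the upgrade function defined next, assuming `store` currently holds a Bellatrix-based `LightClientStore`):

```python
# Upgrade once, before processing any Capella light client data.
if isinstance(store, bellatrix.LightClientStore):
    store = upgrade_lc_store_to_capella(store)
# `store` can now process Capella-based light client updates.
```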
```python def upgrade_lc_store_to_capella(pre: bellatrix.LightClientStore) -> LightClientStore: diff --git a/specs/capella/light-client/full-node.md b/specs/capella/light-client/full-node.md index f1ab0483ca..01fa2f242b 100644 --- a/specs/capella/light-client/full-node.md +++ b/specs/capella/light-client/full-node.md @@ -10,7 +10,8 @@ ## Introduction -This upgrade adds information about the execution payload to light client data as part of the Capella upgrade. +This upgrade adds information about the execution payload to light client data +as part of the Capella upgrade. ## Helper functions diff --git a/specs/capella/light-client/p2p-interface.md b/specs/capella/light-client/p2p-interface.md index 535d6b4b57..eb979da673 100644 --- a/specs/capella/light-client/p2p-interface.md +++ b/specs/capella/light-client/p2p-interface.md @@ -19,7 +19,9 @@ ## Networking -The [Altair light client networking specification](../../altair/light-client/p2p-interface.md) is extended to exchange [Capella light client data](./sync-protocol.md). +The +[Altair light client networking specification](../../altair/light-client/p2p-interface.md) +is extended to exchange [Capella light client data](./sync-protocol.md). ### The gossip domain: gossipsub diff --git a/specs/capella/light-client/sync-protocol.md b/specs/capella/light-client/sync-protocol.md index 046086026f..e9b28d5e05 100644 --- a/specs/capella/light-client/sync-protocol.md +++ b/specs/capella/light-client/sync-protocol.md @@ -15,7 +15,11 @@ ## Introduction -This upgrade adds information about the execution payload to light client data as part of the Capella upgrade. It extends the [Altair Light Client specifications](../../altair/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Altair based deployments to Capella. +This upgrade adds information about the execution payload to light client data +as part of the Capella upgrade. It extends the +[Altair Light Client specifications](../../altair/light-client/sync-protocol.md). +The [fork document](./fork.md) explains how to upgrade existing Altair based +deployments to Capella. Additional documents describes the impact of the upgrade on certain roles: diff --git a/specs/capella/p2p-interface.md b/specs/capella/p2p-interface.md index 5528e2e3c0..1eae552168 100644 --- a/specs/capella/p2p-interface.md +++ b/specs/capella/p2p-interface.md @@ -21,52 +21,67 @@ This document contains the networking specification for Capella. -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. +The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. ## Modifications in Capella ### The gossip domain: gossipsub -A new topic is added to support the gossip of withdrawal credential change messages. And an existing topic is upgraded for updated types in Capella. +A new topic is added to support the gossip of withdrawal credential change +messages. And an existing topic is upgraded for updated types in Capella. #### Topics and messages -Topics follow the same specification as in prior upgrades. All existing topics remain stable except the beacon block topic which is updated with the modified type. +Topics follow the same specification as in prior upgrades. All existing topics +remain stable except the beacon block topic which is updated with the modified +type. 
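As a reminder of that prior-upgrade convention, a full topic string still combines the fork digest with the topic name and the `ssz_snappy` encoding; a non-normative sketch (assuming the network's `genesis_validators_root` is known):

```python
# Phase 0 style `/eth2/ForkDigestValue/Name/Encoding` naming, applied to Capella.
def capella_topic(name: str, genesis_validators_root: Root) -> str:
    fork_digest = compute_fork_digest(CAPELLA_FORK_VERSION, genesis_validators_root)
    return f"/eth2/{fork_digest.hex()}/{name}/ssz_snappy"

# e.g. capella_topic("bls_to_execution_change", genesis_validators_root)
```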
-The new topics along with the type of the `data` field of a gossipsub message are given in this table: +The new topics along with the type of the `data` field of a gossipsub message +are given in this table: | Name | Message Type | | ------------------------- | ------------------------------ | | `beacon_block` | `SignedBeaconBlock` (modified) | | `bls_to_execution_change` | `SignedBLSToExecutionChange` | -Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics. +Note that the `ForkDigestValue` path segment of the topic separates the old and +the new `beacon_block` topics. ##### Global topics -Capella changes the type of the global beacon block topic and adds one global topic to propagate withdrawal credential change messages to all potential proposers of beacon blocks. +Capella changes the type of the global beacon block topic and adds one global +topic to propagate withdrawal credential change messages to all potential +proposers of beacon blocks. ###### `beacon_block` -The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Capella. -Specifically, this type changes with the addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`. -See Capella [state transition document](./beacon-chain.md#beaconblockbody) for further details. +The *type* of the payload of this topic changes to the (modified) +`SignedBeaconBlock` found in Capella. Specifically, this type changes with the +addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`. See +Capella [state transition document](./beacon-chain.md#beaconblockbody) for +further details. ###### `bls_to_execution_change` -This topic is used to propagate signed bls to execution change messages to be included in future blocks. +This topic is used to propagate signed bls to execution change messages to be +included in future blocks. -The following validations MUST pass before forwarding the `signed_bls_to_execution_change` on the network: +The following validations MUST pass before forwarding the +`signed_bls_to_execution_change` on the network: -- _[IGNORE]_ `current_epoch >= CAPELLA_FORK_EPOCH`, - where `current_epoch` is defined by the current wall-clock time. -- _[IGNORE]_ The `signed_bls_to_execution_change` is the first valid signed bls to execution change received - for the validator with index `signed_bls_to_execution_change.message.validator_index`. -- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation. +- _[IGNORE]_ `current_epoch >= CAPELLA_FORK_EPOCH`, where `current_epoch` is + defined by the current wall-clock time. +- _[IGNORE]_ The `signed_bls_to_execution_change` is the first valid signed bls + to execution change received for the validator with index + `signed_bls_to_execution_change.message.validator_index`. +- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass + validation. #### Transitioning the gossip -See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for +See gossip transition details found in the +[Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for details on how to handle transitioning gossip topics for Capella. ### The Req/Resp domain @@ -77,7 +92,8 @@ details on how to handle transitioning gossip topics for Capella. 
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` -The Capella fork-digest is introduced to the `context` enum to specify Capella block type. +The Capella fork-digest is introduced to the `context` enum to specify Capella +block type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -94,7 +110,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` -The Capella fork-digest is introduced to the `context` enum to specify Capella block type. +The Capella fork-digest is introduced to the `context` enum to specify Capella +block type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: diff --git a/specs/capella/validator.md b/specs/capella/validator.md index 016aebb76a..d3b27751dc 100644 --- a/specs/capella/validator.md +++ b/specs/capella/validator.md @@ -21,15 +21,20 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement the Capella upgrade. +This document represents the changes to be made in the code of an "honest +validator" to implement the Capella upgrade. ## Prerequisites -This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide. -All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Bellatrix -- Honest Validator](../bellatrix/validator.md) guide. All behaviors +and definitions defined in this document, and documents it extends, carry over +unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Capella](./beacon-chain.md) are requisite for this document and used throughout. -Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +updated Beacon Chain doc of [Capella](./beacon-chain.md) are requisite for this +document and used throughout. Please see related Beacon Chain doc before +continuing and use them as a reference throughout. ## Helpers @@ -61,14 +66,18 @@ All validator responsibilities remain unchanged other than those noted below. ##### ExecutionPayload `ExecutionPayload`s are constructed as they were in Bellatrix, except that the -expected withdrawals for the slot must be gathered from the `state` (utilizing the -helper `get_expected_withdrawals`) and passed into the `ExecutionEngine` within `prepare_execution_payload`. +expected withdrawals for the slot must be gathered from the `state` (utilizing +the helper `get_expected_withdrawals`) and passed into the `ExecutionEngine` +within `prepare_execution_payload`. -*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied. -That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`. +*Note*: In this section, `state` is the state of the slot for the block proposal +_without_ the block yet applied. That is, `state` is the `previous_state` +processed through any empty slots up to the assigned slot using +`process_slots(previous_state, slot)`. *Note*: The only change made to `prepare_execution_payload` is to call -`get_expected_withdrawals()` to set the new `withdrawals` field of `PayloadAttributes`. 
+`get_expected_withdrawals()` to set the new `withdrawals` field of +`PayloadAttributes`. ```python def prepare_execution_payload(state: BeaconState, @@ -96,40 +105,65 @@ def prepare_execution_payload(state: BeaconState, ##### BLS to execution changes -Up to `MAX_BLS_TO_EXECUTION_CHANGES`, [`BLSToExecutionChange`](./beacon-chain.md#blstoexecutionchange) objects can be included in the `block`. The BLS to execution changes must satisfy the verification conditions found in [BLS to execution change processing](./beacon-chain.md#new-process_bls_to_execution_change). +Up to `MAX_BLS_TO_EXECUTION_CHANGES`, +[`BLSToExecutionChange`](./beacon-chain.md#blstoexecutionchange) objects can be +included in the `block`. The BLS to execution changes must satisfy the +verification conditions found in +[BLS to execution change processing](./beacon-chain.md#new-process_bls_to_execution_change). ## Enabling validator withdrawals -Validator balances are withdrawn periodically via an automatic process. For exited validators, the full balance is withdrawn. For active validators, the balance in excess of `MAX_EFFECTIVE_BALANCE` is withdrawn. +Validator balances are withdrawn periodically via an automatic process. For +exited validators, the full balance is withdrawn. For active validators, the +balance in excess of `MAX_EFFECTIVE_BALANCE` is withdrawn. -There is one prerequisite for this automated process: -the validator's withdrawal credentials pointing to an execution layer address, i.e. having an `ETH1_ADDRESS_WITHDRAWAL_PREFIX`. +There is one prerequisite for this automated process: the validator's withdrawal +credentials pointing to an execution layer address, i.e. having an +`ETH1_ADDRESS_WITHDRAWAL_PREFIX`. -If a validator has a `BLS_WITHDRAWAL_PREFIX` withdrawal credential prefix, to participate in withdrawals the validator must -create a one-time message to change their withdrawal credential from the version authenticated with a BLS key to the -version compatible with the execution layer. This message -- a `BLSToExecutionChange` -- is available starting in Capella +If a validator has a `BLS_WITHDRAWAL_PREFIX` withdrawal credential prefix, to +participate in withdrawals the validator must create a one-time message to +change their withdrawal credential from the version authenticated with a BLS key +to the version compatible with the execution layer. This message -- a +`BLSToExecutionChange` -- is available starting in Capella -Validators who wish to enable withdrawals **MUST** assemble, sign, and broadcast this message so that it is accepted -on the beacon chain. Validators who do not want to enable withdrawals and have the `BLS_WITHDRAWAL_PREFIX` version of -withdrawal credentials can delay creating this message until they are ready to enable withdrawals. +Validators who wish to enable withdrawals **MUST** assemble, sign, and broadcast +this message so that it is accepted on the beacon chain. Validators who do not +want to enable withdrawals and have the `BLS_WITHDRAWAL_PREFIX` version of +withdrawal credentials can delay creating this message until they are ready to +enable withdrawals. ### Changing from BLS to execution withdrawal credentials -First, the validator must construct a valid [`BLSToExecutionChange`](./beacon-chain.md#blstoexecutionchange) `message`. 
-This `message` contains the `validator_index` for the validator who wishes to change their credentials, the `from_bls_pubkey` -- the BLS public key corresponding to the **withdrawal BLS secret key** used to form the `BLS_WITHDRAWAL_PREFIX` withdrawal credential, and the `to_execution_address` specifying the execution layer address to which the validator's balances will be withdrawn. - -*Note*: The withdrawal key pair used to construct the `BLS_WITHDRAWAL_PREFIX` withdrawal credential should be distinct from the signing key pair used to operate the validator under typical circumstances. Consult your validator deposit tooling documentation for further details if you are not aware of the difference. - -*Warning*: This message can only be included on-chain once and is -irreversible so ensure the correctness and accessibility to `to_execution_address`. - -Next, the validator signs the assembled `message: BLSToExecutionChange` with the **withdrawal BLS secret key** and this -`signature` is placed into a `SignedBLSToExecutionChange` message along with the inner `BLSToExecutionChange` `message`. -Note that the `SignedBLSToExecutionChange` message should pass all of the validations in [`process_bls_to_execution_change`](./beacon-chain.md#new-process_bls_to_execution_change). - -The `SignedBLSToExecutionChange` message should then be submitted to the consensus layer network. Once included on-chain, -the withdrawal credential change takes effect. No further action is required for a validator to enter into the automated -withdrawal process. - -*Note*: A node *should* prioritize locally received `BLSToExecutionChange` operations to ensure these changes make it on-chain -through self published blocks even if the rest of the network censors. +First, the validator must construct a valid +[`BLSToExecutionChange`](./beacon-chain.md#blstoexecutionchange) `message`. This +`message` contains the `validator_index` for the validator who wishes to change +their credentials, the `from_bls_pubkey` -- the BLS public key corresponding to +the **withdrawal BLS secret key** used to form the `BLS_WITHDRAWAL_PREFIX` +withdrawal credential, and the `to_execution_address` specifying the execution +layer address to which the validator's balances will be withdrawn. + +*Note*: The withdrawal key pair used to construct the `BLS_WITHDRAWAL_PREFIX` +withdrawal credential should be distinct from the signing key pair used to +operate the validator under typical circumstances. Consult your validator +deposit tooling documentation for further details if you are not aware of the +difference. + +*Warning*: This message can only be included on-chain once and is irreversible +so ensure the correctness and accessibility to `to_execution_address`. + +Next, the validator signs the assembled `message: BLSToExecutionChange` with the +**withdrawal BLS secret key** and this `signature` is placed into a +`SignedBLSToExecutionChange` message along with the inner `BLSToExecutionChange` +`message`. Note that the `SignedBLSToExecutionChange` message should pass all of +the validations in +[`process_bls_to_execution_change`](./beacon-chain.md#new-process_bls_to_execution_change). + +The `SignedBLSToExecutionChange` message should then be submitted to the +consensus layer network. Once included on-chain, the withdrawal credential +change takes effect. No further action is required for a validator to enter into +the automated withdrawal process. 
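A non-normative sketch of assembling and signing the message described above (assuming the validator's `validator_index`, withdrawal BLS keypair `withdrawal_sk`/`withdrawal_pubkey`, chosen `to_execution_address`, and the network's `genesis_validators_root` are known):

```python
# Build the change message from the validator's known values.
message = BLSToExecutionChange(
    validator_index=validator_index,
    from_bls_pubkey=withdrawal_pubkey,
    to_execution_address=to_execution_address,
)
# Sign with the *withdrawal* BLS secret key; the domain combines
# DOMAIN_BLS_TO_EXECUTION_CHANGE with genesis_validators_root, matching the
# verification in process_bls_to_execution_change.
domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=genesis_validators_root)
signing_root = compute_signing_root(message, domain)
signed_change = SignedBLSToExecutionChange(
    message=message,
    signature=bls.Sign(withdrawal_sk, signing_root),
)
```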
+ +*Note*: A node *should* prioritize locally received `BLSToExecutionChange` +operations to ensure these changes make it on-chain through self published +blocks even if the rest of the network censors. diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 4acd774e30..b4c8c933e8 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -46,10 +46,14 @@ Deneb is a consensus-layer upgrade containing a number of features. Including: -- [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788): Beacon block root in the EVM -- [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Shard Blob Transactions scale data-availability of Ethereum in a simple, forwards-compatible manner -- [EIP-7044](https://eips.ethereum.org/EIPS/eip-7044): Perpetually Valid Signed Voluntary Exits -- [EIP-7045](https://eips.ethereum.org/EIPS/eip-7045): Increase Max Attestation Inclusion Slot +- [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788): Beacon block root in the + EVM +- [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Shard Blob Transactions + scale data-availability of Ethereum in a simple, forwards-compatible manner +- [EIP-7044](https://eips.ethereum.org/EIPS/eip-7044): Perpetually Valid Signed + Voluntary Exits +- [EIP-7045](https://eips.ethereum.org/EIPS/eip-7045): Increase Max Attestation + Inclusion Slot - [EIP-7514](https://eips.ethereum.org/EIPS/eip-7514): Add Max Epoch Churn Limit ## Custom types @@ -83,8 +87,10 @@ Deneb is a consensus-layer upgrade containing a number of features. Including: | --------------------- | ----------- | -------------------------------------------------------------------------------------------------------------- | | `MAX_BLOBS_PER_BLOCK` | `uint64(6)` | *[New in Deneb:EIP4844]* maximum number of blobs in a single block limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` | -*Note*: The blob transactions are packed into the execution payload by the EL/builder with their corresponding blobs being independently transmitted -and are limited by `MAX_BLOB_GAS_PER_BLOCK // GAS_PER_BLOB`. However the CL limit is independently defined by `MAX_BLOBS_PER_BLOCK`. +*Note*: The blob transactions are packed into the execution payload by the +EL/builder with their corresponding blobs being independently transmitted and +are limited by `MAX_BLOB_GAS_PER_BLOCK // GAS_PER_BLOB`. However the CL limit is +independently defined by `MAX_BLOBS_PER_BLOCK`. ### Validator cycle @@ -230,7 +236,11 @@ def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> Versioned #### Modified `get_attestation_participation_flag_indices` -*Note*: The function `get_attestation_participation_flag_indices` is modified to set the `TIMELY_TARGET_FLAG` for any correct target attestation, regardless of `inclusion_delay` as a baseline reward for any speed of inclusion of an attestation that contributes to justification of the contained chain for EIP-7045. +*Note*: The function `get_attestation_participation_flag_indices` is modified to +set the `TIMELY_TARGET_FLAG` for any correct target attestation, regardless of +`inclusion_delay` as a baseline reward for any speed of inclusion of an +attestation that contributes to justification of the contained chain for +EIP-7045. ```python def get_attestation_participation_flag_indices(state: BeaconState, @@ -291,7 +301,8 @@ class NewPayloadRequest(object): ##### `is_valid_block_hash` -*Note*: The function `is_valid_block_hash` is modified to include the additional `parent_beacon_block_root` parameter for EIP-4788. 
+*Note*: The function `is_valid_block_hash` is modified to include the additional +`parent_beacon_block_root` parameter for EIP-4788. ```python def is_valid_block_hash(self: ExecutionEngine, @@ -316,7 +327,8 @@ def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPay ##### Modified `notify_new_payload` -*Note*: The function `notify_new_payload` is modified to include the additional `parent_beacon_block_root` parameter for EIP-4788. +*Note*: The function `notify_new_payload` is modified to include the additional +`parent_beacon_block_root` parameter for EIP-4788. ```python def notify_new_payload(self: ExecutionEngine, @@ -361,7 +373,11 @@ def verify_and_notify_new_payload(self: ExecutionEngine, #### Modified `process_attestation` -*Note*: The function `process_attestation` is modified to expand valid slots for inclusion to those in both `target.epoch` epoch and `target.epoch + 1` epoch for EIP-7045. Additionally, it utilizes an updated version of `get_attestation_participation_flag_indices` to ensure rewards are available for the extended attestation inclusion range for EIP-7045. +*Note*: The function `process_attestation` is modified to expand valid slots for +inclusion to those in both `target.epoch` epoch and `target.epoch + 1` epoch for +EIP-7045. Additionally, it utilizes an updated version of +`get_attestation_participation_flag_indices` to ensure rewards are available for +the extended attestation inclusion range for EIP-7045. ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: @@ -403,7 +419,10 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Modified `process_execution_payload` -*Note*: The function `process_execution_payload` is modified to pass `versioned_hashes` into `execution_engine.verify_and_notify_new_payload` and to assign the new fields in `ExecutionPayloadHeader` for EIP-4844. It is also modified to pass in the parent beacon block root to support EIP-4788. +*Note*: The function `process_execution_payload` is modified to pass +`versioned_hashes` into `execution_engine.verify_and_notify_new_payload` and to +assign the new fields in `ExecutionPayloadHeader` for EIP-4844. It is also +modified to pass in the parent beacon block root to support EIP-4788. ```python def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None: @@ -455,7 +474,8 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi #### Modified `process_voluntary_exit` -*Note*: The function `process_voluntary_exit` is modified to use the fixed fork version -- `CAPELLA_FORK_VERSION` -- for EIP-7044. +*Note*: The function `process_voluntary_exit` is modified to use the fixed fork +version -- `CAPELLA_FORK_VERSION` -- for EIP-7044. ```python def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: @@ -482,7 +502,9 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu #### Registry updates -*Note*: The function `process_registry_updates` is modified to utilize `get_validator_activation_churn_limit()` to rate limit the activation queue for EIP-7514. +*Note*: The function `process_registry_updates` is modified to utilize +`get_validator_activation_churn_limit()` to rate limit the activation queue for +EIP-7514. 
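For reference, the churn helper named above caps the regular churn limit at `MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT`; a sketch consistent with its Deneb definition:

```python
def get_validator_activation_churn_limit(state: BeaconState) -> uint64:
    """
    Return the validator activation churn limit for the current epoch.
    """
    return min(MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT, get_validator_churn_limit(state))
```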
```python def process_registry_updates(state: BeaconState) -> None: diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 2f4002011e..15ff0eedea 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -38,12 +38,19 @@ class PayloadAttributes(object): *[New in Deneb:EIP4844]* -The implementation of `is_data_available` will become more sophisticated during later scaling upgrades. -Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `verify_blob_kzg_proof_batch`. +The implementation of `is_data_available` will become more sophisticated during +later scaling upgrades. Initially, verification requires every verifying actor +to retrieve all matching `Blob`s and `KZGProof`s, and validate them with +`verify_blob_kzg_proof_batch`. -The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned. +The block MUST NOT be considered valid until all valid `Blob`s have been +downloaded. Blocks that have been previously validated as available SHOULD be +considered available even if the associated `Blob`s have subsequently been +pruned. -*Note*: Extraneous or invalid Blobs (in addition to KZG expected/referenced valid blobs) received on the p2p network MUST NOT invalidate a block that is otherwise valid and available. +*Note*: Extraneous or invalid Blobs (in addition to KZG expected/referenced +valid blobs) received on the p2p network MUST NOT invalidate a block that is +otherwise valid and available. ```python def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: @@ -60,7 +67,8 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ ### `on_block` -*Note*: The only modification is the addition of the blob data availability check. +*Note*: The only modification is the addition of the blob data availability +check. ```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: diff --git a/specs/deneb/fork.md b/specs/deneb/fork.md index bdaa3deedb..40fa9855ca 100644 --- a/specs/deneb/fork.md +++ b/specs/deneb/fork.md @@ -54,7 +54,8 @@ def compute_fork_version(epoch: Epoch) -> Version: The fork is triggered at epoch `DENEB_FORK_EPOCH`. -Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since it starts with Deneb version logic. +Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since +it starts with Deneb version logic. ### Upgrading the state diff --git a/specs/deneb/light-client/fork.md b/specs/deneb/light-client/fork.md index b316666f18..ab1a28fffd 100644 --- a/specs/deneb/light-client/fork.md +++ b/specs/deneb/light-client/fork.md @@ -10,11 +10,17 @@ ## Introduction -This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to Deneb. This is necessary when processing pre-Deneb data with a post-Deneb `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. +This document describes how to upgrade existing light client objects based on +the [Capella specification](../../capella/light-client/sync-protocol.md) to +Deneb. This is necessary when processing pre-Deneb data with a post-Deneb +`LightClientStore`. 
Note that the data being exchanged over the network +protocols uses the original format. ## Upgrading light client data -A Deneb `LightClientStore` can still process earlier light client data. In order to do so, that pre-Deneb data needs to be locally upgraded to Deneb before processing. +A Deneb `LightClientStore` can still process earlier light client data. In order +to do so, that pre-Deneb data needs to be locally upgraded to Deneb before +processing. ```python def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHeader: @@ -87,7 +93,9 @@ def upgrade_lc_optimistic_update_to_deneb(pre: capella.LightClientOptimisticUpda ## Upgrading the store -Existing `LightClientStore` objects based on Capella MUST be upgraded to Deneb before Deneb based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `DENEB_FORK_EPOCH`. +Existing `LightClientStore` objects based on Capella MUST be upgraded to Deneb +before Deneb based light client data can be processed. The `LightClientStore` +upgrade MAY be performed before `DENEB_FORK_EPOCH`. ```python def upgrade_lc_store_to_deneb(pre: capella.LightClientStore) -> LightClientStore: diff --git a/specs/deneb/light-client/p2p-interface.md b/specs/deneb/light-client/p2p-interface.md index 373c5f00ab..89c7fb6d66 100644 --- a/specs/deneb/light-client/p2p-interface.md +++ b/specs/deneb/light-client/p2p-interface.md @@ -19,7 +19,9 @@ ## Networking -The [Capella light client networking specification](../../capella/light-client/p2p-interface.md) is extended to exchange [Deneb light client data](./sync-protocol.md). +The +[Capella light client networking specification](../../capella/light-client/p2p-interface.md) +is extended to exchange [Deneb light client data](./sync-protocol.md). ### The gossip domain: gossipsub diff --git a/specs/deneb/light-client/sync-protocol.md b/specs/deneb/light-client/sync-protocol.md index f3aef8504e..f6e07e44ca 100644 --- a/specs/deneb/light-client/sync-protocol.md +++ b/specs/deneb/light-client/sync-protocol.md @@ -11,7 +11,11 @@ ## Introduction -This upgrade updates light client data to include the Deneb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb. +This upgrade updates light client data to include the Deneb changes to the +[`ExecutionPayload`](../beacon-chain.md) structure. It extends the +[Capella Light Client specifications](../../capella/light-client/sync-protocol.md). +The [fork document](./fork.md) explains how to upgrade existing Capella based +deployments to Deneb. Additional documents describes the impact of the upgrade on certain roles: diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 8b938458a4..dcb8211bcb 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -38,7 +38,8 @@ This document contains the consensus-layer networking specification for Deneb. -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. +The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. ## Modifications in Deneb @@ -115,17 +116,24 @@ Some gossip meshes are upgraded in the fork of Deneb to support upgraded types. 
Topics follow the same specification as in prior upgrades. -The `beacon_block` topic is modified to also support Deneb blocks and new topics are added per table below. +The `beacon_block` topic is modified to also support Deneb blocks and new topics +are added per table below. -The `voluntary_exit` topic is implicitly modified despite the lock-in use of `CAPELLA_FORK_VERSION` for this message signature validation for EIP-7044. +The `voluntary_exit` topic is implicitly modified despite the lock-in use of +`CAPELLA_FORK_VERSION` for this message signature validation for EIP-7044. -The `beacon_aggregate_and_proof` and `beacon_attestation_{subnet_id}` topics are modified to support the gossip of attestations created in epoch `N` to be gossiped through the entire range of slots in epoch `N+1` rather than only through one epoch of slots for EIP-7045. +The `beacon_aggregate_and_proof` and `beacon_attestation_{subnet_id}` topics are +modified to support the gossip of attestations created in epoch `N` to be +gossiped through the entire range of slots in epoch `N+1` rather than only +through one epoch of slots for EIP-7045. -The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here. +The specification around the creation, validation, and dissemination of messages +has not changed from the Capella document unless explicitly noted here. The derivation of the `message-id` remains stable. -The new topics along with the type of the `data` field of a gossipsub message are given in this table: +The new topics along with the type of the `data` field of a gossipsub message +are given in this table: | Name | Message Type | | -------------------------- | ------------------------------------ | @@ -135,14 +143,16 @@ The new topics along with the type of the `data` field of a gossipsub message ar ###### `beacon_block` -The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Deneb. +The *type* of the payload of this topic changes to the (modified) +`SignedBeaconBlock` found in Deneb. *[Modified in Deneb:EIP4844]* New validation: -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` +- _[REJECT]_ The length of KZG commitments is less than or equal to the + limitation defined in Consensus Layer -- i.e. validate that + `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` ###### `beacon_aggregate_and_proof` @@ -150,18 +160,21 @@ New validation: The following validation is removed: -- _[IGNORE]_ `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` +- _[IGNORE]_ `aggregate.data.slot` is within the last + `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot). The following validations are added in its place: -- _[IGNORE]_ `aggregate.data.slot` is equal to or earlier than the `current_slot` (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. 
`aggregate.data.slot <= current_slot` - (a client MAY queue future aggregates for processing at the appropriate slot). -- _[IGNORE]_ the epoch of `aggregate.data.slot` is either the current or previous epoch - (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `compute_epoch_at_slot(aggregate.data.slot) in (get_previous_epoch(state), get_current_epoch(state))` +- _[IGNORE]_ `aggregate.data.slot` is equal to or earlier than the + `current_slot` (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `aggregate.data.slot <= current_slot` (a client MAY queue future aggregates + for processing at the appropriate slot). +- _[IGNORE]_ the epoch of `aggregate.data.slot` is either the current or + previous epoch (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `compute_epoch_at_slot(aggregate.data.slot) in (get_previous_epoch(state), get_current_epoch(state))` ##### Blob subnets @@ -169,26 +182,53 @@ The following validations are added in its place: *[New in Deneb:EIP4844]* -This topic is used to propagate blob sidecars, where each blob index maps to some `subnet_id`. - -The following validations MUST pass before forwarding the `blob_sidecar` on the network, assuming the alias `block_header = blob_sidecar.signed_block_header.message`: - -- _[REJECT]_ The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK` -- i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK`. -- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`. -- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). -- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` -- _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). -- _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. -- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). -- _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. -- _[REJECT]_ The sidecar's inclusion proof is valid as verified by `verify_blob_sidecar_inclusion_proof(blob_sidecar)`. -- _[REJECT]_ The sidecar's blob is valid as verified by `verify_blob_kzg_proof(blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`. -- _[IGNORE]_ The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, blob_sidecar.index)` with valid header signature, sidecar inclusion proof, and kzg proof. -- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_header.parent_root`/`block_header.slot`). 
- If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. - -The gossip `ForkDigestValue` is determined based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))`. +This topic is used to propagate blob sidecars, where each blob index maps to +some `subnet_id`. + +The following validations MUST pass before forwarding the `blob_sidecar` on the +network, assuming the alias +`block_header = blob_sidecar.signed_block_header.message`: + +- _[REJECT]_ The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK` -- + i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK`. +- _[REJECT]_ The sidecar is for the correct subnet -- i.e. + `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`. +- _[IGNORE]_ The sidecar is not from a future slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + `block_header.slot <= current_slot` (a client MAY queue future sidecars for + processing at the appropriate slot). +- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot + -- i.e. validate that + `block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` +- _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header`, is + valid with respect to the `block_header.proposer_index` pubkey. +- _[IGNORE]_ The sidecar's block's parent (defined by + `block_header.parent_root`) has been seen (via gossip or non-gossip sources) + (a client MAY queue sidecars for processing once the parent block is + retrieved). +- _[REJECT]_ The sidecar's block's parent (defined by + `block_header.parent_root`) passes validation. +- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent + (defined by `block_header.parent_root`). +- _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's + block -- i.e. + `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. +- _[REJECT]_ The sidecar's inclusion proof is valid as verified by + `verify_blob_sidecar_inclusion_proof(blob_sidecar)`. +- _[REJECT]_ The sidecar's blob is valid as verified by + `verify_blob_kzg_proof(blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`. +- _[IGNORE]_ The sidecar is the first sidecar for the tuple + `(block_header.slot, block_header.proposer_index, blob_sidecar.index)` with + valid header signature, sidecar inclusion proof, and kzg proof. +- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the + block's slot in the context of the current shuffling (defined by + `block_header.parent_root`/`block_header.slot`). If the `proposer_index` + cannot immediately be verified against the expected shuffling, the sidecar MAY + be queued for later processing while proposers for the block's branch are + calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. + +The gossip `ForkDigestValue` is determined based on +`compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))`. 
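As a concrete illustration of the first two `[REJECT]` conditions in the validation list above, here is a minimal, self-contained sketch; the constant values are the Deneb mainnet presets assumed for this example, and the subnet mapping follows the modulo rule from the honest validator guide:

```python
# Illustrative sketch only: stateless checks corresponding to the first two
# [REJECT] conditions for the `blob_sidecar_{subnet_id}` topic. The constant
# values are the Deneb mainnet presets assumed for this example.
MAX_BLOBS_PER_BLOCK = 6
BLOB_SIDECAR_SUBNET_COUNT = 6


def compute_subnet_for_blob_sidecar(blob_index: int) -> int:
    # Each blob index maps to a fixed subnet by simple modulo.
    return blob_index % BLOB_SIDECAR_SUBNET_COUNT


def passes_stateless_blob_checks(blob_index: int, subnet_id: int) -> bool:
    # [REJECT] The sidecar's index is within the per-block blob limit.
    if blob_index >= MAX_BLOBS_PER_BLOCK:
        return False
    # [REJECT] The sidecar arrived on the subnet derived from its index.
    return compute_subnet_for_blob_sidecar(blob_index) == subnet_id


assert passes_stateless_blob_checks(blob_index=3, subnet_id=3)
assert not passes_stateless_blob_checks(blob_index=6, subnet_id=0)
```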
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -200,13 +240,19 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: ###### Blob retrieval via local execution layer client -In addition to `BlobSidecarsByRoot` requests, recent blobs MAY be retrieved by querying the Execution Layer (i.e. via `engine_getBlobsV1`). -Honest nodes SHOULD query `engine_getBlobsV1` as soon as they receive a valid gossip block that contains data, and import the returned blobs. +In addition to `BlobSidecarsByRoot` requests, recent blobs MAY be retrieved by +querying the Execution Layer (i.e. via `engine_getBlobsV1`). Honest nodes SHOULD +query `engine_getBlobsV1` as soon as they receive a valid gossip block that +contains data, and import the returned blobs. -When clients use the local execution layer to retrieve blobs, they MUST behave as if the corresponding `blob_sidecar` had been received via gossip. In particular they MUST: +When clients use the local execution layer to retrieve blobs, they MUST behave +as if the corresponding `blob_sidecar` had been received via gossip. In +particular they MUST: -- Publish the corresponding `blob_sidecar` on the `blob_sidecar_{subnet_id}` subnet. -- Update gossip rule related data structures (i.e. update the anti-equivocation cache). +- Publish the corresponding `blob_sidecar` on the `blob_sidecar_{subnet_id}` + subnet. +- Update gossip rule related data structures (i.e. update the anti-equivocation + cache). ##### Attestation subnets @@ -216,22 +262,27 @@ When clients use the local execution layer to retrieve blobs, they MUST behave a The following validation is removed: -- _[IGNORE]_ `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` - (a client MAY queue future attestations for processing at the appropriate slot). +- _[IGNORE]_ `attestation.data.slot` is within the last + `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` + (a client MAY queue future attestations for processing at the appropriate + slot). The following validations are added in its place: -- _[IGNORE]_ `attestation.data.slot` is equal to or earlier than the `current_slot` (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `attestation.data.slot <= current_slot` - (a client MAY queue future attestation for processing at the appropriate slot). -- _[IGNORE]_ the epoch of `attestation.data.slot` is either the current or previous epoch - (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `compute_epoch_at_slot(attestation.data.slot) in (get_previous_epoch(state), get_current_epoch(state))` +- _[IGNORE]_ `attestation.data.slot` is equal to or earlier than the + `current_slot` (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `attestation.data.slot <= current_slot` (a client MAY queue future attestation + for processing at the appropriate slot). +- _[IGNORE]_ the epoch of `attestation.data.slot` is either the current or + previous epoch (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. 
+ `compute_epoch_at_slot(attestation.data.slot) in (get_previous_epoch(state), get_current_epoch(state))` #### Transitioning the gossip -See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for +See gossip transition details found in the +[Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for details on how to handle transitioning gossip topics for this upgrade. ### The Req/Resp domain @@ -242,7 +293,8 @@ details on how to handle transitioning gossip topics for this upgrade. **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` -The Deneb fork-digest is introduced to the `context` enum to specify Deneb beacon block type. +The Deneb fork-digest is introduced to the `context` enum to specify Deneb +beacon block type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -276,9 +328,9 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. -*[Modified in Deneb:EIP4844]* -Clients SHOULD include a block in the response as soon as it passes the gossip validation rules. -Clients SHOULD NOT respond with blocks that fail the beacon chain state transition. +*[Modified in Deneb:EIP4844]* Clients SHOULD include a block in the response as +soon as it passes the gossip validation rules. Clients SHOULD NOT respond with +blocks that fail the beacon chain state transition. ##### BlobSidecarsByRange v1 @@ -303,40 +355,50 @@ Response Content: ) ``` -Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice. +Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, +leading up to the current head block as selected by fork choice. -Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof`. +Before consuming the next response chunk, the response reader SHOULD verify the +blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. +the expected KZG commitments through `verify_blob_kzg_proof`. -`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window. +`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed +on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window. The request MUST be encoded as an SSZ-container. -The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. +The response MUST consist of zero or more `response_chunk`. Each _successful_ +`response_chunk` MUST contain a single `BlobSidecar` payload. -Let `blob_serve_range` be `[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`. -Clients MUST keep a record of blob sidecars seen on the epoch range `blob_serve_range` -where `current_epoch` is defined by the current wall-clock time, -and clients MUST support serving requests of blobs on this range. +Let `blob_serve_range` be +`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`. 
+Clients MUST keep a record of blob sidecars seen on the epoch range +`blob_serve_range` where `current_epoch` is defined by the current wall-clock +time, and clients MUST support serving requests of blobs on this range. -Peers that are unable to reply to blob sidecar requests within the -range `blob_serve_range` SHOULD respond with error code `3: ResourceUnavailable`. -Such peers that are unable to successfully reply to this range of requests MAY get descored -or disconnected at any time. +Peers that are unable to reply to blob sidecar requests within the range +`blob_serve_range` SHOULD respond with error code `3: ResourceUnavailable`. Such +peers that are unable to successfully reply to this range of requests MAY get +descored or disconnected at any time. -*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint -MUST backfill the local blobs database to at least the range `blob_serve_range` -to be fully compliant with `BlobSidecarsByRange` requests. +*Note*: The above requirement implies that nodes that start from a recent weak +subjectivity checkpoint MUST backfill the local blobs database to at least the +range `blob_serve_range` to be fully compliant with `BlobSidecarsByRange` +requests. -*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin -participating in the networking immediately, other peers MAY -disconnect and/or temporarily ban such an un-synced or semi-synced client. +*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can +begin participating in the networking immediately, other peers MAY disconnect +and/or temporarily ban such an un-synced or semi-synced client. -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS` sidecars. +Clients MUST respond with at least the blob sidecars of the first blob-carrying +block that exists in the range, if they have it, and no more than +`MAX_REQUEST_BLOB_SIDECARS` sidecars. -Clients MUST include all blob sidecars of each block from which they include blob sidecars. +Clients MUST include all blob sidecars of each block from which they include +blob sidecars. -The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order. +The following blob sidecars, where they exist, MUST be sent in consecutive +`(slot, index)` order. Slots that do not contain known blobs MUST be skipped, mimicking the behaviour of the `BlocksByRange` request. Only response chunks with known blobs should @@ -344,17 +406,23 @@ therefore be sent. Clients MAY limit the number of blob sidecars in the response. -The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars. +The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob +sidecars. -Clients MUST respond with blob sidecars from their view of the current fork choice --- that is, blob sidecars as included by blocks from the single chain defined by the current head. -Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake. +Clients MUST respond with blob sidecars from their view of the current fork +choice -- that is, blob sidecars as included by blocks from the single chain +defined by the current head. Of note, blocks from slots before the finalization +MUST lead to the finalized block reported in the `Status` handshake. 
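To make the retention requirement concrete, the following sketch computes the `blob_serve_range` defined above and checks whether a requested epoch falls inside it; `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` is the mainnet configuration value, while `DENEB_FORK_EPOCH` is a placeholder chosen purely for illustration:

```python
# Illustrative sketch only: the epoch window a node is expected to serve for
# BlobSidecarsByRange. MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS uses the mainnet
# configuration value; DENEB_FORK_EPOCH is a placeholder for this example.
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096
DENEB_FORK_EPOCH = 100_000  # placeholder, not the real fork epoch


def blob_serve_range(current_epoch: int) -> tuple[int, int]:
    # Inclusive epoch range [start, current_epoch] that must be served.
    start = max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)
    return start, current_epoch


def must_serve(requested_epoch: int, current_epoch: int) -> bool:
    start, end = blob_serve_range(current_epoch)
    return start <= requested_epoch <= end


# A request inside the window must be served; older requests may be answered
# with error code 3 (ResourceUnavailable) instead.
assert must_serve(requested_epoch=200_000, current_epoch=200_100)
assert not must_serve(requested_epoch=110_000, current_epoch=200_100)
```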
-Clients MUST respond with blob sidecars that are consistent from a single chain within the context of the request. +Clients MUST respond with blob sidecars that are consistent from a single chain +within the context of the request. -After the initial blob sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. +After the initial blob sidecar, clients MAY stop in the process of responding if +their fork choice changes the view of the chain in the context of the request. -For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` is used to select the fork namespace of the Response type. +For each `response_chunk`, a `ForkDigest`-context based on +`compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` +is used to select the fork namespace of the Response type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -386,29 +454,39 @@ Response Content: ) ``` -Requests sidecars by block root and index. -The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests. -It may be less in the case that the responding peer is missing blocks or sidecars. +Requests sidecars by block root and index. The response is a list of +`BlobSidecar` whose length is less than or equal to the number of requests. It +may be less in the case that the responding peer is missing blocks or sidecars. -Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof`. +Before consuming the next response chunk, the response reader SHOULD verify the +blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. +the expected KZG commitments through `verify_blob_kzg_proof`. No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time. -`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). +`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when +receiving a block with a transaction whose corresponding blob is missing). -The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. +The response MUST consist of zero or more `response_chunk`. Each _successful_ +`response_chunk` MUST contain a single `BlobSidecar` payload. -Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response. +Clients MUST support requesting sidecars since `minimum_request_epoch`, where +`minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. +If any root in the request content references a block earlier than +`minimum_request_epoch`, peers MAY respond with error code +`3: ResourceUnavailable` or not include the blob sidecar in the response. -Clients MUST respond with at least one sidecar, if they have it. 
-Clients MAY limit the number of blocks and sidecars in the response. +Clients MUST respond with at least one sidecar, if they have it. Clients MAY +limit the number of blocks and sidecars in the response. -Clients SHOULD include a sidecar in the response as soon as it passes the gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition +Clients SHOULD include a sidecar in the response as soon as it passes the gossip +validation rules. Clients SHOULD NOT respond with sidecars related to blocks +that fail gossip validation rules. Clients SHOULD NOT respond with sidecars +related to blocks that fail the beacon chain state transition -For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` is used to select the fork namespace of the Response type. +For each `response_chunk`, a `ForkDigest`-context based on +`compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` +is used to select the fork namespace of the Response type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -422,9 +500,10 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: ### Why are blobs relayed as a sidecar, separate from beacon blocks? -This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`: -with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS) -thus avoiding all blobs being downloaded by all beacon nodes on the network. +This "sidecar" design provides forward compatibility for further data increases +by black-boxing `is_data_available()`: with full sharding `is_data_available()` +can be replaced by data-availability-sampling (DAS) thus avoiding all blobs +being downloaded by all beacon nodes on the network. -Such sharding design may introduce an updated `BlobSidecar` to identify the shard, -but does not affect the `BeaconBlock` structure. +Such sharding design may introduce an updated `BlobSidecar` to identify the +shard, but does not affect the `BeaconBlock` structure. diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 5c76cd5aa5..1ab84cad9d 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -45,11 +45,18 @@ ## Introduction -This document specifies basic polynomial operations and KZG polynomial commitment operations that are essential for the implementation of the EIP-4844 feature in the Deneb specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations. +This document specifies basic polynomial operations and KZG polynomial +commitment operations that are essential for the implementation of the EIP-4844 +feature in the Deneb specification. The implementations are not optimized for +performance, but readability. All practical implementations should optimize the +polynomial operations. -Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library. +Functions flagged as "Public method" MUST be provided by the underlying KZG +library as public functions. 
All other functions are private functions used +internally by the KZG library. -Public functions MUST accept raw bytes as input and perform the required cryptographic normalization before invoking any internal functions. +Public functions MUST accept raw bytes as input and perform the required +cryptographic normalization before invoking any internal functions. ## Custom types @@ -104,10 +111,10 @@ Public functions MUST accept raw bytes as input and perform the required cryptog ### Bit-reversal permutation -All polynomials (which are always given in Lagrange form) should be interpreted as being in -bit-reversal permutation. In practice, clients can implement this by storing the lists -`KZG_SETUP_G1_LAGRANGE` and roots of unity in bit-reversal permutation, so these functions only -have to be called once at startup. +All polynomials (which are always given in Lagrange form) should be interpreted +as being in bit-reversal permutation. In practice, clients can implement this by +storing the lists `KZG_SETUP_G1_LAGRANGE` and roots of unity in bit-reversal +permutation, so these functions only have to be called once at startup. #### `is_power_of_two` @@ -147,7 +154,8 @@ def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]: #### `multi_exp` -This function performs a multi-scalar multiplication between `points` and `integers`. `points` can either be in G1 or G2. +This function performs a multi-scalar multiplication between `points` and +`integers`. `points` can either be in G1 or G2. ```python def multi_exp(points: Sequence[TPoint], diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 81f043cff0..3be170bdee 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -22,15 +22,20 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement Deneb. +This document represents the changes to be made in the code of an "honest +validator" to implement Deneb. ## Prerequisites -This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide. -All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Capella -- Honest Validator](../capella/validator.md) guide. All behaviors and +definitions defined in this document, and documents it extends, carry over +unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout. -Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this +document and used throughout. Please see related Beacon Chain doc before +continuing and use them as a reference throughout. ## Helpers @@ -75,8 +80,9 @@ def compute_signed_block_header(signed_block: SignedBeaconBlock) -> SignedBeacon #### Modified `get_payload` -Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that -has been built since the corresponding call to `notify_forkchoice_updated` method. +Given the `payload_id`, `get_payload` returns the most recent version of the +execution payload that has been built since the corresponding call to +`notify_forkchoice_updated` method. 
```python def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: @@ -97,13 +103,16 @@ All validator responsibilities remain unchanged other than those noted below. ##### ExecutionPayload -`prepare_execution_payload` is updated from the Capella specs to provide the parent beacon block root. +`prepare_execution_payload` is updated from the Capella specs to provide the +parent beacon block root. -*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied. -That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`. +*Note*: In this section, `state` is the state of the slot for the block proposal +_without_ the block yet applied. That is, `state` is the `previous_state` +processed through any empty slots up to the assigned slot using +`process_slots(previous_state, slot)`. -*Note*: The only change made to `prepare_execution_payload` is to add the parent beacon block root as an additional -parameter to the `PayloadAttributes`. +*Note*: The only change made to `prepare_execution_payload` is to add the parent +beacon block root as an additional parameter to the `PayloadAttributes`. ```python def prepare_execution_payload(state: BeaconState, @@ -134,18 +143,22 @@ def prepare_execution_payload(state: BeaconState, *[New in Deneb:EIP4844]* -1. The execution payload is obtained from the execution engine as defined above using `payload_id`. The response also includes a `blobs_bundle` entry containing the corresponding `blobs`, `commitments`, and `proofs`. +1. The execution payload is obtained from the execution engine as defined above + using `payload_id`. The response also includes a `blobs_bundle` entry + containing the corresponding `blobs`, `commitments`, and `proofs`. 2. Set `block.body.blob_kzg_commitments = commitments`. #### Constructing the `BlobSidecar`s *[New in Deneb:EIP4844]* -To construct a `BlobSidecar`, a `blob_sidecar` is defined with the necessary context for block and sidecar proposal. +To construct a `BlobSidecar`, a `blob_sidecar` is defined with the necessary +context for block and sidecar proposal. ##### Sidecar -Blobs associated with a block are packaged into sidecar objects for distribution to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic. +Blobs associated with a block are packaged into sidecar objects for distribution +to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic. Each `sidecar` is obtained from: @@ -181,9 +194,12 @@ def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT) ``` -After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested. +After publishing the peers on the network may request the sidecar through +sync-requests, or a local user may be interested. -The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable, -to ensure the data-availability of these blobs throughout the network. +The validator MUST hold on to sidecars for +`MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable, to ensure +the data-availability of these blobs throughout the network. -After `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them. 
+After `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` nodes MAY prune the sidecars +and/or stop serving them. diff --git a/specs/electra/beacon-chain.md b/specs/electra/beacon-chain.md index 8a9c146570..b2e99249dc 100644 --- a/specs/electra/beacon-chain.md +++ b/specs/electra/beacon-chain.md @@ -110,17 +110,23 @@ Electra is a consensus-layer upgrade containing a number of features. Including: -- [EIP-6110](https://eips.ethereum.org/EIPS/eip-6110): Supply validator deposits on chain -- [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002): Execution layer triggerable exits -- [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251): Increase the MAX_EFFECTIVE_BALANCE -- [EIP-7549](https://eips.ethereum.org/EIPS/eip-7549): Move committee index outside Attestation +- [EIP-6110](https://eips.ethereum.org/EIPS/eip-6110): Supply validator deposits + on chain +- [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002): Execution layer + triggerable exits +- [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251): Increase the + MAX_EFFECTIVE_BALANCE +- [EIP-7549](https://eips.ethereum.org/EIPS/eip-7549): Move committee index + outside Attestation - [EIP-7691](https://eips.ethereum.org/EIPS/eip-7691): Blob throughput increase -*Note*: This specification is built upon [Deneb](../deneb/beacon-chain.md) and is under active development. +*Note*: This specification is built upon [Deneb](../deneb/beacon-chain.md) and +is under active development. ## Constants -The following values are (non-configurable) constants used throughout the specification. +The following values are (non-configurable) constants used throughout the +specification. ### Misc @@ -417,7 +423,9 @@ class BeaconState(Container): #### Modified `compute_proposer_index` -*Note*: The function `compute_proposer_index` is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` and to use a 16-bit random value instead of an 8-bit random byte in the effective balance filter. +*Note*: The function `compute_proposer_index` is modified to use +`MAX_EFFECTIVE_BALANCE_ELECTRA` and to use a 16-bit random value instead of an +8-bit random byte in the effective balance filter. ```python def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex: @@ -443,7 +451,8 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex] #### Modified `is_eligible_for_activation_queue` -*Note*: The function `is_eligible_for_activation_queue` is modified to use `MIN_ACTIVATION_BALANCE` instead of `MAX_EFFECTIVE_BALANCE`. +*Note*: The function `is_eligible_for_activation_queue` is modified to use +`MIN_ACTIVATION_BALANCE` instead of `MAX_EFFECTIVE_BALANCE`. ```python def is_eligible_for_activation_queue(validator: Validator) -> bool: @@ -485,7 +494,9 @@ def has_execution_withdrawal_credential(validator: Validator) -> bool: #### Modified `is_fully_withdrawable_validator` -*Note*: The function `is_fully_withdrawable_validator` is modified to use `has_execution_withdrawal_credential` instead of `has_eth1_withdrawal_credential`. +*Note*: The function `is_fully_withdrawable_validator` is modified to use +`has_execution_withdrawal_credential` instead of +`has_eth1_withdrawal_credential`. 
```python def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool: @@ -501,7 +512,10 @@ def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: #### Modified `is_partially_withdrawable_validator` -*Note*: The function `is_partially_withdrawable_validator` is modified to use `get_max_effective_balance` instead of `MAX_EFFECTIVE_BALANCE` and `has_execution_withdrawal_credential` instead of `has_eth1_withdrawal_credential`. +*Note*: The function `is_partially_withdrawable_validator` is modified to use +`get_max_effective_balance` instead of `MAX_EFFECTIVE_BALANCE` and +`has_execution_withdrawal_credential` instead of +`has_eth1_withdrawal_credential`. ```python def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool: @@ -610,7 +624,9 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V #### Modified `get_next_sync_committee_indices` -*Note*: The function `get_next_sync_committee_indices` is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` and to use a 16-bit random value instead of an 8-bit random byte in the effective balance filter. +*Note*: The function `get_next_sync_committee_indices` is modified to use +`MAX_EFFECTIVE_BALANCE_ELECTRA` and to use a 16-bit random value instead of an +8-bit random byte in the effective balance filter. ```python def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: @@ -644,7 +660,8 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd #### Modified `initiate_validator_exit` -*Note*: The function `initiate_validator_exit` is modified to use the new `compute_exit_epoch_and_update_churn` function. +*Note*: The function `initiate_validator_exit` is modified to use the new +`compute_exit_epoch_and_update_churn` function. ```python def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: @@ -748,7 +765,9 @@ def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidati #### Modified `slash_validator` -*Note*: The function `slash_validator` is modified to change how the slashing penalty and proposer/whistleblower rewards are calculated in accordance with EIP7251. +*Note*: The function `slash_validator` is modified to change how the slashing +penalty and proposer/whistleblower rewards are calculated in accordance with +EIP7251. ```python def slash_validator(state: BeaconState, @@ -784,7 +803,9 @@ def slash_validator(state: BeaconState, #### Modified `process_epoch` -*Note*: The function `process_epoch` is modified to call updated functions and to process pending balance deposits and pending consolidations which are new in Electra. +*Note*: The function `process_epoch` is modified to call updated functions and +to process pending balance deposits and pending consolidations which are new in +Electra. ```python def process_epoch(state: BeaconState) -> None: @@ -806,10 +827,11 @@ def process_epoch(state: BeaconState) -> None: #### Modified `process_registry_updates` -*Note*: The function `process_registry_updates` is modified to use the updated definitions of -`initiate_validator_exit` and `is_eligible_for_activation_queue`, changes how the activation epochs -are computed for eligible validators, and processes activations in the same loop as activation -eligibility updates and ejections. 
+*Note*: The function `process_registry_updates` is modified to use the updated +definitions of `initiate_validator_exit` and `is_eligible_for_activation_queue`, +changes how the activation epochs are computed for eligible validators, and +processes activations in the same loop as activation eligibility updates and +ejections. ```python def process_registry_updates(state: BeaconState) -> None: @@ -828,7 +850,8 @@ def process_registry_updates(state: BeaconState) -> None: #### Modified `process_slashings` -*Note*: The function `process_slashings` is modified to use a new algorithm to compute correlation penalty. +*Note*: The function `process_slashings` is modified to use a new algorithm to +compute correlation penalty. ```python def process_slashings(state: BeaconState) -> None: @@ -872,9 +895,11 @@ def apply_pending_deposit(state: BeaconState, deposit: PendingDeposit) -> None: #### New `process_pending_deposits` -Iterating over `pending_deposits` queue this function runs the following checks before applying pending deposit: +Iterating over `pending_deposits` queue this function runs the following checks +before applying pending deposit: -1. All Eth1 bridge deposits are processed before the first deposit request gets processed. +1. All Eth1 bridge deposits are processed before the first deposit request gets + processed. 2. Deposit position in the queue is finalized. 3. Deposit does not exceed the `MAX_PENDING_DEPOSITS_PER_EPOCH` limit. 4. Deposit does not exceed the activation churn limit. @@ -972,7 +997,8 @@ def process_pending_consolidations(state: BeaconState) -> None: #### Modified `process_effective_balance_updates` -*Note*: The function `process_effective_balance_updates` is modified to use the new limit for the maximum effective balance. +*Note*: The function `process_effective_balance_updates` is modified to use the +new limit for the maximum effective balance. ```python def process_effective_balance_updates(state: BeaconState) -> None: @@ -1011,7 +1037,8 @@ class NewPayloadRequest(object): ##### Modified `is_valid_block_hash` -*Note*: The function `is_valid_block_hash` is modified to include the additional `execution_requests_list`. +*Note*: The function `is_valid_block_hash` is modified to include the additional +`execution_requests_list`. ```python def is_valid_block_hash(self: ExecutionEngine, @@ -1026,7 +1053,8 @@ def is_valid_block_hash(self: ExecutionEngine, ##### Modified `notify_new_payload` -*Note*: The function `notify_new_payload` is modified to include the additional `execution_requests_list`. +*Note*: The function `notify_new_payload` is modified to include the additional +`execution_requests_list`. ```python def notify_new_payload(self: ExecutionEngine, @@ -1042,8 +1070,9 @@ def notify_new_payload(self: ExecutionEngine, ##### Modified `verify_and_notify_new_payload` -*Note*: The function `verify_and_notify_new_payload` is modified to pass the additional parameter -`execution_requests_list` when calling `is_valid_block_hash` and `notify_new_payload` in Electra. +*Note*: The function `verify_and_notify_new_payload` is modified to pass the +additional parameter `execution_requests_list` when calling +`is_valid_block_hash` and `notify_new_payload` in Electra. ```python def verify_and_notify_new_payload(self: ExecutionEngine, @@ -1196,7 +1225,8 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None: ##### New `get_execution_requests_list` -*Note*: Encodes execution requests as defined by [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685). 
+*Note*: Encodes execution requests as defined by +[EIP-7685](https://eips.ethereum.org/EIPS/eip-7685). ```python def get_execution_requests_list(execution_requests: ExecutionRequests) -> Sequence[bytes]: @@ -1215,7 +1245,9 @@ def get_execution_requests_list(execution_requests: ExecutionRequests) -> Sequen ##### Modified `process_execution_payload` -*Note*: The function `process_execution_payload` is modified to pass `execution_requests` into `execution_engine.verify_and_notify_new_payload` (via the updated `NewPayloadRequest`). +*Note*: The function `process_execution_payload` is modified to pass +`execution_requests` into `execution_engine.verify_and_notify_new_payload` (via +the updated `NewPayloadRequest`). ```python def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None: @@ -1265,7 +1297,8 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi ##### Modified `process_operations` -*Note*: The function `process_operations` is modified to support all of the new functionality in Electra. +*Note*: The function `process_operations` is modified to support all of the new +functionality in Electra. ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: @@ -1351,7 +1384,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ###### Modified `get_validator_from_deposit` -*Note*: The function is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` for compounding withdrawal credential. +*Note*: The function is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` for +compounding withdrawal credential. ```python def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: @@ -1375,7 +1409,8 @@ def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes3 ###### Modified `add_validator_to_registry` -*Note*: The function `add_validator_to_registry` is modified to use the modified `get_validator_from_deposit`. +*Note*: The function `add_validator_to_registry` is modified to use the modified +`get_validator_from_deposit`. ```python def add_validator_to_registry(state: BeaconState, @@ -1439,7 +1474,8 @@ def is_valid_deposit_signature(pubkey: BLSPubkey, ###### Modified `process_deposit` -*Note*: The function `process_deposit` is modified to use the modified `apply_deposit`. +*Note*: The function `process_deposit` is modified to use the modified +`apply_deposit`. ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: @@ -1469,7 +1505,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: ###### Modified `process_voluntary_exit` -*Note*: The function `process_voluntary_exit` is modified to ensure the validator has no pending withdrawals in the queue. +*Note*: The function `process_voluntary_exit` is modified to ensure the +validator has no pending withdrawals in the queue. ```python def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: diff --git a/specs/electra/fork.md b/specs/electra/fork.md index 564a506e84..7e38e40e27 100644 --- a/specs/electra/fork.md +++ b/specs/electra/fork.md @@ -56,12 +56,14 @@ def compute_fork_version(epoch: Epoch) -> Version: The fork is triggered at epoch `ELECTRA_FORK_EPOCH`. -Note that for the pure Electra networks, we don't apply `upgrade_to_electra` since it starts with Electra version logic. 
+Note that for the pure Electra networks, we don't apply `upgrade_to_electra` +since it starts with Electra version logic. ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ELECTRA_FORK_EPOCH`, -an irregular state change is made to upgrade to Electra. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == ELECTRA_FORK_EPOCH`, an irregular state +change is made to upgrade to Electra. ```python def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState: diff --git a/specs/electra/light-client/fork.md b/specs/electra/light-client/fork.md index 7dd1807ec6..d75771e290 100644 --- a/specs/electra/light-client/fork.md +++ b/specs/electra/light-client/fork.md @@ -12,7 +12,11 @@ ## Introduction -This document describes how to upgrade existing light client objects based on the [Deneb specification](../../deneb/light-client/sync-protocol.md) to Electra. This is necessary when processing pre-Electra data with a post-Electra `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. +This document describes how to upgrade existing light client objects based on +the [Deneb specification](../../deneb/light-client/sync-protocol.md) to Electra. +This is necessary when processing pre-Electra data with a post-Electra +`LightClientStore`. Note that the data being exchanged over the network +protocols uses the original format. ## Helper functions @@ -28,7 +32,9 @@ def normalize_merkle_branch(branch: Sequence[Bytes32], ## Upgrading light client data -An Electra `LightClientStore` can still process earlier light client data. In order to do so, that pre-Electra data needs to be locally upgraded to Electra before processing. +An Electra `LightClientStore` can still process earlier light client data. In +order to do so, that pre-Electra data needs to be locally upgraded to Electra +before processing. ```python def upgrade_lc_header_to_electra(pre: deneb.LightClientHeader) -> LightClientHeader: @@ -87,7 +93,9 @@ def upgrade_lc_optimistic_update_to_electra(pre: deneb.LightClientOptimisticUpda ## Upgrading the store -Existing `LightClientStore` objects based on Deneb MUST be upgraded to Electra before Electra based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `ELECTRA_FORK_EPOCH`. +Existing `LightClientStore` objects based on Deneb MUST be upgraded to Electra +before Electra based light client data can be processed. The `LightClientStore` +upgrade MAY be performed before `ELECTRA_FORK_EPOCH`. ```python def upgrade_lc_store_to_electra(pre: deneb.LightClientStore) -> LightClientStore: diff --git a/specs/electra/light-client/p2p-interface.md b/specs/electra/light-client/p2p-interface.md index b557daba12..0f283f3690 100644 --- a/specs/electra/light-client/p2p-interface.md +++ b/specs/electra/light-client/p2p-interface.md @@ -19,7 +19,9 @@ ## Networking -The [Deneb light client networking specification](../../deneb/light-client/p2p-interface.md) is extended to exchange [Electra light client data](./sync-protocol.md). +The +[Deneb light client networking specification](../../deneb/light-client/p2p-interface.md) +is extended to exchange [Electra light client data](./sync-protocol.md). 
### The gossip domain: gossipsub diff --git a/specs/electra/light-client/sync-protocol.md b/specs/electra/light-client/sync-protocol.md index 8e48cf745d..fdc43eb83e 100644 --- a/specs/electra/light-client/sync-protocol.md +++ b/specs/electra/light-client/sync-protocol.md @@ -17,7 +17,12 @@ ## Introduction -This upgrade updates light client data to include the Electra changes to the [`ExecutionPayload`](../beacon-chain.md) structure and to the generalized indices of surrounding containers. It extends the [Deneb Light Client specifications](../../deneb/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Deneb based deployments to Electra. +This upgrade updates light client data to include the Electra changes to the +[`ExecutionPayload`](../beacon-chain.md) structure and to the generalized +indices of surrounding containers. It extends the +[Deneb Light Client specifications](../../deneb/light-client/sync-protocol.md). +The [fork document](./fork.md) explains how to upgrade existing Deneb based +deployments to Electra. Additional documents describes the impact of the upgrade on certain roles: @@ -35,7 +40,8 @@ Additional documents describes the impact of the upgrade on certain roles: ### Frozen constants -Existing `GeneralizedIndex` constants are frozen at their [Altair](../../altair/light-client/sync-protocol.md#constants) values. +Existing `GeneralizedIndex` constants are frozen at their +[Altair](../../altair/light-client/sync-protocol.md#constants) values. | Name | Value | | ------------------------------- | ----------------------------------------------------------------------------------- | diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index dfdc5885e5..b6b9f0f24c 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -26,7 +26,8 @@ This document contains the consensus-layer networking specification for Electra. -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. +The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. ## Modifications in Electra @@ -41,7 +42,8 @@ The specification of these changes continues in the same format as the network s ### The gossip domain: gossipsub -Some gossip meshes are upgraded in the fork of Electra to support upgraded types. +Some gossip meshes are upgraded in the fork of Electra to support upgraded +types. #### Topics and messages @@ -49,11 +51,14 @@ Topics follow the same specification as in prior upgrades. The `beacon_block` topic is modified to also support Electra blocks. -The `beacon_aggregate_and_proof` and `beacon_attestation_{subnet_id}` topics are modified to support the gossip of the new attestation type. +The `beacon_aggregate_and_proof` and `beacon_attestation_{subnet_id}` topics are +modified to support the gossip of the new attestation type. -The `attester_slashing` topic is modified to support the gossip of the new `AttesterSlashing` type. +The `attester_slashing` topic is modified to support the gossip of the new +`AttesterSlashing` type. -The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here. +The specification around the creation, validation, and dissemination of messages +has not changed from the Capella document unless explicitly noted here. 
The derivation of the `message-id` remains stable. @@ -63,8 +68,9 @@ The derivation of the `message-id` remains stable. *Updated validation* -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` +- _[REJECT]_ The length of KZG commitments is less than or equal to the + limitation defined in Consensus Layer -- i.e. validate that + `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` ###### `beacon_aggregate_and_proof` @@ -74,16 +80,19 @@ The following convenience variables are re-defined The following validations are added: -- [REJECT] `len(committee_indices) == 1`, where `committee_indices = get_committee_indices(aggregate)`. +- [REJECT] `len(committee_indices) == 1`, where + `committee_indices = get_committee_indices(aggregate)`. - [REJECT] `aggregate.data.index == 0` ###### `blob_sidecar_{subnet_id}` *[Modified in Electra:EIP7691]* -The existing validations all apply as given from previous forks, with the following exceptions: +The existing validations all apply as given from previous forks, with the +following exceptions: -- Uses of `MAX_BLOBS_PER_BLOCK` in existing validations are replaced with `MAX_BLOBS_PER_BLOCK_ELECTRA`. +- Uses of `MAX_BLOBS_PER_BLOCK` in existing validations are replaced with + `MAX_BLOBS_PER_BLOCK_ELECTRA`. ##### Attestation subnets @@ -103,8 +112,9 @@ The following validations are added: The following validations are removed: -- _[REJECT]_ The attestation is unaggregated -- - that is, it has exactly one participating validator (`len([bit for bit in aggregation_bits if bit]) == 1`, i.e. exactly 1 bit is set). +- _[REJECT]_ The attestation is unaggregated -- that is, it has exactly one + participating validator (`len([bit for bit in aggregation_bits if bit]) == 1`, + i.e. exactly 1 bit is set). - _[REJECT]_ The number of aggregation bits matches the committee size -- i.e. `len(aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, index))`. @@ -116,7 +126,8 @@ The following validations are removed: **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` -The Electra fork-digest is introduced to the `context` enum to specify Electra beacon block type. +The Electra fork-digest is introduced to the `context` enum to specify Electra +beacon block type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -173,7 +184,9 @@ Response Content: *Updated validation* -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` sidecars. +Clients MUST respond with at least the blob sidecars of the first blob-carrying +block that exists in the range, if they have it, and no more than +`MAX_REQUEST_BLOB_SIDECARS_ELECTRA` sidecars. ##### BlobSidecarsByRoot v1 diff --git a/specs/electra/validator.md b/specs/electra/validator.md index f0564a0f2b..e4b8b99bf7 100644 --- a/specs/electra/validator.md +++ b/specs/electra/validator.md @@ -31,15 +31,20 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement Electra. +This document represents the changes to be made in the code of an "honest +validator" to implement Electra. ## Prerequisites -This document is an extension of the [Deneb -- Honest Validator](../deneb/validator.md) guide. 
-All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Deneb -- Honest Validator](../deneb/validator.md) guide. All behaviors and +definitions defined in this document, and documents it extends, carry over +unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Electra](./beacon-chain.md) are requisite for this document and used throughout. -Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +updated Beacon Chain doc of [Electra](./beacon-chain.md) are requisite for this +document and used throughout. Please see related Beacon Chain doc before +continuing and use them as a reference throughout. ## Helpers @@ -81,8 +86,9 @@ class SignedAggregateAndProof(Container): #### Modified `get_payload` -Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that -has been built since the corresponding call to `notify_forkchoice_updated` method. +Given the `payload_id`, `get_payload` returns the most recent version of the +execution payload that has been built since the corresponding call to +`notify_forkchoice_updated` method. ```python def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: @@ -105,9 +111,12 @@ Changed the max attester slashings size to `MAX_ATTESTER_SLASHINGS_ELECTRA`. Changed the max attestations size to `MAX_ATTESTATIONS_ELECTRA`. -The network attestation aggregates contain only the assigned committee attestations. -Attestation aggregates received by the block proposer from the committee aggregators with disjoint `committee_bits` sets and equal `AttestationData` SHOULD be consolidated into a single `Attestation` object. -The proposer should run the following function to construct an on chain final aggregate from a list of network aggregates with equal `AttestationData`: +The network attestation aggregates contain only the assigned committee +attestations. Attestation aggregates received by the block proposer from the +committee aggregators with disjoint `committee_bits` sets and equal +`AttestationData` SHOULD be consolidated into a single `Attestation` object. The +proposer should run the following function to construct an on chain final +aggregate from a list of network aggregates with equal `AttestationData`: ```python def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation: @@ -135,7 +144,9 @@ def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Att #### Deposits -*[New in Electra:EIP6110]* The expected number of deposits MUST be changed from `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the result of the following function: +*[New in Electra:EIP6110]* The expected number of deposits MUST be changed from +`min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the +result of the following function: ```python def get_eth1_pending_deposit_count(state: BeaconState) -> uint64: @@ -146,7 +157,10 @@ def get_eth1_pending_deposit_count(state: BeaconState) -> uint64: return uint64(0) ``` -*Note*: Clients will be able to remove the `Eth1Data` polling mechanism in an uncoordinated fashion once the transition period is finished. 
The transition period is considered finished when a network reaches the point where `state.eth1_deposit_index == state.deposit_requests_start_index`. +*Note*: Clients will be able to remove the `Eth1Data` polling mechanism in an +uncoordinated fashion once the transition period is finished. The transition +period is considered finished when a network reaches the point where +`state.eth1_deposit_index == state.deposit_requests_start_index`. ```python def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data: @@ -184,10 +198,13 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da `prepare_execution_payload` is updated from the Deneb specs. -*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied. -That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`. +*Note*: In this section, `state` is the state of the slot for the block proposal +_without_ the block yet applied. That is, `state` is the `previous_state` +processed through any empty slots up to the assigned slot using +`process_slots(previous_state, slot)`. -*Note*: The only change to `prepare_execution_payload` is the new definition of `get_expected_withdrawals`. +*Note*: The only change to `prepare_execution_payload` is the new definition of +`get_expected_withdrawals`. ```python def prepare_execution_payload(state: BeaconState, @@ -220,8 +237,17 @@ def prepare_execution_payload(state: BeaconState, *[New in Electra]* -1. The execution payload is obtained from the execution engine as defined above using `payload_id`. The response also includes a `execution_requests` entry containing a list of bytes. Each element on the list corresponds to one SSZ list of requests as defined in [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685). The first byte of each request is used to determine the request type. Requests must be ordered by request type in ascending order. As a result, there can only be at most one instance of each request type. -2. Set `block.body.execution_requests = get_execution_requests(execution_requests)`, where: +1. The execution payload is obtained from the execution engine as defined above + using `payload_id`. The response also includes a `execution_requests` entry + containing a list of bytes. Each element on the list corresponds to one SSZ + list of requests as defined in + [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685). The first byte of each + request is used to determine the request type. Requests must be ordered by + request type in ascending order. As a result, there can only be at most one + instance of each request type. +2. Set + `block.body.execution_requests = get_execution_requests(execution_requests)`, + where: ```python def get_execution_requests(execution_requests_list: Sequence[bytes]) -> ExecutionRequests: @@ -286,11 +312,12 @@ def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: ### Construct attestation -The validator creates `attestation` as a `SingleAttestation` container -with updated field assignments: +The validator creates `attestation` as a `SingleAttestation` container with +updated field assignments: - Set `attestation_data.index = 0`. -- Set `attestation.committee_index` to the index associated with the validator's committee. +- Set `attestation.committee_index` to the index associated with the validator's + committee. 
- Set `attestation.attester_index` to the index of the validator. ## Attestation aggregation @@ -298,5 +325,9 @@ with updated field assignments: ### Construct aggregate - Set `attestation_data.index = 0`. -- Let `aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT]` of length `len(committee)`, where each bit set from each individual attestation is set to `0b1`. -- Set `attestation.committee_bits = committee_bits`, where `committee_bits` has the bit set corresponding to `committee_index` in each individual attestation. +- Let `aggregation_bits` be a + `Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT]` of length + `len(committee)`, where each bit set from each individual attestation is set + to `0b1`. +- Set `attestation.committee_bits = committee_bits`, where `committee_bits` has + the bit set corresponding to `committee_index` in each individual attestation. diff --git a/specs/electra/weak-subjectivity.md b/specs/electra/weak-subjectivity.md index f26f950374..d5e3850fde 100644 --- a/specs/electra/weak-subjectivity.md +++ b/specs/electra/weak-subjectivity.md @@ -12,13 +12,15 @@ ## Introduction -This document is an extension of the [Phase 0 -- Weak Subjectivity -Guide](../phase0/weak-subjectivity.md). All behaviors and definitions defined in this document, and -documents it extends, carry over unless explicitly noted or overridden. +This document is an extension of the +[Phase 0 -- Weak Subjectivity Guide](../phase0/weak-subjectivity.md). All +behaviors and definitions defined in this document, and documents it extends, +carry over unless explicitly noted or overridden. -This document is a guide for implementing Weak Subjectivity protections in Electra. The Weak -Subjectivity Period (WSP) calculations have changed in Electra due to EIP-7251, which increases the -maximum effective balance for validators and allows validators to consolidate. +This document is a guide for implementing Weak Subjectivity protections in +Electra. The Weak Subjectivity Period (WSP) calculations have changed in Electra +due to EIP-7251, which increases the maximum effective balance for validators +and allows validators to consolidate. ## Weak Subjectivity Period @@ -41,8 +43,8 @@ def compute_weak_subjectivity_period(state: BeaconState) -> uint64: return MIN_VALIDATOR_WITHDRAWABILITY_DELAY + epochs_for_validator_set_churn ``` -A brief reference for what these values look like in practice ([reference -script](https://gist.github.com/jtraglia/457fd9ae7d2080fef1e4034a39b80c46)): +A brief reference for what these values look like in practice +([reference script](https://gist.github.com/jtraglia/457fd9ae7d2080fef1e4034a39b80c46)): | Safety Decay | Total Active Balance (ETH) | Weak Sub. Period (Epochs) | | -----------: | -------------------------: | ------------------------: | diff --git a/specs/fulu/beacon-chain.md b/specs/fulu/beacon-chain.md index 717282a056..8830d5b795 100644 --- a/specs/fulu/beacon-chain.md +++ b/specs/fulu/beacon-chain.md @@ -6,7 +6,6 @@ - [Introduction](#introduction) - [Configuration](#configuration) - - [Execution](#execution) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Block processing](#block-processing) - [Execution payload](#execution-payload) @@ -16,16 +15,11 @@ ## Introduction -*Note*: This specification is built upon [Electra](../electra/beacon-chain.md) and is under active development. +*Note*: This specification is built upon [Electra](../electra/beacon-chain.md) +and is under active development. 
## Configuration -### Execution - -| Name | Value | Description | -| -------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------- | -| `MAX_BLOBS_PER_BLOCK_FULU` | `uint64(12)` | *[New in Fulu:EIP7594]* Maximum number of blobs in a single block limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` | - ## Beacon chain state transition function ### Block processing @@ -45,7 +39,7 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi # Verify timestamp assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) # Verify commitments are under limit - assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU # [Modified in Fulu:EIP7594] + assert len(body.blob_kzg_commitments) <= get_max_blobs_per_block(get_current_epoch(state)) # [Modified in Fulu:EIP7892] # Verify the execution payload is valid versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments] assert execution_engine.verify_and_notify_new_payload( diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 0332750f5f..96217e641a 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -10,11 +10,13 @@ - [Configuration](#configuration) - [Data size](#data-size) - [Custody setting](#custody-setting) + - [Blob schedule](#blob-schedule) - [Containers](#containers) - [`DataColumnSidecar`](#datacolumnsidecar) - [`MatrixEntry`](#matrixentry) - [Helper functions](#helper-functions) - [`get_custody_groups`](#get_custody_groups) + - [`get_max_blobs_per_block`](#get_max_blobs_per_block) - [`compute_columns_for_custody_group`](#compute_columns_for_custody_group) - [`compute_matrix`](#compute_matrix) - [`recover_matrix`](#recover_matrix) @@ -35,7 +37,8 @@ ## Constants -The following values are (non-configurable) constants used throughout the specification. +The following values are (non-configurable) constants used throughout the +specification. ### Misc @@ -63,10 +66,22 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | Description | | -------------------------- | ----- | --------------------------------------------------------------------------------- | -| `SAMPLES_PER_SLOT` | `8` | Number of `DataColumnSidecar` random samples a node queries per slot | +| `SAMPLES_PER_SLOT` | `8` | Minimum number of samples for an honest node | | `NUMBER_OF_CUSTODY_GROUPS` | `128` | Number of custody groups available for nodes to custody | | `CUSTODY_REQUIREMENT` | `4` | Minimum number of custody groups an honest node custodies and serves samples from | +### Blob schedule + +*[New in EIP7892]* This schedule defines the maximum blobs per block limit for a +given epoch. + + + +| Epoch | Max Blobs Per Block | Description | +| --------------------------- | ------------------- | -------------------------------- | +| `Epoch(269568)` **Deneb** | `uint64(6)` | The limit is set to `6` blobs | +| `Epoch(364032)` **Electra** | `uint64(9)` | The limit is raised to `9` blobs | + ### Containers #### `DataColumnSidecar` @@ -118,6 +133,20 @@ def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence return sorted(custody_groups) ``` +### `get_max_blobs_per_block` + +```python +def get_max_blobs_per_block(epoch: Epoch) -> uint64: + """ + Return the maximum number of blobs that can be included in a block for a given epoch. 
+ """ + assert len(BLOB_SCHEDULE) > 0 + for entry in sorted(BLOB_SCHEDULE, key=lambda e: e["EPOCH"], reverse=True): + if epoch >= entry["EPOCH"]: + return entry["MAX_BLOBS_PER_BLOCK"] + return min(entry["MAX_BLOBS_PER_BLOCK"] for entry in BLOB_SCHEDULE) +``` + ### `compute_columns_for_custody_group` ```python @@ -182,61 +211,127 @@ def recover_matrix(partial_matrix: Sequence[MatrixEntry], blob_count: uint64) -> ### Custody requirement -Columns are grouped into custody groups. Nodes custodying a custody group MUST custody all the columns in that group. When syncing, a node MUST backfill columns from all of its custody groups. +Columns are grouped into custody groups. Nodes custodying a custody group MUST +custody all the columns in that group. When syncing, a node MUST backfill +columns from all of its custody groups. -A node *may* choose to custody and serve more than the minimum honesty requirement. Such a node explicitly advertises a number greater than `CUSTODY_REQUIREMENT` through the peer discovery mechanism, specifically by setting a higher value in the `custody_group_count` field within its ENR. This value can be increased up to `NUMBER_OF_CUSTODY_GROUPS`, indicating a super-full node. +A node *may* choose to custody and serve more than the minimum honesty +requirement. Such a node explicitly advertises a number greater than +`CUSTODY_REQUIREMENT` through the peer discovery mechanism, specifically by +setting a higher value in the `custody_group_count` field within its ENR. This +value can be increased up to `NUMBER_OF_CUSTODY_GROUPS`, indicating a super-full +node. -A node stores the custodied columns for the duration of the pruning period and responds to peer requests for samples on those columns. +A node stores the custodied columns for the duration of the pruning period and +responds to peer requests for samples on those columns. ### Public, deterministic selection -The particular columns/groups that a node custodies are selected pseudo-randomly as a function (`get_custody_groups`) of the node-id and custody size -- importantly this function can be run by any party as the inputs are all public. +The particular columns/groups that a node custodies are selected pseudo-randomly +as a function (`get_custody_groups`) of the node-id and custody size -- +importantly this function can be run by any party as the inputs are all public. -*Note*: increasing the `custody_size` parameter for a given `node_id` extends the returned list (rather than being an entirely new shuffle) such that if `custody_size` is unknown, the default `CUSTODY_REQUIREMENT` will be correct for a subset of the node's custody. +*Note*: increasing the `custody_size` parameter for a given `node_id` extends +the returned list (rather than being an entirely new shuffle) such that if +`custody_size` is unknown, the default `CUSTODY_REQUIREMENT` will be correct for +a subset of the node's custody. ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * columns_per_group)` total columns, where `columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS`. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. 
Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of +`sampling_size = max(SAMPLES_PER_SLOT, custody_group_count)` custody groups, +selected by `groups = get_custody_groups(node_id, sampling_size)`, to which +correspond the columns +`compute_columns_for_custody_group(group) for group in groups`. The custody +groups to custody, selected by +`get_custody_groups(node_id, custody_group_count)`, are then in particular a +subset of those to sample. Sampling is considered successful if the node manages +to retrieve all selected columns. ## Extended data -In this construction, we extend the blobs using a one-dimensional erasure coding extension. The matrix comprises maximum `MAX_BLOBS_PER_BLOCK` rows and fixed `NUMBER_OF_COLUMNS` columns, with each row containing a `Blob` and its corresponding extension. `compute_matrix` demonstrates the relationship between blobs and the matrix, a potential method of storing cells/proofs. +In this construction, we extend the blobs using a one-dimensional erasure coding +extension. The matrix comprises maximum `MAX_BLOBS_PER_BLOCK` rows and fixed +`NUMBER_OF_COLUMNS` columns, with each row containing a `Blob` and its +corresponding extension. `compute_matrix` demonstrates the relationship between +blobs and the matrix, a potential method of storing cells/proofs. ## Column gossip ### Parameters -Verifiable samples from their respective column are distributed on the assigned subnet. To custody columns in a particular custody group, a node joins the respective gossipsub subnets. If a node fails to get columns on the column subnets, a node can also utilize the Req/Resp protocol to query the missing columns from other peers. +Verifiable samples from their respective column are distributed on the assigned +subnet. To custody columns in a particular custody group, a node joins the +respective gossipsub subnets. If a node fails to get columns on the column +subnets, a node can also utilize the Req/Resp protocol to query the missing +columns from other peers. ## Reconstruction and cross-seeding -If the node obtains 50%+ of all the columns, it SHOULD reconstruct the full data matrix via `recover_matrix` helper. Nodes MAY delay this reconstruction allowing time for other columns to arrive over the network. If delaying reconstruction, nodes may use a random delay in order to desynchronize reconstruction among nodes, thus reducing overall CPU load. +If the node obtains 50%+ of all the columns, it SHOULD reconstruct the full data +matrix via the `recover_matrix` helper. Nodes MAY delay this reconstruction +allowing time for other columns to arrive over the network. If delaying +reconstruction, nodes may use a random delay in order to desynchronize +reconstruction among nodes, thus reducing overall CPU load. -Once the node obtains a column through reconstruction, the node MUST expose the new column as if it had received it over the network. If the node is subscribed to the subnet corresponding to the column, it MUST send the reconstructed DataColumnSidecar to its topic mesh neighbors. If instead the node is not subscribed to the corresponding subnet, it SHOULD still expose the availability of the DataColumnSidecar as part of the gossip emission process. +Once the node obtains a column through reconstruction, the node MUST expose the +new column as if it had received it over the network. 
If the node is subscribed +to the subnet corresponding to the column, it MUST send the reconstructed +`DataColumnSidecar` to its topic mesh neighbors. If instead the node is not +subscribed to the corresponding subnet, it SHOULD still expose the availability +of the `DataColumnSidecar` as part of the gossip emission process. After +exposing the reconstructed `DataColumnSidecar` to the network, the node MAY +delete the `DataColumnSidecar` if it is not part of the node's custody +requirement. -*Note*: A node always maintains a matrix view of the rows and columns they are following, able to cross-reference and cross-seed in either direction. +*Note*: A node always maintains a matrix view of the rows and columns they are +following, able to cross-reference and cross-seed in either direction. -*Note*: There are timing considerations to analyze -- at what point does a node consider samples missing and choose to reconstruct and cross-seed. +*Note*: There are timing considerations to analyze -- at what point does a node +consider samples missing and choose to reconstruct and cross-seed. -*Note*: There may be anti-DoS and quality-of-service considerations around how to send samples and consider samples -- is each individual sample a message or are they sent in aggregate forms. +*Note*: There may be anti-DoS and quality-of-service considerations around how +to send samples and consider samples -- is each individual sample a message or +are they sent in aggregate forms. ## FAQs ### Why don't nodes custody rows? -In the one-dimension construction, a node samples the peers by requesting the whole `DataColumnSidecar`. In reconstruction, a node can reconstruct all the blobs by 50% of the columns. Note that nodes can still download the row via `blob_sidecar_{subnet_id}` subnets. +In the one-dimension construction, a node samples the peers by requesting the +whole `DataColumnSidecar`. In reconstruction, a node can reconstruct all the +blobs by 50% of the columns. Note that nodes can still download the row via +`blob_sidecar_{subnet_id}` subnets. The potential benefits of having row custody could include: -1. Allow for more "natural" distribution of data to consumers -- e.g., roll-ups -- but honestly, they won't know a priori which row their blob is going to be included in the block, so they would either need to listen to all rows or download a particular row after seeing the block. The former looks just like listening to column \[0, N) and the latter is req/resp instead of gossiping. -2. Help with some sort of distributed reconstruction. Those with full rows can compute extensions and seed missing samples to the network. This would either need to be able to send individual points on the gossip or would need some sort of req/resp faculty, potentially similar to an `IHAVEPOINTBITFIELD` and `IWANTSAMPLE`. +1. Allow for more "natural" distribution of data to consumers -- e.g., roll-ups + -- but honestly, they won't know a priori which row their blob is going to be + included in the block, so they would either need to listen to all rows or + download a particular row after seeing the block. The former looks just like + listening to column \[0, N) and the latter is req/resp instead of gossiping. +2. Help with some sort of distributed reconstruction. Those with full rows can + compute extensions and seed missing samples to the network. 
This would either + need to be able to send individual points on the gossip or would need some + sort of req/resp faculty, potentially similar to an `IHAVEPOINTBITFIELD` and + `IWANTSAMPLE`. -However, for simplicity, we don't assign row custody assignments to nodes in the current design. +However, for simplicity, we don't assign row custody assignments to nodes in the +current design. ### Why don't we rotate custody over time? -To start with a simple, stable backbone, for now, we don't shuffle the custody assignments via the deterministic custody selection helper `get_custody_groups`. However, staggered rotation likely needs to happen on the order of the pruning period to ensure subnets can be utilized for recovery. For example, introducing an `epoch` argument allows the function to maintain stability over many epochs. +To start with a simple, stable backbone, for now, we don't shuffle the custody +assignments via the deterministic custody selection helper `get_custody_groups`. +However, staggered rotation likely needs to happen on the order of the pruning +period to ensure subnets can be utilized for recovery. For example, introducing +an `epoch` argument allows the function to maintain stability over many epochs. ### Does having a lot of column subnets make the network unstable? -No, the number of subnets doesn't really matter. What matters to the network stability is the number of nodes and the churn rate in the network. If the number of the nodes is too low, it's likely to have a network partition when some nodes are down. For the churn rate, if the churn rate is high, we even need to have a higher number of nodes, since nodes are likely to be turned off more often. +No, the number of subnets doesn't really matter. What matters to the network +stability is the number of nodes and the churn rate in the network. If the +number of the nodes is too low, it's likely to have a network partition when +some nodes are down. For the churn rate, if the churn rate is high, we even need +to have a higher number of nodes, since nodes are likely to be turned off more +often. diff --git a/specs/fulu/fork-choice.md b/specs/fulu/fork-choice.md index 6076de3ed7..482b0f732e 100644 --- a/specs/fulu/fork-choice.md +++ b/specs/fulu/fork-choice.md @@ -39,7 +39,8 @@ def is_data_available(beacon_block_root: Root) -> bool: ### Modified `on_block` -*Note*: The only modification is that `is_data_available` does not take `blob_kzg_commitments` as input. +*Note*: The only modification is that `is_data_available` does not take +`blob_kzg_commitments` as input. ```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: diff --git a/specs/fulu/fork.md b/specs/fulu/fork.md index 6162138651..103ea8eeaf 100644 --- a/specs/fulu/fork.md +++ b/specs/fulu/fork.md @@ -60,12 +60,14 @@ def compute_fork_version(epoch: Epoch) -> Version: The fork is triggered at epoch `FULU_FORK_EPOCH`. -Note that for the pure Fulu networks, we don't apply `upgrade_to_fulu` since it starts with Fulu version logic. +Note that for the pure Fulu networks, we don't apply `upgrade_to_fulu` since it +starts with Fulu version logic. ### Upgrading the state -If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == FULU_FORK_EPOCH`, -an irregular state change is made to upgrade to Fulu. +If `state.slot % SLOTS_PER_EPOCH == 0` and +`compute_epoch_at_slot(state.slot) == FULU_FORK_EPOCH`, an irregular state +change is made to upgrade to Fulu. 
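As a concrete illustration of when this irregular state change fires (an editorial sketch of client wiring, not part of this diff), the condition above would guard the call to the `upgrade_to_fulu` helper shown next:

```python
# Hypothetical wiring: `state` is the (Electra) state after empty-slot
# processing up to the current slot; all other names are defined by the specs.
if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == FULU_FORK_EPOCH:
    state = upgrade_to_fulu(state)
```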
```python def upgrade_to_fulu(pre: electra.BeaconState) -> BeaconState: diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index 44002a04e6..480c23cdaf 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -41,7 +41,8 @@ This document contains the consensus-layer networking specification for Fulu. -The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. +The specification of these changes continues in the same format as the network +specifications of previous upgrades, and assumes them as pre-requisite. ## Modifications in Fulu @@ -140,7 +141,8 @@ def compute_subnet_for_data_column_sidecar(column_index: ColumnIndex) -> SubnetI ### MetaData -The `MetaData` stored locally by clients is updated with an additional field to communicate the custody subnet count. +The `MetaData` stored locally by clients is updated with an additional field to +communicate the custody subnet count. ``` ( @@ -153,8 +155,10 @@ The `MetaData` stored locally by clients is updated with an additional field to Where -- `seq_number`, `attnets`, and `syncnets` have the same meaning defined in the Altair document. -- `custody_group_count` represents the node's custody group count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. +- `seq_number`, `attnets`, and `syncnets` have the same meaning defined in the + Altair document. +- `custody_group_count` represents the node's custody group count. Clients MAY + reject peers with a value less than `CUSTODY_REQUIREMENT`. ### The gossip domain: gossipsub @@ -168,8 +172,9 @@ Some gossip meshes are upgraded in the Fulu fork to support upgraded types. *Updated validation* -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` +- _[REJECT]_ The length of KZG commitments is less than or equal to the + limitation defined in Consensus Layer -- i.e. validate that + `len(signed_beacon_block.message.body.blob_kzg_commitments) <= get_max_blobs_per_block(get_current_epoch(state))` ##### Blob subnets @@ -179,39 +184,81 @@ Some gossip meshes are upgraded in the Fulu fork to support upgraded types. ###### `data_column_sidecar_{subnet_id}` -This topic is used to propagate column sidecars, where each column maps to some `subnet_id`. +This topic is used to propagate column sidecars, where each column maps to some +`subnet_id`. The *type* of the payload of this topic is `DataColumnSidecar`. -The following validations MUST pass before forwarding the `sidecar: DataColumnSidecar` on the network, assuming the alias `block_header = sidecar.signed_block_header.message`: - -- _[REJECT]_ The sidecar is valid as verified by `verify_data_column_sidecar(sidecar)`. -- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id`. -- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). -- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` -- _[REJECT]_ The proposer signature of `sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). -- _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. -- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). -- _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. -- _[REJECT]_ The sidecar's `kzg_commitments` field inclusion proof is valid as verified by `verify_data_column_sidecar_inclusion_proof(sidecar)`. -- _[REJECT]_ The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`. -- _[IGNORE]_ The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)` with valid header signature, sidecar inclusion proof, and kzg proof. -- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_header.parent_root`/`block_header.slot`). - If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. - -*Note*: In the `verify_data_column_sidecar_inclusion_proof(sidecar)` check, for all the sidecars of the same block, it verifies against the same set of `kzg_commitments` of the given beacon block. Client can choose to cache the result of the arguments tuple `(sidecar.kzg_commitments, sidecar.kzg_commitments_inclusion_proof, sidecar.signed_block_header)`. +The following validations MUST pass before forwarding the +`sidecar: DataColumnSidecar` on the network, assuming the alias +`block_header = sidecar.signed_block_header.message`: + +- _[REJECT]_ The sidecar is valid as verified by + `verify_data_column_sidecar(sidecar)`. +- _[REJECT]_ The sidecar is for the correct subnet -- i.e. + `compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id`. +- _[IGNORE]_ The sidecar is not from a future slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + `block_header.slot <= current_slot` (a client MAY queue future sidecars for + processing at the appropriate slot). +- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot + -- i.e. validate that + `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` +- _[REJECT]_ The proposer signature of `sidecar.signed_block_header`, is valid + with respect to the `block_header.proposer_index` pubkey. +- _[IGNORE]_ The sidecar's block's parent (defined by + `block_header.parent_root`) has been seen (via gossip or non-gossip sources) + (a client MAY queue sidecars for processing once the parent block is + retrieved). +- _[REJECT]_ The sidecar's block's parent (defined by + `block_header.parent_root`) passes validation. 
+- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent + (defined by `block_header.parent_root`). +- _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's + block -- i.e. + `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. +- _[REJECT]_ The sidecar's `kzg_commitments` field inclusion proof is valid as + verified by `verify_data_column_sidecar_inclusion_proof(sidecar)`. +- _[REJECT]_ The sidecar's column data is valid as verified by + `verify_data_column_sidecar_kzg_proofs(sidecar)`. +- _[IGNORE]_ The sidecar is the first sidecar for the tuple + `(block_header.slot, block_header.proposer_index, sidecar.index)` with valid + header signature, sidecar inclusion proof, and kzg proof. +- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the + block's slot in the context of the current shuffling (defined by + `block_header.parent_root`/`block_header.slot`). If the `proposer_index` + cannot immediately be verified against the expected shuffling, the sidecar MAY + be queued for later processing while proposers for the block's branch are + calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. + +*Note*: In the `verify_data_column_sidecar_inclusion_proof(sidecar)` check, for +all the sidecars of the same block, it verifies against the same set of +`kzg_commitments` of the given beacon block. Client can choose to cache the +result of the arguments tuple +`(sidecar.kzg_commitments, sidecar.kzg_commitments_inclusion_proof, sidecar.signed_block_header)`. ###### Distributed Blob Publishing using blobs retrieved from local execution layer client -Honest nodes SHOULD query `engine_getBlobsV2` as soon as they receive a valid `beacon_block` or `data_column_sidecar` from gossip. If ALL blobs matching `kzg_commitments` are retrieved, they should convert the response to data columns, and import the result. +Honest nodes SHOULD query `engine_getBlobsV2` as soon as they receive a valid +`beacon_block` or `data_column_sidecar` from gossip. If ALL blobs matching +`kzg_commitments` are retrieved, they should convert the response to data +columns, and import the result. -Implementers are encouraged to leverage this method to increase the likelihood of incorporating and attesting to the last block when its proposer is not able to publish data columns on time. +Implementers are encouraged to leverage this method to increase the likelihood +of incorporating and attesting to the last block when its proposer is not able +to publish data columns on time. -When clients use the local execution layer to retrieve blobs, they SHOULD skip verification of those blobs. When subsequently importing the blobs as data columns, they MUST behave as if the `data_column_sidecar` had been received via gossip. In particular, clients MUST: +When clients use the local execution layer to retrieve blobs, they SHOULD skip +verification of those blobs. When subsequently importing the blobs as data +columns, they MUST behave as if the `data_column_sidecar` had been received via +gossip. In particular, clients MUST: -- Publish the corresponding `data_column_sidecar` on the `data_column_sidecar_{subnet_id}` topic **if and only if** they are **subscribed** to it, either due to custody requirements or additional sampling. -- Update gossip rule related data structures (i.e. update the anti-equivocation cache). 
+- Publish the corresponding `data_column_sidecar` on the + `data_column_sidecar_{subnet_id}` topic **if and only if** they are + **subscribed** to it, either due to custody requirements or additional + sampling. +- Update gossip rule related data structures (i.e. update the anti-equivocation + cache). ### The Req/Resp domain @@ -225,9 +272,13 @@ Deprecated as of `FULU_FORK_EPOCH + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`. During the deprecation transition period: -- Clients MUST respond with a list of blob sidecars from the range `[min(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, FULU_FORK_EPOCH), FULU_FORK_EPOCH)` if the requested range includes any epochs in this interval. -- Clients MAY respond with an empty list if the requested range lies entirely at or after `FULU_FORK_EPOCH`. -- Clients SHOULD NOT penalize peers for requesting blob sidecars from `FULU_FORK_EPOCH`. +- Clients MUST respond with a list of blob sidecars from the range + `[min(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, FULU_FORK_EPOCH), FULU_FORK_EPOCH)` + if the requested range includes any epochs in this interval. +- Clients MAY respond with an empty list if the requested range lies entirely at + or after `FULU_FORK_EPOCH`. +- Clients SHOULD NOT penalize peers for requesting blob sidecars from + `FULU_FORK_EPOCH`. ##### BlobSidecarsByRoot v1 @@ -237,15 +288,21 @@ Deprecated as of `FULU_FORK_EPOCH + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`. During the deprecation transition period: -- Clients MUST respond with blob sidecars corresponding to block roots from the range `[min(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, FULU_FORK_EPOCH), FULU_FORK_EPOCH)` if any of the requested roots correspond to blocks in this interval. -- Clients MAY respond with an empty list if all requested roots correspond to blocks at or after `FULU_FORK_EPOCH`. -- Clients SHOULD NOT penalize peers for requesting blob sidecars from `FULU_FORK_EPOCH`. +- Clients MUST respond with blob sidecars corresponding to block roots from the + range + `[min(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, FULU_FORK_EPOCH), FULU_FORK_EPOCH)` + if any of the requested roots correspond to blocks in this interval. +- Clients MAY respond with an empty list if all requested roots correspond to + blocks at or after `FULU_FORK_EPOCH`. +- Clients SHOULD NOT penalize peers for requesting blob sidecars from + `FULU_FORK_EPOCH`. ##### DataColumnSidecarsByRange v1 **Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_range/1/` -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: +The `` field is calculated as +`context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -271,56 +328,77 @@ Response Content: ) ``` -Requests data column sidecars in the slot range `[start_slot, start_slot + count)` of the given `columns`, leading up to the current head block as selected by fork choice. +Requests data column sidecars in the slot range +`[start_slot, start_slot + count)` of the given `columns`, leading up to the +current head block as selected by fork choice. -Before consuming the next response chunk, the response reader SHOULD verify the data column sidecar is well-formatted through `verify_data_column_sidecar`, has valid inclusion proof through `verify_data_column_sidecar_inclusion_proof`, and is correct w.r.t. the expected KZG commitments through `verify_data_column_sidecar_kzg_proofs`. 
+Before consuming the next response chunk, the response reader SHOULD verify the +data column sidecar is well-formatted through `verify_data_column_sidecar`, has +valid inclusion proof through `verify_data_column_sidecar_inclusion_proof`, and +is correct w.r.t. the expected KZG commitments through +`verify_data_column_sidecar_kzg_proofs`. -`DataColumnSidecarsByRange` is primarily used to sync data columns that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` window. +`DataColumnSidecarsByRange` is primarily used to sync data columns that may have +been missed on gossip and to sync within the +`MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` window. The request MUST be encoded as an SSZ-container. -The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `DataColumnSidecar` payload. +The response MUST consist of zero or more `response_chunk`. Each _successful_ +`response_chunk` MUST contain a single `DataColumnSidecar` payload. -Let `data_column_serve_range` be `[max(current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH), current_epoch]`. -Clients MUST keep a record of data column sidecars seen on the epoch range `data_column_serve_range` -where `current_epoch` is defined by the current wall-clock time, -and clients MUST support serving requests of data columns on this range. +Let `data_column_serve_range` be +`[max(current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH), current_epoch]`. +Clients MUST keep a record of data column sidecars seen on the epoch range +`data_column_serve_range` where `current_epoch` is defined by the current +wall-clock time, and clients MUST support serving requests of data columns on +this range. -Peers that are unable to reply to data column sidecar requests within the -range `data_column_serve_range` SHOULD respond with error code `3: ResourceUnavailable`. -Such peers that are unable to successfully reply to this range of requests MAY get descored -or disconnected at any time. +Peers that are unable to reply to data column sidecar requests within the range +`data_column_serve_range` SHOULD respond with error code +`3: ResourceUnavailable`. Such peers that are unable to successfully reply to +this range of requests MAY get descored or disconnected at any time. -*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint -MUST backfill the local data columns database to at least the range `data_column_serve_range` -to be fully compliant with `DataColumnSidecarsByRange` requests. +*Note*: The above requirement implies that nodes that start from a recent weak +subjectivity checkpoint MUST backfill the local data columns database to at +least the range `data_column_serve_range` to be fully compliant with +`DataColumnSidecarsByRange` requests. -*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin -participating in the networking immediately, other peers MAY -disconnect and/or temporarily ban such an un-synced or semi-synced client. +*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can +begin participating in the networking immediately, other peers MAY disconnect +and/or temporarily ban such an un-synced or semi-synced client. 
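A minimal sketch of the serving window described above (an editorial illustration, not part of this diff; the helper name is hypothetical, `current_epoch` is derived from the wall-clock time, and the constants are those defined by the Fulu configuration):

```python
def is_within_data_column_serve_range(requested_epoch: Epoch, current_epoch: Epoch) -> bool:
    # Mirrors the prose: the inclusive range
    # [max(current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH), current_epoch].
    start_epoch = max(current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH)
    return start_epoch <= requested_epoch <= current_epoch
```

A client could apply the same check both when serving requests and when deciding how far to backfill data columns after a checkpoint sync.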
-Clients MUST respond with at least the data column sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_DATA_COLUMN_SIDECARS` sidecars. +Clients MUST respond with at least the data column sidecars of the first +blob-carrying block that exists in the range, if they have it, and no more than +`MAX_REQUEST_DATA_COLUMN_SIDECARS` sidecars. -Clients MUST include all data column sidecars of each block from which they include data column sidecars. +Clients MUST include all data column sidecars of each block from which they +include data column sidecars. -The following data column sidecars, where they exist, MUST be sent in `(slot, column_index)` order. +The following data column sidecars, where they exist, MUST be sent in +`(slot, column_index)` order. -Slots that do not contain known data columns MUST be skipped, mimicking the behaviour -of the `BlocksByRange` request. Only response chunks with known data columns should -therefore be sent. +Slots that do not contain known data columns MUST be skipped, mimicking the +behaviour of the `BlocksByRange` request. Only response chunks with known data +columns should therefore be sent. Clients MAY limit the number of data column sidecars in the response. -The response MUST contain no more than `count * NUMBER_OF_COLUMNS` data column sidecars. +The response MUST contain no more than `count * NUMBER_OF_COLUMNS` data column +sidecars. -Clients MUST respond with data columns sidecars from their view of the current fork choice --- that is, data column sidecars as included by blocks from the single chain defined by the current head. -Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake. +Clients MUST respond with data columns sidecars from their view of the current +fork choice -- that is, data column sidecars as included by blocks from the +single chain defined by the current head. Of note, blocks from slots before the +finalization MUST lead to the finalized block reported in the `Status` +handshake. -Clients MUST respond with data column sidecars that are consistent from a single chain within the context of the request. +Clients MUST respond with data column sidecars that are consistent from a single +chain within the context of the request. -After the initial data column sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. +After the initial data column sidecar, clients MAY stop in the process of +responding if their fork choice changes the view of the chain in the context of +the request. ##### DataColumnSidecarsByRoot v1 @@ -328,7 +406,8 @@ After the initial data column sidecar, clients MAY stop in the process of respon *[New in Fulu:EIP7594]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: +The `` field is calculated as +`context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -352,25 +431,36 @@ Response Content: ) ``` -Requests data column sidecars by block root and column indices. -The response is a list of `DataColumnSidecar` whose length is less than or equal to `requested_columns_count`, where `requested_columns_count = sum(len(r.columns) for r in request)`. -It may be less in the case that the responding peer is missing blocks or sidecars. +Requests data column sidecars by block root and column indices. 
The response is +a list of `DataColumnSidecar` whose length is less than or equal to +`requested_columns_count`, where +`requested_columns_count = sum(len(r.columns) for r in request)`. It may be less +in the case that the responding peer is missing blocks or sidecars. -Before consuming the next response chunk, the response reader SHOULD verify the data column sidecar is well-formatted through `verify_data_column_sidecar`, has valid inclusion proof through `verify_data_column_sidecar_inclusion_proof`, and is correct w.r.t. the expected KZG commitments through `verify_data_column_sidecar_kzg_proofs`. +Before consuming the next response chunk, the response reader SHOULD verify the +data column sidecar is well-formatted through `verify_data_column_sidecar`, has +valid inclusion proof through `verify_data_column_sidecar_inclusion_proof`, and +is correct w.r.t. the expected KZG commitments through +`verify_data_column_sidecar_kzg_proofs`. No more than `MAX_REQUEST_DATA_COLUMN_SIDECARS` may be requested at a time. -The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `DataColumnSidecar` payload. +The response MUST consist of zero or more `response_chunk`. Each _successful_ +`response_chunk` MUST contain a single `DataColumnSidecar` payload. -Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the data column sidecar in the response. +Clients MUST support requesting sidecars since `minimum_request_epoch`, where +`minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH)`. +If any root in the request content references a block earlier than +`minimum_request_epoch`, peers MAY respond with error code +`3: ResourceUnavailable` or not include the data column sidecar in the response. -Clients MUST respond with at least one sidecar, if they have it. -Clients MAY limit the number of blocks and sidecars in the response. +Clients MUST respond with at least one sidecar, if they have it. Clients MAY +limit the number of blocks and sidecars in the response. -Clients SHOULD include a sidecar in the response as soon as it passes the gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition +Clients SHOULD include a sidecar in the response as soon as it passes the gossip +validation rules. Clients SHOULD NOT respond with sidecars related to blocks +that fail gossip validation rules. Clients SHOULD NOT respond with sidecars +related to blocks that fail the beacon chain state transition ##### GetMetaData v3 @@ -386,7 +476,9 @@ Response Content: ) ``` -Requests the MetaData of a peer, using the new `MetaData` definition given above that is extended from Altair. Other conditions for the `GetMetaData` protocol are unchanged from the Altair p2p networking document. +Requests the MetaData of a peer, using the new `MetaData` definition given above +that is extended from Altair. Other conditions for the `GetMetaData` protocol +are unchanged from the Altair p2p networking document. 
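One possible client policy for the custody advertisement (an editorial sketch, not part of this diff; the rejection rule is a MAY, and the helper name is hypothetical):

```python
def is_acceptable_custody_group_count(custody_group_count: uint64) -> bool:
    # Accept advertisements between the protocol minimum and the total number
    # of custody groups (the latter indicating a super-full node).
    return CUSTODY_REQUIREMENT <= custody_group_count <= NUMBER_OF_CUSTODY_GROUPS
```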
### The discovery domain: discv5 @@ -394,7 +486,8 @@ Requests the MetaData of a peer, using the new `MetaData` definition given above ##### Custody group count -A new field is added to the ENR under the key `cgc` to facilitate custody data column discovery. +A new field is added to the ENR under the key `cgc` to facilitate custody data +column discovery. | Key | Value | | ----- | ----------------------------------------------------------------------------------------------------------------- | diff --git a/specs/fulu/peer-sampling.md b/specs/fulu/peer-sampling.md index 881c763c6c..2e3f6e8c5c 100644 --- a/specs/fulu/peer-sampling.md +++ b/specs/fulu/peer-sampling.md @@ -18,7 +18,12 @@ ## Introduction -The purpose of this document is to complement [Fulu -- Data Availability Sampling Core](das-core.md) by specifying the peer sampling functionality of the full PeerDAS protocol. Initially, this functionality may not be implemented by all clients. In such cases, it is replaced by [subnet sampling](das-core.md#subnet-sampling), which is an extension of the custody component of the protocol. +The purpose of this document is to complement +[Fulu -- Data Availability Sampling Core](das-core.md) by specifying the peer +sampling functionality of the full PeerDAS protocol. Initially, this +functionality may not be implemented by all clients. In such cases, it is +replaced by [subnet sampling](das-core.md#subnet-sampling), which is an +extension of the custody component of the protocol. ## Helper functions @@ -66,23 +71,52 @@ def get_extended_sample_count(allowed_failures: uint64) -> uint64: ## Peer discovery -At each slot, a node needs to be able to readily sample from *any* set of columns. To this end, a node SHOULD find and maintain a set of diverse and reliable peers that can regularly satisfy their sampling demands. - -A node runs a background peer discovery process, maintaining peers of various custody distributions (both `custody_size` and column assignments). The combination of advertised `custody_size` size and public node-id make this readily and publicly accessible. The peer set should cover the whole column space, with some redundancy. The number of peers, or at least the redundancy implied by the custody distributions over the peer set, should be tuned upward in the event of failed sampling. - -*Note*: while high-capacity and super-full nodes are high value with respect to satisfying sampling requirements, a node SHOULD maintain a distribution across node capacities as to not centralize the p2p graph too much (in the extreme becomes hub/spoke) and to distribute sampling load better across all nodes. - -*Note*: A DHT-based peer discovery mechanism is expected to be utilized in the above. The beacon-chain network currently utilizes discv5 in a similar method as described for finding peers of particular distributions of attestation subnets. Additional peer discovery methods are valuable to integrate (e.g., latent peer discovery via libp2p gossipsub) to add a defense in breadth against one of the discovery methods being attacked. +At each slot, a node needs to be able to readily sample from *any* set of +columns. To this end, a node SHOULD find and maintain a set of diverse and +reliable peers that can regularly satisfy their sampling demands. + +A node runs a background peer discovery process, maintaining peers of various +custody distributions (both `custody_size` and column assignments). 
The +combination of advertised `custody_size` size and public node-id make this +readily and publicly accessible. The peer set should cover the whole column +space, with some redundancy. The number of peers, or at least the redundancy +implied by the custody distributions over the peer set, should be tuned upward +in the event of failed sampling. + +*Note*: while high-capacity and super-full nodes are high value with respect to +satisfying sampling requirements, a node SHOULD maintain a distribution across +node capacities as to not centralize the p2p graph too much (in the extreme +becomes hub/spoke) and to distribute sampling load better across all nodes. + +*Note*: A DHT-based peer discovery mechanism is expected to be utilized in the +above. The beacon-chain network currently utilizes discv5 in a similar method as +described for finding peers of particular distributions of attestation subnets. +Additional peer discovery methods are valuable to integrate (e.g., latent peer +discovery via libp2p gossipsub) to add a defense in breadth against one of the +discovery methods being attacked. ## Peer sampling ### Sample selection -At each slot, a node SHOULD select at least `SAMPLES_PER_SLOT` column IDs for sampling. It is recommended to use uniform random selection without replacement based on local randomness. Sampling is considered successful if the node manages to retrieve all selected columns. - -Alternatively, a node MAY use a method that selects more than `SAMPLES_PER_SLOT` columns while allowing some missing, respecting the same target false positive threshold (the probability of successful sampling of an unavailable block) as dictated by the `SAMPLES_PER_SLOT` parameter. If using uniform random selection without replacement, a node can use the `get_extended_sample_count(allowed_failures) -> sample_count` helper function to determine the sample count (number of unique column IDs) for any selected number of allowed failures. Sampling is then considered successful if any `sample_count - allowed_failures` columns are retrieved successfully. - -For reference, the table below shows the number of samples and the number of allowed missing columns assuming `NUMBER_OF_COLUMNS = 128` and `SAMPLES_PER_SLOT = 16`. +At each slot, a node SHOULD select at least `SAMPLES_PER_SLOT` column IDs for +sampling. It is recommended to use uniform random selection without replacement +based on local randomness. Sampling is considered successful if the node manages +to retrieve all selected columns. + +Alternatively, a node MAY use a method that selects more than `SAMPLES_PER_SLOT` +columns while allowing some missing, respecting the same target false positive +threshold (the probability of successful sampling of an unavailable block) as +dictated by the `SAMPLES_PER_SLOT` parameter. If using uniform random selection +without replacement, a node can use the +`get_extended_sample_count(allowed_failures) -> sample_count` helper function to +determine the sample count (number of unique column IDs) for any selected number +of allowed failures. Sampling is then considered successful if any +`sample_count - allowed_failures` columns are retrieved successfully. + +For reference, the table below shows the number of samples and the number of +allowed missing columns assuming `NUMBER_OF_COLUMNS = 128` and +`SAMPLES_PER_SLOT = 16`. 
| Allowed missing | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | | --------------- | --- | --- | --- | --- | --- | --- | --- | --- | --- | @@ -90,24 +124,46 @@ For reference, the table below shows the number of samples and the number of all ### Sample queries -A node SHOULD maintain a diverse set of peers for each column and each slot by verifying responsiveness to sample queries. +A node SHOULD maintain a diverse set of peers for each column and each slot by +verifying responsiveness to sample queries. -A node SHOULD query for samples from selected peers via `DataColumnSidecarsByRoot` request. A node utilizes `get_custody_groups` helper to determine which peer(s) it could request from, identifying a list of candidate peers for each selected column. +A node SHOULD query for samples from selected peers via +`DataColumnSidecarsByRoot` request. A node utilizes `get_custody_groups` helper +to determine which peer(s) it could request from, identifying a list of +candidate peers for each selected column. -If more than one candidate peer is found for a given column, a node SHOULD randomize its peer selection to distribute sample query load in the network. Nodes MAY use peer scoring to tune this selection (for example, by using weighted selection or by using a cut-off threshold). If possible, it is also recommended to avoid requesting many columns from the same peer in order to avoid relying on and exposing the sample selection to a single peer. +If more than one candidate peer is found for a given column, a node SHOULD +randomize its peer selection to distribute sample query load in the network. +Nodes MAY use peer scoring to tune this selection (for example, by using +weighted selection or by using a cut-off threshold). If possible, it is also +recommended to avoid requesting many columns from the same peer in order to +avoid relying on and exposing the sample selection to a single peer. -If a node already has a column because of custody, it is not required to send out queries for that column. +If a node already has a column because of custody, it is not required to send +out queries for that column. -If a node has enough good/honest peers across all columns, and the data is being made available, the above procedure has a high chance of success. +If a node has enough good/honest peers across all columns, and the data is being +made available, the above procedure has a high chance of success. ## Peer scoring -Due to the deterministic custody functions, a node knows exactly what a peer should be able to respond to. In the event that a peer does not respond to samples of their custodied rows/columns, a node may downscore or disconnect from a peer. +Due to the deterministic custody functions, a node knows exactly what a peer +should be able to respond to. In the event that a peer does not respond to +samples of their custodied rows/columns, a node may downscore or disconnect from +a peer. ## DAS providers -A DAS provider is a consistently-available-for-DAS-queries, super-full (or high capacity) node. To the p2p, these look just like other nodes but with high advertised capacity, and they should generally be able to be latently found via normal discovery. +A DAS provider is a consistently-available-for-DAS-queries, super-full (or high +capacity) node. To the p2p, these look just like other nodes but with high +advertised capacity, and they should generally be able to be latently found via +normal discovery. 
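As a rough illustration of the sampling flow above (not spec code), the sketch
below selects columns by uniform random selection without replacement and
applies the `sample_count - allowed_failures` success criterion.
`NUMBER_OF_COLUMNS` is hard-coded to the value assumed by the table, and
`get_extended_sample_count` is the helper defined earlier in this document,
passed in by the caller.

```python
import random

NUMBER_OF_COLUMNS = 128  # value assumed by the table above


def select_sample_columns(allowed_failures, get_extended_sample_count):
    # Number of unique column IDs to query for the chosen failure tolerance;
    # with allowed_failures == 0 this is simply SAMPLES_PER_SLOT.
    sample_count = get_extended_sample_count(allowed_failures)
    # Uniform random selection without replacement, based on local randomness.
    return set(random.sample(range(NUMBER_OF_COLUMNS), sample_count))


def sampling_successful(selected, retrieved, allowed_failures):
    # Sampling succeeds if at most `allowed_failures` selected columns are missing.
    return len(selected - retrieved) <= allowed_failures
```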
-DAS providers can also be found out-of-band and configured into a node to connect to directly and prioritize. Nodes can add some set of these to their local configuration for persistent connection to bolster their DAS quality of service. +DAS providers can also be found out-of-band and configured into a node to +connect to directly and prioritize. Nodes can add some set of these to their +local configuration for persistent connection to bolster their DAS quality of +service. -Such direct peering utilizes a feature supported out of the box today on all nodes and can complement (and reduce attackability and increase quality-of-service) alternative peer discovery mechanisms. +Such direct peering utilizes a feature supported out of the box today on all +nodes and can complement (and reduce attackability and increase +quality-of-service) alternative peer discovery mechanisms. diff --git a/specs/fulu/polynomial-commitments-sampling.md b/specs/fulu/polynomial-commitments-sampling.md index c262c45b10..d852ab8512 100644 --- a/specs/fulu/polynomial-commitments-sampling.md +++ b/specs/fulu/polynomial-commitments-sampling.md @@ -49,13 +49,20 @@ ## Introduction -This document extends [polynomial-commitments.md](../deneb/polynomial-commitments.md) with the functions required for data availability sampling (DAS). It is not part of the core Deneb spec but an extension that can be optionally implemented to allow nodes to reduce their load using DAS. +This document extends +[polynomial-commitments.md](../deneb/polynomial-commitments.md) with the +functions required for data availability sampling (DAS). It is not part of the +core Deneb spec but an extension that can be optionally implemented to allow +nodes to reduce their load using DAS. ## Public Methods -For any KZG library extended to support DAS, functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library. +For any KZG library extended to support DAS, functions flagged as "Public +method" MUST be provided by the underlying KZG library as public functions. All +other functions are private functions used internally by the KZG library. -Public functions MUST accept raw bytes as input and perform the required cryptographic normalization before invoking any internal functions. +Public functions MUST accept raw bytes as input and perform the required +cryptographic normalization before invoking any internal functions. The following is a list of the public methods: @@ -83,7 +90,9 @@ The following is a list of the public methods: ### Blob -Cells are the smallest unit of blob data that can come with their own KZG proofs. Samples can be constructed from one or several cells (e.g. an individual cell or line). +Cells are the smallest unit of blob data that can come with their own KZG +proofs. Samples can be constructed from one or several cells (e.g. an individual +cell or line). | Name | Value | Description | | ---------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | diff --git a/specs/fulu/validator.md b/specs/fulu/validator.md index 8c88934ad0..878fad67e2 100644 --- a/specs/fulu/validator.md +++ b/specs/fulu/validator.md @@ -26,16 +26,19 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement Fulu. 
+This document represents the changes to be made in the code of an "honest +validator" to implement Fulu. ## Prerequisites -This document is an extension of the [Electra -- Honest Validator](../electra/validator.md) guide. -All behaviors and definitions defined in this document, and documents it extends, carry over unless -explicitly noted or overridden. +This document is an extension of the +[Electra -- Honest Validator](../electra/validator.md) guide. All behaviors and +definitions defined in this document, and documents it extends, carry over +unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in [Fulu -- Beacon -Chain](./beacon-chain.md) and [Fulu -- Data Availability Sampling Core] are requisite for this +All terminology, constants, functions, and protocol mechanics defined in +[Fulu -- Beacon Chain](./beacon-chain.md) and +[Fulu -- Data Availability Sampling Core](./das-core.md) are requisite for this document and used throughout. ## Configuration @@ -53,7 +56,8 @@ document and used throughout. *[Modified in Fulu:EIP7594]* -The `BlobsBundle` object is modified to include cell KZG proofs instead of blob KZG proofs. +The `BlobsBundle` object is modified to include cell KZG proofs instead of blob +KZG proofs. ```python @dataclass @@ -68,7 +72,8 @@ class BlobsBundle(object): *[Modified in Fulu:EIP7594]* -The `GetPayloadResponse` object is modified to use the updated `BlobsBundle` object. +The `GetPayloadResponse` object is modified to use the updated `BlobsBundle` +object. ```python @dataclass @@ -84,7 +89,8 @@ class GetPayloadResponse(object): #### Modified `get_payload` -The `get_payload` method is modified to return the updated `GetPayloadResponse` object. +The `get_payload` method is modified to return the updated `GetPayloadResponse` +object. ```python def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: @@ -101,15 +107,16 @@ def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadRespo *[New in Fulu:EIP7594]* -A node with validators attached downloads and custodies a higher minimum of custody groups per slot, -determined by `get_validators_custody_requirement(state, validator_indices)`. Here, `state` is the -latest finalized `BeaconState` and `validator_indices` is the list of indices corresponding to validators -attached to the node. Any node with at least one validator attached, and with the sum of the -effective balances of all attached validators being `total_node_balance`, downloads and custodies -`total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP` custody groups per slot, with a minimum -of `VALIDATOR_CUSTODY_REQUIREMENT` and of course a maximum of `NUMBER_OF_CUSTODY_GROUPS`. The node -SHOULD dynamically adjust its custody groups following any changes to the effective balances of -attached validators. +A node with validators attached downloads and custodies a higher minimum of +custody groups per slot, determined by +`get_validators_custody_requirement(state, validator_indices)`. Here, `state` is +the latest finalized `BeaconState` and `validator_indices` is the list of +indices corresponding to validators attached to the node. 
Any node with at least +one validator attached, and with the sum of the effective balances of all +attached validators being `total_node_balance`, downloads and custodies +`total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP` custody groups per +slot, with a minimum of `VALIDATOR_CUSTODY_REQUIREMENT` and of course a maximum +of `NUMBER_OF_CUSTODY_GROUPS`. ```python def get_validators_custody_requirement(state: BeaconState, validator_indices: Sequence[ValidatorIndex]) -> uint64: @@ -118,18 +125,28 @@ def get_validators_custody_requirement(state: BeaconState, validator_indices: Se return min(max(count, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS) ``` -This higher custody is advertised in the node's Metadata by setting a higher `custody_group_count` -and in the node's ENR by setting a higher `custody_group_count`. As with the regular custody -requirement, a node with validators MAY still choose to custody, advertise and serve more than -this minimum. As with the regular custody requirement, a node MUST backfill columns when syncing. In -addition, when the validator custody requirement increases, due to an increase in the total -effective balance of the attached validators, a node MUST backfill columns from the new custody -groups. However, a node MAY wait to advertise a higher custody in its Metadata and ENR until -backfilling is complete. - -*Note*: The node SHOULD manage validator custody (and any changes during its lifetime) without any -input from the user, for example by using existing signals about validator metadata to compute the -required custody. +This higher custody is advertised in the node's Metadata by setting a higher +`custody_group_count` and in the node's ENR by setting a higher +`custody_group_count`. As with the regular custody requirement, a node with +validators MAY still choose to custody, advertise and serve more than this +minimum. As with the regular custody requirement, a node MUST backfill columns +when syncing. + +A node SHOULD dynamically adjust its custody groups (without any input from the +user) following any changes to the total effective balances of attached +validators. If the node's custody requirements are increased, it SHOULD only +advertise the updated `custody_group_count` after +`MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs. The node SHOULD NOT backfill +custody groups as a result of this change. After +`MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs, the node will be able to respond +to any `DataColumnSidecar` request within the retention period. If the node's +custody requirements are decreased, the node MAY update its +`custody_group_count` to reflect this. However, it SHOULD NOT prune existing +custody columns until after the usual period of +`MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs. Nodes SHOULD be able to +appropriately handle multiple changes to custody requirements within the same +retention period (e.g., an increase in one epoch and a decrease in the next +epoch). ### Block and sidecar proposal @@ -137,21 +154,26 @@ required custody. *[New in Fulu:EIP7594]* -For a block proposal, blobs associated with a block are packaged into many `DataColumnSidecar` -objects for distribution to the associated sidecar topic, the `data_column_sidecar_{subnet_id}` -pubsub topic. A `DataColumnSidecar` can be viewed as vertical slice of all blobs stacked on top of -each other, with extra fields for the necessary context. 
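Returning to the validator custody rule above, the snippet below is a small
numeric illustration (not part of the spec) of the clamping performed by
`get_validators_custody_requirement`; the constant values are assumptions for
the example only.

```python
# Assumed example values; the real constants live in the Fulu configuration.
VALIDATOR_CUSTODY_REQUIREMENT = 8
NUMBER_OF_CUSTODY_GROUPS = 128
BALANCE_PER_ADDITIONAL_CUSTODY_GROUP = 32 * 10**9  # Gwei


def custody_requirement_for_balance(total_node_balance):
    # Mirrors the clamping in get_validators_custody_requirement.
    count = total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
    return min(max(count, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS)


# Four 32 ETH validators stay at the VALIDATOR_CUSTODY_REQUIREMENT floor,
# while a very large operator is capped at NUMBER_OF_CUSTODY_GROUPS.
assert custody_requirement_for_balance(4 * 32 * 10**9) == 8
assert custody_requirement_for_balance(500 * 32 * 10**9) == 128
```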
+For a block proposal, blobs associated with a block are packaged into many
+`DataColumnSidecar` objects for distribution to the associated sidecar topic,
+the `data_column_sidecar_{subnet_id}` pubsub topic. A `DataColumnSidecar` can be
+viewed as a vertical slice of all blobs stacked on top of each other, with extra
+fields for the necessary context.

 ##### `get_data_column_sidecars`

-The sequence of sidecars associated with a block and can be obtained by first computing
-`cells_and_kzg_proofs = [compute_cells_and_kzg_proofs(blob) for blob in blobs]` and then calling
+The sequence of sidecars associated with a block can be obtained by first
+computing
+`cells_and_kzg_proofs = [compute_cells_and_kzg_proofs(blob) for blob in blobs]`
+and then calling
 `get_data_column_sidecars_from_block(signed_block, cells_and_kzg_proofs)`.

-Moreover, the full sequence of sidecars can also be computed from `cells_and_kzg_proofs` and any single
-`sidecar`, by calling `get_data_column_sidecars_from_column_sidecar(sidecar, cells_and_kzg_proofs)`.
-This can be used in distributed blob publishing, to reconstruct all sidecars from any sidecar received
-on the wire, assuming all cells and kzg proofs could be retrieved from the local execution layer client.
+Moreover, the full sequence of sidecars can also be computed from
+`cells_and_kzg_proofs` and any single `sidecar`, by calling
+`get_data_column_sidecars_from_column_sidecar(sidecar, cells_and_kzg_proofs)`.
+This can be used in distributed blob publishing, to reconstruct all sidecars
+from any sidecar received on the wire, assuming all cells and kzg proofs could
+be retrieved from the local execution layer client.

 ```python
 def get_data_column_sidecars(
@@ -245,13 +267,15 @@ The `subnet_id` for the `data_column_sidecar` is calculated with:

 - Let `column_index = data_column_sidecar.index`.
 - Let `subnet_id = compute_subnet_for_data_column_sidecar(column_index)`.

-After publishing all columns to their respective subnets, peers on the network may request the
-sidecar through sync-requests, or a local user may be interested.
+After publishing all columns to their respective subnets, peers on the network
+may request the sidecar through sync-requests, or a local user may be
+interested.

 #### Sidecar retention

-The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` epochs and
-serve when capable, to ensure the data-availability of these blobs throughout the network.
+The validator MUST hold on to sidecars for
+`MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` epochs and serve when capable, to
+ensure the data-availability of these blobs throughout the network.

-After `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop
-serving them.
+After `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS`, nodes MAY prune the
+sidecars and/or stop serving them.
diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md
index 5658ed2c23..01a2979337 100644
--- a/specs/phase0/beacon-chain.md
+++ b/specs/phase0/beacon-chain.md
@@ -139,8 +139,17 @@

 This document represents the specification for Phase 0 -- The Beacon Chain.

-At the core of Ethereum proof-of-stake is a system chain called the "beacon chain". The beacon chain stores and manages the registry of validators. In the initial deployment phases of proof-of-stake, the only mechanism to become a validator is to make a one-way ETH transaction to a deposit contract on the Ethereum proof-of-work chain. 
Activation as a validator happens when deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior. -The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block (in a later upgrade) and proof-of-stake votes for a beacon block (Phase 0). +At the core of Ethereum proof-of-stake is a system chain called the "beacon +chain". The beacon chain stores and manages the registry of validators. In the +initial deployment phases of proof-of-stake, the only mechanism to become a +validator is to make a one-way ETH transaction to a deposit contract on the +Ethereum proof-of-work chain. Activation as a validator happens when deposit +receipts are processed by the beacon chain, the activation balance is reached, +and a queuing process is completed. Exit is either voluntary or done forcibly as +a penalty for misbehavior. The primary source of load on the beacon chain is +"attestations". Attestations are simultaneously availability votes for a shard +block (in a later upgrade) and proof-of-stake votes for a beacon block (Phase +0). ## Notation @@ -168,7 +177,8 @@ We define the following Python custom types for type hinting and readability: ## Constants -The following values are (non-configurable) constants used throughout the specification. +The following values are (non-configurable) constants used throughout the +specification. ### Misc @@ -204,13 +214,19 @@ The following values are (non-configurable) constants used throughout the specif | `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` | | `DOMAIN_APPLICATION_MASK` | `DomainType('0x00000001')` | -*Note*: `DOMAIN_APPLICATION_MASK` reserves the rest of the bitspace in `DomainType` for application usage. This means for some `DomainType` `DOMAIN_SOME_APPLICATION`, `DOMAIN_SOME_APPLICATION & DOMAIN_APPLICATION_MASK` **MUST** be non-zero. This expression for any other `DomainType` in the consensus specs **MUST** be zero. +*Note*: `DOMAIN_APPLICATION_MASK` reserves the rest of the bitspace in +`DomainType` for application usage. This means for some `DomainType` +`DOMAIN_SOME_APPLICATION`, `DOMAIN_SOME_APPLICATION & DOMAIN_APPLICATION_MASK` +**MUST** be non-zero. This expression for any other `DomainType` in the +consensus specs **MUST** be zero. ## Preset -*Note*: The below configuration is bundled as a preset: a bundle of configuration variables which are expected to differ -between different modes of operation, e.g. testing, but not generally between different networks. -Additional preset configurations can be found in the [`configs`](../../configs) directory. +*Note*: The below configuration is bundled as a preset: a bundle of +configuration variables which are expected to differ between different modes of +operation, e.g. testing, but not generally between different networks. +Additional preset configurations can be found in the [`configs`](../../configs) +directory. 
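To make the `DOMAIN_APPLICATION_MASK` rule above concrete, here is a small
stand-alone check (illustrative only): consensus-spec domain types AND to zero
with the mask, while application domain types must not. The third value is a
made-up application domain.

```python
DOMAIN_APPLICATION_MASK = bytes.fromhex("00000001")


def masked(domain_type: bytes) -> bytes:
    # Bytewise AND of a 4-byte DomainType with the application mask.
    return bytes(a & b for a, b in zip(domain_type, DOMAIN_APPLICATION_MASK))


assert masked(bytes.fromhex("00000000")) == bytes(4)  # DOMAIN_BEACON_PROPOSER
assert masked(bytes.fromhex("06000000")) == bytes(4)  # DOMAIN_AGGREGATE_AND_PROOF
assert masked(bytes.fromhex("0a000001")) != bytes(4)  # hypothetical application domain
```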
### Misc @@ -224,7 +240,13 @@ Additional preset configurations can be found in the [`configs`](../../configs) | `HYSTERESIS_DOWNWARD_MULTIPLIER` | `uint64(1)` | | `HYSTERESIS_UPWARD_MULTIPLIER` | `uint64(5)` | -- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) +- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds + [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); + with sufficient active validators (at least + `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures + committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness + with a Verifiable Delay Function (VDF) will improve committee robustness and + lower the safe minimum committee size.) ### Gwei values @@ -266,9 +288,21 @@ Additional preset configurations can be found in the [`configs`](../../configs) | `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**7)` (= 128) | | `PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(1)` | -- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak. - -- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin. +- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where + `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it + takes the inactivity penalty to reduce the balance of non-participating + validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by + offline validators after `n` epochs is about + `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after + `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly + `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. + Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes + to provide a faster recovery in the event of an inactivity leak. + +- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet + launch, resulting in one-third of the minimum accountable safety margin in the + event of a finality attack. After Phase 0 mainnet stabilizes, this value will + be upgraded to `3` to provide the maximal minimum accountable safety margin. 
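The inactivity-leak arithmetic above can be checked numerically. The snippet
below (illustrative only) reproduces the `(1 - 1/Q)**(Q/2) ~= 1/sqrt(e)`
approximation using the quoted constants.

```python
import math

INVERSE_SQRT_E_DROP_TIME = 2**13                           # epochs, about 36 days
INACTIVITY_PENALTY_QUOTIENT = INVERSE_SQRT_E_DROP_TIME**2  # = 2**26

n = INVERSE_SQRT_E_DROP_TIME
retained = (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (n**2 / 2)

# (1 - 1/Q)**(Q/2) approaches exp(-1/2) = 1/sqrt(e) ~= 0.6065 for large Q.
assert abs(retained - 1 / math.sqrt(math.e)) < 1e-6
```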
### Max operations per block @@ -282,9 +316,10 @@ Additional preset configurations can be found in the [`configs`](../../configs) ## Configuration -*Note*: The default mainnet configuration values are included here for illustrative purposes. -Defaults for this more dynamic type of configuration are available with the presets in the [`configs`](../../configs) directory. -Testnets and other types of chain instances may use a different configuration. +*Note*: The default mainnet configuration values are included here for +illustrative purposes. Defaults for this more dynamic type of configuration are +available with the presets in the [`configs`](../../configs) directory. Testnets +and other types of chain instances may use a different configuration. ### Genesis settings @@ -315,9 +350,11 @@ Testnets and other types of chain instances may use a different configuration. ## Containers -The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers. +The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) +containers. -*Note*: The definitions are ordered topologically to facilitate execution of the spec. +*Note*: The definitions are ordered topologically to facilitate execution of the +spec. *Note*: Fields missing in container instantiations default to their zero value. @@ -586,7 +623,8 @@ class SignedBeaconBlockHeader(Container): ## Helper functions -*Note*: The definitions below are for specification purposes and are not necessarily optimal implementations. +*Note*: The definitions below are for specification purposes and are not +necessarily optimal implementations. ### Math @@ -619,7 +657,9 @@ def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: #### `uint_to_bytes` -`def uint_to_bytes(n: uint) -> bytes` is a function for serializing the `uint` type object to bytes in `ENDIANNESS`-endian. The expected length of the output is the byte-length of the `uint` type. +`def uint_to_bytes(n: uint) -> bytes` is a function for serializing the `uint` +type object to bytes in `ENDIANNESS`-endian. The expected length of the output +is the byte-length of the `uint` type. #### `bytes_to_uint64` @@ -649,11 +689,16 @@ def saturating_sub(a: int, b: int) -> int: #### `hash_tree_root` -`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../../ssz/simple-serialize.md#merkleization). +`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing +objects into a single root by utilizing a hash tree structure, as defined in the +[SSZ spec](../../ssz/simple-serialize.md#merkleization). 
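As a quick illustration of the `uint_to_bytes` behaviour described above, a
sketch specialized to `uint64` and assuming the spec's little-endian
`ENDIANNESS` constant:

```python
ENDIANNESS = "little"  # the spec's serialization endianness


def uint64_to_bytes(n: int) -> bytes:
    # Output length equals the byte-length of the type: 8 bytes for uint64.
    return n.to_bytes(8, ENDIANNESS)


assert uint64_to_bytes(1) == b"\x01\x00\x00\x00\x00\x00\x00\x00"
assert int.from_bytes(uint64_to_bytes(0x0102), ENDIANNESS) == 0x0102
```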
#### BLS signatures -The [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) with ciphersuite `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` defines the following functions: +The +[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) +with ciphersuite `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` defines the +following functions: - `def Sign(privkey: int, message: Bytes) -> BLSSignature` - `def Verify(pubkey: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool` @@ -1177,13 +1222,23 @@ def slash_validator(state: BeaconState, ## Genesis -Before the Ethereum beacon chain genesis has been triggered, and for every Ethereum proof-of-work block, let `candidate_state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)` where: +Before the Ethereum beacon chain genesis has been triggered, and for every +Ethereum proof-of-work block, let +`candidate_state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)` +where: - `eth1_block_hash` is the hash of the Ethereum proof-of-work block - `eth1_timestamp` is the Unix timestamp corresponding to `eth1_block_hash` -- `deposits` is the sequence of all deposits, ordered chronologically, up to (and including) the block with hash `eth1_block_hash` +- `deposits` is the sequence of all deposits, ordered chronologically, up to + (and including) the block with hash `eth1_block_hash` -Proof-of-work blocks must only be considered once they are at least `SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` seconds old (i.e. `eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time`). Due to this constraint, if `GENESIS_DELAY < SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE`, then the `genesis_time` can happen before the time/state is first known. Values should be configured to avoid this case. +Proof-of-work blocks must only be considered once they are at least +`SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` seconds old (i.e. +`eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time`). +Due to this constraint, if +`GENESIS_DELAY < SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE`, then the +`genesis_time` can happen before the time/state is first known. Values should be +configured to avoid this case. ```python def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, @@ -1223,11 +1278,13 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, return state ``` -*Note*: The ETH1 block with `eth1_timestamp` meeting the minimum genesis active validator count criteria can also occur before `MIN_GENESIS_TIME`. +*Note*: The ETH1 block with `eth1_timestamp` meeting the minimum genesis active +validator count criteria can also occur before `MIN_GENESIS_TIME`. ### Genesis state -Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time. +Let `genesis_state = candidate_state` whenever +`is_valid_genesis_state(candidate_state) is True` for the first time. ```python def is_valid_genesis_state(state: BeaconState) -> bool: @@ -1244,7 +1301,11 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`. ## Beacon chain state transition function -The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. 
a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid. +The post-state corresponding to a pre-state `state` and a signed block +`signed_block` is defined as `state_transition(state, signed_block)`. State +transitions that trigger an unhandled exception (e.g. a failed `assert` or an +out-of-range list access) are considered invalid. State transitions that cause a +`uint64` overflow or underflow are also considered invalid. ```python def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: diff --git a/specs/phase0/deposit-contract.md b/specs/phase0/deposit-contract.md index 0d47c03a17..c6fe54ed98 100644 --- a/specs/phase0/deposit-contract.md +++ b/specs/phase0/deposit-contract.md @@ -16,11 +16,13 @@ ## Introduction -This document represents the specification for the beacon chain deposit contract, part of Phase 0. +This document represents the specification for the beacon chain deposit +contract, part of Phase 0. ## Constants -The following values are (non-configurable) constants used throughout the specification. +The following values are (non-configurable) constants used throughout the +specification. | Name | Value | | ----------------------------- | ------------- | @@ -28,9 +30,11 @@ The following values are (non-configurable) constants used throughout the specif ## Configuration -*Note*: The default mainnet configuration values are included here for spec-design purposes. -The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory. -These configurations are updated for releases and may be out of sync during `dev` changes. +*Note*: The default mainnet configuration values are included here for +spec-design purposes. The different configurations for mainnet, testnets, and +YAML-based testing can be found in the +[`configs/constant_presets`](../../configs) directory. These configurations are +updated for releases and may be out of sync during `dev` changes. | Name | Value | | -------------------------- | -------------------------------------------- | @@ -40,34 +44,62 @@ These configurations are updated for releases and may be out of sync during `dev ## Staking deposit contract -The initial deployment phases of Ethereum proof-of-stake are implemented without consensus changes to the existing Ethereum proof-of-work chain. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to the Ethereum proof-of-work chain defined by the [chain-id](https://eips.ethereum.org/EIPS/eip-155) -- `DEPOSIT_CHAIN_ID` -- and the network-id -- `DEPOSIT_NETWORK_ID` -- for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the execution-layer in a followup fork after Bellatrix upgrade. +The initial deployment phases of Ethereum proof-of-stake are implemented without +consensus changes to the existing Ethereum proof-of-work chain. A deposit +contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to the Ethereum +proof-of-work chain defined by the +[chain-id](https://eips.ethereum.org/EIPS/eip-155) -- `DEPOSIT_CHAIN_ID` -- and +the network-id -- `DEPOSIT_NETWORK_ID` -- for deposits of ETH to the beacon +chain. Validator balances will be withdrawable to the execution-layer in a +followup fork after Bellatrix upgrade. 
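Looking back at the state transition validity rule in `beacon-chain.md` above: a
minimal sketch (not spec code) of how a client might apply it is to run
`state_transition` on a copy of the pre-state and map any unhandled exception to
rejection. The wrapper name and the broad exception handling are assumptions for
illustration.

```python
from copy import deepcopy


def try_state_transition(state, signed_block, state_transition):
    """Return the post-state, or None if the block is invalid."""
    post_state = deepcopy(state)
    try:
        state_transition(post_state, signed_block)
    except Exception:
        # Any unhandled exception (failed assert, out-of-range list access,
        # uint64 overflow or underflow) renders the transition invalid,
        # and the pre-state is left untouched.
        return None
    return post_state
```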
-_Note_: See [here](https://chainid.network/) for a comprehensive list of public Ethereum chain chain-id's and network-id's. +_Note_: See [here](https://chainid.network/) for a comprehensive list of public +Ethereum chain chain-id's and network-id's. ### `deposit` function -The deposit contract has a public `deposit` function to make deposits. It takes as arguments `bytes calldata pubkey, bytes calldata withdrawal_credentials, bytes calldata signature, bytes32 deposit_data_root`. The first three arguments populate a [`DepositData`](./beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformed calldata. +The deposit contract has a public `deposit` function to make deposits. It takes +as arguments +`bytes calldata pubkey, bytes calldata withdrawal_credentials, bytes calldata signature, bytes32 deposit_data_root`. +The first three arguments populate a +[`DepositData`](./beacon-chain.md#depositdata) object, and `deposit_data_root` +is the expected `DepositData` root as a protection against malformed calldata. #### Deposit amount -The amount of ETH (rounded down to the closest Gwei) sent to the deposit contract is the deposit amount, which must be of size at least `MIN_DEPOSIT_AMOUNT` Gwei. Note that ETH consumed by the deposit contract is no longer usable on the execution-layer until sometime after Bellatrix upgrade. +The amount of ETH (rounded down to the closest Gwei) sent to the deposit +contract is the deposit amount, which must be of size at least +`MIN_DEPOSIT_AMOUNT` Gwei. Note that ETH consumed by the deposit contract is no +longer usable on the execution-layer until sometime after Bellatrix upgrade. #### Withdrawal credentials -One of the `DepositData` fields is `withdrawal_credentials` which constrains validator withdrawals. -The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes. -The withdrawal prefixes currently supported are `BLS_WITHDRAWAL_PREFIX` and `ETH1_ADDRESS_WITHDRAWAL_PREFIX`. -Read more in the [validator guide](./validator.md#withdrawal-credentials). +One of the `DepositData` fields is `withdrawal_credentials` which constrains +validator withdrawals. The first byte of this 32-byte field is a withdrawal +prefix which defines the semantics of the remaining 31 bytes. The withdrawal +prefixes currently supported are `BLS_WITHDRAWAL_PREFIX` and +`ETH1_ADDRESS_WITHDRAWAL_PREFIX`. Read more in the +[validator guide](./validator.md#withdrawal-credentials). -*Note*: The deposit contract does not validate the `withdrawal_credentials` field. -Support for new withdrawal prefixes can be added without modifying the deposit contract. +*Note*: The deposit contract does not validate the `withdrawal_credentials` +field. Support for new withdrawal prefixes can be added without modifying the +deposit contract. #### `DepositEvent` log -Every deposit emits a `DepositEvent` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract. +Every deposit emits a `DepositEvent` log for consumption by the beacon chain. +The deposit contract does little validation, pushing most of the validator +onboarding logic to the beacon chain. In particular, the proof of possession (a +BLS12-381 signature) is not verified by the deposit contract. 
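As a sketch of how a caller might assemble the arguments for the `deposit`
function described above (illustrative only; it assumes a built pyspec package
for the `DepositData` container and `hash_tree_root`, and that the BLS signature
has already been produced):

```python
# Assumes the executable pyspec is installed, e.g. after `make pyspec`.
from eth2spec.phase0 import mainnet as spec


def build_deposit_arguments(pubkey, withdrawal_credentials, signature, amount_gwei):
    deposit_data = spec.DepositData(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
        amount=amount_gwei,  # the ETH sent, rounded down to the closest Gwei
        signature=signature,
    )
    # The contract recomputes this root from the first three arguments and
    # rejects the call if it does not match, guarding against malformed calldata.
    deposit_data_root = spec.hash_tree_root(deposit_data)
    return pubkey, withdrawal_credentials, signature, deposit_data_root
```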
## Solidity code -The deposit contract source code, written in Solidity, is available [here](../../solidity_deposit_contract/deposit_contract.sol). +The deposit contract source code, written in Solidity, is available +[here](../../solidity_deposit_contract/deposit_contract.sol). -*Note*: To save on gas, the deposit contract uses a progressive Merkle root calculation algorithm that requires only O(log(n)) storage. See [here](https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py) for a Python implementation, and [here](https://github.com/runtimeverification/verified-smart-contracts/blob/master/deposit/formal-incremental-merkle-tree-algorithm.pdf) for a formal correctness proof. +*Note*: To save on gas, the deposit contract uses a progressive Merkle root +calculation algorithm that requires only O(log(n)) storage. See +[here](https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py) +for a Python implementation, and +[here](https://github.com/runtimeverification/verified-smart-contracts/blob/master/deposit/formal-incremental-merkle-tree-algorithm.pdf) +for a formal correctness proof. diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 30c29224ec..47360b03ca 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -53,26 +53,47 @@ ## Introduction -This document is the beacon chain fork choice spec, part of Phase 0. It assumes the [beacon chain state transition function spec](./beacon-chain.md). +This document is the beacon chain fork choice spec, part of Phase 0. It assumes +the [beacon chain state transition function spec](./beacon-chain.md). ## Fork choice -The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_forkchoice_store(genesis_state, genesis_block)` and update `store` by running: +The head block root associated with a `store` is defined as `get_head(store)`. +At genesis, let `store = get_forkchoice_store(genesis_state, genesis_block)` and +update `store` by running: -- `on_tick(store, time)` whenever `time > store.time` where `time` is the current Unix time -- `on_block(store, block)` whenever a block `block: SignedBeaconBlock` is received -- `on_attestation(store, attestation)` whenever an attestation `attestation` is received -- `on_attester_slashing(store, attester_slashing)` whenever an attester slashing `attester_slashing` is received +- `on_tick(store, time)` whenever `time > store.time` where `time` is the + current Unix time +- `on_block(store, block)` whenever a block `block: SignedBeaconBlock` is + received +- `on_attestation(store, attestation)` whenever an attestation `attestation` is + received +- `on_attester_slashing(store, attester_slashing)` whenever an attester slashing + `attester_slashing` is received -Any of the above handlers that trigger an unhandled exception (e.g. a failed assert or an out-of-range list access) are considered invalid. Invalid calls to handlers must not modify `store`. +Any of the above handlers that trigger an unhandled exception (e.g. a failed +assert or an out-of-range list access) are considered invalid. Invalid calls to +handlers must not modify `store`. *Notes*: -1. **Leap seconds**: Slots will last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds around leap seconds. This is automatically handled by [UNIX time](https://en.wikipedia.org/wiki/Unix_time). -2. 
**Honest clocks**: Honest nodes are assumed to have clocks synchronized within `SECONDS_PER_SLOT` seconds of each other. -3. **Eth1 data**: The large `ETH1_FOLLOW_DISTANCE` specified in the [honest validator document](./validator.md) should ensure that `state.latest_eth1_data` of the canonical beacon chain remains consistent with the canonical Ethereum proof-of-work chain. If not, emergency manual intervention will be required. -4. **Manual forks**: Manual forks may arbitrarily change the fork choice rule but are expected to be enacted at epoch transitions, with the fork details reflected in `state.fork`. -5. **Implementation**: The implementation found in this specification is constructed for ease of understanding rather than for optimization in computation, space, or any other resource. A number of optimized alternatives can be found [here](https://github.com/protolambda/lmd-ghost). +1. **Leap seconds**: Slots will last `SECONDS_PER_SLOT + 1` or + `SECONDS_PER_SLOT - 1` seconds around leap seconds. This is automatically + handled by [UNIX time](https://en.wikipedia.org/wiki/Unix_time). +2. **Honest clocks**: Honest nodes are assumed to have clocks synchronized + within `SECONDS_PER_SLOT` seconds of each other. +3. **Eth1 data**: The large `ETH1_FOLLOW_DISTANCE` specified in the + [honest validator document](./validator.md) should ensure that + `state.latest_eth1_data` of the canonical beacon chain remains consistent + with the canonical Ethereum proof-of-work chain. If not, emergency manual + intervention will be required. +4. **Manual forks**: Manual forks may arbitrarily change the fork choice rule + but are expected to be enacted at epoch transitions, with the fork details + reflected in `state.fork`. +5. **Implementation**: The implementation found in this specification is + constructed for ease of understanding rather than for optimization in + computation, space, or any other resource. A number of optimized alternatives + can be found [here](https://github.com/protolambda/lmd-ghost). ### Constant @@ -89,8 +110,8 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass | `REORG_PARENT_WEIGHT_THRESHOLD` | `uint64(160)` | | `REORG_MAX_EPOCHS_SINCE_FINALIZATION` | `Epoch(2)` | -- The proposer score boost and re-org weight threshold are percentage - values that are measured with respect to the weight of a single committee. See +- The proposer score boost and re-org weight threshold are percentage values + that are measured with respect to the weight of a single committee. See `calculate_committee_fraction`. ### Helpers @@ -106,12 +127,22 @@ class LatestMessage(object): #### `Store` -The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below: +The `Store` is responsible for tracking information required for the fork choice +algorithm. The important fields being tracked are described below: -- `justified_checkpoint`: the justified checkpoint used as the starting point for the LMD GHOST fork choice algorithm. -- `finalized_checkpoint`: the highest known finalized checkpoint. The fork choice only considers blocks that are not conflicting with this checkpoint. -- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization*** has occurred, i.e. FFG processing of new attestations within the state transition function. 
This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. -- `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. +- `justified_checkpoint`: the justified checkpoint used as the starting point + for the LMD GHOST fork choice algorithm. +- `finalized_checkpoint`: the highest known finalized checkpoint. The fork + choice only considers blocks that are not conflicting with this checkpoint. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these + track the highest justified & finalized checkpoints resp., without regard to + whether on-chain ***realization*** has occurred, i.e. FFG processing of new + attestations within the state transition function. This is an important + distinction from `justified_checkpoint` & `finalized_checkpoint`, because they + will only track the checkpoints that are realized on-chain. Note that on-chain + processing of FFG information only happens at epoch boundaries. +- `unrealized_justifications`: stores a map of block root to the unrealized + justified checkpoint observed in that block. ```python @dataclass @@ -134,10 +165,14 @@ class Store(object): #### `get_forkchoice_store` -The provided anchor-state will be regarded as a trusted state, to not roll back beyond. -This should be the genesis state for a full client. +The provided anchor-state will be regarded as a trusted state, to not roll back +beyond. This should be the genesis state for a full client. -*Note* With regards to fork choice, block headers are interchangeable with blocks. The spec is likely to move to headers for reduced overhead in test vectors and better encapsulation. Full implementations store blocks as part of their database and will often use full blocks when dealing with production fork choice. +*Note* With regards to fork choice, block headers are interchangeable with +blocks. The spec is likely to move to headers for reduced overhead in test +vectors and better encapsulation. Full implementations store blocks as part of +their database and will often use full blocks when dealing with production fork +choice. ```python def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store: @@ -277,7 +312,9 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: #### `filter_block_tree` -*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST set `block_root` to `store.justified_checkpoint`. +*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made +by the recursive logic in this function) MUST set `block_root` to +`store.justified_checkpoint`. ```python def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: @@ -500,9 +537,9 @@ def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root: return head_root ``` -*Note*: The ordering of conditions is a suggestion only. Implementations are free to -optimize by re-ordering the conditions from least to most expensive and by returning early if -any of the early conditions are `False`. +*Note*: The ordering of conditions is a suggestion only. 
Implementations are +free to optimize by re-ordering the conditions from least to most expensive and +by returning early if any of the early conditions are `False`. #### Pull-up tip helpers @@ -705,7 +742,9 @@ def on_attestation(store: Store, attestation: Attestation, is_from_block: bool=F #### `on_attester_slashing` -*Note*: `on_attester_slashing` should be called while syncing and a client MUST maintain the equivocation set of `AttesterSlashing`s from at least the latest finalized checkpoint. +*Note*: `on_attester_slashing` should be called while syncing and a client MUST +maintain the equivocation set of `AttesterSlashing`s from at least the latest +finalized checkpoint. ```python def on_attester_slashing(store: Store, attester_slashing: AttesterSlashing) -> None: diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index c3daafd260..3e03fd2d66 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -116,61 +116,80 @@ This document contains the networking specification for Phase 0. It consists of four main sections: 1. A specification of the network fundamentals. -2. A specification of the three network interaction *domains* of the proof-of-stake consensus layer: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain. -3. The rationale and further explanation for the design choices made in the previous two sections. -4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which clients are being developed. +2. A specification of the three network interaction *domains* of the + proof-of-stake consensus layer: (a) the gossip domain, (b) the discovery + domain, and (c) the Req/Resp domain. +3. The rationale and further explanation for the design choices made in the + previous two sections. +4. An analysis of the maturity/state of the libp2p features required by this + spec across the languages in which clients are being developed. ## Network fundamentals -This section outlines the specification for the networking stack in Ethereum consensus-layer clients. +This section outlines the specification for the networking stack in Ethereum +consensus-layer clients. ### Transport -Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), -we hereby define a profile for basic interoperability. +Even though libp2p is a multi-transport stack (designed to listen on multiple +simultaneous transports and endpoints transparently), we hereby define a profile +for basic interoperability. -All implementations MUST support the TCP libp2p transport, MAY support the QUIC (UDP) libp2p transport, and MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). -The libp2p TCP and QUIC (UDP) transports support listening on IPv4 and IPv6 addresses (and on multiple simultaneously). +All implementations MUST support the TCP libp2p transport, MAY support the QUIC +(UDP) libp2p transport, and MUST be enabled for both dialing and listening (i.e. +outbound and inbound connections). The libp2p TCP and QUIC (UDP) transports +support listening on IPv4 and IPv6 addresses (and on multiple simultaneously). -Clients must support listening on at least one of IPv4 or IPv6. -Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of -Internet-wide routability/support. 
Clients MAY choose to listen only on IPv6, but MUST be capable of dialing both IPv4 and IPv6 addresses. +Clients must support listening on at least one of IPv4 or IPv6. Clients that do +_not_ have support for listening on IPv4 SHOULD be cognizant of the potential +disadvantages in terms of Internet-wide routability/support. Clients MAY choose +to listen only on IPv6, but MUST be capable of dialing both IPv4 and IPv6 +addresses. -All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities. -(Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined soon.) +All listening endpoints must be publicly dialable, and thus not rely on libp2p +circuit relay, AutoNAT, or AutoRelay facilities. (Usage of circuit relay, +AutoNAT, or AutoRelay will be specifically re-examined soon.) -Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), -MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint. +Nodes operating behind a NAT, or otherwise undialable by default (e.g. container +runtime, firewall, etc.), MUST have their infrastructure configured to enable +inbound traffic on the announced public listening endpoint. ### Encryption and identification The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure channel handshake with `secp256k1` identities will be used for encryption. -As specified in the libp2p specification, clients MUST support the `XX` handshake pattern. +As specified in the libp2p specification, clients MUST support the `XX` +handshake pattern. ### Protocol Negotiation -Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers. +Clients MUST use exact equality when negotiating protocol versions to use and +MAY use the version to give priority to higher version numbers. -Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) -and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies. -Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out. +Clients MUST support +[multistream-select 1.0](https://github.com/multiformats/multistream-select/) +and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when +the spec solidifies. Once all clients have implementations for multiselect 2.0, +multistream-select 1.0 MAY be phased out. ### Multiplexing -During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations. -This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC), -and is omitted for capable transports (e.g. QUIC). +During connection bootstrapping, libp2p dynamically negotiates a mutually +supported multiplexing method to conduct parallel conversations. This applies to +transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, +WebRTC), and is omitted for capable transports (e.g. QUIC). Two multiplexers are commonplace in libp2p implementations: -[mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/libp2p/specs/blob/master/yamux/README.md). -Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`. 
+[mplex](https://github.com/libp2p/specs/tree/master/mplex) and +[yamux](https://github.com/libp2p/specs/blob/master/yamux/README.md). Their +protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`. Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) -and MAY support [yamux](https://github.com/libp2p/specs/blob/master/yamux/README.md). -If both are supported by the client, yamux MUST take precedence during negotiation. -See the [Rationale](#design-decision-rationale) section below for tradeoffs. +and MAY support +[yamux](https://github.com/libp2p/specs/blob/master/yamux/README.md). If both +are supported by the client, yamux MUST take precedence during negotiation. See +the [Rationale](#design-decision-rationale) section below for tradeoffs. ## Consensus-layer network interaction domains @@ -222,17 +241,20 @@ Clients MUST locally store the following `MetaData`: Where -- `seq_number` is a `uint64` starting at `0` used to version the node's metadata. - If any other field in the local `MetaData` changes, the node MUST increment `seq_number` by 1. -- `attnets` is a `Bitvector` representing the node's persistent attestation subnet subscriptions. +- `seq_number` is a `uint64` starting at `0` used to version the node's + metadata. If any other field in the local `MetaData` changes, the node MUST + increment `seq_number` by 1. +- `attnets` is a `Bitvector` representing the node's persistent attestation + subnet subscriptions. -*Note*: `MetaData.seq_number` is used for versioning of the node's metadata, -is entirely independent of the ENR sequence number, -and will in most cases be out of sync with the ENR sequence number. +*Note*: `MetaData.seq_number` is used for versioning of the node's metadata, is +entirely independent of the ENR sequence number, and will in most cases be out +of sync with the ENR sequence number. ### Maximum message sizes -Maximum message sizes are derived from the maximum payload size that the network can carry according to the following functions: +Maximum message sizes are derived from the maximum payload size that the network +can carry according to the following functions: #### `max_compressed_len` @@ -253,69 +275,88 @@ def max_message_size() -> uint64: ### The gossip domain: gossipsub -Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol -including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension. +Clients MUST support the +[gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) +libp2p Protocol including the +[gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) +extension. 
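A minimal sketch (not spec code) of the `MetaData` versioning rule above: any
change to another field bumps `seq_number` by one, independently of the ENR
sequence number. A mutable dataclass stands in for the SSZ container, and the
64-subnet bitvector is represented as a plain list.

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class MetaData:
    seq_number: int = 0
    attnets: List[bool] = field(default_factory=lambda: [False] * 64)


def update_attnets(metadata: MetaData, new_attnets: List[bool]) -> None:
    # If any other field in the local MetaData changes, seq_number MUST be
    # incremented by 1; it starts at 0 and is unrelated to the ENR seq.
    if new_attnets != metadata.attnets:
        metadata.attnets = new_attnets
        metadata.seq_number += 1
```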
**Protocol ID:** `/meshsub/1.1.0` **Gossipsub Parameters** -The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#parameters) will be used: +The following gossipsub +[parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#parameters) +will be used: - `D` (topic stable mesh target count): 8 - `D_low` (topic stable mesh low watermark): 6 - `D_high` (topic stable mesh high watermark): 12 - `D_lazy` (gossip target): 6 - `heartbeat_interval` (frequency of heartbeat, seconds): 0.7 -- `fanout_ttl` (ttl for fanout maps for topics we are not subscribed to but have published to, seconds): 60 -- `mcache_len` (number of windows to retain full messages in cache for `IWANT` responses): 6 +- `fanout_ttl` (ttl for fanout maps for topics we are not subscribed to but have + published to, seconds): 60 +- `mcache_len` (number of windows to retain full messages in cache for `IWANT` + responses): 6 - `mcache_gossip` (number of windows to gossip about): 3 -- `seen_ttl` (expiry time for cache of seen message ids, seconds): SECONDS_PER_SLOT * SLOTS_PER_EPOCH * 2 +- `seen_ttl` (expiry time for cache of seen message ids, seconds): + SECONDS_PER_SLOT * SLOTS_PER_EPOCH * 2 *Note*: Gossipsub v1.1 introduces a number of [additional parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#overview-of-new-parameters) -for peer scoring and other attack mitigations. -These are currently under investigation and will be spec'd and released to mainnet when they are ready. +for peer scoring and other attack mitigations. These are currently under +investigation and will be spec'd and released to mainnet when they are ready. #### Topics and messages -Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). -Topic strings have form: `/eth2/ForkDigestValue/Name/Encoding`. -This defines both the type of data being sent on the topic and how the data field of the message is encoded. - -- `ForkDigestValue` - the lowercase hex-encoded (no "0x" prefix) bytes of `compute_fork_digest(current_fork_version, genesis_validators_root)` where - - `current_fork_version` is the fork version of the epoch of the message to be sent on the topic - - `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root` +Topics are plain UTF-8 strings and are encoded on the wire as determined by +protobuf (gossipsub messages are enveloped in protobuf messages). Topic strings +have form: `/eth2/ForkDigestValue/Name/Encoding`. This defines both the type of +data being sent on the topic and how the data field of the message is encoded. + +- `ForkDigestValue` - the lowercase hex-encoded (no "0x" prefix) bytes of + `compute_fork_digest(current_fork_version, genesis_validators_root)` where + - `current_fork_version` is the fork version of the epoch of the message to be + sent on the topic + - `genesis_validators_root` is the static `Root` found in + `state.genesis_validators_root` - `Name` - see table below -- `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. - See the [Encodings](#Encodings) section for further details. +- `Encoding` - the encoding strategy describes a specific representation of + bytes that will be transmitted over the wire. See the [Encodings](#Encodings) + section for further details. Clients MUST reject messages with an unknown topic. 
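As a concrete illustration of the `/eth2/ForkDigestValue/Name/Encoding` topic format described below, the following sketch builds a topic string from a 4-byte fork digest. The helper name is hypothetical; the lowercase, un-prefixed hex encoding and the `ssz_snappy` suffix follow the rules in this document.

```python
def gossip_topic(fork_digest: bytes, name: str, encoding: str = "ssz_snappy") -> str:
    """Build an eth2 gossip topic: /eth2/ForkDigestValue/Name/Encoding.

    ForkDigestValue is the lowercase hex encoding of the 4-byte fork digest,
    without a "0x" prefix.
    """
    assert len(fork_digest) == 4
    return f"/eth2/{fork_digest.hex()}/{name}/{encoding}"


# Matches the example fork digest 446a7232 used later in this document.
assert gossip_topic(bytes.fromhex("446a7232"), "beacon_aggregate_and_proof") \
    == "/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy"
```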
-*Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. -Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. +*Note*: `ForkDigestValue` is composed of values that are not known until the +genesis block/state are available. Due to this, clients SHOULD NOT subscribe to +gossipsub topics until these genesis values are known. -The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message, -since messages are identified by content, anonymous, and signed where necessary in the application layer. -Starting from Gossipsub v1.1, clients MUST enforce this by applying the `StrictNoSign` +The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf +fields are omitted from the message, since messages are identified by content, +anonymous, and signed where necessary in the application layer. Starting from +Gossipsub v1.1, clients MUST enforce this by applying the `StrictNoSign` [signature policy](https://github.com/libp2p/specs/blob/master/pubsub/README.md#signature-policy-options). -The `message-id` of a gossipsub message MUST be the following 20 byte value computed from the message data: +The `message-id` of a gossipsub message MUST be the following 20 byte value +computed from the message data: -- If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of - the concatenation of `MESSAGE_DOMAIN_VALID_SNAPPY` with the snappy decompressed message data, - i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + snappy_decompress(message.data))[:20]`. -- Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of - the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data, +- If `message.data` has a valid snappy decompression, set `message-id` to the + first 20 bytes of the `SHA256` hash of the concatenation of + `MESSAGE_DOMAIN_VALID_SNAPPY` with the snappy decompressed message data, i.e. + `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + snappy_decompress(message.data))[:20]`. +- Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of the + concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data, i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`. -Where relevant, clients MUST reject messages with `message-id` sizes other than 20 bytes. +Where relevant, clients MUST reject messages with `message-id` sizes other than +20 bytes. -*Note*: The above logic handles two exceptional cases: -(1) multiple snappy `data` can decompress to the same value, -and (2) some message `data` can fail to snappy decompress altogether. +*Note*: The above logic handles two exceptional cases: (1) multiple snappy +`data` can decompress to the same value, and (2) some message `data` can fail to +snappy decompress altogether. -The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic: +The payload is carried in the `data` field of a gossipsub message, and varies +depending on the topic: | Name | Message Type | | -------------------------------- | ------------------------- | @@ -326,58 +367,75 @@ The payload is carried in the `data` field of a gossipsub message, and varies de | `proposer_slashing` | `ProposerSlashing` | | `attester_slashing` | `AttesterSlashing` | -Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload. 
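The `message-id` rule above has two branches depending on whether the payload decompresses. A minimal sketch follows, assuming the python-snappy package (`import snappy`); the domain constants are taken as parameters rather than hard-coded, since their values are defined in the configuration section of this spec and not restated here.

```python
import hashlib

import snappy  # python-snappy is assumed to be available


def message_id(message_data: bytes,
               domain_valid: bytes,
               domain_invalid: bytes) -> bytes:
    """Compute the 20-byte gossipsub message-id described above.

    `domain_valid` / `domain_invalid` stand for MESSAGE_DOMAIN_VALID_SNAPPY /
    MESSAGE_DOMAIN_INVALID_SNAPPY from the configuration.
    """
    try:
        decompressed = snappy.decompress(message_data)
        digest_input = domain_valid + decompressed
    except Exception:  # message data is not valid snappy
        digest_input = domain_invalid + message_data
    return hashlib.sha256(digest_input).digest()[:20]
```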
+Clients MUST reject (fail validation) messages containing an incorrect type, or +invalid payload. -When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints. +When processing incoming gossip, clients MAY descore or disconnect peers who +fail to observe these constraints. -For any optional queueing, clients SHOULD maintain maximum queue sizes to avoid DoS vectors. +For any optional queueing, clients SHOULD maintain maximum queue sizes to avoid +DoS vectors. -Gossipsub v1.1 introduces [Extended Validators](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#extended-validators) -for the application to aid in the gossipsub peer-scoring scheme. -We utilize `ACCEPT`, `REJECT`, and `IGNORE`. For each gossipsub topic, there are application specific validations. -If all validations pass, return `ACCEPT`. -If one or more validations fail while processing the items in order, return either `REJECT` or `IGNORE` as specified in the prefix of the particular condition. +Gossipsub v1.1 introduces +[Extended Validators](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#extended-validators) +for the application to aid in the gossipsub peer-scoring scheme. We utilize +`ACCEPT`, `REJECT`, and `IGNORE`. For each gossipsub topic, there are +application specific validations. If all validations pass, return `ACCEPT`. If +one or more validations fail while processing the items in order, return either +`REJECT` or `IGNORE` as specified in the prefix of the particular condition. ##### Global topics -There are two primary global topics used to propagate beacon blocks (`beacon_block`) -and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network. +There are two primary global topics used to propagate beacon blocks +(`beacon_block`) and aggregate attestations (`beacon_aggregate_and_proof`) to +all nodes on the network. -There are three additional global topics that are used to propagate lower frequency validator messages -(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`). +There are three additional global topics that are used to propagate lower +frequency validator messages (`voluntary_exit`, `proposer_slashing`, and +`attester_slashing`). ###### `beacon_block` -The `beacon_block` topic is used solely for propagating new signed beacon blocks to all nodes on the networks. -Signed blocks are sent in their entirety. +The `beacon_block` topic is used solely for propagating new signed beacon blocks +to all nodes on the networks. Signed blocks are sent in their entirety. -The following validations MUST pass before forwarding the `signed_beacon_block` on the network. +The following validations MUST pass before forwarding the `signed_beacon_block` +on the network. -- _[IGNORE]_ The block is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `signed_beacon_block.message.slot <= current_slot` - (a client MAY queue future blocks for processing at the appropriate slot). +- _[IGNORE]_ The block is not from a future slot (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that + `signed_beacon_block.message.slot <= current_slot` (a client MAY queue future + blocks for processing at the appropriate slot). - _[IGNORE]_ The block is from a slot greater than the latest finalized slot -- - i.e. 
validate that `signed_beacon_block.message.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` - (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc). -- _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`. -- _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey. + i.e. validate that + `signed_beacon_block.message.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` + (a client MAY choose to validate and store such blocks for additional purposes + -- e.g. slashing detection, archive nodes, etc). +- _[IGNORE]_ The block is the first block with valid signature received for the + proposer for the slot, `signed_beacon_block.message.slot`. +- _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid + with respect to the `proposer_index` pubkey. - _[IGNORE]_ The block's parent (defined by `block.parent_root`) has been seen - (via gossip or non-gossip sources) - (a client MAY queue blocks for processing once the parent block is retrieved). -- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. + (via gossip or non-gossip sources) (a client MAY queue blocks for processing + once the parent block is retrieved). +- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes + validation. - _[REJECT]_ The block is from a higher slot than its parent. -- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e. +- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- + i.e. `get_checkpoint_block(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` -- _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot - in the context of the current shuffling (defined by `parent_root`/`slot`). - If the `proposer_index` cannot immediately be verified against the expected shuffling, - the block MAY be queued for later processing while proposers for the block's branch are calculated -- - in such a case _do not_ `REJECT`, instead `IGNORE` this message. +- _[REJECT]_ The block is proposed by the expected `proposer_index` for the + block's slot in the context of the current shuffling (defined by + `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified + against the expected shuffling, the block MAY be queued for later processing + while proposers for the block's branch are calculated -- in such a case _do + not_ `REJECT`, instead `IGNORE` this message. ###### `beacon_aggregate_and_proof` -The `beacon_aggregate_and_proof` topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) -to subscribing nodes (typically validators) to be included in future blocks. +The `beacon_aggregate_and_proof` topic is used to propagate aggregated +attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically +validators) to be included in future blocks. We define the following variables for convenience: @@ -386,160 +444,218 @@ We define the following variables for convenience: - `index = aggregate.data.index` - `aggregation_bits = attestation.aggregation_bits` -The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. 
+The following validations MUST pass before forwarding the +`signed_aggregate_and_proof` on the network. -- _[REJECT]_ The committee index is within the expected range -- i.e. `index < get_committee_count_per_slot(state, aggregate.data.target.epoch)`. -- _[IGNORE]_ `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` +- _[REJECT]_ The committee index is within the expected range -- i.e. + `index < get_committee_count_per_slot(state, aggregate.data.target.epoch)`. +- _[IGNORE]_ `aggregate.data.slot` is within the last + `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot). -- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot)` +- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. + `aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot)` - _[REJECT]_ The number of aggregation bits matches the committee size -- i.e. `len(aggregation_bits) == len(get_beacon_committee(state, aggregate.data.slot, index))`. -- _[REJECT]_ The aggregate attestation has participants -- - that is, `len(get_attesting_indices(state, aggregate)) >= 1`. -- _[IGNORE]_ A valid aggregate attestation defined by `hash_tree_root(aggregate.data)` whose `aggregation_bits` is a non-strict superset has _not_ already been seen. - (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally). -- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator - with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`. -- _[REJECT]_ The attestation has participants -- that is, `len(get_attesting_indices(state, aggregate)) >= 1`. -- _[REJECT]_ `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- - i.e. `is_aggregator(state, aggregate.data.slot, index, aggregate_and_proof.selection_proof)` returns `True`. -- _[REJECT]_ The aggregator's validator index is within the committee -- - i.e. `aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index)`. -- _[REJECT]_ The `aggregate_and_proof.selection_proof` is a valid signature - of the `aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`. -- _[REJECT]_ The aggregator signature, `signed_aggregate_and_proof.signature`, is valid. +- _[REJECT]_ The aggregate attestation has participants -- that is, + `len(get_attesting_indices(state, aggregate)) >= 1`. +- _[IGNORE]_ A valid aggregate attestation defined by + `hash_tree_root(aggregate.data)` whose `aggregation_bits` is a non-strict + superset has _not_ already been seen. (via aggregate gossip, within a verified + block, or through the creation of an equivalent aggregate locally). +- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the + aggregator with index `aggregate_and_proof.aggregator_index` for the epoch + `aggregate.data.target.epoch`. +- _[REJECT]_ The attestation has participants -- that is, + `len(get_attesting_indices(state, aggregate)) >= 1`. 
+- _[REJECT]_ `aggregate_and_proof.selection_proof` selects the validator as an + aggregator for the slot -- i.e. + `is_aggregator(state, aggregate.data.slot, index, aggregate_and_proof.selection_proof)` + returns `True`. +- _[REJECT]_ The aggregator's validator index is within the committee -- i.e. + `aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index)`. +- _[REJECT]_ The `aggregate_and_proof.selection_proof` is a valid signature of + the `aggregate.data.slot` by the validator with index + `aggregate_and_proof.aggregator_index`. +- _[REJECT]_ The aggregator signature, `signed_aggregate_and_proof.signature`, + is valid. - _[REJECT]_ The signature of `aggregate` is valid. -- _[IGNORE]_ The block being voted for (`aggregate.data.beacon_block_root`) has been seen - (via gossip or non-gossip sources) - (a client MAY queue aggregates for processing once block is retrieved). -- _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation. -- _[REJECT]_ The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. +- _[IGNORE]_ The block being voted for (`aggregate.data.beacon_block_root`) has + been seen (via gossip or non-gossip sources) (a client MAY queue aggregates + for processing once block is retrieved). +- _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) + passes validation. +- _[REJECT]_ The aggregate attestation's target block is an ancestor of the + block named in the LMD vote -- i.e. `get_checkpoint_block(store, aggregate.data.beacon_block_root, aggregate.data.target.epoch) == aggregate.data.target.root` -- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e. +- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` + defined by `aggregate.data.beacon_block_root` -- i.e. `get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch) == store.finalized_checkpoint.root` ###### `voluntary_exit` -The `voluntary_exit` topic is used solely for propagating signed voluntary validator exits to proposers on the network. -Signed voluntary exits are sent in their entirety. +The `voluntary_exit` topic is used solely for propagating signed voluntary +validator exits to proposers on the network. Signed voluntary exits are sent in +their entirety. -The following validations MUST pass before forwarding the `signed_voluntary_exit` on to the network. +The following validations MUST pass before forwarding the +`signed_voluntary_exit` on to the network. -- _[IGNORE]_ The voluntary exit is the first valid voluntary exit received - for the validator with index `signed_voluntary_exit.message.validator_index`. -- _[REJECT]_ All of the conditions within `process_voluntary_exit` pass validation. +- _[IGNORE]_ The voluntary exit is the first valid voluntary exit received for + the validator with index `signed_voluntary_exit.message.validator_index`. +- _[REJECT]_ All of the conditions within `process_voluntary_exit` pass + validation. ###### `proposer_slashing` -The `proposer_slashing` topic is used solely for propagating proposer slashings to proposers on the network. -Proposer slashings are sent in their entirety. +The `proposer_slashing` topic is used solely for propagating proposer slashings +to proposers on the network. Proposer slashings are sent in their entirety. 
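The _[IGNORE]_ / _[REJECT]_ prefixes used throughout these validation lists map directly onto the gossipsub extended-validator results described earlier. A minimal sketch of that dispatch pattern is shown below; the condition tuples and trivial predicates are hypothetical, not any client's actual validation pipeline.

```python
from enum import Enum, auto
from typing import Callable, Sequence, Tuple


class ValidationResult(Enum):
    ACCEPT = auto()
    REJECT = auto()
    IGNORE = auto()


# Each condition pairs the result to return on failure with a predicate.
# Conditions are evaluated in order; the first failing condition determines
# the returned result, and ACCEPT is returned only if all of them pass.
Condition = Tuple[ValidationResult, Callable[[object], bool]]


def validate(message: object, conditions: Sequence[Condition]) -> ValidationResult:
    for on_failure, predicate in conditions:
        if not predicate(message):
            return on_failure
    return ValidationResult.ACCEPT


# Tiny demonstration with placeholder predicates:
demo = [
    (ValidationResult.IGNORE, lambda m: m != "already-seen"),
    (ValidationResult.REJECT, lambda m: m != "bad-signature"),
]
assert validate("ok", demo) == ValidationResult.ACCEPT
assert validate("already-seen", demo) == ValidationResult.IGNORE
assert validate("bad-signature", demo) == ValidationResult.REJECT
```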
-The following validations MUST pass before forwarding the `proposer_slashing` on to the network. +The following validations MUST pass before forwarding the `proposer_slashing` on +to the network. - _[IGNORE]_ The proposer slashing is the first valid proposer slashing received - for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`. -- _[REJECT]_ All of the conditions within `process_proposer_slashing` pass validation. + for the proposer with index + `proposer_slashing.signed_header_1.message.proposer_index`. +- _[REJECT]_ All of the conditions within `process_proposer_slashing` pass + validation. ###### `attester_slashing` -The `attester_slashing` topic is used solely for propagating attester slashings to proposers on the network. -Attester slashings are sent in their entirety. +The `attester_slashing` topic is used solely for propagating attester slashings +to proposers on the network. Attester slashings are sent in their entirety. -Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network. +Clients who receive an attester slashing on this topic MUST validate the +conditions within `process_attester_slashing` before forwarding it across the +network. -- _[IGNORE]_ At least one index in the intersection of the attesting indices of each attestation - has not yet been seen in any prior `attester_slashing` - (i.e. `attester_slashed_indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)`, - verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`). -- _[REJECT]_ All of the conditions within `process_attester_slashing` pass validation. +- _[IGNORE]_ At least one index in the intersection of the attesting indices of + each attestation has not yet been seen in any prior `attester_slashing` (i.e. + `attester_slashed_indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)`, + verify if + `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`). +- _[REJECT]_ All of the conditions within `process_attester_slashing` pass + validation. ##### Attestation subnets -Attestation subnets are used to propagate unaggregated attestations to subsections of the network. +Attestation subnets are used to propagate unaggregated attestations to +subsections of the network. ###### `beacon_attestation_{subnet_id}` -The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated attestations -to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. +The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated +attestations to the subnet `subnet_id` (typically beacon and persistent +committees) to be aggregated before being gossiped to +`beacon_aggregate_and_proof`. We define the following variables for convenience: - `index = attestation.data.index` - `aggregation_bits = attestation.aggregation_bits` -The following validations MUST pass before forwarding the `attestation` on the subnet. - -- _[REJECT]_ The committee index is within the expected range -- i.e. `index < get_committee_count_per_slot(state, attestation.data.target.epoch)`. -- _[REJECT]_ The attestation is for the correct subnet -- - i.e. 
`compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, index) == subnet_id`, - where `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`, - which may be pre-computed along with the committee information for the signature check. -- _[IGNORE]_ `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots - (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` - (a client MAY queue future attestations for processing at the appropriate slot). -- _[REJECT]_ The attestation's epoch matches its target -- i.e. `attestation.data.target.epoch == compute_epoch_at_slot(attestation.data.slot)` -- _[REJECT]_ The attestation is unaggregated -- - that is, it has exactly one participating validator (`len([bit for bit in aggregation_bits if bit]) == 1`, i.e. exactly 1 bit is set). +The following validations MUST pass before forwarding the `attestation` on the +subnet. + +- _[REJECT]_ The committee index is within the expected range -- i.e. + `index < get_committee_count_per_slot(state, attestation.data.target.epoch)`. +- _[REJECT]_ The attestation is for the correct subnet -- i.e. + `compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, index) == subnet_id`, + where + `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`, + which may be pre-computed along with the committee information for the + signature check. +- _[IGNORE]_ `attestation.data.slot` is within the last + `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a + `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. + `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` + (a client MAY queue future attestations for processing at the appropriate + slot). +- _[REJECT]_ The attestation's epoch matches its target -- i.e. + `attestation.data.target.epoch == compute_epoch_at_slot(attestation.data.slot)` +- _[REJECT]_ The attestation is unaggregated -- that is, it has exactly one + participating validator (`len([bit for bit in aggregation_bits if bit]) == 1`, + i.e. exactly 1 bit is set). - _[REJECT]_ The number of aggregation bits matches the committee size -- i.e. `len(aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, index))`. -- _[IGNORE]_ There has been no other valid attestation seen on an attestation subnet - that has an identical `attestation.data.target.epoch` and participating validator index. +- _[IGNORE]_ There has been no other valid attestation seen on an attestation + subnet that has an identical `attestation.data.target.epoch` and participating + validator index. - _[REJECT]_ The signature of `attestation` is valid. -- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen - (via gossip or non-gossip sources) - (a client MAY queue attestations for processing once block is retrieved). -- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation. -- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e. +- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) + has been seen (via gossip or non-gossip sources) (a client MAY queue + attestations for processing once block is retrieved). +- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) + passes validation. 
+- _[REJECT]_ The attestation's target block is an ancestor of the block named in + the LMD vote -- i.e. `get_checkpoint_block(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root` -- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e. +- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` + defined by `attestation.data.beacon_block_root` -- i.e. `get_checkpoint_block(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` ##### Attestations and Aggregation -Attestation broadcasting is grouped into subnets defined by a topic. -The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. -The correct subnet for an attestation can be calculated with `compute_subnet_for_attestation`. -`beacon_attestation_{subnet_id}` topics, are rotated through throughout the epoch in a similar fashion to rotating through shards in committees (future beacon chain upgrade). -The subnets are rotated through with `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)` subnets per slot. +Attestation broadcasting is grouped into subnets defined by a topic. The number +of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The correct subnet for an +attestation can be calculated with `compute_subnet_for_attestation`. +`beacon_attestation_{subnet_id}` topics, are rotated through throughout the +epoch in a similar fashion to rotating through shards in committees (future +beacon chain upgrade). The subnets are rotated through with +`committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)` +subnets per slot. Unaggregated attestations are sent as `Attestation`s to the subnet topic, -`beacon_attestation_{compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)}` as `Attestation`s. +`beacon_attestation_{compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)}` +as `Attestation`s. -Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s. +Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as +`AggregateAndProof`s. #### Encodings -Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded. +Topics are post-fixed with an encoding. Encodings define how the payload of a +gossipsub message is encoded. -- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) block compression. - Example: The beacon aggregate attestation topic string is `/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy`, - the fork digest is `446a7232` and the data field of a gossipsub message is an `AggregateAndProof` +- `ssz_snappy` - All objects are SSZ-encoded and then compressed with + [Snappy](https://github.com/google/snappy) block compression. Example: The + beacon aggregate attestation topic string is + `/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy`, the fork digest is + `446a7232` and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy. -Snappy has two formats: "block" and "frames" (streaming). 
-Gossip messages remain relatively small (100s of bytes to 100s of kilobytes) -so [basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) is used to avoid the additional overhead associated with snappy frames. +Snappy has two formats: "block" and "frames" (streaming). Gossip messages remain +relatively small (100s of bytes to 100s of kilobytes) so +[basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) +is used to avoid the additional overhead associated with snappy frames. -Implementations MUST use a single encoding for gossip. -Changing an encoding will require coordination between participating implementations. +Implementations MUST use a single encoding for gossip. Changing an encoding will +require coordination between participating implementations. #### Gossipsub size limits -Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-rpc) frame as well as the encoded payload in each [`Message`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-message). +Size limits are placed both on the +[`RPCMsg`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-rpc) +frame as well as the encoded payload in each +[`Message`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-message). -Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: +Clients MUST reject and MUST NOT emit or propagate messages whose size exceed +the following limits: -- The size of the encoded `RPCMsg` (including control messages, framing, topics, etc) must not exceed `max_message_size()`. -- The size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. -- The size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. +- The size of the encoded `RPCMsg` (including control messages, framing, topics, + etc) must not exceed `max_message_size()`. +- The size of the compressed payload in the `Message.data` field must not exceed + `max_compressed_len(MAX_PAYLOAD_SIZE)`. +- The size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the + [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. ### The Req/Resp domain #### Protocol identification -Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form: +Each message type is segregated into its own libp2p protocol ID, which is a +case-sensitive UTF-8 string of the form: ``` /ProtocolPrefix/MessageName/SchemaVersion/Encoding @@ -547,24 +663,28 @@ Each message type is segregated into its own libp2p protocol ID, which is a case With: -- `ProtocolPrefix` - messages are grouped into families identified by a shared libp2p protocol name prefix. - In this case, we use `/eth2/beacon_chain/req`. -- `MessageName` - each request is identified by a name consisting of English alphabet, digits and underscores (`_`). -- `SchemaVersion` - an ordinal version number (e.g. 1, 2, 3…). - Each schema is versioned to facilitate backward and forward-compatibility when possible. +- `ProtocolPrefix` - messages are grouped into families identified by a shared + libp2p protocol name prefix. 
In this case, we use `/eth2/beacon_chain/req`. +- `MessageName` - each request is identified by a name consisting of English + alphabet, digits and underscores (`_`). +- `SchemaVersion` - an ordinal version number (e.g. 1, 2, 3…). Each schema is + versioned to facilitate backward and forward-compatibility when possible. - `Encoding` - while the schema defines the data types in more abstract terms, - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. - See the [Encodings](#Encoding-strategies) section for further details. + the encoding strategy describes a specific representation of bytes that will + be transmitted over the wire. See the [Encodings](#Encoding-strategies) + section for further details. -This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0` -to handle the request type, version, and encoding negotiation before establishing the underlying streams. +This protocol segregation allows libp2p `multistream-select 1.0` / +`multiselect 2.0` to handle the request type, version, and encoding negotiation +before establishing the underlying streams. #### Req/Resp interaction -We use ONE stream PER request/response interaction. -Streams are closed when the interaction finishes, whether in success or in error. +We use ONE stream PER request/response interaction. Streams are closed when the +interaction finishes, whether in success or in error. -Request/response messages MUST adhere to the encoding specified in the protocol name and follow this structure (relaxed BNF grammar): +Request/response messages MUST adhere to the encoding specified in the protocol +name and follow this structure (relaxed BNF grammar): ``` request ::= | @@ -573,83 +693,110 @@ response_chunk ::= | | result ::= “0” | “1” | “2” | [“128” ... ”255”] ``` -The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. -Because req/resp streams are single-use and stream closures implicitly delimit the boundaries, it is not strictly necessary to length-prefix payloads; -however, certain encodings like SSZ do, for added security. +The encoding-dependent header may carry metadata or assertions such as the +encoded payload length, for integrity and attack proofing purposes. Because +req/resp streams are single-use and stream closures implicitly delimit the +boundaries, it is not strictly necessary to length-prefix payloads; however, +certain encodings like SSZ do, for added security. -A `response` is formed by zero or more `response_chunk`s. -Responses that consist of a single SSZ-list (such as `BlocksByRange` and `BlocksByRoot`) send each list item as a `response_chunk`. -All other response types (non-Lists) send a single `response_chunk`. +A `response` is formed by zero or more `response_chunk`s. Responses that consist +of a single SSZ-list (such as `BlocksByRange` and `BlocksByRoot`) send each list +item as a `response_chunk`. All other response types (non-Lists) send a single +`response_chunk`. -For both `request`s and `response`s, the `encoding-dependent-header` MUST be valid, -and the `encoded-payload` must be valid within the constraints of the `encoding-dependent-header`. -This includes type-specific bounds on payload size for some encoding strategies. -Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_PAYLOAD_SIZE` MUST be applied to all method response chunks. 
+For both `request`s and `response`s, the `encoding-dependent-header` MUST be +valid, and the `encoded-payload` must be valid within the constraints of the +`encoding-dependent-header`. This includes type-specific bounds on payload size +for some encoding strategies. Regardless of these type specific bounds, a global +maximum uncompressed byte size of `MAX_PAYLOAD_SIZE` MUST be applied to all +method response chunks. -Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately. -Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. +Clients MUST ensure that lengths are within these bounds; if not, they SHOULD +reset the stream immediately. Clients tracking peer reputation MAY decrement the +score of the misbehaving peer under this circumstance. ##### Requesting side -Once a new stream with the protocol ID for the request type has been negotiated, the full request message SHOULD be sent immediately. -The request MUST be encoded according to the encoding strategy. +Once a new stream with the protocol ID for the request type has been negotiated, +the full request message SHOULD be sent immediately. The request MUST be encoded +according to the encoding strategy. -The requester MUST close the write side of the stream once it finishes writing the request message. -At this point, the stream will be half-closed. +The requester MUST close the write side of the stream once it finishes writing +the request message. At this point, the stream will be half-closed. -The requester MUST NOT make more than `MAX_CONCURRENT_REQUESTS` concurrent requests with the same protocol ID. +The requester MUST NOT make more than `MAX_CONCURRENT_REQUESTS` concurrent +requests with the same protocol ID. -If a timeout occurs or the response is no longer relevant, the requester SHOULD reset the stream. +If a timeout occurs or the response is no longer relevant, the requester SHOULD +reset the stream. A requester SHOULD read from the stream until either: -1. An error result is received in one of the chunks (the error payload MAY be read before stopping). +1. An error result is received in one of the chunks (the error payload MAY be + read before stopping). 2. The responder closes the stream. 3. Any part of the `response_chunk` fails validation. 4. The maximum number of requested chunks are read. -For requests consisting of a single valid `response_chunk`, -the requester SHOULD read the chunk fully, as defined by the `encoding-dependent-header`, before closing the stream. +For requests consisting of a single valid `response_chunk`, the requester SHOULD +read the chunk fully, as defined by the `encoding-dependent-header`, before +closing the stream. ##### Responding side Once a new stream with the protocol ID for the request type has been negotiated, -the responder SHOULD process the incoming request and MUST validate it before processing it. -Request processing and validation MUST be done according to the encoding strategy, until EOF (denoting stream half-closure by the requester). +the responder SHOULD process the incoming request and MUST validate it before +processing it. Request processing and validation MUST be done according to the +encoding strategy, until EOF (denoting stream half-closure by the requester). The responder MUST: 1. Use the encoding strategy to read the optional header. -2. 
If there are any length assertions for length `N`, it should read exactly `N` bytes from the stream, at which point an EOF should arise (no more bytes). +2. If there are any length assertions for length `N`, it should read exactly `N` + bytes from the stream, at which point an EOF should arise (no more bytes). Should this not be the case, it should be treated as a failure. 3. Deserialize the expected type, and process the request. -4. Write the response which may consist of zero or more `response_chunk`s (result, optional header, payload). -5. Close their write side of the stream. At this point, the stream will be fully closed. - -If steps (1), (2), or (3) fail due to invalid, malformed, or inconsistent data, the responder MUST respond in error. -Clients tracking peer reputation MAY record such failures, as well as unexpected events, e.g. early stream resets. - -The responder MAY rate-limit chunks by withholding each chunk until capacity is available. The responder MUST NOT respond with an error or close the stream when rate limiting. - -When rate limiting, the responder MUST send each `response_chunk` in full promptly but may introduce delays between each chunk. - -Chunks start with a **single-byte** response code which determines the contents of the `response_chunk` (`result` particle in the BNF grammar above). -For multiple chunks, only the last chunk is allowed to have a non-zero error code (i.e. The chunk stream is terminated once an error occurs). - -The response code can have one of the following values, encoded as a single unsigned byte: - -- 0: **Success** -- a normal response follows, with contents matching the expected message schema and encoding specified in the request. -- 1: **InvalidRequest** -- the contents of the request are semantically invalid, or the payload is malformed, or could not be understood. - The response payload adheres to the `ErrorMessage` schema (described below). -- 2: **ServerError** -- the responder encountered an error while processing the request. - The response payload adheres to the `ErrorMessage` schema (described below). +4. Write the response which may consist of zero or more `response_chunk`s + (result, optional header, payload). +5. Close their write side of the stream. At this point, the stream will be fully + closed. + +If steps (1), (2), or (3) fail due to invalid, malformed, or inconsistent data, +the responder MUST respond in error. Clients tracking peer reputation MAY record +such failures, as well as unexpected events, e.g. early stream resets. + +The responder MAY rate-limit chunks by withholding each chunk until capacity is +available. The responder MUST NOT respond with an error or close the stream when +rate limiting. + +When rate limiting, the responder MUST send each `response_chunk` in full +promptly but may introduce delays between each chunk. + +Chunks start with a **single-byte** response code which determines the contents +of the `response_chunk` (`result` particle in the BNF grammar above). For +multiple chunks, only the last chunk is allowed to have a non-zero error code +(i.e. The chunk stream is terminated once an error occurs). + +The response code can have one of the following values, encoded as a single +unsigned byte: + +- 0: **Success** -- a normal response follows, with contents matching the + expected message schema and encoding specified in the request. +- 1: **InvalidRequest** -- the contents of the request are semantically invalid, + or the payload is malformed, or could not be understood. 
The response payload + adheres to the `ErrorMessage` schema (described below). +- 2: **ServerError** -- the responder encountered an error while processing the + request. The response payload adheres to the `ErrorMessage` schema (described + below). - 3: **ResourceUnavailable** -- the responder does not have requested resource. The response payload adheres to the `ErrorMessage` schema (described below). *Note*: This response code is only valid as a response where specified. -Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses. +Clients MAY use response codes above `128` to indicate alternative, erroneous +request-specific responses. -The range `[4, 127]` is RESERVED for future usages, and should be treated as error if not recognized expressly. +The range `[4, 127]` is RESERVED for future usages, and should be treated as +error if not recognized expressly. The `ErrorMessage` schema is: @@ -659,69 +806,96 @@ The `ErrorMessage` schema is: ) ``` -*Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes). -Clients MUST treat as valid any byte sequences. +*Note*: By convention, the `error_message` is a sequence of bytes that MAY be +interpreted as a UTF-8 string (for debugging purposes). Clients MUST treat as +valid any byte sequences. -The responder MAY penalise peers that concurrently open more than `MAX_CONCURRENT_REQUESTS` streams for the same request type, for the protocol IDs defined in this specification. +The responder MAY penalise peers that concurrently open more than +`MAX_CONCURRENT_REQUESTS` streams for the same request type, for the protocol +IDs defined in this specification. #### Encoding strategies -The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. -Only one value is possible at this time: +The token of the negotiated protocol ID specifies the type of encoding to be +used for the req/resp interaction. Only one value is possible at this time: -- `ssz_snappy`: The contents are first [SSZ-encoded](../../ssz/simple-serialize.md) - and then compressed with [Snappy](https://github.com/google/snappy) frames compression. - For objects containing a single field, only the field is SSZ-encoded not a container with a single field. - For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s. - This encoding type MUST be supported by all clients. +- `ssz_snappy`: The contents are first + [SSZ-encoded](../../ssz/simple-serialize.md) and then compressed with + [Snappy](https://github.com/google/snappy) frames compression. For objects + containing a single field, only the field is SSZ-encoded not a container with + a single field. For example, the `BeaconBlocksByRoot` request is an + SSZ-encoded list of `Root`'s. This encoding type MUST be supported by all + clients. ##### SSZ-snappy encoding strategy -The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded. +The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) +outlines how objects are SSZ-encoded. -To achieve snappy encoding on top of SSZ, we feed the serialized form of the object to the Snappy compressor on encoding. -The inverse happens on decoding. +To achieve snappy encoding on top of SSZ, we feed the serialized form of the +object to the Snappy compressor on encoding. The inverse happens on decoding. 
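The single-byte result codes and the `ErrorMessage` schema defined above lend themselves to a small requester-side sketch. This is an illustration only: the helper name and the plain-bytes payload handling are assumptions, while the code meanings, the reserved `[4, 127]` range, and the "MAY be interpreted as UTF-8" convention come from the text.

```python
SUCCESS = 0
INVALID_REQUEST = 1
SERVER_ERROR = 2
RESOURCE_UNAVAILABLE = 3


def describe_result(code: int, payload: bytes = b"") -> str:
    """Interpret a single-byte req/resp result code.

    For non-zero codes the payload is an ErrorMessage: a byte sequence that
    MAY be interpreted as UTF-8 for debugging, but any bytes are valid.
    """
    assert 0 <= code <= 255
    if code == SUCCESS:
        return "success"
    if 4 <= code <= 127:
        # Reserved range: treat as an error if not expressly recognized.
        return f"reserved/unrecognized error code {code}"
    names = {
        INVALID_REQUEST: "InvalidRequest",
        SERVER_ERROR: "ServerError",
        RESOURCE_UNAVAILABLE: "ResourceUnavailable",
    }
    name = names.get(code, f"request-specific error {code}")
    return f"{name}: {payload.decode('utf-8', errors='replace')}"
```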
-Snappy has two formats: "block" and "frames" (streaming). -To support large requests and response chunks, snappy-framing is used. +Snappy has two formats: "block" and "frames" (streaming). To support large +requests and response chunks, snappy-framing is used. -Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104) -and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable. +Since snappy frame contents +[have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104) +and frame headers are just `identifier (1) + checksum (4)` bytes, the expected +buffering of a single frame is acceptable. -**Encoding-dependent header:** Req/Resp protocols using the `ssz_snappy` encoding strategy MUST encode the length of the raw SSZ bytes, -encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). +**Encoding-dependent header:** Req/Resp protocols using the `ssz_snappy` +encoding strategy MUST encode the length of the raw SSZ bytes, encoded as an +unsigned +[protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). -*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream. -When Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame. +*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder +can then directly write the chunk contents to the stream. When Snappy is +applied, it can be passed through a buffered Snappy writer to compress frame by +frame. -*Reading*: After reading the expected SSZ byte length, the SSZ decoder can directly read the contents from the stream. -When snappy is applied, it can be passed through a buffered Snappy reader to decompress frame by frame. +*Reading*: After reading the expected SSZ byte length, the SSZ decoder can +directly read the contents from the stream. When snappy is applied, it can be +passed through a buffered Snappy reader to decompress frame by frame. Before reading the payload, the header MUST be validated: -- The length-prefix MUST be encoded as an unsigned protobuf varint. It SHOULD be minimally encoded (i.e., without any redundant bytes) and MUST not exceed 10 bytes in length, which is sufficient to represent any `uint64` value. The length-prefix MUST be decoded into a type which supports the full range of `uint64` values. -- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) or `MAX_PAYLOAD_SIZE`, whichever is smaller. +- The length-prefix MUST be encoded as an unsigned protobuf varint. It SHOULD be + minimally encoded (i.e., without any redundant bytes) and MUST not exceed 10 + bytes in length, which is sufficient to represent any `uint64` value. The + length-prefix MUST be decoded into a type which supports the full range of + `uint64` values. +- The length-prefix is within the expected + [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) + or `MAX_PAYLOAD_SIZE`, whichever is smaller. -After reading a valid header, the payload MAY be read, while maintaining the size constraints from the header. +After reading a valid header, the payload MAY be read, while maintaining the +size constraints from the header. 
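The unsigned-varint length prefix and its validation rules described here can be sketched directly. The functions below are hypothetical helpers, not part of the spec: they show a minimal varint encoder/decoder enforcing the 10-byte and minimal-encoding constraints, plus the bound check against the SSZ type bound and `MAX_PAYLOAD_SIZE`. After a valid header, a reader would go on to read at most `max_compressed_len(n)` bytes of snappy-framed data and expect exactly `n` bytes after decompression.

```python
def encode_varint(n: int) -> bytes:
    """Encode n as a minimal unsigned protobuf varint (LEB128)."""
    assert 0 <= n < 2**64
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        if n:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)


def decode_varint(data: bytes) -> tuple:
    """Decode an unsigned varint; return (value, bytes_consumed).

    Rejects encodings longer than 10 bytes (enough for any uint64), values
    that overflow uint64, and non-minimal (redundantly padded) encodings.
    """
    value, shift = 0, 0
    for i, byte in enumerate(data):
        if i >= 10:
            raise ValueError("varint longer than 10 bytes")
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            if i > 0 and byte == 0:
                raise ValueError("non-minimal varint encoding")
            if value >= 2**64:
                raise ValueError("length does not fit in uint64")
            return value, i + 1
        shift += 7
    raise ValueError("truncated varint")


def validate_length_prefix(n: int, ssz_bound: int, max_payload_size: int) -> None:
    """Enforce the header bound: min(type-specific SSZ bound, MAX_PAYLOAD_SIZE)."""
    if n > min(ssz_bound, max_payload_size):
        raise ValueError("length-prefix exceeds allowed payload size")


assert decode_varint(encode_varint(0)) == (0, 1)
assert decode_varint(encode_varint(300)) == (300, 2)
```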
-A reader MUST NOT read more than `max_compressed_len(n)` bytes after reading the SSZ length-prefix `n` from the header. +A reader MUST NOT read more than `max_compressed_len(n)` bytes after reading the +SSZ length-prefix `n` from the header. A reader MUST consider the following cases as invalid input: -- Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected if more bytes are read than required. -- An early EOF, before fully reading the declared length-prefix worth of SSZ bytes. +- Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected + if more bytes are read than required. +- An early EOF, before fully reading the declared length-prefix worth of SSZ + bytes. In case of an invalid input (header or payload), a reader MUST: -- From requests: send back an error message, response code `InvalidRequest`. The request itself is ignored. -- From responses: ignore the response, the response MUST be considered bad server behavior. +- From requests: send back an error message, response code `InvalidRequest`. The + request itself is ignored. +- From responses: ignore the response, the response MUST be considered bad + server behavior. -All messages that contain only a single field MUST be encoded directly as the type of that field and MUST NOT be encoded as an SSZ container. +All messages that contain only a single field MUST be encoded directly as the +type of that field and MUST NOT be encoded as an SSZ container. -Responses that are SSZ-lists (for example `List[SignedBeaconBlock, ...]`) send their -constituents individually as `response_chunk`s. For example, the -`List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s. -Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload. +Responses that are SSZ-lists (for example `List[SignedBeaconBlock, ...]`) send +their constituents individually as `response_chunk`s. For example, the +`List[SignedBeaconBlock, ...]` response type sends zero or more +`response_chunk`s. Each _successful_ `response_chunk` contains a single +`SignedBeaconBlock` payload. #### Messages @@ -743,14 +917,20 @@ Request, Response Content: The fields are, as seen by the client at the time of sending the message: -- `fork_digest`: The node's `ForkDigest` (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where - - `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time - (not necessarily the epoch to which the node is sync) - - `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root` -- `finalized_root`: `store.finalized_checkpoint.root` according to [fork choice](./fork-choice.md). - (Note this defaults to `Root(b'\x00' * 32)` for the genesis finalized checkpoint). -- `finalized_epoch`: `store.finalized_checkpoint.epoch` according to [fork choice](./fork-choice.md). -- `head_root`: The `hash_tree_root` root of the current head block (`BeaconBlock`). +- `fork_digest`: The node's `ForkDigest` + (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where + - `current_fork_version` is the fork version at the node's current epoch + defined by the wall-clock time (not necessarily the epoch to which the node + is sync) + - `genesis_validators_root` is the static `Root` found in + `state.genesis_validators_root` +- `finalized_root`: `store.finalized_checkpoint.root` according to + [fork choice](./fork-choice.md). 
(Note this defaults to `Root(b'\x00' * 32)` + for the genesis finalized checkpoint). +- `finalized_epoch`: `store.finalized_checkpoint.epoch` according to + [fork choice](./fork-choice.md). +- `head_root`: The `hash_tree_root` root of the current head block + (`BeaconBlock`). - `head_slot`: The slot of the block corresponding to the `head_root`. The dialing client MUST send a `Status` request upon connection. @@ -759,19 +939,25 @@ The request/response MUST be encoded as an SSZ-container. The response MUST consist of a single `response_chunk`. -Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions: +Clients SHOULD immediately disconnect from one another following the handshake +above under the following conditions: -1. If `fork_digest` does not match the node's local `fork_digest`, since the client’s chain is on another fork. -2. If the (`finalized_root`, `finalized_epoch`) shared by the peer is not in the client's chain at the expected epoch. - For example, if Peer 1 sends (root, epoch) of (A, 5) and Peer 2 sends (B, 3) but Peer 1 has root C at epoch 3, - then Peer 1 would disconnect because it knows that their chains are irreparably disjoint. +1. If `fork_digest` does not match the node's local `fork_digest`, since the + client’s chain is on another fork. +2. If the (`finalized_root`, `finalized_epoch`) shared by the peer is not in the + client's chain at the expected epoch. For example, if Peer 1 sends (root, + epoch) of (A, 5) and Peer 2 sends (B, 3) but Peer 1 has root C at epoch 3, + then Peer 1 would disconnect because it knows that their chains are + irreparably disjoint. -Once the handshake completes, the client with the lower `finalized_epoch` or `head_slot` (if the clients have equal `finalized_epoch`s) -SHOULD request beacon blocks from its counterparty via the `BeaconBlocksByRange` request. +Once the handshake completes, the client with the lower `finalized_epoch` or +`head_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon +blocks from its counterparty via the `BeaconBlocksByRange` request. -*Note*: Under abnormal network condition or after some rounds of `BeaconBlocksByRange` requests, -the client might need to send `Status` request again to learn if the peer has a higher head. -Implementers are free to implement such behavior in their own way. +*Note*: Under abnormal network condition or after some rounds of +`BeaconBlocksByRange` requests, the client might need to send `Status` request +again to learn if the peer has a higher head. Implementers are free to implement +such behavior in their own way. ##### Goodbye v1 @@ -785,13 +971,15 @@ Request, Response Content: ) ``` -Client MAY send goodbye messages upon disconnection. The reason field MAY be one of the following values: +Client MAY send goodbye messages upon disconnection. The reason field MAY be one +of the following values: - 1: Client shut down. - 2: Irrelevant network. - 3: Fault/error. -Clients MAY use reason codes above `128` to indicate alternative, erroneous request-specific responses. +Clients MAY use reason codes above `128` to indicate alternative, erroneous +request-specific responses. The range `[4, 127]` is RESERVED for future usage. @@ -821,45 +1009,50 @@ Response Content: ) ``` -Requests beacon blocks in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice. 

##### Goodbye v1

@@ -785,13 +971,15 @@ Request, Response Content:

)
```

-Client MAY send goodbye messages upon disconnection. The reason field MAY be one of the following values:
+Clients MAY send goodbye messages upon disconnection. The reason field MAY be
+one of the following values:

- 1: Client shut down.
- 2: Irrelevant network.
- 3: Fault/error.

-Clients MAY use reason codes above `128` to indicate alternative, erroneous request-specific responses.
+Clients MAY use reason codes above `128` to indicate alternative, erroneous
+request-specific responses.

The range `[4, 127]` is RESERVED for future usage.

@@ -821,45 +1009,50 @@ Response Content:

)
```

-Requests beacon blocks in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice.
-For example, requesting blocks starting at `start_slot=2` and `count=4` would return the blocks at slots `[2, 3, 4, 5]`.
-In cases where a slot is empty for a given slot number, no block is returned.
-For example, if slot 4 were empty in the previous example, the returned array would contain `[2, 3, 5]`.
+Requests beacon blocks in the slot range `[start_slot, start_slot + count)`,
+leading up to the current head block as selected by fork choice. For example,
+requesting blocks starting at `start_slot=2` and `count=4` would return the
+blocks at slots `[2, 3, 4, 5]`. In cases where a slot is empty for a given slot
+number, no block is returned. For example, if slot 4 were empty in the previous
+example, the returned array would contain `[2, 3, 5]`.

-`step` is deprecated and must be set to 1. Clients may respond with a single block if a larger step is returned during the deprecation transition period.
+`step` is deprecated and must be set to 1. Clients may respond with a single
+block if a larger step is returned during the deprecation transition period.

-`/eth2/beacon_chain/req/beacon_blocks_by_range/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
+`/eth2/beacon_chain/req/beacon_blocks_by_range/1/` is deprecated. Clients MAY
+respond with an empty list during the deprecation transition period.

`BeaconBlocksByRange` is primarily used to sync historical blocks.

The request MUST be encoded as an SSZ-container.

-The response MUST consist of zero or more `response_chunk`.
-Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.
+The response MUST consist of zero or more `response_chunk`. Each _successful_
+`response_chunk` MUST contain a single `SignedBeaconBlock` payload.

Clients MUST keep a record of signed blocks seen on the epoch range
`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]`
-where `current_epoch` is defined by the current wall-clock time,
-and clients MUST support serving requests of blocks on this range.
+where `current_epoch` is defined by the current wall-clock time, and clients
+MUST support serving requests of blocks on this range.

-Peers that are unable to reply to block requests within the `MIN_EPOCHS_FOR_BLOCK_REQUESTS`
-epoch range SHOULD respond with error code `3: ResourceUnavailable`.
-Such peers that are unable to successfully reply to this range of requests MAY get descored
-or disconnected at any time.
+Peers that are unable to reply to block requests within the
+`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epoch range SHOULD respond with error code
+`3: ResourceUnavailable`. Such peers that are unable to successfully reply to
+this range of requests MAY get descored or disconnected at any time.

-*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
-MUST backfill the local block database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS`
-to be fully compliant with `BlocksByRange` requests. To safely perform such a
-backfill of blocks to the recent state, the node MUST validate both (1) the
-proposer signatures and (2) that the blocks form a valid chain up to the most
-recent block referenced in the weak subjectivity state.
+*Note*: The above requirement implies that nodes that start from a recent weak
+subjectivity checkpoint MUST backfill the local block database to at least epoch
+`current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS` to be fully compliant with
+`BlocksByRange` requests. 
To safely perform such a backfill of blocks to the +recent state, the node MUST validate both (1) the proposer signatures and (2) +that the blocks form a valid chain up to the most recent block referenced in the +weak subjectivity state. -*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin -participating in the networking immediately, other peers MAY -disconnect and/or temporarily ban such an un-synced or semi-synced client. +*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can +begin participating in the networking immediately, other peers MAY disconnect +and/or temporarily ban such an un-synced or semi-synced client. -Clients MUST respond with at least the first block that exists in the range, if they have it, -and no more than `MAX_REQUEST_BLOCKS` blocks. +Clients MUST respond with at least the first block that exists in the range, if +they have it, and no more than `MAX_REQUEST_BLOCKS` blocks. The following blocks, where they exist, MUST be sent in consecutive order. @@ -867,16 +1060,18 @@ Clients MAY limit the number of blocks in the response. The response MUST contain no more than `count` blocks. -Clients MUST respond with blocks from their view of the current fork choice --- that is, blocks from the single chain defined by the current head. -Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake. +Clients MUST respond with blocks from their view of the current fork choice -- +that is, blocks from the single chain defined by the current head. Of note, +blocks from slots before the finalization MUST lead to the finalized block +reported in the `Status` handshake. -Clients MUST respond with blocks that are consistent from a single chain within the context of the request. -This applies to any `step` value. -In particular when `step == 1`, each `parent_root` MUST match the `hash_tree_root` of the preceding block. +Clients MUST respond with blocks that are consistent from a single chain within +the context of the request. This applies to any `step` value. In particular when +`step == 1`, each `parent_root` MUST match the `hash_tree_root` of the preceding +block. -After the initial block, clients MAY stop in the process of responding -if their fork choice changes the view of the chain in the context of the request. +After the initial block, clients MAY stop in the process of responding if their +fork choice changes the view of the chain in the context of the request. ##### BeaconBlocksByRoot v1 @@ -899,27 +1094,31 @@ Response Content: ``` Requests blocks by block root (= `hash_tree_root(SignedBeaconBlock.message)`). -The response is a list of `SignedBeaconBlock` whose length is less than or equal to the number of requested blocks. -It may be less in the case that the responding peer is missing blocks. +The response is a list of `SignedBeaconBlock` whose length is less than or equal +to the number of requested blocks. It may be less in the case that the +responding peer is missing blocks. No more than `MAX_REQUEST_BLOCKS` may be requested at a time. -`BeaconBlocksByRoot` is primarily used to recover recent blocks (e.g. when receiving a block or attestation whose parent is unknown). +`BeaconBlocksByRoot` is primarily used to recover recent blocks (e.g. when +receiving a block or attestation whose parent is unknown). The request MUST be encoded as an SSZ-field. -The response MUST consist of zero or more `response_chunk`. 
-Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.
+The response MUST consist of zero or more `response_chunk`. Each _successful_
+`response_chunk` MUST contain a single `SignedBeaconBlock` payload.

Clients MUST support requesting blocks since the latest finalized epoch.

-Clients MUST respond with at least one block, if they have it.
-Clients MAY limit the number of blocks in the response.
+Clients MUST respond with at least one block, if they have it. Clients MAY limit
+the number of blocks in the response.

-Clients MAY include a block in the response as soon as it passes the gossip validation rules.
-Clients SHOULD NOT respond with blocks that fail the beacon chain state transition.
+Clients MAY include a block in the response as soon as it passes the gossip
+validation rules. Clients SHOULD NOT respond with blocks that fail the beacon
+chain state transition.

-`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
+`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY
+respond with an empty list during the deprecation transition period.

##### Ping v1

@@ -942,12 +1141,14 @@ Response Content:
```

Sent intermittently, the `Ping` protocol checks liveness of connected peers.
-Peers request and respond with their local metadata sequence number (`MetaData.seq_number`).
+Peers request and respond with their local metadata sequence number
+(`MetaData.seq_number`).

-If the peer does not respond to the `Ping` request, the client MAY disconnect from the peer.
+If the peer does not respond to the `Ping` request, the client MAY disconnect
+from the peer.

-A client can then determine if their local record of a peer's MetaData is up to date
-and MAY request an updated version via the `MetaData` RPC method if not.
+A client can then determine if their local record of a peer's MetaData is up to
+date and MAY request an updated version via the `MetaData` RPC method if not.

The request MUST be encoded as an SSZ-field.

@@ -967,10 +1168,9 @@ Response Content:
)
```

-Requests the MetaData of a peer.
-The request opens and negotiates the stream without sending any request content.
-Once established the receiving peer responds with
-it's local most up-to-date MetaData.
+Requests the MetaData of a peer. The request opens and negotiates the stream
+without sending any request content. Once established the receiving peer
+responds with its local most up-to-date MetaData.

The response MUST be encoded as an SSZ-container.

@@ -978,61 +1178,81 @@ The response MUST consist of a single `response_chunk`.

### The discovery domain: discv5

-Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) (Protocol version v5.1) is used for peer discovery.
+Discovery Version 5
+([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md))
+(Protocol version v5.1) is used for peer discovery.

-`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only.
-`discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
+`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for
+peer discovery only. `discv5` supports self-certified, flexible peer records
+(ENRs) and topic-based advertisement, both of which are (or will be)
+requirements in this context.
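
As a forward-looking illustration (an editorial sketch, not part of the spec
change in this diff) of how discovered records are typically consumed, a client
might filter candidate ENRs on the `eth2` and `attnets` entries specified in the
subsections below. `enr.get` and `decode_bytes` are assumed accessors for raw
ENR values and SSZ decoding.

```python
# Editorial sketch only, not part of the upstream specification.
def is_candidate_peer(enr, local_fork_digest: ForkDigest, wanted_subnet: SubnetID) -> bool:
    eth2_bytes = enr.get("eth2")  # SSZ-encoded ENRForkID, see the `eth2` field below
    if eth2_bytes is None:
        return False
    enr_fork_id = ENRForkID.decode_bytes(eth2_bytes)  # assumed SSZ decode helper
    # Only dial peers whose current fork digest matches ours.
    if enr_fork_id.fork_digest != local_fork_digest:
        return False
    attnets_bytes = enr.get("attnets")  # SSZ Bitvector, see the attnets entry below
    if attnets_bytes is None:
        # The entry MAY be absent when all bits are zero.
        return False
    attnets = Bitvector[ATTESTATION_SUBNET_COUNT].decode_bytes(attnets_bytes)
    return bool(attnets[wanted_subnet])
```
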
#### Integration into libp2p stacks -`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor -to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) -and [peer routing](https://github.com/libp2p/go-libp2p-core/blob/master/routing/routing.go#L36-L44) abstractions and interfaces (go-libp2p links provided). +`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an +adaptor to make it conform to the +[service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) +and +[peer routing](https://github.com/libp2p/go-libp2p-core/blob/master/routing/routing.go#L36-L44) +abstractions and interfaces (go-libp2p links provided). -Inputs to operations include peer IDs (when locating a specific peer) or capabilities (when searching for peers with a specific capability), -and the outputs will be multiaddrs converted from the ENR records returned by the discv5 backend. +Inputs to operations include peer IDs (when locating a specific peer) or +capabilities (when searching for peers with a specific capability), and the +outputs will be multiaddrs converted from the ENR records returned by the discv5 +backend. -This integration enables the libp2p stack to subsequently form connections and streams with discovered peers. +This integration enables the libp2p stack to subsequently form connections and +streams with discovered peers. #### ENR structure -The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the following entries -(exclusive of the sequence number and signature, which MUST be present in an ENR): +The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the +following entries (exclusive of the sequence number and signature, which MUST be +present in an ENR): - The compressed secp256k1 publickey, 33 bytes (`secp256k1` field). The ENR MAY contain the following entries: - An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). -- An IPv4 TCP port (`tcp` field) representing the local libp2p TCP listening port and/or the corresponding IPv6 port (`tcp6` field). -- An IPv4 QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port and/or the corresponding IPv6 port (`quic6` field). -- An IPv4 UDP port (`udp` field) representing the local discv5 listening port and/or the corresponding IPv6 port (`udp6` field). +- An IPv4 TCP port (`tcp` field) representing the local libp2p TCP listening + port and/or the corresponding IPv6 port (`tcp6` field). +- An IPv4 QUIC port (`quic` field) representing the local libp2p QUIC (UDP) + listening port and/or the corresponding IPv6 port (`quic6` field). +- An IPv4 UDP port (`udp` field) representing the local discv5 listening port + and/or the corresponding IPv6 port (`udp6` field). -Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). +Specifications of these parameters can be found in the +[ENR Specification](http://eips.ethereum.org/EIPS/eip-778). ##### Attestation subnet bitfield -The ENR `attnets` entry signifies the attestation subnet bitfield with the following form -to more easily discover peers participating in particular attestation gossip subnets. +The ENR `attnets` entry signifies the attestation subnet bitfield with the +following form to more easily discover peers participating in particular +attestation gossip subnets. 
| Key       | Value                                     |
| :-------- | :---------------------------------------- |
| `attnets` | SSZ `Bitvector[ATTESTATION_SUBNET_COUNT]` |

-If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `attnets` entry with the same value as `MetaData.attnets`.
+If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the
+`attnets` entry with the same value as `MetaData.attnets`.

-If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
+If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally
+include the `attnets` entry or leave it out entirely.

##### `eth2` field

-ENRs MUST carry a generic `eth2` key with an 16-byte value of the node's current fork digest, next fork version,
-and next fork epoch to ensure connections are made with peers on the intended Ethereum network.
+ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current
+fork digest, next fork version, and next fork epoch to ensure connections are
+made with peers on the intended Ethereum network.

| Key    | Value           |
| :----- | :-------------- |
| `eth2` | SSZ `ENRForkID` |

-Specifically, the value of the `eth2` key MUST be the following SSZ encoded object (`ENRForkID`)
+Specifically, the value of the `eth2` key MUST be the following SSZ encoded
+object (`ENRForkID`)

```
(
@@ -1044,35 +1264,52 @@ Specifically, the value of the `eth2` key MUST be the following SSZ encoded obje

where the fields of `ENRForkID` are defined as

-- `fork_digest` is `compute_fork_digest(current_fork_version, genesis_validators_root)` where
-  - `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time
-    (not necessarily the epoch to which the node is sync)
-  - `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
-- `next_fork_version` is the fork version corresponding to the next planned hard fork at a future epoch.
-  If no future fork is planned, set `next_fork_version = current_fork_version` to signal this fact
-- `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated.
-  If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
-
-*Note*: `fork_digest` is composed of values that are not known until the genesis block/state are available.
-Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known.
-One notable exception to this rule is the distribution of bootnode ENRs prior to genesis.
-In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as
+- `fork_digest` is
+  `compute_fork_digest(current_fork_version, genesis_validators_root)` where
+  - `current_fork_version` is the fork version at the node's current epoch
+    defined by the wall-clock time (not necessarily the epoch to which the node
+    is synced)
+  - `genesis_validators_root` is the static `Root` found in
+    `state.genesis_validators_root`
+- `next_fork_version` is the fork version corresponding to the next planned hard
+  fork at a future epoch. If no future fork is planned, set
+  `next_fork_version = current_fork_version` to signal this fact
+- `next_fork_epoch` is the epoch at which the next fork is planned and the
+  `current_fork_version` will be updated. 
If no future fork is planned, set + `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact + +*Note*: `fork_digest` is composed of values that are not known until the genesis +block/state are available. Due to this, clients SHOULD NOT form ENRs and begin +peer discovery until genesis values are known. One notable exception to this +rule is the distribution of bootnode ENRs prior to genesis. In this case, +bootnode ENRs SHOULD be initially distributed with `eth2` field set as `ENRForkID(fork_digest=compute_fork_digest(GENESIS_FORK_VERSION, b'\x00'*32), next_fork_version=GENESIS_FORK_VERSION, next_fork_epoch=FAR_FUTURE_EPOCH)`. -After genesis values are known, the bootnodes SHOULD update ENRs to participate in normal discovery operations. +After genesis values are known, the bootnodes SHOULD update ENRs to participate +in normal discovery operations. -Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `next_fork_epoch` that match local values. +Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and +`next_fork_epoch` that match local values. -Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. -Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients, -these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`. +Clients MAY connect to peers with the same `fork_digest` but a different +`next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to +matching prior to the earlier `next_fork_epoch` of the two clients, these +connecting clients will be unable to successfully interact starting at the +earlier `next_fork_epoch`. ### Attestation subnet subscription -Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node should: +Because Phase 0 does not have shards and thus does not have Shard Committees, +there is no stable backbone to the attestation subnets +(`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node +should: -- Remain subscribed to `SUBNETS_PER_NODE` for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs. -- Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. -- Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id, epoch)` function. +- Remain subscribed to `SUBNETS_PER_NODE` for `EPOCHS_PER_SUBNET_SUBSCRIPTION` + epochs. +- Maintain advertisement of the selected subnets in their node's ENR `attnets` + entry by setting the selected `subnet_id` bits to `True` (e.g. + `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. +- Select these subnets based on their node-id as specified by the following + `compute_subscribed_subnets(node_id, epoch)` function. 
```python def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID: @@ -1092,7 +1329,12 @@ def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[Subnet return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)] ``` -*Note*: When preparing for a hard fork, a node must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements. +*Note*: When preparing for a hard fork, a node must select and subscribe to +subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` +epochs in advance of the fork. These new subnets for the fork are maintained in +addition to those for the current fork until the fork occurs. After the fork +occurs, let the subnets from the previous fork reach the end of life with no +replacements. ## Design decision rationale @@ -1100,328 +1342,419 @@ def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[Subnet #### Why are we defining specific transports? -libp2p peers can listen on multiple transports concurrently, and these can change over time. -Multiaddrs encode not only the address but also the transport to be used to dial. +libp2p peers can listen on multiple transports concurrently, and these can +change over time. Multiaddrs encode not only the address but also the transport +to be used to dial. -Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or WebSockets on paper becomes irrelevant. +Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or +WebSockets on paper becomes irrelevant. -However, it is useful to define a minimum baseline for interoperability purposes. +However, it is useful to define a minimum baseline for interoperability +purposes. #### Can clients support other transports/handshakes than the ones mandated by the spec? -Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice. -While interoperability shall not be harmed by lack of such support, the advantages are desirable: +Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC +transports, if available in the language of choice. While interoperability shall +not be harmed by lack of such support, the advantages are desirable: - Better latency, performance, and other QoS characteristics (QUIC). - Paving the way for interfacing with future light clients (WebSockets, WebRTC). -The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in section 7 -of the [QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7) -and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22). +The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in +section 7 of the +[QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7) +and the accompanying +[QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22). -The usage of one handshake procedure or the other shall be transparent to the application layer, -once the libp2p Host/Node object has been configured appropriately. 
+The usage of one handshake procedure or the other shall be transparent to the +application layer, once the libp2p Host/Node object has been configured +appropriately. #### What are the advantages of using TCP/QUIC/Websockets? -TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today. -HTTP/1.1 and HTTP/2 run atop TCP. - -QUIC is a new protocol that’s in the final stages of specification by the IETF QUIC WG. -It emerged from Google’s SPDY experiment. The QUIC transport is undoubtedly promising. -It’s UDP-based yet reliable, ordered, multiplexed, natively secure (TLS 1.3), reduces latency vs. TCP, -and offers stream-level and connection-level congestion control (thus removing head-of-line blocking), -0-RTT connection establishment, and endpoint migration, amongst other features. -UDP also has better NAT traversal properties than TCP—something we desperately pursue in peer-to-peer networks. - -QUIC is being adopted as the underlying protocol for HTTP/3. -This has the potential to award us censorship resistance via deep packet inspection for free. -Provided that we use the same port numbers and encryption mechanisms as HTTP/3, our traffic may be indistinguishable from standard web traffic, -and we may only become subject to standard IP-based firewall filtering—something we can counteract via other mechanisms. +TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol +that powers much of the Internet as we know it today. HTTP/1.1 and HTTP/2 run +atop TCP. + +QUIC is a new protocol that’s in the final stages of specification by the IETF +QUIC WG. It emerged from Google’s SPDY experiment. The QUIC transport is +undoubtedly promising. It’s UDP-based yet reliable, ordered, multiplexed, +natively secure (TLS 1.3), reduces latency vs. TCP, and offers stream-level and +connection-level congestion control (thus removing head-of-line blocking), 0-RTT +connection establishment, and endpoint migration, amongst other features. UDP +also has better NAT traversal properties than TCP—something we desperately +pursue in peer-to-peer networks. + +QUIC is being adopted as the underlying protocol for HTTP/3. This has the +potential to award us censorship resistance via deep packet inspection for free. +Provided that we use the same port numbers and encryption mechanisms as HTTP/3, +our traffic may be indistinguishable from standard web traffic, and we may only +become subject to standard IP-based firewall filtering—something we can +counteract via other mechanisms. WebSockets and/or WebRTC transports are necessary for interaction with browsers, -and will become increasingly important as we incorporate browser-based light clients to the Ethereum network. +and will become increasingly important as we incorporate browser-based light +clients to the Ethereum network. #### Why do we not just support a single transport? -Networks evolve. -Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art. -Introducing changes on an ossified protocol is very costly, and sometimes, downright impracticable without causing undesirable breakage. +Networks evolve. Hardcoding design decisions leads to ossification, preventing +the evolution of networks alongside the state of the art. Introducing changes on +an ossified protocol is very costly, and sometimes, downright impracticable +without causing undesirable breakage. 
-Modeling for upgradeability and dynamic transport selection from the get-go lays the foundation for a future-proof stack. +Modeling for upgradeability and dynamic transport selection from the get-go lays +the foundation for a future-proof stack. -Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments -(e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), -without the need for proxying or trust delegation to servers. +Clients can adopt new transports without breaking old ones, and the +multi-transport ability enables constrained and sandboxed environments (e.g. +browsers, embedded devices) to interact with the network as first-class citizens +via suitable/native transports (e.g. WSS), without the need for proxying or +trust delegation to servers. #### Why are we not using QUIC from the start? -The QUIC standard is still not finalized (at working draft 22 at the time of writing), -and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). -One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic). +The QUIC standard is still not finalized (at working draft 22 at the time of +writing), and not all mainstream runtimes/languages have mature, standard, +and/or fully-interoperable +[QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One +remarkable example is node.js, where the QUIC implementation is +[in early development](https://github.com/nodejs/quic). -*Note*: [TLS 1.3 is a prerequisite of the QUIC transport](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), -although an experiment exists to integrate Noise as the QUIC crypto layer: [nQUIC](https://eprint.iacr.org/2019/028). +*Note*: +[TLS 1.3 is a prerequisite of the QUIC transport](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), +although an experiment exists to integrate Noise as the QUIC crypto layer: +[nQUIC](https://eprint.iacr.org/2019/028). -On the other hand, TLS 1.3 is the newest, simplified iteration of TLS. -Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function. -Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features. +On the other hand, TLS 1.3 is the newest, simplified iteration of TLS. Old, +insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as +the sole ECDH key agreement function. Handshakes are faster, 1-RTT data is +supported, and session resumption is a reality, amongst other features. ### Multiplexing #### Why are we using mplex/yamux? -[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control. -Implementations exist in a limited set of languages, and it’s not a trivial piece to develop. +[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer +invented by Hashicorp that supports stream-level congestion control. +Implementations exist in a limited set of languages, and it’s not a trivial +piece to develop. 
-Conscious of that, the libp2p community conceptualized [mplex](https://github.com/libp2p/specs/blob/master/mplex/README.md) -as a simple, minimal multiplexer for usage with libp2p. -It does not support stream-level congestion control and is subject to head-of-line blocking. +Conscious of that, the libp2p community conceptualized +[mplex](https://github.com/libp2p/specs/blob/master/mplex/README.md) as a +simple, minimal multiplexer for usage with libp2p. It does not support +stream-level congestion control and is subject to head-of-line blocking. -Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing, -but they need to be layered atop TCP, WebSockets, and other transports that lack such support. +Overlay multiplexers are not necessary with QUIC since the protocol provides +native multiplexing, but they need to be layered atop TCP, WebSockets, and other +transports that lack such support. ### Protocol Negotiation #### When is multiselect 2.0 due and why do we plan to migrate to it? -multiselect 2.0 is currently being conceptualized. -The debate started [on this issue](https://github.com/libp2p/specs/pull/95), -but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system. +multiselect 2.0 is currently being conceptualized. The debate started +[on this issue](https://github.com/libp2p/specs/pull/95), but it got +overloaded—as it tends to happen with large conceptual OSS discussions that +touch the heart and core of a system. -At some point in 2020, we expect a renewed initiative to first define the requirements, constraints, assumptions, and features, -in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation. +At some point in 2020, we expect a renewed initiative to first define the +requirements, constraints, assumptions, and features, in order to lock in basic +consensus upfront and subsequently build on that consensus by submitting a +specification for implementation. We plan to eventually migrate to multiselect 2.0 because it will: -1. Reduce round trips during connection bootstrapping and stream protocol negotiation. +1. Reduce round trips during connection bootstrapping and stream protocol + negotiation. 2. Enable efficient one-stream-per-request interaction patterns. -3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation. +3. Leverage *push data* mechanisms of underlying protocols to expedite + negotiation. 4. Provide the building blocks for enhanced censorship resistance. #### What is the difference between connection-level and stream-level protocol negotiation? All libp2p connections must be authenticated, encrypted, and multiplexed. -Connections using network transports unsupportive of native authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol negotiation to agree on a mutually supported: +Connections using network transports unsupportive of native +authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol +negotiation to agree on a mutually supported: 1. authentication/encryption mechanism (such as SecIO, TLS 1.3, Noise). 2. overlay multiplexer (such as mplex, Yamux, spdystream). In this specification, we refer to these two as *connection-level negotiations*. -Transports supporting those features natively (such as QUIC) omit those negotiations. 
+Transports supporting those features natively (such as QUIC) omit those +negotiations. -After successfully selecting a multiplexer, all subsequent I/O happens over *streams*. -When opening streams, peers pin a protocol to that stream, by conducting *stream-level protocol negotiation*. +After successfully selecting a multiplexer, all subsequent I/O happens over +*streams*. When opening streams, peers pin a protocol to that stream, by +conducting *stream-level protocol negotiation*. -At present, multistream-select 1.0 is used for both types of negotiation, -but multiselect 2.0 will use dedicated mechanisms for connection bootstrapping process and stream protocol negotiation. +At present, multistream-select 1.0 is used for both types of negotiation, but +multiselect 2.0 will use dedicated mechanisms for connection bootstrapping +process and stream protocol negotiation. ### Encryption #### Why are we not supporting SecIO? -SecIO has been the default encryption layer for libp2p for years. -It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale. +SecIO has been the default encryption layer for libp2p for years. It is used in +IPFS and Filecoin. And although it will be superseded shortly, it is proven to +work at scale. -Although SecIO has wide language support, we won’t be using it for mainnet because, amongst other things, -it requires several round trips to be sound, and doesn’t support early data (0-RTT data), -a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping. +Although SecIO has wide language support, we won’t be using it for mainnet +because, amongst other things, it requires several round trips to be sound, and +doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will +leverage to reduce round trips during connection bootstrapping. SecIO is not considered secure for the purposes of this spec. #### Why are we using Noise? -Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org): +Copied from the Noise Protocol Framework +[website](http://www.noiseprotocol.org): -> Noise is a framework for building crypto protocols. -> Noise protocols support mutual and optional authentication, identity hiding, forward secrecy, zero round-trip encryption, and other advanced features. +> Noise is a framework for building crypto protocols. Noise protocols support +> mutual and optional authentication, identity hiding, forward secrecy, zero +> round-trip encryption, and other advanced features. -Noise in itself does not specify a single handshake procedure, -but provides a framework to build secure handshakes based on Diffie-Hellman key agreement with a variety of tradeoffs and guarantees. +Noise in itself does not specify a single handshake procedure, but provides a +framework to build secure handshakes based on Diffie-Hellman key agreement with +a variety of tradeoffs and guarantees. -Noise handshakes are lightweight and simple to understand, -and are used in major cryptographic-centric projects like WireGuard, I2P, and Lightning. -[Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf) -have assessed the stated security goals of several Noise handshakes with positive results. +Noise handshakes are lightweight and simple to understand, and are used in major +cryptographic-centric projects like WireGuard, I2P, and Lightning. 
+[Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) +[studies](https://eprint.iacr.org/2019/436.pdf) have assessed the stated +security goals of several Noise handshakes with positive results. #### Why are we using encryption at all? -Transport level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance. -These properties are derived from the following security guarantees that apply to the entire communication between two peers: +Transport level encryption secures message exchange and provides properties that +are useful for privacy, safety, and censorship resistance. These properties are +derived from the following security guarantees that apply to the entire +communication between two peers: -- Peer authentication: the peer I’m talking to is really who they claim to be and who I expect them to be. +- Peer authentication: the peer I’m talking to is really who they claim to be + and who I expect them to be. - Confidentiality: no observer can eavesdrop on the content of our messages. -- Integrity: the data has not been tampered with by a third-party while in transit. -- Non-repudiation: the originating peer cannot dispute that they sent the message. -- Depending on the chosen algorithms and mechanisms (e.g. continuous HMAC), we may obtain additional guarantees, - such as non-replayability (this byte could’ve only been sent *now;* e.g. by using continuous HMACs), - or perfect forward secrecy (in the case that a peer key is compromised, the content of a past conversation will not be compromised). - -Note that transport-level encryption is not exclusive of application-level encryption or cryptography. -Transport-level encryption secures the communication itself, -while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.). +- Integrity: the data has not been tampered with by a third-party while in + transit. +- Non-repudiation: the originating peer cannot dispute that they sent the + message. +- Depending on the chosen algorithms and mechanisms (e.g. continuous HMAC), we + may obtain additional guarantees, such as non-replayability (this byte + could’ve only been sent *now;* e.g. by using continuous HMACs), or perfect + forward secrecy (in the case that a peer key is compromised, the content of a + past conversation will not be compromised). + +Note that transport-level encryption is not exclusive of application-level +encryption or cryptography. Transport-level encryption secures the communication +itself, while application-level cryptography is necessary for the application’s +use cases (e.g. signatures, randomness, etc.). ### Gossipsub #### Why are we using a pub/sub algorithm for block and attestation propagation? Pubsub is a technique to broadcast/disseminate data across a network rapidly. -Such data is packaged in fire-and-forget messages that do not require a response from every recipient. -Peers subscribed to a topic participate in the propagation of messages in that topic. +Such data is packaged in fire-and-forget messages that do not require a response +from every recipient. Peers subscribed to a topic participate in the propagation +of messages in that topic. -The alternative is to maintain a fully connected mesh (all peers connected to each other 1:1), which scales poorly (O(n^2)). +The alternative is to maintain a fully connected mesh (all peers connected to +each other 1:1), which scales poorly (O(n^2)). 
#### Why are we using topics to segregate encodings, yet only support one encoding? -For future extensibility with almost zero overhead now (besides the extra bytes in the topic name). +For future extensibility with almost zero overhead now (besides the extra bytes +in the topic name). #### How do we upgrade gossip channels (e.g. changes in encoding, compression)? -Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, during a hard fork. +Changing gossipsub/broadcasts requires a coordinated upgrade where all clients +start publishing to the new topic together, during a hard fork. -When a node is preparing for upcoming tasks (e.g. validator duty lookahead) on a gossipsub topic, -the node should join the topic of the future epoch in which the task is to occur in addition to listening to the topics for the current epoch. +When a node is preparing for upcoming tasks (e.g. validator duty lookahead) on a +gossipsub topic, the node should join the topic of the future epoch in which the +task is to occur in addition to listening to the topics for the current epoch. #### Why must all clients use the same gossip topic instead of one negotiated between each peer pair? -Supporting multiple topics/encodings would require the presence of relayers to translate between encodings -and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state, -making the protocol more complicated and fragile. +Supporting multiple topics/encodings would require the presence of relayers to +translate between encodings and topics so as to avoid network fragmentation +where participants have diverging views on the gossiped state, making the +protocol more complicated and fragile. -Gossip protocols typically remember what messages they've seen for a finite period of time-based on message identity --- if you publish the same message again after that time has passed, -it will be re-broadcast—adding a relay delay also makes this scenario more likely. +Gossip protocols typically remember what messages they've seen for a finite +period of time-based on message identity -- if you publish the same message +again after that time has passed, it will be re-broadcast—adding a relay delay +also makes this scenario more likely. -One can imagine that in a complicated upgrade scenario, we might have peers publishing the same message on two topics/encodings, -but the price here is pretty high in terms of overhead -- both computational and networking -- so we'd rather avoid that. +One can imagine that in a complicated upgrade scenario, we might have peers +publishing the same message on two topics/encodings, but the price here is +pretty high in terms of overhead -- both computational and networking -- so we'd +rather avoid that. -It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic. +It is permitted for clients to publish data on alternative topics as long as +they also publish on the network-wide mandatory topic. #### Why are the topics strings and not hashes? -Topic names have a hierarchical structure. -In the future, gossipsub may support wildcard subscriptions -(e.g. subscribe to all children topics under a root prefix) by way of prefix matching. -Enforcing hashes for topic names would preclude us from leveraging such features going forward. +Topic names have a hierarchical structure. 
In the future, gossipsub may support +wildcard subscriptions (e.g. subscribe to all children topics under a root +prefix) by way of prefix matching. Enforcing hashes for topic names would +preclude us from leveraging such features going forward. -No security or privacy guarantees are lost as a result of choosing plaintext topic names, -since the domain is finite anyway, and calculating a digest's preimage would be trivial. +No security or privacy guarantees are lost as a result of choosing plaintext +topic names, since the domain is finite anyway, and calculating a digest's +preimage would be trivial. -Furthermore, the topic names are shorter than their digest equivalents (assuming SHA-256 hash), -so hashing topics would bloat messages unnecessarily. +Furthermore, the topic names are shorter than their digest equivalents (assuming +SHA-256 hash), so hashing topics would bloat messages unnecessarily. #### Why are we using the `StrictNoSign` signature policy? -The policy omits the `from` (1), `seqno` (3), `signature` (5) and `key` (6) fields. These fields would: +The policy omits the `from` (1), `seqno` (3), `signature` (5) and `key` (6) +fields. These fields would: - Expose origin of sender (`from`), type of sender (based on `seqno`) -- Add extra unused data to the gossip, since message IDs are based on `data`, not on the `from` and `seqno`. +- Add extra unused data to the gossip, since message IDs are based on `data`, + not on the `from` and `seqno`. - Introduce more message validation than necessary, e.g. no `signature`. #### Why are we overriding the default libp2p pubsub `message-id`? -For our current purposes, there is no need to address messages based on source peer, or track a message `seqno`. -By overriding the default `message-id` to use content-addressing we can filter unnecessary duplicates before hitting the application layer. +For our current purposes, there is no need to address messages based on source +peer, or track a message `seqno`. By overriding the default `message-id` to use +content-addressing we can filter unnecessary duplicates before hitting the +application layer. Some examples of where messages could be duplicated: -- A validator client connected to multiple beacon nodes publishing duplicate gossip messages -- Attestation aggregation strategies where clients partially aggregate attestations and propagate them. - Partial aggregates could be duplicated +- A validator client connected to multiple beacon nodes publishing duplicate + gossip messages +- Attestation aggregation strategies where clients partially aggregate + attestations and propagate them. Partial aggregates could be duplicated - Clients re-publishing seen messages #### Why are these specific gossip parameters chosen? - `D`, `D_low`, `D_high`, `D_lazy`: recommended defaults. -- `heartbeat_interval`: 0.7 seconds, recommended for the beacon chain in the [GossipSub evaluation report by Protocol Labs](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4). -- `fanout_ttl`: 60 seconds, recommended default. - Fanout is primarily used by committees publishing attestations to subnets. - This happens once per epoch per validator and the subnet changes each epoch - so there is little to gain in having a `fanout_ttl` be increased from the recommended default. +- `heartbeat_interval`: 0.7 seconds, recommended for the beacon chain in the + [GossipSub evaluation report by Protocol Labs](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4). 
+- `fanout_ttl`: 60 seconds, recommended default. Fanout is primarily used by + committees publishing attestations to subnets. This happens once per epoch per + validator and the subnet changes each epoch so there is little to gain in + having a `fanout_ttl` be increased from the recommended default. - `mcache_len`: 6, increase by one to ensure that mcache is around for long enough for `IWANT`s to respond to `IHAVE`s in the context of the shorter `heartbeat_interval`. If `mcache_gossip` is increased, this param should be increased to be at least `3` (~2 seconds) more than `mcache_gossip`. -- `mcache_gossip`: 3, recommended default. This can be increased to 5 or 6 - (~4 seconds) if gossip times are longer than expected and the current window - does not provide enough responsiveness during adverse conditions. -- `seen_ttl`: `SLOTS_PER_EPOCH * SECONDS_PER_SLOT / heartbeat_interval = approx. 550`. - Attestation gossip validity is bounded by an epoch, so this is the safe max bound. +- `mcache_gossip`: 3, recommended default. This can be increased to 5 or 6 (~4 + seconds) if gossip times are longer than expected and the current window does + not provide enough responsiveness during adverse conditions. +- `seen_ttl`: + `SLOTS_PER_EPOCH * SECONDS_PER_SLOT / heartbeat_interval = approx. 550`. + Attestation gossip validity is bounded by an epoch, so this is the safe max + bound. #### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets? -For some gossip channels (e.g. those for Attestations and BeaconBlocks), -there are designated ranges of slots during which particular messages can be sent, -limiting messages gossiped to those that can be reasonably used in the consensus at the current time/slot. -This is to reduce optionality in DoS attacks. +For some gossip channels (e.g. those for Attestations and BeaconBlocks), there +are designated ranges of slots during which particular messages can be sent, +limiting messages gossiped to those that can be reasonably used in the consensus +at the current time/slot. This is to reduce optionality in DoS attacks. -`MAXIMUM_GOSSIP_CLOCK_DISPARITY` provides some leeway in validating slot ranges to prevent the gossip network -from becoming overly brittle with respect to clock disparity. -For minimum and maximum allowable slot broadcast times, -`MAXIMUM_GOSSIP_CLOCK_DISPARITY` MUST be subtracted and added respectively, marginally extending the valid range. -Although messages can at times be eagerly gossiped to the network, -the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot. +`MAXIMUM_GOSSIP_CLOCK_DISPARITY` provides some leeway in validating slot ranges +to prevent the gossip network from becoming overly brittle with respect to clock +disparity. For minimum and maximum allowable slot broadcast times, +`MAXIMUM_GOSSIP_CLOCK_DISPARITY` MUST be subtracted and added respectively, +marginally extending the valid range. Although messages can at times be eagerly +gossiped to the network, the node's fork choice prevents integration of these +messages into the actual consensus until the _actual local start_ of the +designated slot. #### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets? -Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. -The exact grouping will be dependent on more involved network tests. 
-This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). -The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise. +Depending on the number of validators, it may be more efficient to group shard +subnets and might provide better stability for the gossipsub channel. The exact +grouping will be dependent on more involved network tests. This constant allows +for more flexibility in setting up the network topology for attestation +aggregation (as aggregation should happen on each subnet). The value is +currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests +indicate otherwise. #### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots? -Attestations can only be included on chain within an epoch's worth of slots so this is the natural cutoff. -There is no utility to the chain to broadcast attestations older than one epoch, -and because validators have a chance to make a new attestation each epoch, -there is minimal utility to the fork choice to relay old attestations as a new latest message can soon be created by each validator. +Attestations can only be included on chain within an epoch's worth of slots so +this is the natural cutoff. There is no utility to the chain to broadcast +attestations older than one epoch, and because validators have a chance to make +a new attestation each epoch, there is minimal utility to the fork choice to +relay old attestations as a new latest message can soon be created by each +validator. -In addition to this, relaying attestations requires validating the attestation in the context of the `state` during which it was created. -Thus, validating arbitrarily old attestations would put additional requirements on which states need to be readily available to the node. -This would result in a higher resource burden and could serve as a DoS vector. +In addition to this, relaying attestations requires validating the attestation +in the context of the `state` during which it was created. Thus, validating +arbitrarily old attestations would put additional requirements on which states +need to be readily available to the node. This would result in a higher resource +burden and could serve as a DoS vector. #### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s? -The dominant strategy for an individual validator is to always broadcast an aggregate containing their own attestation -to the global channel to ensure that proposers see their attestation for inclusion. -Using a private selection criteria and providing this proof of selection alongside -the gossiped aggregate ensures that this dominant strategy will not flood the global channel. +The dominant strategy for an individual validator is to always broadcast an +aggregate containing their own attestation to the global channel to ensure that +proposers see their attestation for inclusion. Using a private selection +criteria and providing this proof of selection alongside the gossiped aggregate +ensures that this dominant strategy will not flood the global channel. -Also, an attacker can create any number of honest-looking aggregates and broadcast them to the global pubsub channel. -Thus without some sort of proof of selection as an aggregator, the global channel can trivially be spammed. 
+Also, an attacker can create any number of honest-looking aggregates and +broadcast them to the global pubsub channel. Thus without some sort of proof of +selection as an aggregator, the global channel can trivially be spammed. #### Why are we sending entire objects in the pubsub and not just hashes? -Entire objects should be sent to get the greatest propagation speeds. -If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer. -In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from. -Sending entire objects ensures that they get propagated through the entire network. +Entire objects should be sent to get the greatest propagation speeds. If only +hashes are sent, then block and attestation propagation is dependent on +recursive requests from each peer. In a hash-only scenario, peers could receive +hashes without knowing who to download the actual contents from. Sending entire +objects ensures that they get propagated through the entire network. #### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc? -The prohibition of unverified-block-gossiping extends to nodes that cannot verify a signature -due to not being fully synced to ensure that such (amplified) DOS attacks are not possible. +The prohibition of unverified-block-gossiping extends to nodes that cannot +verify a signature due to not being fully synced to ensure that such (amplified) +DOS attacks are not possible. #### How are we going to discover peers in a gossipsub topic? -In Phase 0, peers for attestation subnets will be found using the `attnets` entry in the ENR. +In Phase 0, peers for attestation subnets will be found using the `attnets` +entry in the ENR. -Although this method will be sufficient for early upgrade of the beacon chain, we aim to use the more appropriate discv5 topics for this and other similar tasks in the future. -ENRs should ultimately not be used for this purpose. -They are best suited to store identity, location, and capability information, rather than more volatile advertisements. +Although this method will be sufficient for early upgrade of the beacon chain, +we aim to use the more appropriate discv5 topics for this and other similar +tasks in the future. ENRs should ultimately not be used for this purpose. They +are best suited to store identity, location, and capability information, rather +than more volatile advertisements. #### How should fork version be used in practice? -Fork versions are to be manually updated (likely via incrementing) at each hard fork. -This is to provide native domain separation for signatures as well as to aid in usefulness for identifying peers (via ENRs) -and versioning network protocols (e.g. using fork version to naturally version gossipsub topics). +Fork versions are to be manually updated (likely via incrementing) at each hard +fork. This is to provide native domain separation for signatures as well as to +aid in usefulness for identifying peers (via ENRs) and versioning network +protocols (e.g. using fork version to naturally version gossipsub topics). -`BeaconState.genesis_validators_root` is mixed into signature and ENR fork domains (`ForkDigest`) to aid in the ease of domain separation between chains. -This allows fork versions to safely be reused across chains except for the case of contentious forks using the same genesis. 
-In these cases, extra care should be taken to isolate fork versions (e.g. flip a high order bit in all future versions of one of the chains). +`BeaconState.genesis_validators_root` is mixed into signature and ENR fork +domains (`ForkDigest`) to aid in the ease of domain separation between chains. +This allows fork versions to safely be reused across chains except for the case +of contentious forks using the same genesis. In these cases, extra care should +be taken to isolate fork versions (e.g. flip a high order bit in all future +versions of one of the chains). -A node locally stores all previous and future planned fork versions along with the each fork epoch. -This allows for handling sync and processing messages starting from past forks/epochs. +A node locally stores all previous and future planned fork versions along with +the each fork epoch. This allows for handling sync and processing messages +starting from past forks/epochs. ### Req/Resp @@ -1429,143 +1762,204 @@ This allows for handling sync and processing messages starting from past forks/e Requests are segregated by protocol ID to: -1. Leverage protocol routing in libp2p, such that the libp2p stack will route the incoming stream to the appropriate handler. - This allows the handler function for each request type to be self-contained. - For an analogy, think about how you attach HTTP handlers to a REST API server. -2. Version requests independently. - In a coarser-grained umbrella protocol, the entire protocol would have to be versioned even if just one field in a single message changed. -3. Enable clients to select the individual requests/versions they support. - It would no longer be a strict requirement to support all requests, - and clients, in principle, could support a subset of requests and variety of versions. -4. Enable flexibility and agility for clients adopting spec changes that impact the request, by signalling to peers exactly which subset of new/old requests they support. -5. Enable clients to explicitly choose backwards compatibility at the request granularity. - Without this, clients would be forced to support entire versions of the coarser request protocol. -6. Parallelise RFCs (or EIPs). - By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently - without relying on a synchronization point to version the general top-level protocol. - 1. This has the benefit that clients can explicitly choose which RFCs to deploy - without buying into all other RFCs that may be included in that top-level version. - 2. Affording this level of granularity with a top-level protocol would imply creating as many variants - (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs in-flight, O(n^2). -7. Allow us to simplify the payload of requests. - Request-id’s and method-ids no longer need to be sent. - The encoding/request type and version can all be handled by the framework. - -**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. -It is somewhat naïve and introduces overhead on every request when negotiating streams, -although implementation-specific optimizations are possible to save this cost. -Multiselect 2.0 will eventually remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. 
-Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol -so the additional overhead is not expected to significantly hinder this domain. +1. Leverage protocol routing in libp2p, such that the libp2p stack will route + the incoming stream to the appropriate handler. This allows the handler + function for each request type to be self-contained. For an analogy, think + about how you attach HTTP handlers to a REST API server. +2. Version requests independently. In a coarser-grained umbrella protocol, the + entire protocol would have to be versioned even if just one field in a single + message changed. +3. Enable clients to select the individual requests/versions they support. It + would no longer be a strict requirement to support all requests, and clients, + in principle, could support a subset of requests and variety of versions. +4. Enable flexibility and agility for clients adopting spec changes that impact + the request, by signalling to peers exactly which subset of new/old requests + they support. +5. Enable clients to explicitly choose backwards compatibility at the request + granularity. Without this, clients would be forced to support entire versions + of the coarser request protocol. +6. Parallelise RFCs (or EIPs). By decoupling requests from one another, each RFC + that affects the request protocol can be deployed/tested/debated + independently without relying on a synchronization point to version the + general top-level protocol. + 1. This has the benefit that clients can explicitly choose which RFCs to + deploy without buying into all other RFCs that may be included in that + top-level version. + 2. Affording this level of granularity with a top-level protocol would imply + creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the + cartesian product of RFCs in-flight, O(n^2). +7. Allow us to simplify the payload of requests. Request-id’s and method-ids no + longer need to be sent. The encoding/request type and version can all be + handled by the framework. + +**Caveat**: The protocol negotiation component in the current version of libp2p +is called multistream-select 1.0. It is somewhat naïve and introduces overhead +on every request when negotiating streams, although implementation-specific +optimizations are possible to save this cost. Multiselect 2.0 will eventually +remove this overhead by memoizing previously selected protocols, and modeling +shared protocol tables. Fortunately, this req/resp protocol is not the expected +network bottleneck in the protocol so the additional overhead is not expected to +significantly hinder this domain. #### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding? -We are using single-use streams where each stream is closed at the end of the message. -Thus, libp2p transparently handles message delimiting in the underlying stream. -libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). -We can therefore use stream closure to mark the end of the request and response independently. +We are using single-use streams where each stream is closed at the end of the +message. Thus, libp2p transparently handles message delimiting in the underlying +stream. libp2p streams are full-duplex, and each party is responsible for +closing their write side (like in TCP). We can therefore use stream closure to +mark the end of the request and response independently. 
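To make the framing concrete, here is a minimal Python sketch (illustrative only, not the normative encoding definition) of how a single `ssz_snappy` request payload could be written: a protobuf-style varint of the *uncompressed* SSZ length, followed by the snappy-framed bytes. The `encode_uvarint` helper is defined inline for illustration, and the framing call assumes the `python-snappy` package.

```python
import snappy  # python-snappy; assumed available for this sketch only


def encode_uvarint(value: int) -> bytes:
    """Protobuf-style unsigned varint: 7 bits per byte, least-significant group first."""
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)


def frame_payload(ssz_bytes: bytes) -> bytes:
    """Length prefix (uncompressed SSZ size) followed by the snappy-framed payload."""
    framed = snappy.StreamCompressor().add_chunk(ssz_bytes)
    return encode_uvarint(len(ssz_bytes)) + framed
```

A reader can do the inverse: read the varint first, check it against the type's known SSZ bounds, size its buffer accordingly, and only then decompress, which is the benefit enumerated below.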
-Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed with the length of the underlying data: +Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed +with the length of the underlying data: - A basic reader can prepare a correctly sized buffer before reading the message - A more advanced reader can stream-decode SSZ given the length of the SSZ data. - Alignment with protocols like gRPC over HTTP/2 that prefix with length -- Sanity checking of message length, and enabling much stricter message length limiting based on SSZ type information, - to provide even more DOS protection than the global message length already does. - E.g. a small `Status` message does not nearly require `MAX_PAYLOAD_SIZE` bytes. +- Sanity checking of message length, and enabling much stricter message length + limiting based on SSZ type information, to provide even more DOS protection + than the global message length already does. E.g. a small `Status` message + does not nearly require `MAX_PAYLOAD_SIZE` bytes. -[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints. -Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. +[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) +is an efficient technique to encode variable-length (unsigned here) ints. +Instead of reserving a fixed-size field of as many bytes as necessary to convey +the maximum possible value, this field is elastic in exchange for 1-bit overhead +per byte. #### Why do we version protocol strings with ordinals instead of semver? -Using semver for network protocols is confusing. -It is never clear what a change in a field, even if backwards compatible on deserialization, actually implies. +Using semver for network protocols is confusing. It is never clear what a change +in a field, even if backwards compatible on deserialization, actually implies. Network protocol agreement should be explicit. Imagine two peers: - Peer A supporting v1.1.1 of protocol X. - Peer B supporting v1.1.2 of protocol X. -These two peers should never speak to each other because the results can be unpredictable. -This is an oversimplification: imagine the same problem with a set of 10 possible versions. -We now have 10^2 (100) possible outcomes that peers need to model for. The resulting complexity is unwieldy. +These two peers should never speak to each other because the results can be +unpredictable. This is an oversimplification: imagine the same problem with a +set of 10 possible versions. We now have 10^2 (100) possible outcomes that peers +need to model for. The resulting complexity is unwieldy. -For this reason, we rely on negotiation of explicit, verbatim protocols. -In the above case, peer B would provide backwards compatibility by supporting and advertising both v1.1.1 and v1.1.2 of the protocol. +For this reason, we rely on negotiation of explicit, verbatim protocols. In the +above case, peer B would provide backwards compatibility by supporting and +advertising both v1.1.1 and v1.1.2 of the protocol. -Therefore, semver would be relegated to convey expectations at the human level, and it wouldn't do a good job there either, -because it's unclear if "backwards compatibility" and "breaking change" apply only to wire schema level, to behavior, etc. 
+Therefore, semver would be relegated to convey expectations at the human level, +and it wouldn't do a good job there either, because it's unclear if "backwards +compatibility" and "breaking change" apply only to wire schema level, to +behavior, etc. -For this reason, we remove and replace semver with ordinals that require explicit agreement and do not mandate a specific policy for changes. +For this reason, we remove and replace semver with ordinals that require +explicit agreement and do not mandate a specific policy for changes. #### Why is it called Req/Resp and not RPC? -Req/Resp is used to avoid confusion with JSON-RPC and similar user-client interaction mechanisms. +Req/Resp is used to avoid confusion with JSON-RPC and similar user-client +interaction mechanisms. #### What is a typical rate limiting strategy? -The responder typically will want to rate limit requests to protect against spam and to manage resource consumption, while the requester will want to maximise performance based on its own resource allocation strategy. For the network, it is beneficial if available resources are used optimally. - -Broadly, the requester does not know the capacity / limit of each server but can derive it from the rate of responses for the purpose of selecting the next peer for a request. - -Because the server withholds the response until capacity is available, a client can optimistically send requests without risking running into negative scoring situations or sub-optimal rate polling. - -A typical approach for the requester is to implement a timeout on the request that depends on the nature of the request and on connectivity parameters in general - for example when requesting blocks, a peer might choose to send a request to a second peer if the first peer does not respond within a reasonable time, and to reset the request to the first peer if the second peer responds faster. Clients may use past response performance to reward fast peers when implementing peer scoring. - -A typical approach for the responder is to implement a two-level token/leaky bucket with a per-peer limit and a global limit. The granularity of rate limiting may be based either on full requests or individual chunks with the latter being preferable. A token cost may be assigned to the request itself and separately each chunk in the response so as to remain protected both against large and frequent requests. - -For requesters, rate limiting is not distinguishable from other conditions causing slow responses (slow peers, congestion etc) and since the latter conditions must be handled anyway, including rate limiting in this strategy keeps the implementation simple. +The responder typically will want to rate limit requests to protect against spam +and to manage resource consumption, while the requester will want to maximise +performance based on its own resource allocation strategy. For the network, it +is beneficial if available resources are used optimally. + +Broadly, the requester does not know the capacity / limit of each server but can +derive it from the rate of responses for the purpose of selecting the next peer +for a request. + +Because the server withholds the response until capacity is available, a client +can optimistically send requests without risking running into negative scoring +situations or sub-optimal rate polling. 
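As a purely illustrative sketch of the responder-side approach described in the following paragraphs (a two-level token bucket with a per-peer limit and a global limit), with made-up rates, capacities, and chunk costs:

```python
import time


class TokenBucket:
    """Minimal token bucket; all rates and capacities here are illustrative only."""

    def __init__(self, rate: float, capacity: float) -> None:
        self.rate = rate          # tokens refilled per second
        self.capacity = capacity  # maximum burst size
        self.tokens = capacity
        self.last = time.monotonic()

    def try_consume(self, cost: float) -> bool:
        now = time.monotonic()
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= cost:
            self.tokens -= cost
            return True
        return False


# Two-level limiting: one bucket per peer plus one shared global bucket.
GLOBAL_BUCKET = TokenBucket(rate=500.0, capacity=1000.0)
PEER_BUCKETS: dict[str, TokenBucket] = {}


def may_send_chunk(peer_id: str, chunk_cost: float = 1.0) -> bool:
    # A real responder would typically wait until capacity is available
    # (withholding the response) rather than drop the request outright.
    peer = PEER_BUCKETS.setdefault(peer_id, TokenBucket(rate=50.0, capacity=100.0))
    return peer.try_consume(chunk_cost) and GLOBAL_BUCKET.try_consume(chunk_cost)
```

Assigning a separate cost to the request itself and to each response chunk, as suggested below, is a straightforward extension of `may_send_chunk`.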
+ +A typical approach for the requester is to implement a timeout on the request +that depends on the nature of the request and on connectivity parameters in +general - for example when requesting blocks, a peer might choose to send a +request to a second peer if the first peer does not respond within a reasonable +time, and to reset the request to the first peer if the second peer responds +faster. Clients may use past response performance to reward fast peers when +implementing peer scoring. + +A typical approach for the responder is to implement a two-level token/leaky +bucket with a per-peer limit and a global limit. The granularity of rate +limiting may be based either on full requests or individual chunks with the +latter being preferable. A token cost may be assigned to the request itself and +separately each chunk in the response so as to remain protected both against +large and frequent requests. + +For requesters, rate limiting is not distinguishable from other conditions +causing slow responses (slow peers, congestion etc) and since the latter +conditions must be handled anyway, including rate limiting in this strategy +keeps the implementation simple. #### Why do we allow empty responses in block requests? -When requesting blocks by range or root, it may happen that there are no blocks in the selected range or the responding node does not have the requested blocks. +When requesting blocks by range or root, it may happen that there are no blocks +in the selected range or the responding node does not have the requested blocks. -Thus, it may happen that we need to transmit an empty list - there are several ways to encode this: +Thus, it may happen that we need to transmit an empty list - there are several +ways to encode this: 0. Close the stream without sending any data -1. Add a `null` option to the `success` response, for example by introducing an additional byte +1. Add a `null` option to the `success` response, for example by introducing an + additional byte 2. Respond with an error result, using a specific error code for "No data" -Semantically, it is not an error that a block is missing during a slot making option 2 unnatural. +Semantically, it is not an error that a block is missing during a slot making +option 2 unnatural. -Option 1 allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node. +Option 1 allows the responder to signal "no block", but this information may be +wrong - for example in the case of a malicious node. -Under option 0, there is no way for a client to distinguish between a slot without a block and an incomplete response, -but given that it already must contain logic to handle the uncertainty of a malicious peer, option 0 was chosen. -Clients should mark any slots missing blocks as unknown until they can be verified as not containing a block by successive blocks. +Under option 0, there is no way for a client to distinguish between a slot +without a block and an incomplete response, but given that it already must +contain logic to handle the uncertainty of a malicious peer, option 0 was +chosen. Clients should mark any slots missing blocks as unknown until they can +be verified as not containing a block by successive blocks. -Assuming option 0 with no special `null` encoding, consider a request for slots `2, 3, 4` --- if there was no block produced at slot 4, the response would be `2, 3, EOF`. 
-Now consider the same situation, but where only `4` is requested --- closing the stream with only `EOF` (without any `response_chunk`) is consistent. +Assuming option 0 with no special `null` encoding, consider a request for slots +`2, 3, 4` -- if there was no block produced at slot 4, the response would be +`2, 3, EOF`. Now consider the same situation, but where only `4` is requested -- +closing the stream with only `EOF` (without any `response_chunk`) is consistent. -Failing to provide blocks that nodes "should" have is reason to trust a peer less --- for example, if a particular peer gossips a block, it should have access to its parent. -If a request for the parent fails, it's indicative of poor peer quality since peers should validate blocks before gossiping them. +Failing to provide blocks that nodes "should" have is reason to trust a peer +less -- for example, if a particular peer gossips a block, it should have access +to its parent. If a request for the parent fails, it's indicative of poor peer +quality since peers should validate blocks before gossiping them. #### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from? -When connecting, the `Status` message gives an idea about the sync status of a particular peer, but this changes over time. -By the time a subsequent `BeaconBlockByRange` request is processed, the information may be stale, -and the responder might have moved on to a new finalization point and pruned blocks around the previous head and finalized blocks. +When connecting, the `Status` message gives an idea about the sync status of a +particular peer, but this changes over time. By the time a subsequent +`BeaconBlockByRange` request is processed, the information may be stale, and the +responder might have moved on to a new finalization point and pruned blocks +around the previous head and finalized blocks. -To avoid this race condition, we allow the responder to choose which branch to send to the requester. -The requester then goes on to validate the blocks and incorporate them in their own database --- because they follow the same rules, they should at this point arrive at the same canonical chain. +To avoid this race condition, we allow the responder to choose which branch to +send to the requester. The requester then goes on to validate the blocks and +incorporate them in their own database -- because they follow the same rules, +they should at this point arrive at the same canonical chain. #### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs? -Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network -the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire -beacon state and then a simple block sync from there to the head. We expect the latter to be the dominant UX strategy. - -These checkpoints *in the worst case* (i.e. very large validator set and maximal allowed safety decay) must be from the -most recent `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, and thus a user must be able to block sync to the head from this starting point. -Thus, this defines the epoch range outside which nodes may prune blocks, and -the epoch range that a new node syncing from a checkpoint must backfill. 
- -`MIN_EPOCHS_FOR_BLOCK_REQUESTS` is calculated using the arithmetic from `compute_weak_subjectivity_period` found in the -[weak subjectivity guide](./weak-subjectivity.md). Specifically to find this max epoch range, we use the worst case event of a very large validator size +Due to economic finality and weak subjectivity requirements of a proof-of-stake +blockchain, for a new node to safely join the network the node must provide a +recent checkpoint found out-of-band. This checkpoint can be in the form of a +`root` & `epoch` or it can be the entire beacon state and then a simple block +sync from there to the head. We expect the latter to be the dominant UX +strategy. + +These checkpoints *in the worst case* (i.e. very large validator set and maximal +allowed safety decay) must be from the most recent +`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, and thus a user must be able to block +sync to the head from this starting point. Thus, this defines the epoch range +outside which nodes may prune blocks, and the epoch range that a new node +syncing from a checkpoint must backfill. + +`MIN_EPOCHS_FOR_BLOCK_REQUESTS` is calculated using the arithmetic from +`compute_weak_subjectivity_period` found in the +[weak subjectivity guide](./weak-subjectivity.md). Specifically to find this max +epoch range, we use the worst case event of a very large validator size (`>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT`). @@ -1577,167 +1971,225 @@ MIN_EPOCHS_FOR_BLOCK_REQUESTS = ( ) ``` -Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months). +Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` +(~5 months). #### Why must the proposer signature be checked when backfilling blocks in the database? -When backfilling blocks in a database from a know safe block/state (e.g. when starting from a weak subjectivity state), -the node not only must ensure the `BeaconBlock`s form a chain to the known safe block, -but also must check that the proposer signature is valid in the `SignedBeaconBlock` wrapper. +When backfilling blocks in a database from a know safe block/state (e.g. when +starting from a weak subjectivity state), the node not only must ensure the +`BeaconBlock`s form a chain to the known safe block, but also must check that +the proposer signature is valid in the `SignedBeaconBlock` wrapper. This is because the signature is not part of the `BeaconBlock` hash chain, and thus could be corrupted by an attacker serving valid `BeaconBlock`s but invalid signatures contained in `SignedBeaconBlock`. Although in this particular use case this does not represent a decay in safety -(due to the assumptions of starting at a weak subjectivity checkpoint), it -would represent invalid historic data and could be unwittingly transmitted to +(due to the assumptions of starting at a weak subjectivity checkpoint), it would +represent invalid historic data and could be unwittingly transmitted to additional nodes. #### What's the effect of empty slots on the sync algorithm? -When syncing one can only tell that a slot has been skipped on a particular branch -by examining subsequent blocks and analyzing the graph formed by the parent root. -Because the server side may choose to omit blocks in the response for any reason, clients must validate the graph and be prepared to fill in gaps. +When syncing one can only tell that a slot has been skipped on a particular +branch by examining subsequent blocks and analyzing the graph formed by the +parent root. 
Because the server side may choose to omit blocks in the response +for any reason, clients must validate the graph and be prepared to fill in gaps. -For example, if a peer responds with blocks [2, 3] when asked for [2, 3, 4], clients may not assume that block 4 doesn't exist --- it merely means that the responding peer did not send it (they may not have it yet or may maliciously be trying to hide it) -and successive blocks will be needed to determine if there exists a block at slot 4 in this particular branch. +For example, if a peer responds with blocks [2, 3] when asked for [2, 3, 4], +clients may not assume that block 4 doesn't exist -- it merely means that the +responding peer did not send it (they may not have it yet or may maliciously be +trying to hide it) and successive blocks will be needed to determine if there +exists a block at slot 4 in this particular branch. ### Discovery #### Why are we using discv5 and not libp2p Kademlia DHT? -discv5 is a standalone protocol, running on UDP on a dedicated port, meant for peer and service discovery only. -discv5 supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are, or will be, requirements in this context. +discv5 is a standalone protocol, running on UDP on a dedicated port, meant for +peer and service discovery only. discv5 supports self-certified, flexible peer +records (ENRs) and topic-based advertisement, both of which are, or will be, +requirements in this context. -On the other hand, libp2p Kademlia DHT is a fully-fledged DHT protocol/implementations -with content routing and storage capabilities, both of which are irrelevant in this context. +On the other hand, libp2p Kademlia DHT is a fully-fledged DHT +protocol/implementations with content routing and storage capabilities, both of +which are irrelevant in this context. -Ethereum execution-layer nodes will evolve to support discv5. -By sharing the discovery network between Ethereum consensus-layer and execution-layer clients, -we benefit from the additive effect on network size that enhances resilience and resistance against certain attacks, -to which smaller networks are more vulnerable. -It should also help light clients of both networks find nodes with specific capabilities. +Ethereum execution-layer nodes will evolve to support discv5. By sharing the +discovery network between Ethereum consensus-layer and execution-layer clients, +we benefit from the additive effect on network size that enhances resilience and +resistance against certain attacks, to which smaller networks are more +vulnerable. It should also help light clients of both networks find nodes with +specific capabilities. discv5 is in the process of being audited. #### What is the difference between an ENR and a multiaddr, and why are we using ENRs? -Ethereum Node Records are self-certified node records. -Nodes craft and disseminate ENRs for themselves, proving authorship via a cryptographic signature. -ENRs are sequentially indexed, enabling conflicts to be resolved. +Ethereum Node Records are self-certified node records. Nodes craft and +disseminate ENRs for themselves, proving authorship via a cryptographic +signature. ENRs are sequentially indexed, enabling conflicts to be resolved. -ENRs are key-value records with string-indexed ASCII keys. -They can store arbitrary information, but EIP-778 specifies a pre-defined dictionary, including IPv4 and IPv6 addresses, secp256k1 public keys, etc. +ENRs are key-value records with string-indexed ASCII keys. 
They can store +arbitrary information, but EIP-778 specifies a pre-defined dictionary, including +IPv4 and IPv6 addresses, secp256k1 public keys, etc. -Comparing ENRs and multiaddrs is like comparing apples and oranges. -ENRs are self-certified containers of identity, addresses, and metadata about a node. -Multiaddrs are address strings with the peculiarity that they’re self-describing, composable and future-proof. -An ENR can contain multiaddrs, and multiaddrs can be derived securely from the fields of an authenticated ENR. +Comparing ENRs and multiaddrs is like comparing apples and oranges. ENRs are +self-certified containers of identity, addresses, and metadata about a node. +Multiaddrs are address strings with the peculiarity that they’re +self-describing, composable and future-proof. An ENR can contain multiaddrs, and +multiaddrs can be derived securely from the fields of an authenticated ENR. discv5 uses ENRs and we will presumably need to: -1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or – -2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR - (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Ethereum execution-layer nodes). +1. Add `multiaddr` to the dictionary, so that nodes can advertise their + multiaddr under a reserved namespace in ENRs. – and/or – +2. Define a bi-directional conversion function between multiaddrs and the + corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for + compatibility with nodes that do not support multiaddr natively (e.g. + Ethereum execution-layer nodes). #### Why do we not form ENRs and find peers until genesis block/state is known? -Although client software might very well be running locally prior to the solidification of the beacon chain genesis state and block, -clients cannot form valid ENRs prior to this point. -ENRs contain `fork_digest` which utilizes the `genesis_validators_root` for a cleaner separation between chains -so prior to knowing genesis, we cannot use `fork_digest` to cleanly find peers on our intended chain. -Once genesis data is known, we can then form ENRs and safely find peers. +Although client software might very well be running locally prior to the +solidification of the beacon chain genesis state and block, clients cannot form +valid ENRs prior to this point. ENRs contain `fork_digest` which utilizes the +`genesis_validators_root` for a cleaner separation between chains so prior to +knowing genesis, we cannot use `fork_digest` to cleanly find peers on our +intended chain. Once genesis data is known, we can then form ENRs and safely +find peers. -When using a proof-of-work deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (7 days in mainnet configuration) before `genesis_time`, -providing ample time to find peers and form initial connections and gossip subnets prior to genesis. +When using a proof-of-work deposit contract for deposits, `fork_digest` will be +known `GENESIS_DELAY` (7 days in mainnet configuration) before `genesis_time`, +providing ample time to find peers and form initial connections and gossip +subnets prior to genesis. ### Compression/Encoding #### Why are we using SSZ for encoding? -SSZ is used at the consensus layer, and all implementations should have support for SSZ-encoding/decoding, -requiring no further dependencies to be added to client implementations. 
-This is a natural choice for serializing objects to be sent across the wire. -The actual data in most protocols will be further compressed for efficiency. +SSZ is used at the consensus layer, and all implementations should have support +for SSZ-encoding/decoding, requiring no further dependencies to be added to +client implementations. This is a natural choice for serializing objects to be +sent across the wire. The actual data in most protocols will be further +compressed for efficiency. -SSZ has well-defined schemas for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent. -It also has defined all required types that are required for this network specification. +SSZ has well-defined schemas for consensus objects (typically sent across the +wire) reducing any serialization schema data that needs to be sent. It also has +defined all required types that are required for this network specification. #### Why are we compressing, and at which layers? -We compress on the wire to achieve smaller payloads per-message, which, in aggregate, -result in higher efficiency, better utilization of available bandwidth, and overall reduction in network-wide traffic overhead. - -At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated -and layered atop connections and streams, but it is [being considered](https://github.com/libp2p/libp2p/issues/81). - -This is a non-trivial feature because the behavior -of network IO loops, kernel buffers, chunking, and packet fragmentation, amongst others, need to be taken into account. -libp2p streams are unbounded streams, whereas compression algorithms work best on bounded byte streams of which we have some prior knowledge. - -Compression tends not to be a one-size-fits-all problem. -A lot of variables need careful evaluation, and generic approaches/choices lead to poor size shavings, -which may even be counterproductive when factoring in the CPU and memory tradeoff. - -For all these reasons, generically negotiating compression algorithms may be treated as a research problem at the libp2p community, -one we’re happy to tackle in the medium-term. - -At this stage, the wisest choice is to consider libp2p a messenger of bytes, -and to make application layer participate in compressing those bytes. -This looks different depending on the interaction layer: - -- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload - (when dictated by the encoding token in the topic name) prior to publishing the message via the API. - No length-prefixing is necessary because protobuf takes care of bounding the field in the serialized form. -- Req/Resp domain: since we define custom protocols that operate on byte streams, - implementers are encouraged to encapsulate the encoding and compression logic behind - MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams. +We compress on the wire to achieve smaller payloads per-message, which, in +aggregate, result in higher efficiency, better utilization of available +bandwidth, and overall reduction in network-wide traffic overhead. + +At this time, libp2p does not have an out-of-the-box compression feature that +can be dynamically negotiated and layered atop connections and streams, but it +is [being considered](https://github.com/libp2p/libp2p/issues/81). 
+ +This is a non-trivial feature because the behavior of network IO loops, kernel +buffers, chunking, and packet fragmentation, amongst others, need to be taken +into account. libp2p streams are unbounded streams, whereas compression +algorithms work best on bounded byte streams of which we have some prior +knowledge. + +Compression tends not to be a one-size-fits-all problem. A lot of variables need +careful evaluation, and generic approaches/choices lead to poor size shavings, +which may even be counterproductive when factoring in the CPU and memory +tradeoff. + +For all these reasons, generically negotiating compression algorithms may be +treated as a research problem at the libp2p community, one we’re happy to tackle +in the medium-term. + +At this stage, the wisest choice is to consider libp2p a messenger of bytes, and +to make application layer participate in compressing those bytes. This looks +different depending on the interaction layer: + +- Gossip domain: since gossipsub has a framing protocol and exposes an API, we + compress the payload (when dictated by the encoding token in the topic name) + prior to publishing the message via the API. No length-prefixing is necessary + because protobuf takes care of bounding the field in the serialized form. +- Req/Resp domain: since we define custom protocols that operate on byte + streams, implementers are encouraged to encapsulate the encoding and + compression logic behind MessageReader and MessageWriter components/strategies + that can be layered on top of the raw byte streams. #### Why are we using Snappy for compression? -Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks, -and can calculate the size of the uncompressed object without inflating it in memory. -This prevents DOS vectors where large uncompressed data is sent. +Snappy is used in Ethereum 1.0. It is well maintained by Google, has good +benchmarks, and can calculate the size of the uncompressed object without +inflating it in memory. This prevents DOS vectors where large uncompressed data +is sent. #### Can I get access to unencrypted bytes on the wire for debugging purposes? -Yes, you can add loggers in your libp2p protocol handlers to log incoming and outgoing messages. -It is recommended to use programming design patterns to encapsulate the logging logic cleanly. +Yes, you can add loggers in your libp2p protocol handlers to log incoming and +outgoing messages. It is recommended to use programming design patterns to +encapsulate the logging logic cleanly. -If your libp2p library relies on frameworks/runtimes such as Netty (jvm) or Node.js (javascript), -you can use logging facilities in those frameworks/runtimes to enable message tracing. +If your libp2p library relies on frameworks/runtimes such as Netty (jvm) or +Node.js (javascript), you can use logging facilities in those +frameworks/runtimes to enable message tracing. -For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) -(which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire. +For specific ad-hoc testing scenarios, you can use the +[plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) +(which is essentially no-op encryption or message authentication), in +combination with tcpdump or Wireshark to inspect the wire. #### What are SSZ type size bounds? 
-The SSZ encoding outputs of each type have size bounds: each dynamic type, such as a list, has a "limit", which can be used to compute the maximum valid output size. -Note that for some more complex dynamic-length objects, element offsets (4 bytes each) may need to be included. -Other types are static, they have a fixed size: no dynamic-length content is involved, and the minimum and maximum bounds are the same. +The SSZ encoding outputs of each type have size bounds: each dynamic type, such +as a list, has a "limit", which can be used to compute the maximum valid output +size. Note that for some more complex dynamic-length objects, element offsets (4 +bytes each) may need to be included. Other types are static, they have a fixed +size: no dynamic-length content is involved, and the minimum and maximum bounds +are the same. -For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). -It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds. +For reference, the type bounds can be computed ahead of time, +[as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). +It is advisable to derive these lengths from the SSZ type definitions in use, to +ensure that version changes do not cause out-of-sync type bounds. #### Why is the message size defined in terms of application payload? -When transmitting messages over gossipsub and/or the req/resp domain, we want to ensure that the same payload sizes are supported regardless of the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. +When transmitting messages over gossipsub and/or the req/resp domain, we want to +ensure that the same payload sizes are supported regardless of the underlying +transport, decoupling the consensus layer from libp2p-induced overhead and the +particular transmission strategy. -To derive "encoded size limits" from desired application sizes, we take into account snappy compression and framing overhead. +To derive "encoded size limits" from desired application sizes, we take into +account snappy compression and framing overhead. -In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame. The limit is set such that at least one max-sized application-level message together with a small amount (1 KiB) of gossipsub overhead is allowed. Implementations are free to pack multiple smaller application messages into a single gossipsub frame, and/or combine it with control messages as they see fit. +In the case of gossipsub, the protocol supports sending multiple application +payloads as well as mixing application data with control messages in each +gossipsub frame. The limit is set such that at least one max-sized +application-level message together with a small amount (1 KiB) of gossipsub +overhead is allowed. Implementations are free to pack multiple smaller +application messages into a single gossipsub frame, and/or combine it with +control messages as they see fit. -The limit is set on the uncompressed payload size in particular to protect against decompression bombs. +The limit is set on the uncompressed payload size in particular to protect +against decompression bombs. #### Why is there a limit on message sizes at all? 
-The message size limit protects against several forms of DoS and network-based amplification attacks and provides upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability.
+The message size limit protects against several forms of DoS and network-based
+amplification attacks and provides upper bounds for resource (network, memory)
+usage in the client based on protocol requirements to decode, buffer, cache,
+store and re-transmit messages which in turn translate into performance and
+protection tradeoffs, ensuring capacity to handle worst cases during recovery
+from network instability.
-In particular, blocks—-currently the only message type without a practical SSZ-derived upper bound on size—-cannot be fully verified synchronously as part of gossipsub validity checks. This means that there exist cases where invalid messages signed by a validator may be amplified by the network.
+In particular, blocks—currently the only message type without a practical
+SSZ-derived upper bound on size—cannot be fully verified synchronously as part
+of gossipsub validity checks. This means that there exist cases where invalid
+messages signed by a validator may be amplified by the network.
## libp2p implementations matrix
-This section will soon contain a matrix showing the maturity/state of the libp2p features required
-by this spec across the languages in which clients are being developed.
+This section will soon contain a matrix showing the maturity/state of the libp2p
+features required by this spec across the languages in which clients are being
+developed.
diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md
index 6f4bb94205..7bcb7416cf 100644
--- a/specs/phase0/validator.md
+++ b/specs/phase0/validator.md
@@ -1,6 +1,8 @@
# Phase 0 -- Honest Validator
-This is an accompanying document to [Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum proof-of-stake protocol.
+This is an accompanying document to
+[Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected
+actions of a "validator" participating in the Ethereum proof-of-stake protocol.
@@ -68,13 +70,29 @@ This is an accompanying document to [Phase 0 -- The Beacon Chain](./beacon-chain
## Introduction
-This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum proof-of-stake protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
-
-A validator is an entity that participates in the consensus of the Ethereum proof-of-stake protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof-of-work networks in which miners provide collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
+This document represents the expected behavior of an "honest validator" with +respect to Phase 0 of the Ethereum proof-of-stake protocol. This document does +not distinguish between a "node" (i.e. the functionality of following and +reading the beacon chain) and a "validator client" (i.e. the functionality of +actively participating in consensus). The separation of concerns between these +(potentially) two pieces of software is left as a design decision that is out of +scope. + +A validator is an entity that participates in the consensus of the Ethereum +proof-of-stake protocol. This is an optional role for users in which they can +post ETH as collateral and verify and attest to the validity of blocks to seek +financial returns in exchange for building and securing the protocol. This is +similar to proof-of-work networks in which miners provide collateral in the form +of hardware/hash-power to seek returns in exchange for building and securing the +protocol. ## Prerequisites -All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Deposit Contract](./deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the +[Phase 0 -- The Beacon Chain](./beacon-chain.md) and +[Phase 0 -- Deposit Contract](./deposit-contract.md) doc are requisite for this +document and used throughout. Please see the Phase 0 doc before continuing and +use as a reference throughout. ## Constants @@ -117,35 +135,43 @@ class SignedAggregateAndProof(Container): ### Initialization -A validator must initialize many parameters locally before submitting a deposit and joining the validator registry. +A validator must initialize many parameters locally before submitting a deposit +and joining the validator registry. #### BLS public key -Validator public keys are [G1 points](beacon-chain.md#bls-signatures) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator. +Validator public keys are [G1 points](beacon-chain.md#bls-signatures) on the +[BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, +`privkey`, must be securely generated along with the resultant `pubkey`. This +`privkey` must be "hot", that is, constantly available to sign data throughout +the lifetime of the validator. #### Withdrawal credentials -The `withdrawal_credentials` field constrains validator withdrawals. -The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes. +The `withdrawal_credentials` field constrains validator withdrawals. The first +byte of this 32-byte field is a withdrawal prefix which defines the semantics of +the remaining 31 bytes. The following withdrawal prefixes are currently supported. ##### `BLS_WITHDRAWAL_PREFIX` Withdrawal credentials with the BLS withdrawal prefix allow a BLS key pair -`(bls_withdrawal_privkey, bls_withdrawal_pubkey)` to trigger withdrawals. -The `withdrawal_credentials` field must be such that: +`(bls_withdrawal_privkey, bls_withdrawal_pubkey)` to trigger withdrawals. 
The +`withdrawal_credentials` field must be such that: - `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX` - `withdrawal_credentials[1:] == hash(bls_withdrawal_pubkey)[1:]` -*Note*: The `bls_withdrawal_privkey` is not required for validating and can be kept in cold storage. +*Note*: The `bls_withdrawal_privkey` is not required for validating and can be +kept in cold storage. ##### `ETH1_ADDRESS_WITHDRAWAL_PREFIX` -Withdrawal credentials with the Eth1 address withdrawal prefix specify -a 20-byte Eth1 address `eth1_withdrawal_address` as the recipient for all withdrawals. -The `eth1_withdrawal_address` can be the address of either an externally owned account or of a contract. +Withdrawal credentials with the Eth1 address withdrawal prefix specify a 20-byte +Eth1 address `eth1_withdrawal_address` as the recipient for all withdrawals. The +`eth1_withdrawal_address` can be the address of either an externally owned +account or of a contract. The `withdrawal_credentials` field must be such that: @@ -154,39 +180,74 @@ The `withdrawal_credentials` field must be such that: - `withdrawal_credentials[12:] == eth1_withdrawal_address` After the merge of the current Ethereum execution layer into the Beacon Chain, -withdrawals to `eth1_withdrawal_address` will simply be increases to the account's ETH balance that do **NOT** trigger any EVM execution. +withdrawals to `eth1_withdrawal_address` will simply be increases to the +account's ETH balance that do **NOT** trigger any EVM execution. ### Submit deposit -In Phase 0, all incoming validator deposits originate from the Ethereum proof-of-work chain defined by `DEPOSIT_CHAIN_ID` and `DEPOSIT_NETWORK_ID`. Deposits are made to the [deposit contract](./deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`. +In Phase 0, all incoming validator deposits originate from the Ethereum +proof-of-work chain defined by `DEPOSIT_CHAIN_ID` and `DEPOSIT_NETWORK_ID`. +Deposits are made to the [deposit contract](./deposit-contract.md) located at +`DEPOSIT_CONTRACT_ADDRESS`. To submit a deposit: -- Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](./beacon-chain.md#depositdata) SSZ object. -- Let `amount` be the amount in Gwei to be deposited by the validator where `amount >= MIN_DEPOSIT_AMOUNT`. +- Pack the validator's [initialization parameters](#initialization) into + `deposit_data`, a [`DepositData`](./beacon-chain.md#depositdata) SSZ object. +- Let `amount` be the amount in Gwei to be deposited by the validator where + `amount >= MIN_DEPOSIT_AMOUNT`. - Set `deposit_data.pubkey` to validator's `pubkey`. - Set `deposit_data.withdrawal_credentials` to `withdrawal_credentials`. - Set `deposit_data.amount` to `amount`. -- Let `deposit_message` be a `DepositMessage` with all the `DepositData` contents except the `signature`. -- Let `signature` be the result of `bls.Sign` of the `compute_signing_root(deposit_message, domain)` with `domain=compute_domain(DOMAIN_DEPOSIT)`. (_Warning_: Deposits _must_ be signed with `GENESIS_FORK_VERSION`, calling `compute_domain` without a second argument defaults to the correct version). +- Let `deposit_message` be a `DepositMessage` with all the `DepositData` + contents except the `signature`. +- Let `signature` be the result of `bls.Sign` of the + `compute_signing_root(deposit_message, domain)` with + `domain=compute_domain(DOMAIN_DEPOSIT)`. 
(_Warning_: Deposits _must_ be signed + with `GENESIS_FORK_VERSION`, calling `compute_domain` without a second + argument defaults to the correct version). - Let `deposit_data_root` be `hash_tree_root(deposit_data)`. -- Send a transaction on the Ethereum proof-of-work chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32)` along with a deposit of `amount` Gwei. +- Send a transaction on the Ethereum proof-of-work chain to + `DEPOSIT_CONTRACT_ADDRESS` executing + `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32)` + along with a deposit of `amount` Gwei. -*Note*: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validators` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`. +*Note*: Deposits made for the same `pubkey` are treated as for the same +validator. A singular `Validator` will be added to `state.validators` with each +additional deposit amount added to the validator's balance. A validator can only +be activated when total deposits for the validator pubkey meet or exceed +`MAX_EFFECTIVE_BALANCE`. ### Process deposit -Deposits cannot be processed into the beacon chain until the proof-of-work block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 blocks (~8 hours) plus `EPOCHS_PER_ETH1_VOTING_PERIOD` epochs (~6.8 hours). Once the requisite proof-of-work block data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated. +Deposits cannot be processed into the beacon chain until the proof-of-work block +in which they were deposited or any of its descendants is added to the beacon +chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 +blocks (~8 hours) plus `EPOCHS_PER_ETH1_VOTING_PERIOD` epochs (~6.8 hours). Once +the requisite proof-of-work block data is added, the deposit will normally be +added to a beacon chain block and processed into the `state.validators` within +an epoch or two. The validator is then in a queue to be activated. ### Validator index -Once a validator has been processed and added to the beacon state's `validators`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](./beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally. +Once a validator has been processed and added to the beacon state's +`validators`, the validator's `validator_index` is defined by the index into the +registry at which the [`ValidatorRecord`](./beacon-chain.md#validator) contains +the `pubkey` specified in the validator's deposit. A validator's +`validator_index` is guaranteed to not change from the time of initial deposit +until the validator exits and fully withdraws. 
This `validator_index` is used +throughout the specification to dictate validator roles and responsibilities at +any point and should be stored locally. ### Activation -In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `MAX_SEED_LOOKAHEAD` epochs (25.6 minutes). +In normal operation, the validator is quickly activated, at which point the +validator is added to the shuffling and begins validation after an additional +`MAX_SEED_LOOKAHEAD` epochs (25.6 minutes). -The function [`is_active_validator`](./beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows: +The function [`is_active_validator`](./beacon-chain.md#is_active_validator) can +be used to check if a validator is active during a given epoch. Usage is as +follows: ```python def check_if_validator_active(state: BeaconState, validator_index: ValidatorIndex) -> bool: @@ -194,13 +255,18 @@ def check_if_validator_active(state: BeaconState, validator_index: ValidatorInde return is_active_validator(validator, get_current_epoch(state)) ``` -Once a validator is activated, the validator is assigned [responsibilities](#beacon-chain-responsibilities) until exited. +Once a validator is activated, the validator is assigned +[responsibilities](#beacon-chain-responsibilities) until exited. -*Note*: There is a maximum validator churn per finalized epoch, so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated. +*Note*: There is a maximum validator churn per finalized epoch, so the delay +until activation is variable depending upon finality, total active validator +balance, and the number of validators in the queue to be activated. ## Validator assignments -A validator can get committee assignments for a given epoch using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`. +A validator can get committee assignments for a given epoch using the following +helper via `get_committee_assignment(state, epoch, validator_index)` where +`epoch <= next_epoch`. ```python def get_committee_assignment(state: BeaconState, @@ -228,84 +294,120 @@ def get_committee_assignment(state: BeaconState, return None ``` -A validator can use the following function to see if they are supposed to propose during a slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch. +A validator can use the following function to see if they are supposed to +propose during a slot. This function can only be run with a `state` of the slot +in question. Proposer selection is only stable within the context of the current +epoch. ```python def is_proposer(state: BeaconState, validator_index: ValidatorIndex) -> bool: return get_beacon_proposer_index(state) == validator_index ``` -*Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot. +*Note*: To see if a validator is assigned to propose during the slot, the beacon +state must be in the epoch in question. 
At the epoch boundaries, the validator +must run an epoch transition into the epoch to successfully check the proposal +assignment of the first slot. -*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and in a given epoch each responsibility might occur at a different slot. +*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and +in a given epoch each responsibility might occur at a different slot. ### Lookahead -The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead -on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. -Note that this lookahead does not apply to proposing, which must be checked during the epoch in question. +The beacon chain shufflings are designed to provide a minimum of 1 epoch +lookahead on the validator's upcoming committee assignments for attesting +dictated by the shuffling and slot. Note that this lookahead does not apply to +proposing, which must be checked during the epoch in question. -`get_committee_assignment` should be called at the start of each epoch -to get the assignment for the next epoch (`current_epoch + 1`). -A validator should plan for future assignments by noting their assigned attestation -slot and joining the committee index attestation subnet related to their committee assignment. +`get_committee_assignment` should be called at the start of each epoch to get +the assignment for the next epoch (`current_epoch + 1`). A validator should plan +for future assignments by noting their assigned attestation slot and joining the +committee index attestation subnet related to their committee assignment. Specifically a validator should: -- Call `_, committee_index, _ = get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. -- Calculate the committees per slot for the next epoch: `committees_per_slot = get_committee_count_per_slot(state, next_epoch)` -- Calculate the subnet index: `subnet_id = compute_subnet_for_attestation(committees_per_slot, slot, committee_index)` +- Call + `_, committee_index, _ = get_committee_assignment(state, next_epoch, validator_index)` + when checking for next epoch assignments. +- Calculate the committees per slot for the next epoch: + `committees_per_slot = get_committee_count_per_slot(state, next_epoch)` +- Calculate the subnet index: + `subnet_id = compute_subnet_for_attestation(committees_per_slot, slot, committee_index)` - Find peers of the pubsub topic `beacon_attestation_{subnet_id}`. - - If an _insufficient_ number of current peers are subscribed to the topic, the validator must discover new peers on this topic. Via the discovery protocol, find peers with an ENR containing the `attnets` entry such that `ENR["attnets"][subnet_id] == True`. Then validate that the peers are still persisted on the desired topic by requesting `GetMetaData` and checking the resulting `attnets` field. - - If the validator is assigned to be an aggregator for the slot (see `is_aggregator()`), then subscribe to the topic. - -*Note*: If the validator is _not_ assigned to be an aggregator, the validator only needs sufficient number of peers on the topic to be able to publish messages. The validator does not need to _subscribe_ and listen to all messages on the topic. + - If an _insufficient_ number of current peers are subscribed to the topic, + the validator must discover new peers on this topic. 
Via the discovery + protocol, find peers with an ENR containing the `attnets` entry such that + `ENR["attnets"][subnet_id] == True`. Then validate that the peers are still + persisted on the desired topic by requesting `GetMetaData` and checking the + resulting `attnets` field. + - If the validator is assigned to be an aggregator for the slot (see + `is_aggregator()`), then subscribe to the topic. + +*Note*: If the validator is _not_ assigned to be an aggregator, the validator +only needs sufficient number of peers on the topic to be able to publish +messages. The validator does not need to _subscribe_ and listen to all messages +on the topic. ## Beacon chain responsibilities -A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attesting). Proposals happen infrequently, whereas attestations should be created once per epoch. +A validator has two primary responsibilities to the beacon chain: +[proposing blocks](#block-proposal) and [creating attestations](#attesting). +Proposals happen infrequently, whereas attestations should be created once per +epoch. ### Block proposal -A validator is expected to propose a [`SignedBeaconBlock`](./beacon-chain.md#signedbeaconblock) at -the beginning of any `slot` during which `is_proposer(state, validator_index)` returns `True`. +A validator is expected to propose a +[`SignedBeaconBlock`](./beacon-chain.md#signedbeaconblock) at the beginning of +any `slot` during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects a `BeaconBlock`, `parent` using this process: 1. Compute fork choice's view of the head at the start of `slot`, after running - `on_tick` and applying any queued attestations from `slot - 1`. - Set `head_root = get_head(store)`. -2. Compute the _proposer head_, which is the head upon which the proposer SHOULD build in order to - incentivise timely block propagation by other validators. - Set `parent_root = get_proposer_head(store, head_root, slot)`. - A proposer may set `parent_root == head_root` if proposer re-orgs are not implemented or have - been disabled. + `on_tick` and applying any queued attestations from `slot - 1`. Set + `head_root = get_head(store)`. +2. Compute the _proposer head_, which is the head upon which the proposer SHOULD + build in order to incentivise timely block propagation by other validators. + Set `parent_root = get_proposer_head(store, head_root, slot)`. A proposer may + set `parent_root == head_root` if proposer re-orgs are not implemented or + have been disabled. 3. Let `parent` be the block with `parent_root`. -The validator creates, signs, and broadcasts a `block` that is a child of `parent` -and satisfies a valid [beacon chain state transition](./beacon-chain.md#beacon-chain-state-transition-function). -Note that the parent's slot must be strictly less than the slot of the block about to be proposed, -i.e. `parent.slot < slot`. +The validator creates, signs, and broadcasts a `block` that is a child of +`parent` and satisfies a valid +[beacon chain state transition](./beacon-chain.md#beacon-chain-state-transition-function). +Note that the parent's slot must be strictly less than the slot of the block +about to be proposed, i.e. `parent.slot < slot`. -There is one proposer per slot, so if there are N active validators any individual validator -will on average be assigned to propose once per N slots (e.g. at 312,500 validators = 10 million ETH, that's once per ~6 weeks). 
+There is one proposer per slot, so if there are N active validators any +individual validator will on average be assigned to propose once per N slots +(e.g. at 312,500 validators = 10 million ETH, that's once per ~6 weeks). -*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied. -That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`. +*Note*: In this section, `state` is the state of the slot for the block proposal +_without_ the block yet applied. That is, `state` is the `previous_state` +processed through any empty slots up to the assigned slot using +`process_slots(previous_state, slot)`. #### Preparing for a `BeaconBlock` -To construct a `BeaconBlockBody`, a `block` (`BeaconBlock`) is defined with the necessary context for a block proposal: +To construct a `BeaconBlockBody`, a `block` (`BeaconBlock`) is defined with the +necessary context for a block proposal: ##### Slot -Set `block.slot = slot` where `slot` is the current slot at which the validator has been selected to propose. The `parent` selected must satisfy that `parent.slot < block.slot`. +Set `block.slot = slot` where `slot` is the current slot at which the validator +has been selected to propose. The `parent` selected must satisfy that +`parent.slot < block.slot`. -*Note*: There might be "skipped" slots between the `parent` and `block`. These skipped slots are processed in the state transition function without per-block processing. +*Note*: There might be "skipped" slots between the `parent` and `block`. These +skipped slots are processed in the state transition function without per-block +processing. ##### Proposer index -Set `block.proposer_index = validator_index` where `validator_index` is the validator chosen to propose at this slot. The private key mapping to `state.validators[validator_index].pubkey` is used to sign the block. +Set `block.proposer_index = validator_index` where `validator_index` is the +validator chosen to propose at this slot. The private key mapping to +`state.validators[validator_index].pubkey` is used to sign the block. ##### Parent root @@ -315,7 +417,8 @@ Set `block.parent_root = hash_tree_root(parent)`. ##### Randao reveal -Set `block.body.randao_reveal = epoch_signature` where `epoch_signature` is obtained from: +Set `block.body.randao_reveal = epoch_signature` where `epoch_signature` is +obtained from: ```python def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: @@ -326,21 +429,25 @@ def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> ##### Eth1 Data -The `block.body.eth1_data` field is for block proposers to vote on recent Eth1 data. -This recent data contains an Eth1 block hash as well as the associated deposit root -(as calculated by the `get_deposit_root()` method of the deposit contract) and -deposit count after execution of the corresponding Eth1 block. -If over half of the block proposers in the current Eth1 voting period vote for the same -`eth1_data` then `state.eth1_data` updates immediately allowing new deposits to be processed. -Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`. +The `block.body.eth1_data` field is for block proposers to vote on recent Eth1 +data. 
This recent data contains an Eth1 block hash as well as the associated +deposit root (as calculated by the `get_deposit_root()` method of the deposit +contract) and deposit count after execution of the corresponding Eth1 block. If +over half of the block proposers in the current Eth1 voting period vote for the +same `eth1_data` then `state.eth1_data` updates immediately allowing new +deposits to be processed. Each deposit in `block.body.deposits` must verify +against `state.eth1_data.eth1_deposit_root`. ###### `get_eth1_data` -Let `Eth1Block` be an abstract object representing Eth1 blocks with the `timestamp` and deposit contract data available. +Let `Eth1Block` be an abstract object representing Eth1 blocks with the +`timestamp` and deposit contract data available. -Let `get_eth1_data(block: Eth1Block) -> Eth1Data` be the function that returns the Eth1 data for a given Eth1 block. +Let `get_eth1_data(block: Eth1Block) -> Eth1Data` be the function that returns +the Eth1 data for a given Eth1 block. -An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state, eth1_chain)` where: +An honest block proposer sets +`block.body.eth1_data = get_eth1_vote(state, eth1_chain)` where: ```python def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: @@ -391,25 +498,56 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da ##### Proposer slashings -Up to `MAX_PROPOSER_SLASHINGS`, [`ProposerSlashing`](./beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](./beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included. +Up to `MAX_PROPOSER_SLASHINGS`, +[`ProposerSlashing`](./beacon-chain.md#proposerslashing) objects can be included +in the `block`. The proposer slashings must satisfy the verification conditions +found in [proposer slashings processing](./beacon-chain.md#proposer-slashings). +The validator receives a small "whistleblower" reward for each proposer slashing +found and included. ##### Attester slashings -Up to `MAX_ATTESTER_SLASHINGS`, [`AttesterSlashing`](./beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [attester slashings processing](./beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. +Up to `MAX_ATTESTER_SLASHINGS`, +[`AttesterSlashing`](./beacon-chain.md#attesterslashing) objects can be included +in the `block`. The attester slashings must satisfy the verification conditions +found in [attester slashings processing](./beacon-chain.md#attester-slashings). +The validator receives a small "whistleblower" reward for each attester slashing +found and included. ##### Attestations -Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](./beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. +Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. 
+The attestations added must satisfy the verification conditions found in +[attestation processing](./beacon-chain.md#attestations). To maximize profit, +the validator should attempt to gather aggregate attestations that include +singular attestations from the largest number of validators whose signatures +from the same epoch have not previously been added on chain. ##### Deposits -If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](./beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [deposit contract](./deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](./beacon-chain.md#deposits). - -The `proof` for each deposit must be constructed against the deposit root contained in `state.eth1_data` rather than the deposit root at the time the deposit was initially logged from the proof-of-work chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. +If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. +`state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending +deposits _must_ be added to the block. The expected number of deposits is +exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. +These [`deposits`](./beacon-chain.md#deposit) are constructed from the `Deposit` +logs from the [deposit contract](./deposit-contract.md) and must be processed in +sequential order. The deposits included in the `block` must satisfy the +verification conditions found in +[deposits processing](./beacon-chain.md#deposits). + +The `proof` for each deposit must be constructed against the deposit root +contained in `state.eth1_data` rather than the deposit root at the time the +deposit was initially logged from the proof-of-work chain. This entails storing +a full deposit merkle tree locally and computing updated proofs against the +`eth1_data.deposit_root` as needed. See +[`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) +for a sample implementation. ##### Voluntary exits -Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](./beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](./beacon-chain.md#voluntary-exits). +Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](./beacon-chain.md#voluntaryexit) +objects can be included in the `block`. The exits must satisfy the verification +conditions found in [exits processing](./beacon-chain.md#voluntary-exits). *Note*: If a slashing for a validator is included in the same block as a voluntary exit, the voluntary exit will fail and cause the block to be invalid @@ -420,10 +558,14 @@ operation interaction when packing blocks. ##### State root -Set `block.state_root = hash_tree_root(state)` of the resulting `state` of the `parent -> block` state transition. 
+Set `block.state_root = hash_tree_root(state)` of the resulting `state` of the +`parent -> block` state transition. -*Note*: To calculate `state_root`, the validator should first run the state transition function on an unsigned `block` containing a stub for the `state_root`. -It is useful to be able to run a state transition function (working on a copy of the state) that does _not_ validate signatures or state root for this purpose: +*Note*: To calculate `state_root`, the validator should first run the state +transition function on an unsigned `block` containing a stub for the +`state_root`. It is useful to be able to run a state transition function +(working on a copy of the state) that does _not_ validate signatures or state +root for this purpose: ```python def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root: @@ -435,7 +577,8 @@ def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root: ##### Signature -`signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, where `block_signature` is obtained from: +`signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, +where `block_signature` is obtained from: ```python def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: @@ -446,23 +589,38 @@ def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> ### Attesting -A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `index`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`. +A validator is expected to create, sign, and broadcast an attestation during +each epoch. The `committee`, assigned `index`, and assigned `slot` for which the +validator performs this role during an epoch are defined by +`get_committee_assignment(state, epoch, validator_index)`. -A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid block from the expected block proposer for the assigned `slot` or (b) `1 / INTERVALS_PER_SLOT` of the `slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of `slot`) -- whichever comes _first_. +A validator should create and broadcast the `attestation` to the associated +attestation subnet when either (a) the validator has received a valid block from +the expected block proposer for the assigned `slot` or (b) +`1 / INTERVALS_PER_SLOT` of the `slot` has transpired +(`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of `slot`) -- +whichever comes _first_. -*Note*: Although attestations during `GENESIS_EPOCH` do not count toward FFG finality, these initial attestations do give weight to the fork choice, are rewarded, and should be made. +*Note*: Although attestations during `GENESIS_EPOCH` do not count toward FFG +finality, these initial attestations do give weight to the fork choice, are +rewarded, and should be made. #### Attestation data -First, the validator should construct `attestation_data`, an [`AttestationData`](./beacon-chain.md#attestationdata) object based upon the state at the assigned slot. +First, the validator should construct `attestation_data`, an +[`AttestationData`](./beacon-chain.md#attestationdata) object based upon the +state at the assigned slot. -- Let `head_block` be the result of running the fork choice during the assigned slot. 
-- Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`. +- Let `head_block` be the result of running the fork choice during the assigned + slot. +- Let `head_state` be the state of `head_block` processed through any empty + slots up to the assigned slot using `process_slots(state, slot)`. ##### General - Set `attestation_data.slot = slot` where `slot` is the assigned slot. -- Set `attestation_data.index = index` where `index` is the index associated with the validator's committee. +- Set `attestation_data.index = index` where `index` is the index associated + with the validator's committee. ##### LMD GHOST vote @@ -471,30 +629,41 @@ Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`. ##### FFG vote - Set `attestation_data.source = head_state.current_justified_checkpoint`. -- Set `attestation_data.target = Checkpoint(epoch=get_current_epoch(head_state), root=epoch_boundary_block_root)` where `epoch_boundary_block_root` is the root of block at the most recent epoch boundary. +- Set + `attestation_data.target = Checkpoint(epoch=get_current_epoch(head_state), root=epoch_boundary_block_root)` + where `epoch_boundary_block_root` is the root of block at the most recent + epoch boundary. *Note*: `epoch_boundary_block_root` can be looked up in the state using: - Let `start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))`. -- Let `epoch_boundary_block_root = hash_tree_root(head_block) if start_slot == head_state.slot else get_block_root(state, get_current_epoch(head_state))`. +- Let + `epoch_boundary_block_root = hash_tree_root(head_block) if start_slot == head_state.slot else get_block_root(state, get_current_epoch(head_state))`. #### Construct attestation -Next, the validator creates `attestation`, an [`Attestation`](./beacon-chain.md#attestation) object. +Next, the validator creates `attestation`, an +[`Attestation`](./beacon-chain.md#attestation) object. ##### Data -Set `attestation.data = attestation_data` where `attestation_data` is the `AttestationData` object defined in the previous section, [attestation data](#attestation-data). +Set `attestation.data = attestation_data` where `attestation_data` is the +`AttestationData` object defined in the previous section, +[attestation data](#attestation-data). ##### Aggregation bits -- Let `attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where the bit of the index of the validator in the `committee` is set to `0b1`. +- Let `attestation.aggregation_bits` be a + `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where the + bit of the index of the validator in the `committee` is set to `0b1`. -*Note*: Calling `get_attesting_indices(state, attestation)` should return a list of length equal to 1, containing `validator_index`. +*Note*: Calling `get_attesting_indices(state, attestation)` should return a list +of length equal to 1, containing `validator_index`. 
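A minimal sketch of building these single-attester aggregation bits, assuming the pyspec environment (`Bitlist`, `MAX_VALIDATORS_PER_COMMITTEE`) and the `committee` list returned by `get_committee_assignment`; the helper name `get_single_attester_bits` is illustrative, not part of the spec:

```python
def get_single_attester_bits(committee: Sequence[ValidatorIndex],
                             validator_index: ValidatorIndex) -> Bitlist[MAX_VALIDATORS_PER_COMMITTEE]:
    # All bits start unset for a committee of this size.
    aggregation_bits = Bitlist[MAX_VALIDATORS_PER_COMMITTEE](*([0b0] * len(committee)))
    # Set only the bit at the validator's own position within the committee.
    aggregation_bits[committee.index(validator_index)] = 0b1
    return aggregation_bits
```

Constructing the bits this way keeps `get_attesting_indices(state, attestation)` returning only `validator_index`, as the note above expects.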
##### Aggregate signature -Set `attestation.signature = attestation_signature` where `attestation_signature` is obtained from: +Set `attestation.signature = attestation_signature` where +`attestation_signature` is obtained from: ```python def get_attestation_signature(state: BeaconState, attestation_data: AttestationData, privkey: int) -> BLSSignature: @@ -505,12 +674,15 @@ def get_attestation_signature(state: BeaconState, attestation_data: AttestationD #### Broadcast attestation -Finally, the validator broadcasts `attestation` to the associated attestation subnet, the `beacon_attestation_{subnet_id}` pubsub topic. +Finally, the validator broadcasts `attestation` to the associated attestation +subnet, the `beacon_attestation_{subnet_id}` pubsub topic. The `subnet_id` for the `attestation` is calculated with: -- Let `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`. -- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`. +- Let + `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`. +- Let + `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`. ```python def compute_subnet_for_attestation(committees_per_slot: uint64, @@ -528,11 +700,13 @@ def compute_subnet_for_attestation(committees_per_slot: uint64, ### Attestation aggregation -Some validators are selected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`. +Some validators are selected to locally aggregate attestations with a similar +`attestation_data` to their constructed `attestation` for the assigned `slot`. #### Aggregation selection -A validator is selected to aggregate based upon the return value of `is_aggregator()`. +A validator is selected to aggregate based upon the return value of +`is_aggregator()`. ```python def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature: @@ -550,21 +724,30 @@ def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_si #### Construct aggregate -If the validator is selected to aggregate (`is_aggregator()`), they construct an aggregate attestation via the following. +If the validator is selected to aggregate (`is_aggregator()`), they construct an +aggregate attestation via the following. -Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator. If `len(attestations) > 0`, create an `aggregate_attestation: Attestation` with the following fields. +Collect `attestations` seen via gossip during the `slot` that have an equivalent +`attestation_data` to that constructed by the validator. If +`len(attestations) > 0`, create an `aggregate_attestation: Attestation` with the +following fields. ##### Data -Set `aggregate_attestation.data = attestation_data` where `attestation_data` is the `AttestationData` object that is the same for each individual attestation being aggregated. +Set `aggregate_attestation.data = attestation_data` where `attestation_data` is +the `AttestationData` object that is the same for each individual attestation +being aggregated. ##### Aggregation bits -Let `aggregate_attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where each bit set from each individual attestation is set to `0b1`. 
+Let `aggregate_attestation.aggregation_bits` be a +`Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where each +bit set from each individual attestation is set to `0b1`. ##### Aggregate signature -Set `aggregate_attestation.signature = aggregate_signature` where `aggregate_signature` is obtained from: +Set `aggregate_attestation.signature = aggregate_signature` where +`aggregate_signature` is obtained from: ```python def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature: @@ -574,13 +757,22 @@ def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature #### Broadcast aggregate -If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) `2 / INTERVALS_PER_SLOT` of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / INTERVALS_PER_SLOT` seconds after the start of `slot`. +If the validator is selected to aggregate (`is_aggregator`), then they broadcast +their best aggregate as a `SignedAggregateAndProof` to the global aggregate +channel (`beacon_aggregate_and_proof`) `2 / INTERVALS_PER_SLOT` of the way +through the `slot`-that is, `SECONDS_PER_SLOT * 2 / INTERVALS_PER_SLOT` seconds +after the start of `slot`. -Selection proofs are provided in `AggregateAndProof` to prove to the gossip channel that the validator has been selected as an aggregator. +Selection proofs are provided in `AggregateAndProof` to prove to the gossip +channel that the validator has been selected as an aggregator. -`AggregateAndProof` messages are signed by the aggregator and broadcast inside of `SignedAggregateAndProof` objects to prevent a class of DoS attacks and message forgeries. +`AggregateAndProof` messages are signed by the aggregator and broadcast inside +of `SignedAggregateAndProof` objects to prevent a class of DoS attacks and +message forgeries. -First, `aggregate_and_proof = get_aggregate_and_proof(state, validator_index, aggregate_attestation, privkey)` is constructed. +First, +`aggregate_and_proof = get_aggregate_and_proof(state, validator_index, aggregate_attestation, privkey)` +is constructed. ```python def get_aggregate_and_proof(state: BeaconState, @@ -594,7 +786,9 @@ def get_aggregate_and_proof(state: BeaconState, ) ``` -Then `signed_aggregate_and_proof = SignedAggregateAndProof(message=aggregate_and_proof, signature=signature)` is constructed and broadcast. Where `signature` is obtained from: +Then +`signed_aggregate_and_proof = SignedAggregateAndProof(message=aggregate_and_proof, signature=signature)` +is constructed and broadcast. Where `signature` is obtained from: ```python def get_aggregate_and_proof_signature(state: BeaconState, @@ -608,40 +802,84 @@ def get_aggregate_and_proof_signature(state: BeaconState, ## How to avoid slashing -"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed: [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed. +"Slashing" is the burning of some amount of validator funds and immediate +ejection from the active validator set. 
In Phase 0, there are two ways in which +funds can be slashed: [proposer slashing](#proposer-slashing) and +[attester slashing](#attester-slashing). Although being slashed has serious +repercussions, it is simple enough to avoid being slashed all together by +remaining _consistent_ with respect to the messages a validator has previously +signed. -*Note*: Signed data must be within a sequential `Fork` context to conflict. Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can be slashable against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable against messages in 102, and vice versa. +*Note*: Signed data must be within a sequential `Fork` context to conflict. +Messages cannot be slashed across diverging forks. If the previous fork version +is 1 and the chain splits into fork 2 and 102, messages from 1 can be slashable +against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable +against messages in 102, and vice versa. ### Proposer slashing -To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](./beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same slot. +To avoid "proposer slashings", a validator must not sign two conflicting +[`BeaconBlock`](./beacon-chain.md#beaconblock) where conflicting is defined as +two distinct blocks within the same slot. -*In Phase 0, as long as the validator does not sign two different beacon blocks for the same slot, the validator is safe against proposer slashings.* +*In Phase 0, as long as the validator does not sign two different beacon blocks +for the same slot, the validator is safe against proposer slashings.* -Specifically, when signing a `BeaconBlock`, a validator should perform the following steps in the following order: +Specifically, when signing a `BeaconBlock`, a validator should perform the +following steps in the following order: -1. Save a record to hard disk that a beacon block has been signed for the `slot=block.slot`. +1. Save a record to hard disk that a beacon block has been signed for the + `slot=block.slot`. 2. Generate and broadcast the block. -If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast block and can effectively avoid slashing. +If the software crashes at some point within this routine, then when the +validator comes back online, the hard disk has the record of the *potentially* +signed/broadcast block and can effectively avoid slashing. ### Attester slashing -To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](./beacon-chain.md#attestationdata) objects, i.e. two attestations that satisfy [`is_slashable_attestation_data`](./beacon-chain.md#is_slashable_attestation_data). +To avoid "attester slashings", a validator must not sign two conflicting +[`AttestationData`](./beacon-chain.md#attestationdata) objects, i.e. two +attestations that satisfy +[`is_slashable_attestation_data`](./beacon-chain.md#is_slashable_attestation_data). -Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order: +Specifically, when signing an `Attestation`, a validator should perform the +following steps in the following order: -1. Save a record to hard disk that an attestation has been signed for source (i.e. 
`attestation_data.source.epoch`) and target (i.e. `attestation_data.target.epoch`). +1. Save a record to hard disk that an attestation has been signed for source + (i.e. `attestation_data.source.epoch`) and target (i.e. + `attestation_data.target.epoch`). 2. Generate and broadcast attestation. -If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast attestation and can effectively avoid slashing. +If the software crashes at some point within this routine, then when the +validator comes back online, the hard disk has the record of the *potentially* +signed/broadcast attestation and can effectively avoid slashing. ## Protection best practices -A validator client should be considered standalone and should consider the beacon node as untrusted. This means that the validator client should protect: - -1. Private keys -- private keys should be protected from being exported accidentally or by an attacker. -2. Slashing -- before a validator client signs a message it should validate the data, check it against a local slashing database (do not sign a slashable attestation or block) and update its internal slashing database with the newly signed object. -3. Recovered validator -- Recovering a validator from a private key will result in an empty local slashing db. Best practice is to import (from a trusted source) that validator's attestation history. See [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard slashing interchange format. -4. Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity penalty and potential ejection due to low balance. - A validator client should prevent itself from signing such requests by: a) keeping a local time clock if possible and following best practices to stop time server attacks and b) refusing to sign, by default, any message that has a large (>6h) gap from the current slashing protection database indicated a time "jump" or a long offline event. The administrator can manually override this protection to restart the validator after a genuine long offline event. +A validator client should be considered standalone and should consider the +beacon node as untrusted. This means that the validator client should protect: + +1. Private keys -- private keys should be protected from being exported + accidentally or by an attacker. +2. Slashing -- before a validator client signs a message it should validate the + data, check it against a local slashing database (do not sign a slashable + attestation or block) and update its internal slashing database with the + newly signed object. +3. Recovered validator -- Recovering a validator from a private key will result + in an empty local slashing db. Best practice is to import (from a trusted + source) that validator's attestation history. See + [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard + slashing interchange format. +4. Far future signing requests -- A validator client can be requested to sign a + far into the future attestation, resulting in a valid non-slashable request. 
+ If the validator client signs this message, it will result in it blocking + itself from attesting any other attestation until the beacon-chain reaches + that far into the future epoch. This will result in an inactivity penalty and + potential ejection due to low balance. A validator client should prevent + itself from signing such requests by: a) keeping a local time clock if + possible and following best practices to stop time server attacks and b) + refusing to sign, by default, any message that has a large (>6h) gap from the + current slashing protection database indicated a time "jump" or a long + offline event. The administrator can manually override this protection to + restart the validator after a genuine long offline event. diff --git a/specs/phase0/weak-subjectivity.md b/specs/phase0/weak-subjectivity.md index 6bfad75124..1ecd86d913 100644 --- a/specs/phase0/weak-subjectivity.md +++ b/specs/phase0/weak-subjectivity.md @@ -21,9 +21,10 @@ ## Introduction -This document is a guide for implementing the Weak Subjectivity protections in Phase 0. -This document is still a work-in-progress, and is subject to large changes. -For more information about weak subjectivity and why it is required, please refer to: +This document is a guide for implementing the Weak Subjectivity protections in +Phase 0. This document is still a work-in-progress, and is subject to large +changes. For more information about weak subjectivity and why it is required, +please refer to: - [Weak Subjectivity in Ethereum Proof-of-Stake](https://notes.ethereum.org/@adiasg/weak-subjectvity-eth2) - [Proof of Stake: How I Learned to Love Weak Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/) @@ -31,7 +32,8 @@ For more information about weak subjectivity and why it is required, please refe ## Prerequisites This document uses data structures, constants, functions, and terminology from -[Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Beacon Chain Fork Choice](./fork-choice.md). +[Phase 0 -- The Beacon Chain](./beacon-chain.md) and +[Phase 0 -- Beacon Chain Fork Choice](./fork-choice.md). ## Custom Types @@ -53,28 +55,38 @@ This document uses data structures, constants, functions, and terminology from ## Weak Subjectivity Checkpoint -Any `Checkpoint` object can be used as a Weak Subjectivity Checkpoint. -These Weak Subjectivity Checkpoints are distributed by providers, -downloaded by users and/or distributed as a part of clients, and used as input while syncing a client. +Any `Checkpoint` object can be used as a Weak Subjectivity Checkpoint. These +Weak Subjectivity Checkpoints are distributed by providers, downloaded by users +and/or distributed as a part of clients, and used as input while syncing a +client. ## Weak Subjectivity Period The Weak Subjectivity Period is the number of recent epochs within which there -must be a Weak Subjectivity Checkpoint to ensure that an attacker who takes control -of the validator set at the beginning of the period is slashed at least a minimum threshold -in the event that a conflicting `Checkpoint` is finalized. +must be a Weak Subjectivity Checkpoint to ensure that an attacker who takes +control of the validator set at the beginning of the period is slashed at least +a minimum threshold in the event that a conflicting `Checkpoint` is finalized. -`SAFETY_DECAY` is defined as the maximum percentage tolerable loss in the one-third -safety margin of FFG finality. 
Thus, any attack exploiting the Weak Subjectivity Period has -a safety margin of at least `1/3 - SAFETY_DECAY/100`. +`SAFETY_DECAY` is defined as the maximum percentage tolerable loss in the +one-third safety margin of FFG finality. Thus, any attack exploiting the Weak +Subjectivity Period has a safety margin of at least `1/3 - SAFETY_DECAY/100`. ### Calculating the Weak Subjectivity Period -A detailed analysis of the calculation of the weak subjectivity period is made in [this report](https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf). +A detailed analysis of the calculation of the weak subjectivity period is made +in +[this report](https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf). -*Note*: The expressions in the report use fractions, whereas the consensus-specs only use `uint64` arithmetic. The expressions have been simplified to avoid computing fractions, and more details can be found [here](https://www.overleaf.com/read/wgjzjdjpvpsd). +*Note*: The expressions in the report use fractions, whereas the consensus-specs +only use `uint64` arithmetic. The expressions have been simplified to avoid +computing fractions, and more details can be found +[here](https://www.overleaf.com/read/wgjzjdjpvpsd). -*Note*: The calculations here use `Ether` instead of `Gwei`, because the large magnitude of balances in `Gwei` can cause an overflow while computing using `uint64` arithmetic operations. Using `Ether` reduces the magnitude of the multiplicative factors by an order of `ETH_TO_GWEI` (`= 10**9`) and avoid the scope for overflows in `uint64`. +*Note*: The calculations here use `Ether` instead of `Gwei`, because the large +magnitude of balances in `Gwei` can cause an overflow while computing using +`uint64` arithmetic operations. Using `Ether` reduces the magnitude of the +multiplicative factors by an order of `ETH_TO_GWEI` (`= 10**9`) and avoid the +scope for overflows in `uint64`. #### `compute_weak_subjectivity_period` @@ -112,7 +124,8 @@ def compute_weak_subjectivity_period(state: BeaconState) -> uint64: return ws_period ``` -A brief reference for what these values look like in practice ([reference script](https://gist.github.com/adiasg/3aceab409b36aa9a9d9156c1baa3c248)): +A brief reference for what these values look like in practice +([reference script](https://gist.github.com/adiasg/3aceab409b36aa9a9d9156c1baa3c248)): | Safety Decay | Avg. Val. Balance (ETH) | Val. Count | Weak Sub. Period (Epochs) | | ------------ | ----------------------- | ---------- | ------------------------- | @@ -132,14 +145,16 @@ A brief reference for what these values look like in practice ([reference script ## Weak Subjectivity Sync Clients should allow users to input a Weak Subjectivity Checkpoint at startup, -and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain. -If such a sync is not possible, the client should treat this as a critical and irrecoverable failure. +and guarantee that any successful sync leads to the given Weak Subjectivity +Checkpoint along the canonical chain. If such a sync is not possible, the client +should treat this as a critical and irrecoverable failure. ### Weak Subjectivity Sync Procedure -1. 
Input a Weak Subjectivity Checkpoint as a CLI parameter in `block_root:epoch_number` format, - where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`. - Example of the format: +1. Input a Weak Subjectivity Checkpoint as a CLI parameter in + `block_root:epoch_number` format, where `block_root` (an "0x" prefixed + 32-byte hex string) and `epoch_number` (an integer) represent a valid + `Checkpoint`. Example of the format: ``` 0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544 @@ -147,19 +162,22 @@ If such a sync is not possible, the client should treat this as a critical and i 2. Check the weak subjectivity requirements: - - *IF* `epoch_number > store.finalized_checkpoint.epoch`, - then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`. - Emit descriptive critical error if this assert fails, then exit client process. - - *IF* `epoch_number <= store.finalized_checkpoint.epoch`, - then *ASSERT* that the block in the canonical chain at epoch `epoch_number` has root `block_root`. - Emit descriptive critical error if this assert fails, then exit client process. + - *IF* `epoch_number > store.finalized_checkpoint.epoch`, then *ASSERT* + during block sync that block with root `block_root` is in the sync path at + epoch `epoch_number`. Emit descriptive critical error if this assert fails, + then exit client process. + - *IF* `epoch_number <= store.finalized_checkpoint.epoch`, then *ASSERT* that + the block in the canonical chain at epoch `epoch_number` has root + `block_root`. Emit descriptive critical error if this assert fails, then + exit client process. ### Checking for Stale Weak Subjectivity Checkpoint -Clients may choose to validate that the input Weak Subjectivity Checkpoint is not stale at the time of startup. -To support this mechanism, the client needs to take the state at the Weak Subjectivity Checkpoint as -a CLI parameter input (or fetch the state associated with the input Weak Subjectivity Checkpoint from some source). -The check can be implemented in the following way: +Clients may choose to validate that the input Weak Subjectivity Checkpoint is +not stale at the time of startup. To support this mechanism, the client needs to +take the state at the Weak Subjectivity Checkpoint as a CLI parameter input (or +fetch the state associated with the input Weak Subjectivity Checkpoint from some +source). The check can be implemented in the following way: #### `is_within_weak_subjectivity_period` diff --git a/ssz/merkle-proofs.md b/ssz/merkle-proofs.md index f8db84be08..202a4ee8a1 100644 --- a/ssz/merkle-proofs.md +++ b/ssz/merkle-proofs.md @@ -51,7 +51,8 @@ def get_power_of_two_floor(x: int) -> int: ## Generalized Merkle tree index -In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: +In a binary Merkle tree, we define a "generalized index" of a node as +`2**depth + index`. Visually, this looks as follows: ``` 1 @@ -60,7 +61,10 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth ... 
``` -Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: +Note that the generalized index has the convenient property that the two +children of node `k` are `2k` and `2k+1`, and also that it equals the position +of a node in the linear representation of the Merkle tree that's computed by +this function: ```python def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]: @@ -76,13 +80,16 @@ def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]: return o ``` -We define a custom type `GeneralizedIndex` as a Python integer type in this document. It can be represented as a Bitvector/Bitlist object as well. +We define a custom type `GeneralizedIndex` as a Python integer type in this +document. It can be represented as a Bitvector/Bitlist object as well. We will define Merkle proofs in terms of generalized indices. ## SSZ object to index -We can describe the hash tree of any SSZ object, rooted in `hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For example, an object `{x: bytes32, y: List[uint64]}` would look as follows: +We can describe the hash tree of any SSZ object, rooted in +`hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For +example, an object `{x: bytes32, y: List[uint64]}` would look as follows: ``` root @@ -95,7 +102,16 @@ y_data_root len(y) ....... ``` -We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. We define `SSZVariableName` as the member variable name string, i.e., a path is presented as a sequence of integers and `SSZVariableName`. +We can now define a concept of a "path", a way of describing a function that +takes as input an SSZ object and outputs some specific (possibly deeply nested) +member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and +`foo -> foo.y[5].w`. We'll describe paths as lists, which can have two +representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` +and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` +values, in these cases (assuming the fields of `foo` in order are `x` then `y`, +and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. We +define `SSZVariableName` as the member variable name string, i.e., a path is +presented as a sequence of integers and `SSZVariableName`. ```python def item_length(typ: SSZType) -> int: @@ -183,7 +199,10 @@ def get_generalized_index(typ: SSZType, *path: PyUnion[int, SSZVariableName]) -> ### Helpers for generalized indices -_Usage note: functions outside this section should manipulate generalized indices using only functions inside this section. 
This is to make it easier for developers to implement generalized indices with underlying representations other than bigints._ +_Usage note: functions outside this section should manipulate generalized +indices using only functions inside this section. This is to make it easier for +developers to implement generalized indices with underlying representations +other than bigints._ #### `concat_generalized_indices` @@ -242,7 +261,11 @@ def generalized_index_parent(index: GeneralizedIndex) -> GeneralizedIndex: ## Merkle multiproofs -We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (i.e. generalized indices 8, 9, 14): +We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree +needed to fully authenticate that a set of nodes actually are part of a Merkle +tree with some specified root, at a particular set of generalized indices. For +example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle +tree (i.e. generalized indices 8, 9, 14): ``` . @@ -251,9 +274,16 @@ We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree need x x . . . . x * ``` -. are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, the same amount required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`. +. are unused nodes, * are used nodes, x are the values we are trying to prove. +Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary +nodes, the same amount required to prove a single value. Normally the efficiency +gains are not quite that extreme, but the savings relative to individual Merkle +proofs are still significant. As a rule of thumb, a multiproof for k nodes at +the same level of an n-node tree has size `k * (n/k + log(n/k))`. -First, we provide a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: +First, we provide a method for computing the generalized indices of the +auxiliary tree nodes that a proof of a given set of generalized indices will +require: ```python def get_branch_indices(tree_index: GeneralizedIndex) -> Sequence[GeneralizedIndex]: @@ -295,7 +325,8 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali return sorted(all_helper_indices.difference(all_path_indices), reverse=True) ``` -Now we provide the Merkle proof verification functions. First, for single item proofs: +Now we provide the Merkle proof verification functions. 
First, for single item +proofs: ```python def calculate_merkle_root(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex) -> Root: @@ -348,4 +379,9 @@ def verify_merkle_multiproof(leaves: Sequence[Bytes32], return calculate_multi_merkle_root(leaves, proof, indices) == root ``` -Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`). Note also that `calculate_merkle_root` and `calculate_multi_merkle_root` can be used independently to compute the new Merkle root of a proof with leaves updated. +Note that the single-item proof is a special case of a multi-item proof; a valid +single-item proof verifies correctly when put into the multi-item verification +function (making the natural trivial changes to input arguments, +`index -> [index]` and `leaf -> [leaf]`). Note also that `calculate_merkle_root` +and `calculate_multi_merkle_root` can be used independently to compute the new +Merkle root of a proof with leaves updated. diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index 5569eecb9d..ef5c1e6923 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -40,7 +40,8 @@ ### Basic types - `uintN`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`) -- `byte`: 8-bit opaque data container, equivalent in serialization and hashing to `uint8` +- `byte`: 8-bit opaque data container, equivalent in serialization and hashing + to `uint8` - `boolean`: `True` or `False` ### Composite types @@ -54,24 +55,34 @@ ``` - **vector**: ordered fixed-length homogeneous collection, with `N` values - notation `Vector[type, N]`, e.g. `Vector[uint64, N]` -- **list**: ordered variable-length homogeneous collection, limited to `N` values +- **list**: ordered variable-length homogeneous collection, limited to `N` + values - notation `List[type, N]`, e.g. `List[uint64, N]` -- **bitvector**: ordered fixed-length collection of `boolean` values, with `N` bits +- **bitvector**: ordered fixed-length collection of `boolean` values, with `N` + bits - notation `Bitvector[N]` -- **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits +- **bitlist**: ordered variable-length collection of `boolean` values, limited + to `N` bits - notation `Bitlist[N]` - **union**: union type containing one of the given subtypes - notation `Union[type_0, type_1, ...]`, e.g. `union[None, uint64, uint32]` -*Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies. +*Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due +to their different serialization requirements. Similarly, both +`List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally +`Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization +efficiencies. ### Variable-size and fixed-size -We recursively define "variable-size" types to be lists, unions, `Bitlist` and all types that contain a variable-size type. All other types are said to be "fixed-size". +We recursively define "variable-size" types to be lists, unions, `Bitlist` and +all types that contain a variable-size type. 
All other types are said to be +"fixed-size". ### Byte -Although the SSZ serialization of `byte` is equivalent to that of `uint8`, the former is used for opaque data while the latter is intended as a number. +Although the SSZ serialization of `byte` is equivalent to that of `uint8`, the +former is used for opaque data while the latter is intended as a number. ### Aliases @@ -81,11 +92,13 @@ For convenience we alias: - `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type) - `ByteList[N]` to `List[byte, N]` -Aliases are semantically equivalent to their underlying type and therefore share canonical representations both in SSZ and in related formats. +Aliases are semantically equivalent to their underlying type and therefore share +canonical representations both in SSZ and in related formats. ### Default values -Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types. +Assuming a helper function `default(type)` which returns the default value for +`type`, we can recursively define the default value for all types. | Type | Default Value | | ---------------------------- | --------------------------------------- | @@ -100,19 +113,23 @@ Assuming a helper function `default(type)` which returns the default value for ` #### `is_zero` -An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it is equal to the default value for that type. +An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it +is equal to the default value for that type. ### Illegal types - Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal. - Containers with no fields are illegal. -- The `None` type option in a `Union` type is only legal as the first option (i.e. with index zero). +- The `None` type option in a `Union` type is only legal as the first option + (i.e. with index zero). ## Serialization -We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `bytes`. +We recursively define the `serialize` function which consumes an object `value` +(of the type specified) and returns a bytestring of type `bytes`. -*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `is_variable_size`, etc.) objects implicitly carry their type. +*Note*: In the function definitions below (`serialize`, `hash_tree_root`, +`is_variable_size`, etc.) objects implicitly carry their type. ### `uintN` @@ -139,7 +156,9 @@ return bytes(array) ### `Bitlist[N]` -Note that from the offset coding, the length (in bytes) of the bitlist is known. An additional `1` bit is added to the end, at index `e` where `e` is the length of the bitlist (not the limit), so that the length in bits will also be known. +Note that from the offset coding, the length (in bytes) of the bitlist is known. +An additional `1` bit is added to the end, at index `e` where `e` is the length +of the bitlist (not the limit), so that the length in bits will also be known. ```python array = [0] * ((len(value) // 8) + 1) @@ -171,16 +190,19 @@ return b"".join(fixed_parts + variable_parts) ### Union -A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`. +A `value` as `Union[T...]` type has properties `value.value` with the contained +value, and `value.selector` which indexes the selected `Union` type option `T`. 
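A minimal sketch of the `value`/`selector` shape described above, using a hypothetical `UnionValue` dataclass rather than the pyspec's actual `Union` class:

```python
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class UnionValue:
    """Hypothetical stand-in for an SSZ union value: a selector plus the contained value."""

    selector: int         # index of the selected type option T
    value: Optional[Any]  # the contained value; None only when selector == 0


# Union[None, uint64, uint32]: option 0 is None, option 1 is uint64, option 2 is uint32.
none_choice = UnionValue(selector=0, value=None)
number_choice = UnionValue(selector=1, value=5)

assert none_choice.selector == 0 and none_choice.value is None
assert number_choice.selector == 1 and number_choice.value == 5
```

The serialization rule for such a value is given by the spec snippet that follows.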
A `Union`: - May have multiple selectors with the same type. -- Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions. +- Should not use selectors above 127 (i.e. highest bit is set), these are + reserved for backwards compatible extensions. - Must have at least 1 type option. - May have `None` as first type option, i.e. `selector == 0` - Must have at least 2 type options if the first is `None` -- Is always considered a variable-length type, even if all type options have an equal fixed-length. +- Is always considered a variable-length type, even if all type options have an + equal fixed-length. ```python if value.value is None: @@ -194,81 +216,138 @@ else: ## Deserialization -Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. - -Deserialization can be implemented using a recursive algorithm. The deserialization of basic objects is easy, and from there we can find a simple recursive algorithm for all fixed-size objects. For variable-size objects we have to do one of the following depending on what kind of object it is: - -- Vector/list of a variable-size object: The serialized data will start with offsets of all the serialized objects (`BYTES_PER_LENGTH_OFFSET` bytes each). - - Using the first offset, we can compute the length of the list (divide by `BYTES_PER_LENGTH_OFFSET`), as it gives us the total number of bytes in the offset data. - - The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) -- Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects. -- In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits. -- In the case of unions, the first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type. - -Note that deserialization requires hardening against invalid inputs. A non-exhaustive list: +Because serialization is an injective function (i.e. two distinct objects of the +same type will serialize to different values) any bytestring has at most one +object it could deserialize to. + +Deserialization can be implemented using a recursive algorithm. The +deserialization of basic objects is easy, and from there we can find a simple +recursive algorithm for all fixed-size objects. For variable-size objects we +have to do one of the following depending on what kind of object it is: + +- Vector/list of a variable-size object: The serialized data will start with + offsets of all the serialized objects (`BYTES_PER_LENGTH_OFFSET` bytes each). + - Using the first offset, we can compute the length of the list (divide by + `BYTES_PER_LENGTH_OFFSET`), as it gives us the total number of bytes in the + offset data. + - The size of each object in the vector/list can be inferred from the + difference of two offsets. 
To get the size of the last object, the total + number of bytes has to be known (it is not generally possible to deserialize + an SSZ object of unknown length) +- Containers follow the same principles as vectors, with the difference that + there may be fixed-size objects in a container as well. This means the + `fixed_parts` data will contain offsets as well as fixed-size objects. +- In the case of bitlists, the length in bits cannot be uniquely inferred from + the number of bytes in the object. Because of this, they have a bit at the end + that is always set. This bit has to be used to infer the size of the bitlist + in bits. +- In the case of unions, the first byte of the deserialization scope is + deserialized as type selector, the remainder of the scope is deserialized as + the selected type. + +Note that deserialization requires hardening against invalid inputs. A +non-exhaustive list: - Offsets: out of order, out of range, mismatching minimum element size. - Scope: Extra unused bytes, not aligned with element size. - More elements than a list limit allows. Part of enforcing consensus. - An out-of-bounds selected index in an `Union` -Efficient algorithms for computing this object can be found in [the implementations](#implementations). +Efficient algorithms for computing this object can be found in +[the implementations](#implementations). ## Merkleization We first define helper functions: -- `size_of(B)`, where `B` is a basic type: the length, in bytes, of the serialized form of the basic type. -- `chunk_count(type)`: calculate the amount of leafs for merkleization of the type. +- `size_of(B)`, where `B` is a basic type: the length, in bytes, of the + serialized form of the basic type. +- `chunk_count(type)`: calculate the amount of leaves for merkleization of the + type. - all basic types: `1` - - `Bitlist[N]` and `Bitvector[N]`: `(N + 255) // 256` (dividing by chunk size, rounding up) - - `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up) + - `Bitlist[N]` and `Bitvector[N]`: `(N + 255) // 256` (dividing by chunk size, + rounding up) + - `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: + `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up) - `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N` - containers: `len(fields)` - `pack(values)`: Given ordered objects of the same basic type: 1. Serialize `values` into bytes. - 2. If not aligned to a multiple of `BYTES_PER_CHUNK` bytes, right-pad with zeroes to the next multiple. + 2. If not aligned to a multiple of `BYTES_PER_CHUNK` bytes, right-pad with + zeroes to the next multiple. 3. Partition the bytes into `BYTES_PER_CHUNK`-byte chunks. 4. Return the chunks. -- `pack_bits(bits)`: Given the bits of bitlist or bitvector, get `bitfield_bytes` by packing them in bytes and aligning to the start. The length-delimiting bit for bitlists is excluded. Then return `pack(bitfield_bytes)`. -- `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16` -- `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root: - - The merkleization depends on the effective input, which must be padded/limited: - - if no limit: pad the `chunks` with zeroed chunks to `next_pow_of_two(len(chunks))` (virtually for memory efficiency). 
- - if `limit >= len(chunks)`, pad the `chunks` with zeroed chunks to `next_pow_of_two(limit)` (virtually for memory efficiency). - - if `limit < len(chunks)`: do not merkleize, input exceeds limit. Raise an error instead. +- `pack_bits(bits)`: Given the bits of bitlist or bitvector, get + `bitfield_bytes` by packing them in bytes and aligning to the start. The + length-delimiting bit for bitlists is excluded. Then return + `pack(bitfield_bytes)`. +- `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power + of 2, with 0 mapping to 1. Examples: + `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16` +- `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, + merkleize the chunks, and return the root: + - The merkleization depends on the effective input, which must be + padded/limited: + - if no limit: pad the `chunks` with zeroed chunks to + `next_pow_of_two(len(chunks))` (virtually for memory efficiency). + - if `limit >= len(chunks)`, pad the `chunks` with zeroed chunks to + `next_pow_of_two(limit)` (virtually for memory efficiency). + - if `limit < len(chunks)`: do not merkleize, input exceeds limit. Raise an + error instead. - Then, merkleize the chunks (empty input is padded to 1 zero chunk): - If `1` chunk: the root is the chunk itself. - If `> 1` chunks: merkleize as binary tree. -- `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`. -- `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` (`"uint256"` little-endian serialization) return `hash(root + selector)`. - -We now define Merkleization `hash_tree_root(value)` of an object `value` recursively: - -- `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects. -- `merkleize(pack_bits(value), limit=chunk_count(type))` if `value` is a bitvector. -- `mix_in_length(merkleize(pack(value), limit=chunk_count(type)), len(value))` if `value` is a list of basic objects. -- `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist. -- `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container. -- `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects. -- `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is of union type, and `value.value` is not `None` -- `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` is `None` +- `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` + little-endian serialization) return `hash(root + length)`. +- `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` + (`"uint256"` little-endian serialization) return `hash(root + selector)`. + +We now define Merkleization `hash_tree_root(value)` of an object `value` +recursively: + +- `merkleize(pack(value))` if `value` is a basic object or a vector of basic + objects. +- `merkleize(pack_bits(value), limit=chunk_count(type))` if `value` is a + bitvector. +- `mix_in_length(merkleize(pack(value), limit=chunk_count(type)), len(value))` + if `value` is a list of basic objects. +- `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` + if `value` is a bitlist. +- `merkleize([hash_tree_root(element) for element in value])` if `value` is a + vector of composite objects or a container. 
+- `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` + if `value` is a list of composite objects. +- `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is + of union type, and `value.value` is not `None` +- `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` + is `None` ## Summaries and expansions -Let `A` be an object derived from another object `B` by replacing some of the (possibly nested) values of `B` by their `hash_tree_root`. We say `A` is a "summary" of `B`, and that `B` is an "expansion" of `A`. Notice `hash_tree_root(A) == hash_tree_root(B)`. +Let `A` be an object derived from another object `B` by replacing some of the +(possibly nested) values of `B` by their `hash_tree_root`. We say `A` is a +"summary" of `B`, and that `B` is an "expansion" of `A`. Notice +`hash_tree_root(A) == hash_tree_root(B)`. -We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](../specs/phase0/beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](../specs/phase0/beacon-chain.md#beaconblockheader). Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects. +We similarly define "summary types" and "expansion types". For example, +[`BeaconBlock`](../specs/phase0/beacon-chain.md#beaconblock) is an expansion +type of +[`BeaconBlockHeader`](../specs/phase0/beacon-chain.md#beaconblockheader). Notice +that objects expand to at most one object of a given expansion type. For +example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects. ## Implementations -See https://github.com/ethereum/consensus-specs/issues/2138 for a list of current known implementations. +See https://github.com/ethereum/consensus-specs/issues/2138 for a list of +current known implementations. ## JSON mapping -The canonical JSON mapping assigns to each SSZ type a corresponding JSON encoding, enabling an SSZ schema to also define the JSON encoding. +The canonical JSON mapping assigns to each SSZ type a corresponding JSON +encoding, enabling an SSZ schema to also define the JSON encoding. -When decoding JSON data, all fields in the SSZ schema must be present with a value. Parsers may ignore additional JSON fields. +When decoding JSON data, all fields in the SSZ schema must be present with a +value. Parsers may ignore additional JSON fields. | SSZ | JSON | Example | | ---------------------------- | --------------- | ---------------------------------------- | @@ -288,8 +367,12 @@ Integers are encoded as strings to avoid loss of precision in 64-bit values. Aliases are encoded as their underlying type. -`hex-byte-string` is a `0x`-prefixed hex encoding of byte data, as it would appear in an SSZ stream. +`hex-byte-string` is a `0x`-prefixed hex encoding of byte data, as it would +appear in an SSZ stream. -`List` and `Vector` of `byte` (and aliases thereof) are encoded as `hex-byte-string`. `Bitlist` and `Bitvector` similarly map their SSZ-byte encodings to a `hex-byte-string`. +`List` and `Vector` of `byte` (and aliases thereof) are encoded as +`hex-byte-string`. `Bitlist` and `Bitvector` similarly map their SSZ-byte +encodings to a `hex-byte-string`. -`Union` is encoded as an object with a `selector` and `data` field, where the contents of `data` change according to the selector. 
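A minimal sketch of the JSON-mapping rules above (the helper names here are hypothetical, and the exact per-field encodings are governed by the table): byte data maps to a `0x`-prefixed hex string, and a `Union` maps to an object with `selector` and `data` fields.

```python
import json


def hex_byte_string(data: bytes) -> str:
    # A "0x"-prefixed hex encoding of byte data, as it would appear in an SSZ stream.
    return "0x" + data.hex()


def union_to_json_obj(selector: int, data) -> dict:
    # Hypothetical helper: the contents of "data" change according to the selector.
    # The selector is rendered as a string here, following the integer-as-string
    # convention noted above; treat that detail as an assumption of this sketch.
    return {"selector": str(selector), "data": data}


assert hex_byte_string(b"\x01\x02") == "0x0102"
print(json.dumps(union_to_json_obj(1, "5")))  # {"selector": "1", "data": "5"}
```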
+`Union` is encoded as an object with a `selector` and `data` field, where the +contents of `data` change according to the selector. diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index bc80560fad..ddc3bf73ba 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.5.0 +1.6.0-alpha.0 diff --git a/tests/core/pyspec/eth2spec/config/config_util.py b/tests/core/pyspec/eth2spec/config/config_util.py index f36e415460..92ab703f13 100644 --- a/tests/core/pyspec/eth2spec/config/config_util.py +++ b/tests/core/pyspec/eth2spec/config/config_util.py @@ -1,5 +1,6 @@ from pathlib import Path -from typing import Dict, Iterable, Union, BinaryIO, TextIO, Any +from typing import Any, BinaryIO, Dict, Iterable, TextIO, Union + from ruamel.yaml import YAML diff --git a/tests/core/pyspec/eth2spec/debug/decode.py b/tests/core/pyspec/eth2spec/debug/decode.py index 1a9d54a06d..05ac232b3f 100644 --- a/tests/core/pyspec/eth2spec/debug/decode.py +++ b/tests/core/pyspec/eth2spec/debug/decode.py @@ -1,14 +1,15 @@ from typing import Any + from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.utils.ssz.ssz_typing import ( - uint, - Container, - List, boolean, - Vector, - ByteVector, ByteList, + ByteVector, + Container, + List, + uint, Union, + Vector, View, ) diff --git a/tests/core/pyspec/eth2spec/debug/encode.py b/tests/core/pyspec/eth2spec/debug/encode.py index 8bacf4d9a6..ac2f25e0cf 100644 --- a/tests/core/pyspec/eth2spec/debug/encode.py +++ b/tests/core/pyspec/eth2spec/debug/encode.py @@ -1,13 +1,13 @@ from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize from eth2spec.utils.ssz.ssz_typing import ( - uint, - boolean, Bitlist, Bitvector, + boolean, Container, - Vector, List, + uint, Union, + Vector, ) diff --git a/tests/core/pyspec/eth2spec/debug/random_value.py b/tests/core/pyspec/eth2spec/debug/random_value.py index 9ea5909b92..2ef404ee42 100644 --- a/tests/core/pyspec/eth2spec/debug/random_value.py +++ b/tests/core/pyspec/eth2spec/debug/random_value.py @@ -1,21 +1,20 @@ -from random import Random from enum import Enum - +from random import Random from typing import Type from eth2spec.utils.ssz.ssz_typing import ( - View, BasicView, - uint, - Container, - List, - boolean, - Vector, - ByteVector, - ByteList, Bitlist, Bitvector, + boolean, + ByteList, + ByteVector, + Container, + List, + uint, Union, + Vector, + View, ) # in bytes diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/args.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/args.py new file mode 100644 index 0000000000..6211946dc7 --- /dev/null +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/args.py @@ -0,0 +1,73 @@ +import argparse +import os +import pathlib + + +def parse_arguments(): + parser = argparse.ArgumentParser( + prog="generator", + description=f"Generate YAML test suite files.", + ) + parser.add_argument( + "-o", + "--output-dir", + dest="output_dir", + required=True, + type=pathlib.Path, + help="Directory into which the generated YAML files will be dumped.", + ) + parser.add_argument( + "--runners", + dest="runners", + nargs="*", + type=str, + default=[], + required=False, + help="Specify runners to run with. Allows all if no runner names are specified.", + ) + parser.add_argument( + "--presets", + dest="presets", + nargs="*", + type=str, + default=[], + required=False, + help="Specify presets to run with. 
Allows all if no preset names are specified.", + ) + parser.add_argument( + "--forks", + dest="forks", + nargs="*", + type=str, + default=[], + required=False, + help="Specify forks to run with. Allows all if no fork names are specified.", + ) + parser.add_argument( + "--cases", + dest="cases", + nargs="*", + type=str, + default=[], + required=False, + help="Specify test cases to run with. Allows all if no test case names are specified.", + ) + parser.add_argument( + "--modcheck", + action="store_true", + default=False, + help="Check generator modules, do not run any tests.", + ) + parser.add_argument( + "--verbose", + action="store_true", + default=False, + help="Print more information to the console.", + ) + parser.add_argument( + "--threads", + type=int, + default=os.cpu_count(), + help="Generate tests with N threads. Defaults to core count.", + ) + return parser.parse_args() diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/dumper.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/dumper.py new file mode 100644 index 0000000000..b8209eb839 --- /dev/null +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/dumper.py @@ -0,0 +1,76 @@ +from eth_utils import encode_hex +from ruamel.yaml import YAML +from snappy import compress + +from eth2spec.test import context + +from .gen_typing import TestCase + + +def get_default_yaml(): + yaml = YAML(pure=True) + yaml.default_flow_style = None + + def _represent_none(self, _): + return self.represent_scalar("tag:yaml.org,2002:null", "null") + + def _represent_str(self, data): + if data.startswith("0x"): + # Without this, a zero-byte hex string is represented without quotes. + return self.represent_scalar("tag:yaml.org,2002:str", data, style="'") + return self.represent_str(data) + + yaml.representer.add_representer(type(None), _represent_none) + yaml.representer.add_representer(str, _represent_str) + + return yaml + + +def get_cfg_yaml(): + # Spec config is using a YAML subset + cfg_yaml = YAML(pure=True) + cfg_yaml.default_flow_style = False # Emit separate line for each key + + def cfg_represent_bytes(self, data): + return self.represent_int(encode_hex(data)) + + cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes) + + def cfg_represent_quoted_str(self, data): + return self.represent_scalar("tag:yaml.org,2002:str", data, style="'") + + cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str) + return cfg_yaml + + +class Dumper: + """Helper for dumping test case outputs (cfg, data, meta, ssz).""" + + def __init__(self, default_yaml: YAML = None, cfg_yaml: YAML = None): + self.default_yaml = default_yaml or get_default_yaml() + self.cfg_yaml = cfg_yaml or get_cfg_yaml() + + def dump_meta(self, test_case: TestCase, meta: dict) -> None: + if not meta: + return + self._dump_yaml(test_case, "meta", meta, self.default_yaml) + + def dump_cfg(self, test_case: TestCase, name: str, data: any) -> None: + self._dump_yaml(test_case, name, data, self.cfg_yaml) + + def dump_data(self, test_case: TestCase, name: str, data: any) -> None: + self._dump_yaml(test_case, name, data, self.default_yaml) + + def dump_ssz(self, test_case: TestCase, name: str, data: bytes) -> None: + """Compress and write SSZ data for test case.""" + path = test_case.dir / f"{name}.ssz_snappy" + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("wb") as f: + f.write(compress(data)) + + def _dump_yaml(self, test_case: TestCase, name: str, data: any, yaml_encoder: YAML) -> None: + """Helper to write YAML files for test case.""" 
+ path = test_case.dir / f"{name}.yaml" + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("w") as f: + yaml_encoder.dump(data, f) diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py index 6b739108eb..931671a59c 100644 --- a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_runner.py @@ -1,470 +1,197 @@ -from dataclasses import ( - dataclass, - field, -) -import os -import time +import multiprocessing import shutil -import argparse -from pathlib import Path -import sys -import json -from typing import Iterable, AnyStr, Any, Callable -import traceback -from collections import namedtuple - -from ruamel.yaml import ( - YAML, -) - -from filelock import FileLock -from snappy import compress -from pathos.multiprocessing import ProcessingPool as Pool +import threading +import time +import uuid +from typing import Any, Iterable -from eth_utils import encode_hex +from pathos.multiprocessing import ProcessingPool as Pool +from rich import box +from rich.console import Console +from rich.live import Live +from rich.table import Table +from rich.text import Text from eth2spec.test import context from eth2spec.test.exceptions import SkippedTest -from .gen_typing import TestProvider -from .settings import ( - GENERATOR_MODE, - MODE_MULTIPROCESSING, - MODE_SINGLE_PROCESS, - NUM_PROCESS, - TIME_THRESHOLD_TO_PRINT, -) - +from .args import parse_arguments +from .dumper import Dumper +from .gen_typing import TestCase +from .utils import install_sigint_handler, time_since # Flag that the runner does NOT run test via pytest context.is_pytest = False -@dataclass -class Diagnostics(object): - collected_test_count: int = 0 - generated_test_count: int = 0 - skipped_test_count: int = 0 - test_identifiers: list = field(default_factory=list) - +def get_shared_prefix(test_cases, min_segments=3): + assert test_cases, "no test cases provided" -TestCaseParams = namedtuple( - "TestCaseParams", - [ - "test_case", - "case_dir", - "log_file", - "file_mode", - ], -) + fields = [ + "preset_name", + "fork_name", + "runner_name", + "handler_name", + ] + prefix = [] + for i, field in enumerate(fields): + values = {getattr(tc, field) for tc in test_cases} + if len(values) == 1: + prefix.append(values.pop()) + elif i < min_segments: + prefix.append("*") + else: + break -def worker_function(item): - return generate_test_vector(*item) + return "::".join(prefix) -def get_default_yaml(): - yaml = YAML(pure=True) - yaml.default_flow_style = None +def execute_test(test_case: TestCase, dumper: Dumper): + """Execute a test and write the outputs to storage.""" + meta: dict[str, Any] = {} + outputs: list[tuple[str, str, Any]] = [] - def _represent_none(self, _): - return self.represent_scalar("tag:yaml.org,2002:null", "null") - - def _represent_str(self, data): - if data.startswith("0x"): - # Without this, a zero-byte hex string is represented without quotes. 
- return self.represent_scalar("tag:yaml.org,2002:str", data, style="'") - return self.represent_str(data) - - yaml.representer.add_representer(type(None), _represent_none) - yaml.representer.add_representer(str, _represent_str) - - return yaml - - -def get_cfg_yaml(): - # Spec config is using a YAML subset - cfg_yaml = YAML(pure=True) - cfg_yaml.default_flow_style = False # Emit separate line for each key - - def cfg_represent_bytes(self, data): - return self.represent_int(encode_hex(data)) - - cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes) - - def cfg_represent_quoted_str(self, data): - return self.represent_scalar("tag:yaml.org,2002:str", data, style="'") - - cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str) - return cfg_yaml - - -def validate_output_dir(path_str): - path = Path(path_str) - - if not path.exists(): - raise argparse.ArgumentTypeError("Output directory must exist") - - if not path.is_dir(): - raise argparse.ArgumentTypeError("Output path must lead to a directory") - - return path - - -def get_test_case_dir(test_case, output_dir): - return ( - Path(output_dir) - / Path(test_case.preset_name) - / Path(test_case.fork_name) - / Path(test_case.runner_name) - / Path(test_case.handler_name) - / Path(test_case.suite_name) - / Path(test_case.case_name) - ) - - -def get_test_identifier(test_case): - return "::".join( - [ - test_case.preset_name, - test_case.fork_name, - test_case.runner_name, - test_case.handler_name, - test_case.suite_name, - test_case.case_name, - ] - ) - - -def get_incomplete_tag_file(case_dir): - return case_dir / "INCOMPLETE" - - -def run_generator(generator_name, test_providers: Iterable[TestProvider]): - """ - Implementation for a general test generator. - :param generator_name: The name of the generator. (lowercase snake_case) - :param test_providers: A list of test provider, - each of these returns a callable that returns an iterable of test cases. - The call to get the iterable may set global configuration, - and the iterable should not be resumed after a pause with a change of that configuration. - :return: - """ - - parser = argparse.ArgumentParser( - prog="gen-" + generator_name, - description=f"Generate YAML test suite files for {generator_name}.", - ) - parser.add_argument( - "-o", - "--output-dir", - dest="output_dir", - required=True, - type=validate_output_dir, - help="Directory into which the generated YAML files will be dumped.", - ) - parser.add_argument( - "--preset-list", - dest="preset_list", - nargs="*", - type=str, - required=False, - help="Specify presets to run with. Allows all if no preset names are specified.", - ) - parser.add_argument( - "--fork-list", - dest="fork_list", - nargs="*", - type=str, - required=False, - help="Specify forks to run with. Allows all if no fork names are specified.", - ) - parser.add_argument( - "--modcheck", - action="store_true", - default=False, - help="Check generator modules, do not run any tests.", - ) - parser.add_argument( - "--case-list", - dest="case_list", - nargs="*", - type=str, - required=False, - help="Specify test cases to run with. 
Allows all if no test case names are specified.", - ) - parser.add_argument( - "--verbose", - action="store_true", - default=False, - help="Print more information to the console.", - ) - args = parser.parse_args() + try: + for name, kind, data in test_case.case_fn(): + if kind == "meta": + meta[name] = data + else: + method = getattr(dumper, f"dump_{kind}", None) + if method is None: + raise ValueError(f"Unknown kind {kind!r}") + outputs.append((name, kind, data)) + except SkippedTest: + # Bail without writing any files + raise + + for name, kind, data in outputs: + method = getattr(dumper, f"dump_{kind}") + method(test_case, name, data) + + if meta: + dumper.dump_meta(test_case, meta) + + +def run_generator(input_test_cases: Iterable[TestCase]): + start_time = time.time() + args = parse_arguments() # Bail here if we are checking modules. if args.modcheck: return - output_dir = args.output_dir - file_mode = "w" - log_file = Path(output_dir) / "testgen_error_log.txt" - def debug_print(msg): + """Only print if verbose is enabled.""" if args.verbose: print(msg) - debug_print(f"Generating tests into {output_dir}") - debug_print(f"Error log file: {log_file}") - - # preset_list arg - presets = args.preset_list - if presets is None: - presets = [] - - if len(presets) != 0: - debug_print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}") - - # fork_list arg - forks = args.fork_list - if forks is None: - forks = [] - - if len(forks) != 0: - debug_print(f"Filtering test-generator runs to only include forks: {', '.join(forks)}") - - # case_list arg - cases = args.case_list - if cases is None: - cases = [] - - if len(cases) != 0: - debug_print(f"Filtering test-generator runs to only include test cases: {', '.join(cases)}") - - diagnostics_obj = Diagnostics() - provider_start = time.time() - - if GENERATOR_MODE == MODE_MULTIPROCESSING: - all_test_case_params = [] - - for tprov in test_providers: - # Runs anything that we don't want to repeat for every test case. - tprov.prepare() - - for test_case in tprov.make_cases(): - # If preset list is assigned, filter by presets. - if len(presets) != 0 and test_case.preset_name not in presets: - debug_print(f"Skipped: {get_test_identifier(test_case)}") - continue - - # If fork list is assigned, filter by forks. - if len(forks) != 0 and test_case.fork_name not in forks: - debug_print(f"Skipped: {get_test_identifier(test_case)}") - continue - - # If cases list is assigned, filter by cases. 
- if len(cases) != 0 and not any(s in test_case.case_name for s in cases): - debug_print(f"Skipped: {get_test_identifier(test_case)}") - continue - - print(f"Collected: {get_test_identifier(test_case)}") - diagnostics_obj.collected_test_count += 1 - - case_dir = get_test_case_dir(test_case, output_dir) - if case_dir.exists(): - # Clear the existing case_dir folder - shutil.rmtree(case_dir) - - if GENERATOR_MODE == MODE_SINGLE_PROCESS: - result, info = generate_test_vector(test_case, case_dir, log_file, file_mode) - if isinstance(result, int): - # Skipped or error - debug_print(info) - elif isinstance(result, str): - # Success - if info > TIME_THRESHOLD_TO_PRINT: - debug_print(f"^^^ Slow test, took {info} seconds ^^^") - write_result_into_diagnostics_obj(result, diagnostics_obj) - elif GENERATOR_MODE == MODE_MULTIPROCESSING: - item = TestCaseParams(test_case, case_dir, log_file, file_mode) - all_test_case_params.append(item) - - if GENERATOR_MODE == MODE_MULTIPROCESSING: - with Pool(processes=NUM_PROCESS) as pool: - results = pool.map(worker_function, iter(all_test_case_params)) - - for result in results: - write_result_into_diagnostics_obj(result[0], diagnostics_obj) - - provider_end = time.time() - span = round(provider_end - provider_start, 2) - - summary_message = f"Completed generation of {generator_name} with {diagnostics_obj.generated_test_count} tests" - summary_message += f" ({diagnostics_obj.skipped_test_count} skipped tests)" - if span > TIME_THRESHOLD_TO_PRINT: - summary_message += f" in {span} seconds" - debug_print(summary_message) - - diagnostics_output = { - "collected_test_count": diagnostics_obj.collected_test_count, - "generated_test_count": diagnostics_obj.generated_test_count, - "skipped_test_count": diagnostics_obj.skipped_test_count, - "test_identifiers": diagnostics_obj.test_identifiers, - "durations": [f"{span} seconds"], - } - diagnostics_path = Path(os.path.join(output_dir, "diagnostics_obj.json")) - diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics_obj.json.lock")) - with diagnostics_lock: - diagnostics_path.touch(exist_ok=True) - if os.path.getsize(diagnostics_path) == 0: - with open(diagnostics_path, "w+") as f: - json.dump(diagnostics_output, f) - else: - with open(diagnostics_path, "r+") as f: - existing_diagnostics = json.load(f) - for k, v in diagnostics_output.items(): - existing_diagnostics[k] += v - with open(diagnostics_path, "w+") as f: - json.dump(existing_diagnostics, f) - debug_print(f"Wrote diagnostics_obj to {diagnostics_path}") - - -def generate_test_vector(test_case, case_dir, log_file, file_mode): - cfg_yaml = get_cfg_yaml() - yaml = get_default_yaml() - - written_part = False - - test_start = time.time() + console = Console() + dumper = Dumper() + + # Gracefully handle Ctrl+C + install_sigint_handler(console) + + test_cases = [] + for test_case in input_test_cases: + # Check if the test case should be filtered out + if len(args.runners) != 0 and test_case.runner_name not in args.runners: + debug_print(f"Filtered: {test_case.get_identifier()}") + continue + if len(args.presets) != 0 and test_case.preset_name not in args.presets: + debug_print(f"Filtered: {test_case.get_identifier()}") + continue + if len(args.forks) != 0 and test_case.fork_name not in args.forks: + debug_print(f"Filtered: {test_case.get_identifier()}") + continue + if len(args.cases) != 0 and not any(s in test_case.case_name for s in args.cases): + debug_print(f"Filtered: {test_case.get_identifier()}") + continue + + # Set the output dir and add this to out list 
+ test_case.set_output_dir(args.output_dir) + if test_case.dir.exists(): + shutil.rmtree(test_case.dir) + test_cases.append(test_case) + + if len(test_cases) == 0: + return - # Add `INCOMPLETE` tag file to indicate that the test generation has not completed. - incomplete_tag_file = get_incomplete_tag_file(case_dir) - case_dir.mkdir(parents=True, exist_ok=True) - with incomplete_tag_file.open("w") as f: - f.write("\n") + debug_print(f"Generating tests into {args.output_dir}") + tests_prefix = get_shared_prefix(test_cases) - result = None - try: - meta = dict() + def worker_function(data): + """Execute a test case and update active tests.""" + test_case, active_tests = data + key = (uuid.uuid4(), test_case.get_identifier()) + active_tests[key] = time.time() try: - written_part, meta = execute_test( - test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml - ) - except SkippedTest as e: - result = 0 # 0 means skipped - shutil.rmtree(case_dir) - return result, e - - # Once all meta data is collected (if any), write it to a meta data file. - if len(meta) != 0: - written_part = True - output_part( - case_dir, log_file, "data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml) - ) - - except Exception as e: - result = -1 # -1 means error - error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}" - # Write to error log file - with log_file.open("a+") as f: - f.write(error_message) - traceback.print_exc(file=f) - f.write("\n") - print(error_message) - traceback.print_exc() - else: - # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder. - if not written_part: - print(f"[Error] test case {case_dir} did not produce any written_part") - shutil.rmtree(case_dir) - result = -1 - else: - result = get_test_identifier(test_case) - # Only remove `INCOMPLETE` tag file - os.remove(incomplete_tag_file) - test_end = time.time() - span = round(test_end - test_start, 2) - return result, span - - -def write_result_into_diagnostics_obj(result, diagnostics_obj): - if result == -1: # error - pass - elif result == 0: - diagnostics_obj.skipped_test_count += 1 - elif result is not None: - diagnostics_obj.generated_test_count += 1 - diagnostics_obj.test_identifiers.append(result) - else: - raise Exception(f"Unexpected result: {result}") - - -def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML): - def dump(case_path: Path): - out_path = case_path / Path(name + ".yaml") - with out_path.open(file_mode) as f: - yaml_encoder.dump(data, f) - f.close() - - return dump - - -def output_part( - case_dir, - log_file, - out_kind: str, - name: str, - fn: Callable[ - [ - Path, - ], - None, - ], -): - # make sure the test case directory is created before any test part is written. 
- case_dir.mkdir(parents=True, exist_ok=True) - try: - fn(case_dir) - except (IOError, ValueError) as e: - error_message = ( - f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}' - ) - # Write to error log file - with log_file.open("a+") as f: - f.write(error_message) - traceback.print_exc(file=f) - f.write("\n") - print(error_message) - sys.exit(error_message) - - -def execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml): - result = test_case.case_fn() - written_part = False - for name, out_kind, data in result: - written_part = True - if out_kind == "meta": - meta[name] = data - elif out_kind == "cfg": - output_part( - case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml) + execute_test(test_case, dumper) + debug_print(f"Generated: {test_case.get_identifier()}") + return "generated" + except SkippedTest: + debug_print(f"Skipped: {test_case.get_identifier()}") + return "skipped" + finally: + del active_tests[key] + + def display_active_tests(active_tests, total_tasks, completed, skipped, width): + """Display a table of active tests.""" + with Live(console=console) as live: + while True: + remaining = total_tasks - completed.value + if remaining == 0: + # Show a final status when the queue is empty + # This is better than showing an empty table + text = Text.from_markup(f"Completed {tests_prefix} in {time_since(start_time)}") + live.update(text) + break + + info = ", ".join( + [ + f"gen={tests_prefix}", + f"threads={args.threads}", + f"total={total_tasks}", + f"skipped={skipped.value}", + f"remaining={remaining}", + f"time={time_since(start_time)}", + ] + ) + column_header = f"Test ({info})" + width = max(width, len(column_header)) + + table = Table(box=box.ROUNDED) + table.add_column(column_header, style="cyan", no_wrap=True, width=width) + table.add_column("Elapsed Time", justify="right", style="magenta") + for k, start in sorted(active_tests.items(), key=lambda x: x[1]): + table.add_row(k[1], f"{time_since(start)}") + live.update(table) + time.sleep(0.25) + + # Generate all of the test cases + with multiprocessing.Manager() as manager: + active_tests = manager.dict() + completed = manager.Value("i", 0) + skipped = manager.Value("i", 0) + width = max([len(t.get_identifier()) for t in test_cases]) + + if not args.verbose: + display_thread = threading.Thread( + target=display_active_tests, + args=(active_tests, len(test_cases), completed, skipped, width), + daemon=True, ) - elif out_kind == "data": - output_part( - case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, yaml) - ) - elif out_kind == "ssz": - output_part(case_dir, log_file, out_kind, name, dump_ssz_fn(data, name, file_mode)) - else: - raise ValueError("Unknown out_kind %s" % out_kind) - - return written_part, meta + display_thread.start() + # Map each test case to a thread worker + inputs = [(t, active_tests) for t in test_cases] + for result in Pool(processes=args.threads).uimap(worker_function, inputs): + if result == "skipped": + skipped.value += 1 + completed.value += 1 -def dump_ssz_fn(data: AnyStr, name: str, file_mode: str): - def dump(case_path: Path): - out_path = case_path / Path(name + ".ssz_snappy") - compressed = compress(data) - with out_path.open(file_mode + "b") as f: # write in raw binary mode - f.write(compressed) + if not args.verbose: + display_thread.join() - return dump + elapsed = round(time.time() - start_time, 2) + debug_print(f"Completed generation of {tests_prefix} in {elapsed} seconds") diff 
--git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py index 2bb66d06c7..26bfc8bc1c 100644 --- a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/gen_typing.py @@ -1,11 +1,13 @@ +from dataclasses import dataclass +from pathlib import Path from typing import ( Any, Callable, Iterable, NewType, + Optional, Tuple, ) -from dataclasses import dataclass # Elements: name, out_kind, data # @@ -26,11 +28,29 @@ class TestCase(object): suite_name: str case_name: str case_fn: Callable[[], Iterable[TestCasePart]] + dir: Optional[Path] = None + def get_identifier(self): + """Return the human readable identifier.""" + return "::".join( + [ + self.preset_name, + self.fork_name, + self.runner_name, + self.handler_name, + self.suite_name, + self.case_name, + ] + ) -@dataclass -class TestProvider(object): - # Prepares the context for the provider as a whole, as opposed to per-test-case changes. - prepare: Callable[[], None] - # Retrieves an iterable of cases, called after prepare() - make_cases: Callable[[], Iterable[TestCase]] + def set_output_dir(self, output_dir: str) -> None: + """Compute and store the output directory on the instance.""" + self.dir = ( + Path(output_dir) + / self.preset_name + / self.fork_name + / self.runner_name + / self.handler_name + / self.suite_name + / self.case_name + ) diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/settings.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/settings.py deleted file mode 100644 index 816dec91f8..0000000000 --- a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/settings.py +++ /dev/null @@ -1,13 +0,0 @@ -import multiprocessing - - -# Generator mode setting -MODE_SINGLE_PROCESS = "MODE_SINGLE_PROCESS" -MODE_MULTIPROCESSING = "MODE_MULTIPROCESSING" -# Test generator mode -GENERATOR_MODE = MODE_SINGLE_PROCESS -# Number of subprocesses when using MODE_MULTIPROCESSING -NUM_PROCESS = multiprocessing.cpu_count() // 2 - 1 - -# Diagnostics -TIME_THRESHOLD_TO_PRINT = 1.0 # seconds diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_base/utils.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/utils.py new file mode 100644 index 0000000000..8217adbe82 --- /dev/null +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_base/utils.py @@ -0,0 +1,35 @@ +import functools +import os +import signal +import time + +from rich.console import Console + + +def install_sigint_handler(console: Console) -> None: + """On Ctrl-C, show the cursor and exit immediately.""" + + def _handle_sigint(signum, frame): + console.show_cursor() + os._exit(0) + + signal.signal(signal.SIGINT, _handle_sigint) + + +@functools.lru_cache(maxsize=None) +def format_seconds(seconds: int) -> str: + """Convert seconds to a more readable time.""" + h, rem = divmod(seconds, 3600) + m, s = divmod(rem, 60) + parts = [] + if h: + parts.append(f"{h}h") + if m: + parts.append(f"{m}m") + parts.append(f"{s}s") + return " ".join(parts) + + +def time_since(start_time: int) -> str: + """Get the duration since some start time.""" + return format_seconds(int(time.time() - start_time)) diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py index ab28e149c6..b91403e301 100644 --- a/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py @@ -1,14 +1,11 @@ from importlib import import_module from inspect 
import getmembers, isfunction from pkgutil import walk_packages -from typing import Any, Callable, Dict, Iterable, Optional, List, Union +from typing import Any, Iterable, Optional -from eth2spec.utils import bls +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase from eth2spec.test.helpers.constants import ALL_PRESETS, TESTGEN_FORKS -from eth2spec.test.helpers.typing import SpecForkName, PresetBaseName - -from eth2spec.gen_helpers.gen_base import gen_runner -from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider +from eth2spec.test.helpers.typing import PresetBaseName, SpecForkName def generate_case_fn(tfn, generator_mode, phase, preset, bls_active): @@ -66,158 +63,45 @@ def generate_from_tests( ) -def get_provider( - create_provider_fn: Callable[[SpecForkName, PresetBaseName, str, str], TestProvider], - fork_name: SpecForkName, - preset_name: PresetBaseName, - all_mods: Dict[str, Dict[str, Union[List[str], str]]], -) -> Iterable[TestProvider]: - for key, mod_name in all_mods[fork_name].items(): - if not isinstance(mod_name, List): - mod_name = [mod_name] - yield create_provider_fn( - fork_name=fork_name, - preset_name=preset_name, - handler_name=key, - tests_src_mod_name=mod_name, - ) - - -def get_create_provider_fn( - runner_name: str, -) -> Callable[[SpecForkName, str, str, PresetBaseName], TestProvider]: - def prepare_fn() -> None: - bls.use_fastest() - return - - def create_provider( - fork_name: SpecForkName, - preset_name: PresetBaseName, - handler_name: str, - tests_src_mod_name: List[str], - ) -> TestProvider: - def cases_fn() -> Iterable[TestCase]: - for mod_name in tests_src_mod_name: - tests_src = import_module(mod_name) - yield from generate_from_tests( - runner_name=runner_name, - handler_name=handler_name, - src=tests_src, - fork_name=fork_name, - preset_name=preset_name, - ) - - return TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - return create_provider - - -def run_state_test_generators( - runner_name: str, - all_mods: Dict[str, Dict[str, str]], - presets: Iterable[PresetBaseName] = ALL_PRESETS, - forks: Iterable[SpecForkName] = TESTGEN_FORKS, -) -> None: - """ - Generate all available state tests of `TESTGEN_FORKS` forks of `ALL_PRESETS` presets of the given runner. - """ - for preset_name in presets: - for fork_name in forks: - if fork_name in all_mods: - gen_runner.run_generator( - runner_name, - get_provider( - create_provider_fn=get_create_provider_fn(runner_name), - fork_name=fork_name, - preset_name=preset_name, - all_mods=all_mods, - ), - ) - - -def combine_mods(dict_1, dict_2): - """ - Return the merged dicts, where the result value would be a list of the values from two dicts. - """ - # The duplicate dict_1 items would be ignored here. - dict_3 = {**dict_1, **dict_2} - - intersection = dict_1.keys() & dict_2.keys() - for key in intersection: - # To list - if not isinstance(dict_3[key], List): - dict_3[key] = [dict_3[key]] - # Append dict_1 value to list - if isinstance(dict_1[key], List): - dict_3[key] += dict_1[key] - else: - dict_3[key].append(dict_1[key]) - - return dict_3 - - -def check_mods(all_mods, pkg): +def get_expected_modules(package, absolute=False): """ - Raise an exception if there is a missing/unexpected module in all_mods. + Return all modules (which are not packages) inside the given package. """ + modules = [] + eth2spec = import_module("eth2spec") + prefix = eth2spec.__name__ + "." + for _, modname, ispkg in walk_packages(eth2spec.__path__, prefix): + s = package if absolute else f".{package}." 
+ # Skip modules in the unittests package. + # These are not associated with generators. + if ".unittests." in modname: + continue + if s in modname and not ispkg: + modules.append(modname) + return modules - def get_expected_modules(package, absolute=False): - """ - Return all modules (which are not packages) inside the given package. - """ - modules = [] - eth2spec = import_module("eth2spec") - prefix = eth2spec.__name__ + "." - for _, modname, ispkg in walk_packages(eth2spec.__path__, prefix): - s = package if absolute else f".{package}." - if s in modname and not ispkg: - modules.append(modname) - return modules - - mods = [] - for fork in all_mods: - for mod in all_mods[fork].values(): - # If this key has a single value, normalize to list. - if isinstance(mod, str): - mod = [mod] - # For each submodule, check if it is package. - # This is a "trick" we do to reuse a test format. - for sub in mod: - is_package = ".test_" not in sub - if is_package: - mods.extend(get_expected_modules(sub, absolute=True)) - else: - mods.append(sub) +def default_handler_name_fn(mod): + return mod.split(".")[-1].replace("test_", "") - problems = [] - expected_mods = get_expected_modules(pkg) - if mods != expected_mods: - for e in expected_mods: - # Skip forks which are not in all_mods. - # The fork name is the 3rd item in the path. - fork = e.split(".")[2] - if fork not in all_mods: - continue - # Skip modules in the unittests package. - # These are not associated with generators. - if ".unittests." in e: - continue - # The expected module is not in our list of modules. - # Add it to our list of problems. - if e not in mods: - problems.append("missing: " + e) - for t in mods: - # Skip helper modules. - # These do not define test functions. - if t.startswith("eth2spec.test.helpers"): - continue - # There is a module not defined in eth2spec. - # Add it to our list of problems. 
- if t not in expected_mods: - print("unexpected:", t) - problems.append("unexpected: " + t) - - if problems: - raise Exception("[ERROR] module problems:\n " + "\n ".join(problems)) +def get_test_cases_for( + runner_name: str, + pkg: str = None, + handler_name_fn=default_handler_name_fn, +) -> Iterable[TestCase]: + test_cases = [] + for preset in ALL_PRESETS: + for fork in TESTGEN_FORKS: + for mod in get_expected_modules(pkg or runner_name): + tests_src = import_module(mod) + test_cases.extend( + generate_from_tests( + runner_name=runner_name, + handler_name=handler_name_fn(mod), + src=tests_src, + fork_name=fork, + preset_name=preset, + ) + ) + return test_cases diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py index c585ca3d0d..3ae4061a36 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate.py @@ -1,36 +1,37 @@ import random + +from eth2spec.test.context import ( + always_bls, + default_activation_threshold, + default_balances_electra, + single_phase, + spec_state_test, + spec_test, + with_altair_and_later, + with_custom_state, + with_presets, +) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, - transition_to, - next_epoch_via_block, -) from eth2spec.test.helpers.constants import ( MAINNET, MINIMAL, ) +from eth2spec.test.helpers.state import ( + next_epoch_via_block, + state_transition_and_sign_block, + transition_to, +) from eth2spec.test.helpers.sync_committee import ( compute_aggregate_sync_committee_signature, compute_committee_indices, - run_sync_committee_processing, run_successful_sync_committee_test, + run_sync_committee_processing, ) from eth2spec.test.helpers.voluntary_exits import ( get_unslashed_exited_validators, ) -from eth2spec.test.context import ( - with_altair_and_later, - with_presets, - spec_state_test, - always_bls, - single_phase, - with_custom_state, - spec_test, - default_balances_electra, - default_activation_threshold, -) @with_altair_and_later diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py index 538b19d97d..f83f7eb3e5 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/sync_aggregate/test_process_sync_aggregate_random.py @@ -1,4 +1,17 @@ import random + +from eth2spec.test.context import ( + default_activation_threshold, + default_balances_electra, + misc_balances, + misc_balances_electra, + single_phase, + spec_state_test, + spec_test, + with_altair_and_later, + with_custom_state, + with_presets, +) from eth2spec.test.helpers.constants import ( MAINNET, MINIMAL, @@ -16,18 +29,6 @@ from eth2spec.test.helpers.voluntary_exits import ( get_unslashed_exited_validators, ) -from eth2spec.test.context import ( - with_altair_and_later, - spec_state_test, - default_activation_threshold, - misc_balances, - single_phase, - with_custom_state, - with_presets, - spec_test, - default_balances_electra, - misc_balances_electra, -) 
def _test_harness_for_randomized_test_case( diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_deposit.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_deposit.py index 25c2767269..81881c08dc 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_deposit.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_deposit.py @@ -1,14 +1,12 @@ from eth2spec.test.context import ( - spec_state_test, always_bls, - with_phases, + spec_state_test, with_altair_and_later, + with_phases, ) from eth2spec.test.helpers.constants import ( ALTAIR, ) - - from eth2spec.test.helpers.deposits import ( run_deposit_processing_with_specific_fork_version, ) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index bee7ce88c8..bfdff20a66 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -1,24 +1,24 @@ from random import Random from eth2spec.test.context import spec_state_test, with_altair_and_later +from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.helpers.inactivity_scores import ( randomize_inactivity_scores, zero_inactivity_scores, ) -from eth2spec.test.helpers.state import ( - next_epoch, - next_epoch_via_block, - set_full_participation, - set_empty_participation, -) -from eth2spec.test.helpers.voluntary_exits import exit_validators, get_exited_validators -from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.helpers.random import ( randomize_attestation_participation, randomize_previous_epoch_participation, randomize_state, ) from eth2spec.test.helpers.rewards import leaking +from eth2spec.test.helpers.state import ( + next_epoch, + next_epoch_via_block, + set_empty_participation, + set_full_participation, +) +from eth2spec.test.helpers.voluntary_exits import exit_validators, get_exited_validators def run_process_inactivity_updates(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py index 206fe9d7e4..049fda229b 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py @@ -1,16 +1,16 @@ from random import Random -from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( + single_phase, + spec_state_test, + spec_test, with_altair_and_later, with_custom_state, - spec_test, - spec_state_test, with_presets, - single_phase, ) -from eth2spec.test.helpers.state import next_epoch_via_block +from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with +from eth2spec.test.helpers.state import next_epoch_via_block def get_full_flags(spec): diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py index 8d808142e3..0a540b5d41 100644 --- 
a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py @@ -1,19 +1,18 @@ from eth2spec.test.context import ( always_bls, + misc_balances, + single_phase, spec_state_test, spec_test, with_altair_and_later, - with_presets, with_custom_state, - single_phase, - misc_balances, + with_presets, ) from eth2spec.test.helpers.constants import MINIMAL -from eth2spec.test.helpers.state import transition_to from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with, ) - +from eth2spec.test.helpers.state import transition_to # # Note: diff --git a/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_basic.py b/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_basic.py index 7e940ffb15..f02d7e3f5a 100644 --- a/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_basic.py @@ -1,27 +1,27 @@ from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.altair.fork import ( + ALTAIR_FORK_TEST_META_TAGS, + run_fork_test, +) from eth2spec.test.helpers.constants import ( - PHASE0, ALTAIR, MINIMAL, + PHASE0, ) from eth2spec.test.helpers.state import ( next_epoch, next_epoch_via_block, ) -from eth2spec.test.helpers.altair.fork import ( - ALTAIR_FORK_TEST_META_TAGS, - run_fork_test, -) +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[PHASE0], other_phases=[ALTAIR]) diff --git a/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_random.py b/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_random.py index a4b0de7ad1..32ba880f85 100644 --- a/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/altair/fork/test_altair_fork_random.py @@ -1,29 +1,29 @@ from random import Random from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, -) -from eth2spec.test.utils import with_meta_tags -from eth2spec.test.helpers.constants import ( - PHASE0, - ALTAIR, - MINIMAL, ) from eth2spec.test.helpers.altair.fork import ( ALTAIR_FORK_TEST_META_TAGS, run_fork_test, ) +from eth2spec.test.helpers.constants import ( + ALTAIR, + MINIMAL, + PHASE0, +) from eth2spec.test.helpers.random import ( - randomize_state, randomize_attestation_participation, + randomize_state, ) +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[PHASE0], other_phases=[ALTAIR]) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index e407fcfbdc..14c25bbc51 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -1,13 +1,14 @@ from eth2spec.test.context import ( spec_state_test_with_matching_config, - with_presets, with_light_client, + with_presets, ) from eth2spec.test.helpers.constants import ( MINIMAL, ) from 
eth2spec.test.helpers.light_client_data_collection import ( add_new_block, + BlockID, finish_lc_data_collection_test, get_lc_bootstrap_block_id, get_lc_update_attested_block_id, @@ -17,7 +18,6 @@ get_light_client_update_for_period, select_new_head, setup_lc_data_collection_test, - BlockID, ) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 87217a3e0f..185c5fc5ee 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -19,8 +19,8 @@ MINIMAL, ) from eth2spec.test.helpers.light_client import ( - get_sync_aggregate, compute_start_slot_at_next_sync_committee_period, + get_sync_aggregate, ) from eth2spec.test.helpers.light_client_sync import ( emit_force_update, diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py index cfbe95f4e6..e8075dc9b3 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_update_ranking.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( spec_state_test, - with_presets, with_light_client, + with_presets, ) from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, diff --git a/tests/core/pyspec/eth2spec/test/altair/random/test_random.py b/tests/core/pyspec/eth2spec/test/altair/random/test_random.py index 995fc0a74d..9523447565 100644 --- a/tests/core/pyspec/eth2spec/test/altair/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/altair/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. 
""" -from eth2spec.test.helpers.constants import ALTAIR from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import ALTAIR from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py b/tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py index 5d1b7ffbf7..c2c1699677 100644 --- a/tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py +++ b/tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py @@ -1,17 +1,17 @@ from random import Random +import eth2spec.test.helpers.rewards as rewards_helpers from eth2spec.test.context import ( - with_altair_and_later, - spec_test, - spec_state_test, - with_custom_state, - single_phase, low_balances, misc_balances, + single_phase, + spec_state_test, + spec_test, + with_altair_and_later, + with_custom_state, ) from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores from eth2spec.test.helpers.rewards import leaking -import eth2spec.test.helpers.rewards as rewards_helpers @with_altair_and_later diff --git a/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py index caa30ae0b1..7be8ca714e 100644 --- a/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py @@ -1,23 +1,23 @@ from random import Random -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, - next_epoch, - set_full_participation_previous_epoch, +from eth2spec.test.context import ( + spec_state_test, + with_altair_and_later, ) from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, build_empty_block, + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores +from eth2spec.test.helpers.rewards import leaking +from eth2spec.test.helpers.state import ( + next_epoch, + set_full_participation_previous_epoch, + state_transition_and_sign_block, ) from eth2spec.test.helpers.sync_committee import ( compute_aggregate_sync_committee_signature, ) -from eth2spec.test.context import ( - with_altair_and_later, - spec_state_test, -) -from eth2spec.test.helpers.rewards import leaking -from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores def run_sync_committee_sanity_test(spec, state, fraction_full=1.0, rng=Random(454545)): diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_activations_and_exits.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_activations_and_exits.py index 09ba5828e8..e5c649660a 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_activations_and_exits.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_activations_and_exits.py @@ -1,9 +1,10 @@ import random + from eth2spec.test.context import ( - ForkMeta, ALTAIR, - with_presets, + ForkMeta, with_fork_metas, + with_presets, ) from eth2spec.test.helpers.constants import ( ALL_PRE_POST_FORKS, @@ -11,8 +12,8 @@ ) from eth2spec.test.helpers.fork_transition import ( do_fork, - transition_until_fork, 
transition_to_next_epoch_and_append_blocks, + transition_until_fork, ) from eth2spec.test.helpers.random import ( exit_random_validators, @@ -20,7 +21,6 @@ set_some_new_deposits, ) - # # Exit # diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_leaking.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_leaking.py index 8463e4fd52..f90ce0d103 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_leaking.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_leaking.py @@ -7,8 +7,8 @@ ) from eth2spec.test.helpers.fork_transition import ( do_fork, - transition_until_fork, transition_to_next_epoch_and_append_blocks, + transition_until_fork, ) diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_operations.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_operations.py index 714d06f200..b630a5d32e 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_operations.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_operations.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( - ForkMeta, always_bls, + ForkMeta, with_fork_metas, with_presets, ) @@ -13,7 +13,6 @@ run_transition_with_operation, ) - # # PROPOSER_SLASHING # diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_slashing.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_slashing.py index b35b974b60..697573e699 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_slashing.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_slashing.py @@ -1,4 +1,5 @@ import random + from eth2spec.test.context import ( ForkMeta, with_fork_metas, diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py index 77654db31a..4c5b17e9a7 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py @@ -1,18 +1,13 @@ import random + from eth2spec.test.context import ( ForkMeta, with_fork_metas, ) -from eth2spec.test.helpers.random import ( - randomize_state, -) +from eth2spec.test.helpers.attestations import next_slots_with_attestations from eth2spec.test.helpers.constants import ( ALL_PRE_POST_FORKS, ) -from eth2spec.test.helpers.state import ( - next_epoch_via_signed_block, -) -from eth2spec.test.helpers.attestations import next_slots_with_attestations from eth2spec.test.helpers.fork_transition import ( do_fork, no_blocks, @@ -22,6 +17,12 @@ transition_to_next_epoch_and_append_blocks, transition_until_fork, ) +from eth2spec.test.helpers.random import ( + randomize_state, +) +from eth2spec.test.helpers.state import ( + next_epoch_via_signed_block, +) @with_fork_metas( diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py b/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py index 652d6a2d9a..cef9f4177d 100644 --- a/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/light_client/test_sync_protocol.py @@ -2,8 +2,8 @@ from eth2spec.test.context import ( spec_state_test_with_matching_config, - with_presets, with_light_client, + with_presets, ) from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py 
b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py index 212250fc41..7c2e999cf5 100644 --- a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_override.py @@ -9,10 +9,10 @@ with_state, ) from eth2spec.test.helpers.constants import ( - PHASE0, + ALL_PHASES, ALTAIR, BELLATRIX, - ALL_PHASES, + PHASE0, ) from eth2spec.test.helpers.forks import is_post_fork diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py index 52be98d095..4bd095464c 100644 --- a/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py @@ -1,24 +1,25 @@ import random from collections import defaultdict -from eth2spec.utils.ssz.ssz_typing import Bitvector -from eth2spec.utils import bls -from eth2spec.test.helpers.block import build_empty_block -from eth2spec.test.helpers.keys import pubkey_to_privkey, privkeys, pubkeys -from eth2spec.test.helpers.state import transition_to -from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature + from eth2spec.test.context import ( always_bls, spec_state_test, + with_all_phases_from_except, with_altair_and_later, with_presets, - with_all_phases_from_except, ) +from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.constants import ( - MAINNET, - MINIMAL, ALTAIR, EIP7805, + MAINNET, + MINIMAL, ) +from eth2spec.test.helpers.keys import privkeys, pubkey_to_privkey, pubkeys +from eth2spec.test.helpers.state import transition_to +from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature +from eth2spec.utils import bls +from eth2spec.utils.ssz.ssz_typing import Bitvector rng = random.Random(1337) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_deposit.py b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_deposit.py index 9ab578663f..d3751cab11 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_deposit.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_deposit.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( - spec_state_test, always_bls, + spec_state_test, with_bellatrix_and_later, ) from eth2spec.test.helpers.deposits import ( diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py index 32acbf0ec0..b720fe94dc 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py @@ -1,24 +1,27 @@ from random import Random -from eth2spec.test.helpers.execution_payload import ( - build_empty_execution_payload, - build_randomized_execution_payload, - compute_el_block_hash, - get_execution_payload_header, - build_state_with_incomplete_transition, - build_state_with_complete_transition, -) from eth2spec.test.context import ( - BELLATRIX, expect_assertion_error, spec_state_test, + with_all_phases_from_except, with_bellatrix_and_later, - with_bellatrix_until_eip7732, with_phases, ) +from eth2spec.test.helpers.constants import ( + BELLATRIX, + EIP7732, +) +from 
eth2spec.test.helpers.execution_payload import ( + build_empty_execution_payload, + build_randomized_execution_payload, + build_state_with_complete_transition, + build_state_with_incomplete_transition, + compute_el_block_hash, + get_execution_payload_header, +) +from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.state import next_slot -from eth2spec.test.helpers.forks import is_post_eip7732 def run_execution_payload_processing( @@ -109,7 +112,7 @@ def run_success_test(spec, state): yield from run_execution_payload_processing(spec, state, execution_payload) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_success_first_payload(spec, state): state = build_state_with_incomplete_transition(spec, state) @@ -117,7 +120,7 @@ def test_success_first_payload(spec, state): yield from run_success_test(spec, state) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_success_regular_payload(spec, state): state = build_state_with_complete_transition(spec, state) @@ -133,14 +136,14 @@ def run_gap_slot_test(spec, state): yield from run_execution_payload_processing(spec, state, execution_payload) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_success_first_payload_with_gap_slot(spec, state): state = build_state_with_incomplete_transition(spec, state) yield from run_gap_slot_test(spec, state) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_success_regular_payload_with_gap_slot(spec, state): state = build_state_with_complete_transition(spec, state) @@ -157,14 +160,14 @@ def run_bad_execution_test(spec, state): ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_invalid_bad_execution_first_payload(spec, state): state = build_state_with_incomplete_transition(spec, state) yield from run_bad_execution_test(spec, state) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_invalid_bad_execution_regular_payload(spec, state): state = build_state_with_complete_transition(spec, state) @@ -301,14 +304,14 @@ def run_non_empty_extra_data_test(spec, state): assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_non_empty_extra_data_first_payload(spec, state): state = build_state_with_incomplete_transition(spec, state) yield from run_non_empty_extra_data_test(spec, state) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_non_empty_extra_data_regular_payload(spec, state): state = build_state_with_complete_transition(spec, state) @@ -332,14 +335,14 @@ def run_non_empty_transactions_test(spec, state): ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_non_empty_transactions_first_payload(spec, state): state = build_state_with_incomplete_transition(spec, state) yield from run_non_empty_extra_data_test(spec, state) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_non_empty_transactions_regular_payload(spec, state): state = build_state_with_complete_transition(spec, state) @@ -361,14 
+364,14 @@ def run_zero_length_transaction_test(spec, state): ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_zero_length_transaction_first_payload(spec, state): state = build_state_with_incomplete_transition(spec, state) yield from run_zero_length_transaction_test(spec, state) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_zero_length_transaction_regular_payload(spec, state): state = build_state_with_complete_transition(spec, state) @@ -389,7 +392,7 @@ def run_randomized_non_validated_execution_fields_test(spec, state, rng, executi ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_randomized_non_validated_execution_fields_first_payload__execution_valid(spec, state): rng = Random(1111) @@ -397,7 +400,7 @@ def test_randomized_non_validated_execution_fields_first_payload__execution_vali yield from run_randomized_non_validated_execution_fields_test(spec, state, rng) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_randomized_non_validated_execution_fields_regular_payload__execution_valid(spec, state): rng = Random(2222) @@ -405,7 +408,7 @@ def test_randomized_non_validated_execution_fields_regular_payload__execution_va yield from run_randomized_non_validated_execution_fields_test(spec, state, rng) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_invalid_randomized_non_validated_execution_fields_first_payload__execution_invalid( spec, state @@ -417,7 +420,7 @@ def test_invalid_randomized_non_validated_execution_fields_first_payload__execut ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_invalid_randomized_non_validated_execution_fields_regular_payload__execution_invalid( spec, state diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_voluntary_exit.py b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_voluntary_exit.py index 2754068f2b..99a81ee15f 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_voluntary_exit.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_voluntary_exit.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( - spec_state_test, always_bls, + spec_state_test, with_bellatrix_and_later, with_phases, ) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_basic.py b/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_basic.py index 3cf7f23bff..23d6a6d766 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_basic.py @@ -1,14 +1,17 @@ from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.bellatrix.fork import ( + BELLATRIX_FORK_TEST_META_TAGS, + run_fork_test, +) from eth2spec.test.helpers.constants import ( ALTAIR, BELLATRIX, @@ -18,10 +21,7 @@ next_epoch, next_epoch_via_block, ) -from eth2spec.test.helpers.bellatrix.fork import ( - BELLATRIX_FORK_TEST_META_TAGS, - run_fork_test, -) 
+from eth2spec.test.utils import with_meta_tags @with_phases(phases=[ALTAIR], other_phases=[BELLATRIX]) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_random.py b/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_random.py index d2800df439..778a333712 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/fork/test_bellatrix_fork_random.py @@ -1,26 +1,26 @@ from random import Random from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.bellatrix.fork import ( + BELLATRIX_FORK_TEST_META_TAGS, + run_fork_test, +) from eth2spec.test.helpers.constants import ( ALTAIR, BELLATRIX, MINIMAL, ) -from eth2spec.test.helpers.bellatrix.fork import ( - BELLATRIX_FORK_TEST_META_TAGS, - run_fork_test, -) from eth2spec.test.helpers.random import randomize_state +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[ALTAIR], other_phases=[BELLATRIX]) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py b/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py index fb072c506b..bc7d473d1d 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py @@ -1,29 +1,25 @@ -from eth2spec.utils.ssz.ssz_typing import uint256 +from eth2spec.test.context import BELLATRIX, spec_state_test, with_phases from eth2spec.test.exceptions import BlockNotFoundException -from eth2spec.test.context import spec_state_test, with_phases, BELLATRIX from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) from eth2spec.test.helpers.execution_payload import ( + build_state_with_incomplete_transition, compute_el_block_hash, ) from eth2spec.test.helpers.fork_choice import ( + add_pow_block, get_genesis_forkchoice_store_and_block, on_tick_and_append_step, tick_and_add_block, ) -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, -) -from eth2spec.test.helpers.fork_choice import ( - add_pow_block, -) from eth2spec.test.helpers.pow_block import ( prepare_random_pow_block, ) -from eth2spec.test.helpers.execution_payload import ( - build_state_with_incomplete_transition, +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, ) +from eth2spec.utils.ssz.ssz_typing import uint256 def with_pow_block_patch(spec, blocks, func): diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_should_override_forkchoice_update.py b/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_should_override_forkchoice_update.py index 96be0a9330..206a46197f 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_should_override_forkchoice_update.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_should_override_forkchoice_update.py @@ -1,17 +1,19 @@ from eth2spec.test.context import ( spec_state_test, - with_bellatrix_until_eip7732, + with_all_phases_from_except, with_presets, ) -from eth2spec.test.helpers.constants import ( - MINIMAL, -) from eth2spec.test.helpers.attestations import ( get_valid_attestations_at_slot, ) from eth2spec.test.helpers.block import ( 
build_empty_block_for_next_slot, ) +from eth2spec.test.helpers.constants import ( + BELLATRIX, + EIP7732, + MINIMAL, +) from eth2spec.test.helpers.fork_choice import ( apply_next_epoch_with_attestations, apply_next_slots_with_attestations, @@ -22,13 +24,13 @@ tick_and_run_on_attestation, ) from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, next_epoch, next_slot, + state_transition_and_sign_block, ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_should_override_forkchoice_update__false(spec, state): @@ -75,7 +77,7 @@ def test_should_override_forkchoice_update__false(spec, state): yield "steps", test_steps -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_should_override_forkchoice_update__true(spec, state): test_steps = [] diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/random/test_random.py b/tests/core/pyspec/eth2spec/test/bellatrix/random/test_random.py index 51d27954b0..3cd6d78f07 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. """ -from eth2spec.test.helpers.constants import BELLATRIX from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import BELLATRIX from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py index d13d9f905f..7c2a00eaa5 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/sanity/test_blocks.py @@ -1,19 +1,24 @@ from random import Random -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, - next_slot, -) -from eth2spec.test.helpers.block import build_empty_block_for_next_slot -from eth2spec.test.helpers.execution_payload import build_randomized_execution_payload + from eth2spec.test.context import ( BELLATRIX, - with_bellatrix_until_eip7732, - with_phases, spec_state_test, + with_all_phases_from_except, + with_phases, +) +from eth2spec.test.helpers.block import build_empty_block_for_next_slot +from eth2spec.test.helpers.constants import ( + BELLATRIX, + EIP7732, +) +from eth2spec.test.helpers.execution_payload import build_randomized_execution_payload +from eth2spec.test.helpers.state import ( + next_slot, + state_transition_and_sign_block, ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_empty_block_transition_no_tx(spec, state): yield "pre", state @@ -27,7 +32,7 @@ def test_empty_block_transition_no_tx(spec, state): yield "post", state -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_block_transition_randomized_payload(spec, state): yield "pre", state diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py b/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py index 
5ebd4d940a..c83f3d291c 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( spec_state_test, - with_bellatrix_until_eip7732, + with_all_phases_from_except, ) from eth2spec.test.helpers.attestations import ( state_transition_with_full_block, @@ -8,6 +8,10 @@ from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) +from eth2spec.test.helpers.constants import ( + BELLATRIX, + EIP7732, +) from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, ) @@ -16,11 +20,11 @@ on_tick_and_append_step, ) from eth2spec.test.helpers.optimistic_sync import ( - PayloadStatusV1, - PayloadStatusV1Status, - MegaStore, add_optimistic_block, get_optimistic_store, + MegaStore, + PayloadStatusV1, + PayloadStatusV1Status, ) from eth2spec.test.helpers.state import ( next_epoch, @@ -28,7 +32,7 @@ ) -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_from_syncing_to_invalid(spec, state): test_steps = [] diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_is_valid_terminal_pow_block.py b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_is_valid_terminal_pow_block.py index 8109d92d7c..826a489cb2 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_is_valid_terminal_pow_block.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_is_valid_terminal_pow_block.py @@ -1,11 +1,11 @@ -from eth2spec.utils.ssz.ssz_typing import uint256 -from eth2spec.test.helpers.pow_block import ( - prepare_random_pow_block, -) from eth2spec.test.context import ( spec_state_test, with_bellatrix_and_later, ) +from eth2spec.test.helpers.pow_block import ( + prepare_random_pow_block, +) +from eth2spec.utils.ssz.ssz_typing import uint256 @with_bellatrix_and_later diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_transition.py b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_transition.py index f78a2d653c..f6b3bf4c41 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_transition.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_transition.py @@ -1,12 +1,16 @@ -from eth2spec.test.helpers.execution_payload import ( - build_empty_execution_payload, - build_state_with_incomplete_transition, - build_state_with_complete_transition, -) from eth2spec.test.context import ( spec_state_test, + with_all_phases_from_except, with_bellatrix_and_later, - with_bellatrix_until_eip7732, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, + EIP7732, +) +from eth2spec.test.helpers.execution_payload import ( + build_empty_execution_payload, + build_state_with_complete_transition, + build_state_with_incomplete_transition, ) @@ -33,7 +37,7 @@ def test_success_merge_complete(spec, state): ] -@with_bellatrix_until_eip7732 +@with_all_phases_from_except(BELLATRIX, [EIP7732]) @spec_state_test def test_is_merge_block_and_is_execution_enabled(spec, state): for result in expected_results: diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py index cafec07d13..7ae4813a99 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py @@ -1,5 +1,10 @@ from typing import Optional 
-from eth2spec.utils.ssz.ssz_typing import uint256, Bytes32 + +from eth2spec.test.context import ( + spec_configured_state_test, + spec_state_test, + with_bellatrix_and_later, +) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) @@ -7,16 +12,11 @@ build_empty_execution_payload, compute_el_block_hash, ) +from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.pow_block import ( prepare_random_pow_chain, ) -from eth2spec.test.helpers.forks import is_post_eip7732 -from eth2spec.test.context import ( - spec_state_test, - with_bellatrix_and_later, - spec_configured_state_test, -) - +from eth2spec.utils.ssz.ssz_typing import Bytes32, uint256 TERMINAL_BLOCK_HASH_CONFIG_VAR = ( "0x0000000000000000000000000000000000000000000000000000000000000001" diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/validator/test_validator.py index 71fee1170d..42765bb7cf 100644 --- a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/validator/test_validator.py @@ -1,18 +1,17 @@ from copy import deepcopy from typing import Optional -from eth2spec.test.helpers.pow_block import ( - prepare_random_pow_chain, +from eth2spec.test.context import ( + spec_state_test, + with_phases, ) from eth2spec.test.helpers.constants import ( BELLATRIX, ) -from eth2spec.test.context import ( - spec_state_test, - with_phases, +from eth2spec.test.helpers.pow_block import ( + prepare_random_pow_chain, ) - # For test_get_pow_block_at_terminal_total_difficulty IS_HEAD_BLOCK = "is_head_block" IS_HEAD_PARENT_BLOCK = "is_head_parent_block" diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_bls_to_execution_change.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_bls_to_execution_change.py index 64ea413221..53cba585a6 100644 --- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_bls_to_execution_change.py +++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_bls_to_execution_change.py @@ -1,14 +1,14 @@ -from eth2spec.test.helpers.keys import pubkeys -from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change -from eth2spec.test.helpers.constants import CAPELLA, MAINNET from eth2spec.test.context import ( always_bls, expect_assertion_error, spec_state_test, with_capella_and_later, - with_presets, with_phases, + with_presets, ) +from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change +from eth2spec.test.helpers.constants import CAPELLA, MAINNET +from eth2spec.test.helpers.keys import pubkeys def run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=True): diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_deposit.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_deposit.py index de455bacb1..a435697022 100644 --- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_deposit.py +++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_deposit.py @@ -1,17 +1,21 @@ from eth2spec.test.context import ( spec_state_test, - with_capella_until_eip7732, + with_all_phases_from_except, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, + EIP7732, ) -from eth2spec.test.helpers.forks import is_post_electra -from eth2spec.test.helpers.state import 
next_epoch_via_block from eth2spec.test.helpers.deposits import ( prepare_state_and_deposit, run_deposit_processing, ) +from eth2spec.test.helpers.forks import is_post_electra +from eth2spec.test.helpers.state import next_epoch_via_block from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_success_top_up_to_withdrawn_validator(spec, state): validator_index = 0 diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py index 223931585f..4b8df432eb 100644 --- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py @@ -1,16 +1,16 @@ -from eth2spec.test.helpers.execution_payload import ( - build_empty_execution_payload, - compute_el_block_hash, - build_state_with_incomplete_transition, +from eth2spec.test.bellatrix.block_processing.test_process_execution_payload import ( + run_execution_payload_processing, ) from eth2spec.test.context import ( spec_state_test, with_capella_and_later, ) -from eth2spec.test.helpers.state import next_slot -from eth2spec.test.bellatrix.block_processing.test_process_execution_payload import ( - run_execution_payload_processing, +from eth2spec.test.helpers.execution_payload import ( + build_empty_execution_payload, + build_state_with_incomplete_transition, + compute_el_block_hash, ) +from eth2spec.test.helpers.state import next_slot @with_capella_and_later diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py index 1f1cd72c36..27763234e8 100644 --- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py +++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py @@ -2,11 +2,16 @@ from eth2spec.test.context import ( spec_state_test, - with_presets, + with_all_phases_from_except, with_capella_and_later, - with_capella_until_eip7732, + with_presets, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, + EIP7732, + MAINNET, + MINIMAL, ) -from eth2spec.test.helpers.constants import MAINNET, MINIMAL from eth2spec.test.helpers.execution_payload import ( build_empty_execution_payload, compute_el_block_hash, @@ -21,14 +26,14 @@ from eth2spec.test.helpers.withdrawals import ( get_expected_withdrawals, prepare_expected_withdrawals, + run_withdrawals_processing, set_eth1_withdrawal_credential_with_balance, set_validator_fully_withdrawable, set_validator_partially_withdrawable, - run_withdrawals_processing, ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_success_zero_expected_withdrawals(spec, state): assert len(get_expected_withdrawals(spec, state)) == 0 @@ -39,7 +44,7 @@ def test_success_zero_expected_withdrawals(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_success_one_full_withdrawal(spec, state): fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals( @@ -60,7 +65,7 @@ def test_success_one_full_withdrawal(spec, state): ) -@with_capella_until_eip7732 
+@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_success_one_partial_withdrawal(spec, state): fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals( @@ -83,7 +88,7 @@ def test_success_one_partial_withdrawal(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_success_mixed_fully_and_partial_withdrawable(spec, state): num_full_withdrawals = spec.MAX_WITHDRAWALS_PER_PAYLOAD // 2 @@ -108,7 +113,7 @@ def test_success_mixed_fully_and_partial_withdrawable(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @with_presets([MAINNET], reason="too few validators with minimal config") @spec_state_test def test_success_all_fully_withdrawable_in_one_sweep(spec, state): @@ -131,7 +136,7 @@ def test_success_all_fully_withdrawable_in_one_sweep(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @with_presets([MINIMAL], reason="too many validators with mainnet config") @spec_state_test def test_success_all_fully_withdrawable(spec, state): @@ -154,7 +159,7 @@ def test_success_all_fully_withdrawable(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @with_presets([MAINNET], reason="too few validators with minimal config") @spec_state_test def test_success_all_partially_withdrawable_in_one_sweep(spec, state): @@ -177,7 +182,7 @@ def test_success_all_partially_withdrawable_in_one_sweep(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @with_presets([MINIMAL], reason="too many validators with mainnet config") @spec_state_test def test_success_all_partially_withdrawable(spec, state): @@ -205,7 +210,7 @@ def test_success_all_partially_withdrawable(spec, state): # -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state): next_slot(spec, state) @@ -222,7 +227,7 @@ def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_full_withdrawals=1) @@ -235,7 +240,7 @@ def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, stat yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_partial_withdrawals=1) @@ -248,7 +253,7 @@ def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, s yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_full_withdrawals=2) @@ -261,7 +266,7 @@ def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec, yield from 
run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_partial_withdrawals=2) @@ -274,7 +279,7 @@ def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(sp yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec, state): prepare_expected_withdrawals( @@ -289,7 +294,7 @@ def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec, yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(spec, state): prepare_expected_withdrawals( @@ -304,7 +309,7 @@ def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(sp yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state): prepare_expected_withdrawals( @@ -322,7 +327,7 @@ def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state): prepare_expected_withdrawals( @@ -340,7 +345,7 @@ def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, state): prepare_expected_withdrawals( @@ -364,7 +369,7 @@ def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, # -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_incorrect_withdrawal_index(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_full_withdrawals=1) @@ -377,7 +382,7 @@ def test_invalid_incorrect_withdrawal_index(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_incorrect_address_full(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_full_withdrawals=1) @@ -390,7 +395,7 @@ def test_invalid_incorrect_address_full(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_incorrect_address_partial(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_partial_withdrawals=1) @@ -403,7 +408,7 @@ def test_invalid_incorrect_address_partial(spec, state): 
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_incorrect_amount_full(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_full_withdrawals=1) @@ -416,7 +421,7 @@ def test_invalid_incorrect_amount_full(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_incorrect_amount_partial(spec, state): prepare_expected_withdrawals(spec, state, rng=random.Random(42), num_full_withdrawals=1) @@ -429,7 +434,7 @@ def test_invalid_incorrect_amount_partial(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_one_of_many_incorrectly_full(spec, state): prepare_expected_withdrawals( @@ -453,7 +458,7 @@ def test_invalid_one_of_many_incorrectly_full(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_one_of_many_incorrectly_partial(spec, state): prepare_expected_withdrawals( @@ -477,7 +482,7 @@ def test_invalid_one_of_many_incorrectly_partial(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_many_incorrectly_full(spec, state): prepare_expected_withdrawals( @@ -501,7 +506,7 @@ def test_invalid_many_incorrectly_full(spec, state): yield from run_withdrawals_processing(spec, state, execution_payload, valid=False) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_many_incorrectly_partial(spec, state): prepare_expected_withdrawals( @@ -530,7 +535,7 @@ def test_invalid_many_incorrectly_partial(spec, state): # -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_withdrawable_epoch_but_0_balance(spec, state): current_epoch = spec.get_current_epoch(state) @@ -546,7 +551,7 @@ def test_withdrawable_epoch_but_0_balance(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_withdrawable_epoch_but_0_effective_balance_0_balance(spec, state): current_epoch = spec.get_current_epoch(state) @@ -562,7 +567,7 @@ def test_withdrawable_epoch_but_0_effective_balance_0_balance(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_withdrawable_epoch_but_0_effective_balance_nonzero_balance(spec, state): current_epoch = spec.get_current_epoch(state) @@ -578,7 +583,7 @@ def test_withdrawable_epoch_but_0_effective_balance_nonzero_balance(spec, state) ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_no_withdrawals_but_some_next_epoch(spec, state): current_epoch = spec.get_current_epoch(state) @@ -594,7 +599,7 @@ def test_no_withdrawals_but_some_next_epoch(spec, state): ) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_all_withdrawal(spec, state): # Make all validators withdrawable @@ -632,25 
+637,25 @@ def run_random_full_withdrawals_test(spec, state, rng): yield from run_withdrawals_processing(spec, state, execution_payload) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_random_full_withdrawals_0(spec, state): yield from run_random_full_withdrawals_test(spec, state, random.Random(444)) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_random_full_withdrawals_1(spec, state): yield from run_random_full_withdrawals_test(spec, state, random.Random(420)) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_random_full_withdrawals_2(spec, state): yield from run_random_full_withdrawals_test(spec, state, random.Random(200)) -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_random_full_withdrawals_3(spec, state): yield from run_random_full_withdrawals_test(spec, state, random.Random(2000000)) diff --git a/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_basic.py b/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_basic.py index 1ad4428195..85918bf6ed 100644 --- a/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_basic.py @@ -1,14 +1,17 @@ from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.capella.fork import ( + CAPELLA_FORK_TEST_META_TAGS, + run_fork_test, +) from eth2spec.test.helpers.constants import ( BELLATRIX, CAPELLA, @@ -18,10 +21,7 @@ next_epoch, next_epoch_via_block, ) -from eth2spec.test.helpers.capella.fork import ( - CAPELLA_FORK_TEST_META_TAGS, - run_fork_test, -) +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) diff --git a/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_random.py b/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_random.py index a5c53c2205..b3cdb9ba2b 100644 --- a/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/capella/fork/test_capella_fork_random.py @@ -1,26 +1,26 @@ from random import Random from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.capella.fork import ( + CAPELLA_FORK_TEST_META_TAGS, + run_fork_test, +) from eth2spec.test.helpers.constants import ( BELLATRIX, CAPELLA, MINIMAL, ) -from eth2spec.test.helpers.capella.fork import ( - CAPELLA_FORK_TEST_META_TAGS, - run_fork_test, -) from eth2spec.test.helpers.random import randomize_state +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py index d904426a2d..066c260194 100644 --- 
a/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py @@ -1,15 +1,19 @@ from eth2spec.test.context import ( spec_state_test, - with_capella_until_eip7732, + with_all_phases_from_except, with_test_suite_name, ) from eth2spec.test.helpers.attestations import ( state_transition_with_full_block, ) +from eth2spec.test.helpers.constants import ( + CAPELLA, + EIP7732, +) @with_test_suite_name("BeaconBlockBody") -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_execution_merkle_proof(spec, state): block = state_transition_with_full_block(spec, state, True, False) diff --git a/tests/core/pyspec/eth2spec/test/capella/random/test_random.py b/tests/core/pyspec/eth2spec/test/capella/random/test_random.py index ac3a9aee5e..386b26893e 100644 --- a/tests/core/pyspec/eth2spec/test/capella/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/capella/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. """ -from eth2spec.test.helpers.constants import CAPELLA from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import CAPELLA from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py index 92700bb7e3..31639f3aff 100644 --- a/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py @@ -1,46 +1,50 @@ import random -from eth2spec.test.helpers.constants import MINIMAL -from eth2spec.test.helpers.forks import is_post_electra, is_post_eip7732 + from eth2spec.test.context import ( - with_capella_and_later, - with_capella_until_eip7732, spec_state_test, + with_all_phases_from_except, + with_capella_and_later, with_presets, ) -from eth2spec.test.helpers.keys import pubkeys -from eth2spec.test.helpers.state import ( - next_epoch_via_block, - state_transition_and_sign_block, - transition_to, - next_slot, +from eth2spec.test.helpers.attestations import ( + next_epoch_with_attestations, ) from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, build_empty_block, + build_empty_block_for_next_slot, ) from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change -from eth2spec.test.helpers.attestations import ( - next_epoch_with_attestations, +from eth2spec.test.helpers.constants import ( + CAPELLA, + EIP7732, + MINIMAL, ) +from eth2spec.test.helpers.deposits import ( + prepare_state_and_deposit, +) +from eth2spec.test.helpers.forks import is_post_eip7732, is_post_electra +from eth2spec.test.helpers.keys import pubkeys +from eth2spec.test.helpers.state import ( + next_epoch_via_block, + next_slot, + state_transition_and_sign_block, + transition_to, +) +from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits from eth2spec.test.helpers.withdrawals import ( get_expected_withdrawals, + prepare_expected_withdrawals, set_eth1_withdrawal_credential_with_balance, set_validator_fully_withdrawable, 
set_validator_partially_withdrawable, - prepare_expected_withdrawals, -) -from eth2spec.test.helpers.deposits import ( - prepare_state_and_deposit, ) -from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits - # # `is_execution_enabled` has been removed from Capella # -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_invalid_is_execution_enabled_false(spec, state): # Set `latest_execution_payload_header` to empty @@ -206,7 +210,7 @@ def test_invalid_two_bls_changes_of_different_addresses_same_validator_same_bloc # -@with_capella_until_eip7732 +@with_all_phases_from_except(CAPELLA, [EIP7732]) @spec_state_test def test_full_withdrawal_in_epoch_transition(spec, state): index = 0 diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 8480bdf3c8..c6f3ce204f 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -1,46 +1,45 @@ -import pytest +import importlib from copy import deepcopy from dataclasses import dataclass -import importlib +from random import Random +from typing import Any, Callable, Dict, Sequence + +import pytest +from lru import LRU from eth2spec.utils import bls from .exceptions import SkippedTest from .helpers.constants import ( - PHASE0, + ALL_PHASES, + ALLOWED_TEST_RUNNER_FORKS, ALTAIR, BELLATRIX, CAPELLA, DENEB, - ELECTRA, - FULU, EIP7441, EIP7732, + ELECTRA, + FULU, + LIGHT_CLIENT_TESTING_FORKS, MINIMAL, - ALL_PHASES, + PHASE0, POST_FORK_OF, - ALLOWED_TEST_RUNNER_FORKS, - LIGHT_CLIENT_TESTING_FORKS, ) -from .helpers.forks import is_post_fork, is_post_electra +from .helpers.forks import is_post_electra, is_post_fork from .helpers.genesis import create_genesis_state +from .helpers.specs import ( + spec_targets, +) from .helpers.typing import ( Spec, SpecForks, ) -from .helpers.specs import ( - spec_targets, -) from .utils import ( vector_test, with_meta_tags, ) -from random import Random -from typing import Any, Callable, Sequence, Dict - -from lru import LRU - # Without pytest CLI arg or pyspec-test-generator 'preset' argument, this will be the config to apply. 
DEFAULT_TEST_PRESET = MINIMAL @@ -587,7 +586,10 @@ def wrapper(*args, **kw): # When running test generator, it sets specific `phase` phase = kw["phase"] _phases = [phase] - _other_phases = [POST_FORK_OF[phase]] + if phase in POST_FORK_OF: + _other_phases = [POST_FORK_OF[phase]] + else: + _other_phases = None ret = _run_test_case_with_phases( fn, _phases, _other_phases, kw, args, is_fork_transition=True ) @@ -637,12 +639,6 @@ def wrapper(*args, spec: Spec, **kw): with_fulu_and_later = with_all_phases_from(FULU, all_phases=ALLOWED_TEST_RUNNER_FORKS) with_eip7441_and_later = with_all_phases_from(EIP7441, all_phases=ALLOWED_TEST_RUNNER_FORKS) -with_altair_until_eip7732 = with_all_phases_from_to(ALTAIR, EIP7732) -with_bellatrix_until_eip7732 = with_all_phases_from_to(BELLATRIX, EIP7732) -with_capella_until_eip7732 = with_all_phases_from_to(CAPELLA, EIP7732) -with_deneb_until_eip7732 = with_all_phases_from_to(DENEB, EIP7732) -with_electra_until_eip7732 = with_all_phases_from_to(ELECTRA, EIP7732) - class quoted_str(str): pass diff --git a/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py index f9bd5ee14f..624b461c85 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py @@ -1,21 +1,21 @@ from random import Random -from eth2spec.test.helpers.execution_payload import ( - build_empty_execution_payload, - compute_el_block_hash, - get_execution_payload_header, -) from eth2spec.test.context import ( - spec_state_test, expect_assertion_error, + spec_state_test, with_deneb_and_later, ) -from eth2spec.test.helpers.keys import privkeys -from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, get_max_blob_count, + get_sample_blob_tx, ) +from eth2spec.test.helpers.execution_payload import ( + build_empty_execution_payload, + compute_el_block_hash, + get_execution_payload_header, +) +from eth2spec.test.helpers.forks import is_post_eip7732 +from eth2spec.test.helpers.keys import privkeys def run_execution_payload_processing( @@ -411,7 +411,7 @@ def test_invalid_exceed_max_blobs_per_block(spec, state): execution_payload = build_empty_execution_payload(spec, state) opaque_tx, _, blob_kzg_commitments, _ = get_sample_blob_tx( - spec, blob_count=get_max_blob_count(spec) + 1 + spec, blob_count=get_max_blob_count(spec, state) + 1 ) execution_payload.transactions = [opaque_tx] diff --git a/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_voluntary_exit.py b/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_voluntary_exit.py index b01eaab0e1..12daf3b065 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_voluntary_exit.py +++ b/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_voluntary_exit.py @@ -1,3 +1,6 @@ +from eth2spec.test.bellatrix.block_processing.test_process_voluntary_exit import ( + run_voluntary_exit_processing_test, +) from eth2spec.test.context import ( always_bls, spec_state_test, @@ -6,9 +9,6 @@ from eth2spec.test.helpers.constants import ( DENEB, ) -from eth2spec.test.bellatrix.block_processing.test_process_voluntary_exit import ( - run_voluntary_exit_processing_test, -) @with_deneb_and_later diff --git 
a/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py b/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py index a4db24c193..de81a4353e 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py +++ b/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py @@ -1,17 +1,17 @@ -from eth2spec.test.helpers.keys import pubkeys -from eth2spec.test.helpers.forks import is_post_electra -from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( - with_deneb_and_later, - spec_test, - spec_state_test, + scaled_churn_balances_equal_activation_churn_limit, + scaled_churn_balances_exceed_activation_churn_limit, single_phase, + spec_state_test, + spec_test, with_custom_state, + with_deneb_and_later, with_presets, - scaled_churn_balances_exceed_activation_churn_limit, - scaled_churn_balances_equal_activation_churn_limit, ) +from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with +from eth2spec.test.helpers.forks import is_post_electra +from eth2spec.test.helpers.keys import pubkeys def run_process_registry_updates(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py index 4f5a9afbaa..17f5dbf458 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py @@ -1,27 +1,27 @@ from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( CAPELLA, DENEB, MINIMAL, ) -from eth2spec.test.helpers.state import ( - next_epoch, - next_epoch_via_block, -) from eth2spec.test.helpers.deneb.fork import ( DENEB_FORK_TEST_META_TAGS, run_fork_test, ) +from eth2spec.test.helpers.state import ( + next_epoch, + next_epoch_via_block, +) +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[CAPELLA], other_phases=[DENEB]) diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py index ae130110d3..558706806b 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py @@ -1,16 +1,15 @@ from random import Random from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( CAPELLA, DENEB, @@ -21,6 +20,7 @@ run_fork_test, ) from eth2spec.test.helpers.random import randomize_state +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[CAPELLA], other_phases=[DENEB]) diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py index 442d993a1d..4b5cdbc1ce 100644 --- 
a/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py @@ -4,16 +4,17 @@ spec_state_test, with_all_phases_from_except, ) - -from eth2spec.test.helpers.constants import ( - DENEB, - FULU, - EIP7732, +from eth2spec.test.helpers.blob import ( + get_sample_blob_tx, ) - from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) +from eth2spec.test.helpers.constants import ( + DENEB, + EIP7732, + FULU, +) from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, ) @@ -26,9 +27,6 @@ from eth2spec.test.helpers.state import ( state_transition_and_sign_block, ) -from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, -) def get_block_with_blob(spec, state, rng=None): diff --git a/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py index 3742716462..91e650a32f 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py +++ b/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py @@ -1,10 +1,17 @@ import random +from eth2spec.debug.random_value import ( + get_random_ssz_object, + RandomizationMode, +) from eth2spec.test.context import ( spec_state_test, with_deneb_and_later, with_test_suite_name, ) +from eth2spec.test.helpers.blob import ( + get_sample_blob_tx, +) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, sign_block, @@ -13,13 +20,6 @@ compute_el_block_hash, ) from eth2spec.test.helpers.forks import is_post_eip7732 -from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, -) -from eth2spec.debug.random_value import ( - RandomizationMode, - get_random_ssz_object, -) def _run_blob_kzg_commitment_merkle_proof_test(spec, state, rng=None): diff --git a/tests/core/pyspec/eth2spec/test/deneb/random/test_random.py b/tests/core/pyspec/eth2spec/test/deneb/random/test_random.py index fef3810fa3..461b30db20 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/deneb/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. 
""" -from eth2spec.test.helpers.constants import DENEB from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import DENEB from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index a2baf73984..9d272a1790 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -1,22 +1,26 @@ import random -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, +from eth2spec.test.context import ( + spec_state_test, + with_all_phases_from_except, +) +from eth2spec.test.helpers.blob import ( + get_max_blob_count, + get_sample_blob_tx, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) -from eth2spec.test.context import ( - spec_state_test, - with_deneb_until_eip7732, +from eth2spec.test.helpers.constants import ( + DENEB, + EIP7732, ) from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, get_random_tx, ) -from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, - get_max_blob_count, +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, ) @@ -63,61 +67,63 @@ def run_block_with_blobs( yield "post", state if valid else None -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_zero_blob(spec, state): yield from run_block_with_blobs(spec, state, blob_count=0) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_one_blob(spec, state): yield from run_block_with_blobs(spec, state, blob_count=1) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_one_blob_two_txs(spec, state): yield from run_block_with_blobs(spec, state, blob_count=1, tx_count=2) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_one_blob_max_txs(spec, state): - yield from run_block_with_blobs(spec, state, blob_count=1, tx_count=get_max_blob_count(spec)) + yield from run_block_with_blobs( + spec, state, blob_count=1, tx_count=get_max_blob_count(spec, state) + ) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_invalid_one_blob_max_plus_one_txs(spec, state): yield from run_block_with_blobs( - spec, state, blob_count=1, tx_count=get_max_blob_count(spec) + 1, valid=False + spec, state, blob_count=1, tx_count=get_max_blob_count(spec, state) + 1, valid=False ) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_max_blobs_per_block(spec, state): - yield from run_block_with_blobs(spec, state, blob_count=get_max_blob_count(spec)) + yield from run_block_with_blobs(spec, state, blob_count=get_max_blob_count(spec, state)) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_invalid_max_blobs_per_block_two_txs(spec, state): yield from run_block_with_blobs( - spec, state, blob_count=get_max_blob_count(spec), tx_count=2, valid=False + spec, state, 
blob_count=get_max_blob_count(spec, state), tx_count=2, valid=False ) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_invalid_exceed_max_blobs_per_block(spec, state): yield from run_block_with_blobs( - spec, state, blob_count=get_max_blob_count(spec) + 1, valid=False + spec, state, blob_count=get_max_blob_count(spec, state) + 1, valid=False ) -@with_deneb_until_eip7732 +@with_all_phases_from_except(DENEB, [EIP7732]) @spec_state_test def test_mix_blob_tx_and_non_blob_tx(spec, state): yield from run_block_with_blobs(spec, state, blob_count=1, tx_count=1, non_blob_tx_count=1) diff --git a/tests/core/pyspec/eth2spec/test/deneb/transition/test_operations.py b/tests/core/pyspec/eth2spec/test/deneb/transition/test_operations.py index ad62b8d464..3904b1877d 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/transition/test_operations.py +++ b/tests/core/pyspec/eth2spec/test/deneb/transition/test_operations.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( - ForkMeta, always_bls, + ForkMeta, with_fork_metas, ) from eth2spec.test.helpers.attestations import ( @@ -13,18 +13,17 @@ AFTER_DENEB_PRE_POST_FORKS, DENEB, ) -from eth2spec.test.helpers.state import ( - next_epoch_via_block, - state_transition_and_sign_block, - transition_to, -) from eth2spec.test.helpers.fork_transition import ( - OperationType, do_fork, + OperationType, run_transition_with_operation, transition_until_fork, ) - +from eth2spec.test.helpers.state import ( + next_epoch_via_block, + state_transition_and_sign_block, + transition_to, +) # # BLSToExecutionChange diff --git a/tests/core/pyspec/eth2spec/test/deneb/transition/test_transition.py b/tests/core/pyspec/eth2spec/test/deneb/transition/test_transition.py index 9031b2e84e..5c1b42288f 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/transition/test_transition.py +++ b/tests/core/pyspec/eth2spec/test/deneb/transition/test_transition.py @@ -7,12 +7,12 @@ AFTER_DENEB_PRE_POST_FORKS, MINIMAL, ) -from eth2spec.test.helpers.keys import pubkeys from eth2spec.test.helpers.fork_transition import ( do_fork, transition_to_next_epoch_and_append_blocks, transition_until_fork, ) +from eth2spec.test.helpers.keys import pubkeys def mock_activated_validators(spec, state, mock_activations): diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 7e225b57fc..aa72570715 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -1,16 +1,16 @@ import random from eth2spec.test.context import ( - spec_test, + always_bls, + expect_assertion_error, single_phase, + spec_test, with_deneb_and_later, - expect_assertion_error, - always_bls, ) from eth2spec.test.helpers.blob import ( - get_sample_blob, - get_poly_in_both_forms, eval_poly_in_coeff_form, + get_poly_in_both_forms, + get_sample_blob, ) from eth2spec.utils import bls from eth2spec.utils.bls import BLS_MODULUS diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_execution_engine_interface.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_execution_engine_interface.py index b2ec5a6adf..c877ee488c 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_execution_engine_interface.py +++ 
b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_execution_engine_interface.py @@ -1,8 +1,8 @@ from eth2spec.test.context import ( DENEB, spec_state_test, - with_phases, with_deneb_and_later, + with_phases, ) from eth2spec.test.helpers.execution_payload import ( build_empty_execution_payload, diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 6f1deedcb8..7a0ff4d794 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -1,16 +1,17 @@ import random + from eth2spec.test.context import ( spec_state_test, with_deneb_and_later, ) -from eth2spec.test.helpers.execution_payload import ( - compute_el_block_hash, -) -from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.blob import ( get_sample_blob_tx, ) from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block +from eth2spec.test.helpers.execution_payload import ( + compute_el_block_hash, +) +from eth2spec.test.helpers.forks import is_post_eip7732 def _get_sample_sidecars(spec, state, rng): diff --git a/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_opening_proof.py b/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_opening_proof.py index 19b0125f12..ea8f240358 100644 --- a/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_opening_proof.py +++ b/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_opening_proof.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import spec_state_test, with_eip7441_and_later, expect_assertion_error +from eth2spec.test.context import expect_assertion_error, spec_state_test, with_eip7441_and_later from eth2spec.test.helpers.eip7441 import ( compute_whisk_k_commitment, compute_whisk_tracker, diff --git a/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_registration.py b/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_registration.py index caea59dc44..49ffb2c40e 100644 --- a/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_registration.py +++ b/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_eip7441_registration.py @@ -1,9 +1,9 @@ -from eth2spec.test.context import spec_state_test, with_eip7441_and_later, expect_assertion_error +from eth2spec.test.context import expect_assertion_error, spec_state_test, with_eip7441_and_later from eth2spec.test.helpers.eip7441 import ( - set_as_first_proposal, compute_whisk_k_commitment, - set_registration, register_tracker, + set_as_first_proposal, + set_registration, ) diff --git a/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_shuffled_trackers.py b/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_shuffled_trackers.py index 23e3501bdf..24cbed2cdf 100644 --- a/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_shuffled_trackers.py +++ b/tests/core/pyspec/eth2spec/test/eip7441/block_processing/test_process_shuffled_trackers.py @@ -1,8 +1,9 @@ -from eth2spec.test.context import spec_state_test, with_eip7441_and_later, expect_assertion_error -from eth2spec.test.helpers.keys import whisk_ks_initial -from eth2spec.test.helpers.eip7441 import compute_whisk_tracker from curdleproofs import GenerateWhiskShuffleProof 
+from eth2spec.test.context import expect_assertion_error, spec_state_test, with_eip7441_and_later +from eth2spec.test.helpers.eip7441 import compute_whisk_tracker +from eth2spec.test.helpers.keys import whisk_ks_initial + def set_correct_shuffle_proofs(spec, state, body): pre_shuffle_trackers = get_and_populate_pre_shuffle_trackers(spec, state, body) diff --git a/tests/core/pyspec/eth2spec/test/eip7441/sanity/blocks/test_eip7441.py b/tests/core/pyspec/eth2spec/test/eip7441/sanity/blocks/test_eip7441.py index f2118c8e3b..c856feefbd 100644 --- a/tests/core/pyspec/eth2spec/test/eip7441/sanity/blocks/test_eip7441.py +++ b/tests/core/pyspec/eth2spec/test/eip7441/sanity/blocks/test_eip7441.py @@ -1,9 +1,10 @@ -from eth2spec.test.helpers.block import build_empty_block +from curdleproofs import WhiskTracker + from eth2spec.test.context import spec_state_test, with_eip7441_and_later +from eth2spec.test.helpers.block import build_empty_block +from eth2spec.test.helpers.eip7441 import compute_whisk_tracker_and_commitment from eth2spec.test.helpers.keys import whisk_ks_initial from eth2spec.test.helpers.state import state_transition_and_sign_block -from eth2spec.test.helpers.eip7441 import compute_whisk_tracker_and_commitment -from curdleproofs import WhiskTracker known_whisk_trackers = {} diff --git a/tests/core/pyspec/eth2spec/test/eip7441/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/eip7441/unittests/test_config_invariants.py index e94df3b8b3..9f615ba2e0 100644 --- a/tests/core/pyspec/eth2spec/test/eip7441/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/eip7441/unittests/test_config_invariants.py @@ -1,5 +1,4 @@ -from eth2spec.test.context import spec_test, with_eip7441_and_later -from eth2spec.test.context import single_phase +from eth2spec.test.context import single_phase, spec_test, with_eip7441_and_later # Note: remove once whisk is rebased on top of deneb diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_attestation.py index df10ce5203..bae9c546a9 100644 --- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_attestation.py @@ -1,4 +1,3 @@ -from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( always_bls, spec_state_test, @@ -6,13 +5,14 @@ with_presets, ) from eth2spec.test.helpers.attestations import ( - run_attestation_processing, - get_valid_attestation, - sign_attestation, build_attestation_data, - get_valid_attestation_at_slot, get_empty_eip7549_aggregation_bits, + get_valid_attestation, + get_valid_attestation_at_slot, + run_attestation_processing, + sign_attestation, ) +from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.state import ( next_slots, ) diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py index 2e2ae2a7d6..3afa3cb210 100644 --- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py +++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py @@ -1,18 +1,18 @@ -from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( - with_electra_and_later, - with_presets, - spec_test, - 
single_phase, - with_custom_state, - scaled_churn_balances_exceed_activation_exit_churn_limit, default_activation_threshold, + scaled_churn_balances_exceed_activation_exit_churn_limit, + single_phase, spec_state_test, + spec_test, + with_custom_state, + with_electra_and_later, + with_presets, ) +from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.withdrawals import ( - set_eth1_withdrawal_credential_with_balance, - set_compounding_withdrawal_credential_with_balance, set_compounding_withdrawal_credential, + set_compounding_withdrawal_credential_with_balance, + set_eth1_withdrawal_credential_with_balance, ) # *********************** diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_deposit_request.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_deposit_request.py index a787b57f87..480b6dafe5 100644 --- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_deposit_request.py +++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_deposit_request.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import spec_state_test, always_bls, with_electra_and_later +from eth2spec.test.context import always_bls, spec_state_test, with_electra_and_later from eth2spec.test.helpers.deposits import ( prepare_deposit_request, run_deposit_request_processing, diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_voluntary_exit.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_voluntary_exit.py index fe439b7134..cde2ebaf27 100644 --- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_voluntary_exit.py +++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_voluntary_exit.py @@ -1,9 +1,9 @@ -from eth2spec.test.helpers.constants import MAINNET from eth2spec.test.context import ( spec_state_test, with_electra_and_later, with_presets, ) +from eth2spec.test.helpers.constants import MAINNET from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.voluntary_exits import ( run_voluntary_exit_processing, diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawal_request.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawal_request.py index 5e8e42a617..c2b17db9ad 100644 --- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawal_request.py +++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawal_request.py @@ -1,7 +1,8 @@ import random + from eth2spec.test.context import ( - spec_state_test, expect_assertion_error, + spec_state_test, with_electra_and_later, with_presets, ) @@ -10,8 +11,8 @@ get_validator_index_by_pubkey, ) from eth2spec.test.helpers.withdrawals import ( - set_eth1_withdrawal_credential_with_balance, set_compounding_withdrawal_credential, + set_eth1_withdrawal_credential_with_balance, ) # diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawals.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawals.py index b55c29722b..5d6f3190dd 100644 --- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawals.py +++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_withdrawals.py @@ -12,9 +12,9 @@ ) from eth2spec.test.helpers.withdrawals import ( prepare_expected_withdrawals, + prepare_pending_withdrawal, run_withdrawals_processing, 
set_compounding_withdrawal_credential_with_balance, - prepare_pending_withdrawal, ) diff --git a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_apply_pending_deposit.py b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_apply_pending_deposit.py index 046e342e43..de65e44140 100644 --- a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_apply_pending_deposit.py +++ b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_apply_pending_deposit.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( + always_bls, spec_state_test, with_electra_and_later, - always_bls, ) from eth2spec.test.helpers.deposits import ( prepare_pending_deposit, @@ -330,25 +330,6 @@ def test_apply_pending_deposit_top_up__less_effective_balance(spec, state): assert state.validators[validator_index].effective_balance == initial_effective_balance -@with_electra_and_later -@spec_state_test -def test_apply_pending_deposit_top_up__zero_balance(spec, state): - validator_index = 0 - amount = spec.MIN_ACTIVATION_BALANCE // 4 - pending_deposit = prepare_pending_deposit(spec, validator_index, amount, signed=True) - - initial_balance = 0 - initial_effective_balance = 0 - state.balances[validator_index] = initial_balance - state.validators[validator_index].effective_balance = initial_effective_balance - - yield from run_pending_deposit_applying(spec, state, pending_deposit, validator_index) - - assert state.balances[validator_index] == initial_balance + amount - # unchanged effective balance - assert state.validators[validator_index].effective_balance == initial_effective_balance - - @with_electra_and_later @spec_state_test @always_bls diff --git a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_process_pending_deposits.py b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_process_pending_deposits.py index acddd7fd00..57f713fd31 100644 --- a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_process_pending_deposits.py +++ b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/pending_deposits/test_process_pending_deposits.py @@ -1,22 +1,22 @@ -from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.context import ( + always_bls, + default_activation_threshold, + scaled_churn_balances_exceed_activation_exit_churn_limit, + single_phase, spec_state_test, - with_electra_and_later, - with_presets, spec_test, - single_phase, with_custom_state, - scaled_churn_balances_exceed_activation_exit_churn_limit, - default_activation_threshold, - always_bls, + with_electra_and_later, + with_presets, ) +from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.deposits import prepare_pending_deposit +from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with from eth2spec.test.helpers.state import ( - next_epoch_with_full_participation, advance_finality_to, + next_epoch_with_full_participation, set_full_participation, ) -from eth2spec.test.helpers.constants import MINIMAL def run_process_pending_deposits(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py index 5cb716db8a..4ab49662bf 100644 --- a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py +++ 
b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py @@ -1,17 +1,17 @@ -from eth2spec.test.helpers.epoch_processing import ( - run_epoch_processing_with, - run_epoch_processing_to, -) from eth2spec.test.context import ( spec_state_test, with_electra_and_later, ) +from eth2spec.test.helpers.epoch_processing import ( + run_epoch_processing_to, + run_epoch_processing_with, +) from eth2spec.test.helpers.state import ( next_epoch_with_full_participation, ) from eth2spec.test.helpers.withdrawals import ( - set_eth1_withdrawal_credential_with_balance, set_compounding_withdrawal_credential_with_balance, + set_eth1_withdrawal_credential_with_balance, ) # *********************** diff --git a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_registry_updates.py b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_registry_updates.py index 847f7ec5a6..72c82c5a26 100644 --- a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_registry_updates.py +++ b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_registry_updates.py @@ -1,10 +1,10 @@ -from eth2spec.test.helpers.deposits import mock_deposit -from eth2spec.test.helpers.state import next_epoch from eth2spec.test.context import spec_state_test, with_electra_and_later +from eth2spec.test.helpers.deposits import mock_deposit from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with +from eth2spec.test.helpers.state import next_epoch from eth2spec.test.helpers.withdrawals import ( - set_eth1_withdrawal_credential_with_balance, set_compounding_withdrawal_credential_with_balance, + set_eth1_withdrawal_credential_with_balance, ) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index b439c4ee99..10448ec2e8 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -1,27 +1,27 @@ from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( DENEB, ELECTRA, MINIMAL, ) -from eth2spec.test.helpers.state import ( - next_epoch, - next_epoch_via_block, -) from eth2spec.test.helpers.electra.fork import ( ELECTRA_FORK_TEST_META_TAGS, run_fork_test, ) +from eth2spec.test.helpers.state import ( + next_epoch, + next_epoch_via_block, +) +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[DENEB], other_phases=[ELECTRA]) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_random.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_random.py index 90c42bf95f..b165c783a7 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_random.py @@ -1,16 +1,15 @@ from random import Random from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags from 
eth2spec.test.helpers.constants import ( DENEB, ELECTRA, @@ -21,6 +20,7 @@ run_fork_test, ) from eth2spec.test.helpers.random import randomize_state +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[DENEB], other_phases=[ELECTRA]) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork_choice/test_deposit_with_reorg.py b/tests/core/pyspec/eth2spec/test/electra/fork_choice/test_deposit_with_reorg.py index 5f5009bd28..37b1c977aa 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork_choice/test_deposit_with_reorg.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork_choice/test_deposit_with_reorg.py @@ -1,33 +1,35 @@ -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, -) from eth2spec.test.context import ( - with_presets, spec_state_test, - with_electra_until_eip7732, + with_all_phases_from_except, + with_presets, ) -from eth2spec.test.helpers.execution_payload import ( - compute_el_block_hash_for_block, +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, ) -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, - next_slot, +from eth2spec.test.helpers.constants import ( + EIP7732, + ELECTRA, + MINIMAL, ) from eth2spec.test.helpers.deposits import ( prepare_deposit_request, ) +from eth2spec.test.helpers.execution_payload import ( + compute_el_block_hash_for_block, +) from eth2spec.test.helpers.fork_choice import ( + apply_next_slots_with_attestations, get_genesis_forkchoice_store_and_block, tick_and_add_block, - apply_next_slots_with_attestations, ) -from eth2spec.test.helpers.constants import ( - MINIMAL, +from eth2spec.test.helpers.state import ( + next_slot, + state_transition_and_sign_block, ) # TODO(jtraglia): In eip7732, how do we set execution requests in the payload envelope? -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_validator_deposit_with_multiple_epoch_transitions(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/electra/random/test_random.py b/tests/core/pyspec/eth2spec/test/electra/random/test_random.py index 95bc7af74b..22a63b41f4 100644 --- a/tests/core/pyspec/eth2spec/test/electra/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/electra/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. 
""" -from eth2spec.test.helpers.constants import ELECTRA from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import ELECTRA from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py index af88357ce2..c5c8cf3f05 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py @@ -1,41 +1,45 @@ -from eth2spec.test.helpers.constants import MINIMAL -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, - transition_unsigned_block, -) from eth2spec.test.context import ( - spec_test, - spec_state_test, - with_electra_until_eip7732, + default_activation_threshold, + scaled_churn_balances_exceed_activation_exit_churn_limit, single_phase, - with_presets, + spec_state_test, + spec_test, + with_all_phases_from_except, with_custom_state, - scaled_churn_balances_exceed_activation_exit_churn_limit, - default_activation_threshold, + with_presets, +) +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, + transition_unsigned_block, ) from eth2spec.test.helpers.bls_to_execution_changes import ( get_signed_address_change, ) +from eth2spec.test.helpers.constants import ( + EIP7732, + ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.deposits import ( + prepare_deposit_request, +) from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash_for_block, ) -from eth2spec.test.helpers.voluntary_exits import ( - prepare_signed_exits, -) from eth2spec.test.helpers.state import ( state_transition_and_sign_block, transition_to, ) +from eth2spec.test.helpers.voluntary_exits import ( + prepare_signed_exits, +) from eth2spec.test.helpers.withdrawals import ( - set_eth1_withdrawal_credential_with_balance, set_compounding_withdrawal_credential_with_balance, -) -from eth2spec.test.helpers.deposits import ( - prepare_deposit_request, + set_eth1_withdrawal_credential_with_balance, ) -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_basic_el_withdrawal_request(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -64,7 +68,7 @@ def test_basic_el_withdrawal_request(spec, state): assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_basic_btec_and_el_withdrawal_request_in_same_block(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -109,7 +113,7 @@ def test_basic_btec_and_el_withdrawal_request_in_same_block(spec, state): assert is_execution_address and is_correct_source_address -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_basic_btec_before_el_withdrawal_request(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -158,7 +162,7 @@ def test_basic_btec_before_el_withdrawal_request(spec, state): assert state.validators[validator_index].exit_epoch < 
spec.FAR_FUTURE_EPOCH -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_cl_exit_and_el_withdrawal_request_in_same_block(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -191,7 +195,7 @@ def test_cl_exit_and_el_withdrawal_request_in_same_block(spec, state): assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_multiple_el_partial_withdrawal_requests_same_validator(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -231,7 +235,7 @@ def test_multiple_el_partial_withdrawal_requests_same_validator(spec, state): assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_multiple_el_partial_withdrawal_requests_different_validator(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -273,7 +277,7 @@ def test_multiple_el_partial_withdrawal_requests_different_validator(spec, state assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_withdrawal_and_withdrawal_request_same_validator(spec, state): # Give a validator an excess balance @@ -317,7 +321,7 @@ def test_withdrawal_and_withdrawal_request_same_validator(spec, state): assert len(state.pending_partial_withdrawals) == 0 -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_withdrawal_and_switch_to_compounding_request_same_validator(spec, state): # Give a validator an excess balance @@ -365,7 +369,7 @@ def test_withdrawal_and_switch_to_compounding_request_same_validator(spec, state assert len(state.pending_deposits) == 0 -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_deposit_request_with_same_pubkey_different_withdrawal_credentials(spec, state): # signify the eth1 bridge deprecation @@ -423,7 +427,7 @@ def test_deposit_request_with_same_pubkey_different_withdrawal_credentials(spec, ) -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_deposit_request_max_per_payload(spec, state): # signify the eth1 bridge deprecation @@ -466,7 +470,7 @@ def test_deposit_request_max_per_payload(spec, state): ) -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @with_presets([MINIMAL], "need sufficient consolidation churn limit") @with_custom_state( balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit, @@ -567,7 +571,7 @@ def test_withdrawal_and_consolidation_effective_balance_updates(spec, state): assert state.balances[b_index] < state.validators[b_index].effective_balance -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @with_presets([MINIMAL], "need sufficient consolidation churn limit") @with_custom_state( balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit, @@ -628,7 +632,7 @@ def test_consolidation_requests_when_pending_consolidation_queue_is_full(spec, s assert len(state.pending_consolidations) == spec.PENDING_CONSOLIDATIONS_LIMIT -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @with_presets([MINIMAL], "need sufficient consolidation churn limit") 
@with_custom_state( balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit, @@ -708,7 +712,7 @@ def test_switch_to_compounding_requests_when_pending_consolidation_queue_is_full assert spec.has_compounding_withdrawal_credential(state.validators[source_index]) -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @spec_state_test def test_switch_to_compounding_requests_when_too_little_consolidation_churn_limit(spec, state): # Move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit @@ -773,7 +777,7 @@ def test_switch_to_compounding_requests_when_too_little_consolidation_churn_limi assert spec.has_compounding_withdrawal_credential(state.validators[source_index]) -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @with_presets([MINIMAL], "Keep the size of the test reasonable") @spec_state_test def test_withdrawal_requests_when_pending_withdrawal_queue_is_full(spec, state): @@ -832,7 +836,7 @@ def test_withdrawal_requests_when_pending_withdrawal_queue_is_full(spec, state): assert withdrawal_request_1.amount != withdrawal_request_2.amount -@with_electra_until_eip7732 +@with_all_phases_from_except(ELECTRA, [EIP7732]) @with_presets([MINIMAL], "need sufficient consolidation churn limit") @with_custom_state( balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit, diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py index 8a76550aad..4311b0fdb4 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py @@ -1,10 +1,10 @@ -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, -) from eth2spec.test.context import ( + ELECTRA, spec_state_test, with_phases, - ELECTRA, +) +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, ) from eth2spec.test.helpers.deposits import ( build_deposit_data, diff --git a/tests/core/pyspec/eth2spec/test/electra/transition/test_operations.py b/tests/core/pyspec/eth2spec/test/electra/transition/test_operations.py index f096fc6335..3bb31c2039 100644 --- a/tests/core/pyspec/eth2spec/test/electra/transition/test_operations.py +++ b/tests/core/pyspec/eth2spec/test/electra/transition/test_operations.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( - ForkMeta, always_bls, + ForkMeta, with_fork_metas, with_presets, ) @@ -13,7 +13,6 @@ run_transition_with_operation, ) - # # DepositRequest # diff --git a/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_basic.py b/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_basic.py index 040b301ff6..7c4cbab771 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_basic.py @@ -1,27 +1,27 @@ from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( ELECTRA, FULU, MINIMAL, ) -from eth2spec.test.helpers.state import ( - next_epoch, - next_epoch_via_block, -) from eth2spec.test.helpers.fulu.fork import ( FULU_FORK_TEST_META_TAGS, run_fork_test, ) +from eth2spec.test.helpers.state import 
( + next_epoch, + next_epoch_via_block, +) +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[ELECTRA], other_phases=[FULU]) diff --git a/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_random.py b/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_random.py index 73ca3f0a4c..c1d3060f1a 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/fulu/fork/test_fulu_fork_random.py @@ -1,16 +1,15 @@ from random import Random from eth2spec.test.context import ( - with_phases, + large_validator_set, + low_balances, + misc_balances, + spec_test, with_custom_state, + with_phases, with_presets, - spec_test, with_state, - low_balances, - misc_balances, - large_validator_set, ) -from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( ELECTRA, FULU, @@ -21,6 +20,7 @@ run_fork_test, ) from eth2spec.test.helpers.random import randomize_state +from eth2spec.test.utils import with_meta_tags @with_phases(phases=[ELECTRA], other_phases=[FULU]) diff --git a/tests/core/pyspec/eth2spec/test/fulu/merkle_proof/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/fulu/merkle_proof/test_single_merkle_proof.py index 3ace0576f0..3b4930cf06 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/merkle_proof/test_single_merkle_proof.py +++ b/tests/core/pyspec/eth2spec/test/fulu/merkle_proof/test_single_merkle_proof.py @@ -1,10 +1,17 @@ import random +from eth2spec.debug.random_value import ( + get_random_ssz_object, + RandomizationMode, +) from eth2spec.test.context import ( spec_state_test, with_fulu_and_later, with_test_suite_name, ) +from eth2spec.test.helpers.blob import ( + get_sample_blob_tx, +) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, sign_block, @@ -12,13 +19,6 @@ from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, ) -from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, -) -from eth2spec.debug.random_value import ( - RandomizationMode, - get_random_ssz_object, -) def _run_blob_kzg_commitments_merkle_proof_test(spec, state, rng=None, blob_count=1): @@ -82,7 +82,7 @@ def test_blob_kzg_commitments_merkle_proof__random_block_1(spec, state): @with_fulu_and_later @spec_state_test def test_blob_kzg_commitments_merkle_proof__multiple_blobs(spec, state): - blob_count = spec.config.MAX_BLOBS_PER_BLOCK_FULU // 2 + blob_count = spec.get_max_blobs_per_block(spec.get_current_epoch(state)) // 2 rng = random.Random(2222) yield from _run_blob_kzg_commitments_merkle_proof_test( spec, state, rng=rng, blob_count=blob_count @@ -93,7 +93,7 @@ def test_blob_kzg_commitments_merkle_proof__multiple_blobs(spec, state): @with_fulu_and_later @spec_state_test def test_blob_kzg_commitments_merkle_proof__max_blobs(spec, state): - max_blobs = spec.config.MAX_BLOBS_PER_BLOCK_FULU + max_blobs = spec.get_max_blobs_per_block(spec.get_current_epoch(state)) rng = random.Random(3333) yield from _run_blob_kzg_commitments_merkle_proof_test( spec, state, rng=rng, blob_count=max_blobs diff --git a/tests/core/pyspec/eth2spec/test/fulu/random/test_random.py b/tests/core/pyspec/eth2spec/test/fulu/random/test_random.py index 66ecbc688c..f7c9ce51e5 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/fulu/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. 
""" -from eth2spec.test.helpers.constants import FULU from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import FULU from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/das/test_das.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/das/test_das.py index b056c86c6d..0f19548ab5 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/das/test_das.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/das/test_das.py @@ -1,8 +1,9 @@ import random + from eth2spec.test.context import ( expect_assertion_error, - spec_test, single_phase, + spec_test, with_config_overrides, with_fulu_and_later, ) diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/polynomial_commitments/test_polynomial_commitments.py index de21134ecd..d0928df0b3 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -1,8 +1,9 @@ import random + from eth2spec.test.context import ( - spec_test, - single_phase, expect_assertion_error, + single_phase, + spec_test, with_fulu_and_later, ) from eth2spec.test.helpers.blob import ( diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py index c765f4ff25..c96a202969 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py @@ -1,7 +1,13 @@ from eth2spec.test.context import ( + expect_assertion_error, single_phase, + spec_state_test, spec_test, with_fulu_and_later, + with_presets, +) +from eth2spec.test.helpers.constants import ( + MAINNET, ) @@ -30,5 +36,10 @@ def test_polynomial_commitments_sampling(spec): @with_fulu_and_later @spec_test @single_phase -def test_networking(spec): - assert spec.config.MAX_BLOBS_PER_BLOCK_FULU <= spec.MAX_BLOB_COMMITMENTS_PER_BLOCK +@with_presets([MAINNET], reason="to have fork epoch number") +def test_blob_schedule(spec): + for entry in spec.config.BLOB_SCHEDULE: + # Check that all epochs are post-Deneb + assert entry["EPOCH"] >= spec.config.DENEB_FORK_EPOCH + # Check that all blob counts are less than the limit + assert entry["MAX_BLOBS_PER_BLOCK"] <= spec.MAX_BLOB_COMMITMENTS_PER_BLOCK diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_custody.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_custody.py index 38b49513b0..1588451c6d 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_custody.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_custody.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( expect_assertion_error, - spec_test, single_phase, + spec_test, with_fulu_and_later, ) diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_networking.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_networking.py index 6a0a182db1..569dd7e14d 100644 --- 
a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_networking.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_networking.py @@ -1,13 +1,17 @@ import random + +from eth2spec.debug.random_value import ( + get_random_ssz_object, + RandomizationMode, +) from eth2spec.test.context import ( single_phase, spec_state_test, spec_test, with_fulu_and_later, ) -from eth2spec.debug.random_value import ( - RandomizationMode, - get_random_ssz_object, +from eth2spec.test.helpers.blob import ( + get_sample_blob_tx, ) from eth2spec.test.helpers.block import ( sign_block, @@ -15,10 +19,6 @@ from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, ) -from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, -) - # Helper functions diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_security.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_security.py index 1c16c25a24..69c2ce35a8 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_security.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_security.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( - spec_test, single_phase, + spec_test, with_fulu_and_later, with_phases, ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index efc23e567e..3ac6071a98 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -1,17 +1,17 @@ -from lru import LRU - from typing import List +from lru import LRU + from eth2spec.test.context import expect_assertion_error +from eth2spec.test.helpers.block import build_empty_block_for_next_slot +from eth2spec.test.helpers.forks import is_post_altair, is_post_deneb, is_post_electra +from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.state import ( - payload_state_transition_no_store, - state_transition_and_sign_block, next_epoch, next_slot, + payload_state_transition_no_store, + state_transition_and_sign_block, ) -from eth2spec.test.helpers.block import build_empty_block_for_next_slot -from eth2spec.test.helpers.forks import is_post_altair, is_post_deneb, is_post_electra -from eth2spec.test.helpers.keys import privkeys from eth2spec.utils import bls from eth2spec.utils.ssz.ssz_typing import Bitlist diff --git a/tests/core/pyspec/eth2spec/test/helpers/blob.py b/tests/core/pyspec/eth2spec/test/helpers/blob.py index 1b1272ddc5..f4428685d2 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/blob.py +++ b/tests/core/pyspec/eth2spec/test/helpers/blob.py @@ -1,6 +1,7 @@ import random + from rlp import encode, Serializable -from rlp.sedes import Binary, CountableList, List as RLPList, big_endian_int, binary +from rlp.sedes import big_endian_int, Binary, binary, CountableList, List as RLPList from eth2spec.test.helpers.forks import ( is_post_electra, @@ -118,9 +119,9 @@ def get_sample_blob_tx(spec, blob_count=1, rng=random.Random(5566), is_valid_blo return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs -def get_max_blob_count(spec): +def get_max_blob_count(spec, state): if is_post_fulu(spec): - return spec.config.MAX_BLOBS_PER_BLOCK_FULU + return spec.get_max_blobs_per_block(spec.get_current_epoch(state)) elif is_post_electra(spec): return spec.config.MAX_BLOBS_PER_BLOCK_ELECTRA else: diff --git a/tests/core/pyspec/eth2spec/test/helpers/block.py b/tests/core/pyspec/eth2spec/test/helpers/block.py index d9787aff73..b7e355c1aa 100644 --- 
a/tests/core/pyspec/eth2spec/test/helpers/block.py +++ b/tests/core/pyspec/eth2spec/test/helpers/block.py @@ -1,33 +1,36 @@ -from eth2spec.test.helpers.execution_payload import build_empty_execution_payload -from eth2spec.test.helpers.execution_payload import build_empty_signed_execution_payload_header -from eth2spec.test.helpers.forks import ( - is_post_eip7441, - is_post_altair, - is_post_bellatrix, - is_post_eip7732, - is_post_electra, -) -from eth2spec.test.helpers.keys import privkeys, whisk_ks_initial, whisk_ks_final -from eth2spec.utils import bls -from eth2spec.utils.bls import only_with_bls -from eth2spec.utils.ssz.ssz_impl import hash_tree_root from curdleproofs import ( + GenerateWhiskShuffleProof, GenerateWhiskTrackerProof, WhiskTracker, - GenerateWhiskShuffleProof, ) -from py_ecc.optimized_bls12_381.optimized_curve import G1, multiply -from py_ecc.typing import Optimized_Field, Optimized_Point3D +from py_arkworks_bls12381 import Scalar from py_ecc.bls.g2_primitives import ( G1_to_pubkey as py_ecc_G1_to_bytes48, pubkey_to_G1 as py_ecc_bytes48_to_G1, ) +from py_ecc.optimized_bls12_381.optimized_curve import G1, multiply +from py_ecc.typing import Optimized_Field, Optimized_Point3D + from eth2spec.test.helpers.eip7441 import ( compute_whisk_tracker_and_commitment, is_first_proposal, resolve_known_tracker, ) -from py_arkworks_bls12381 import Scalar +from eth2spec.test.helpers.execution_payload import ( + build_empty_execution_payload, + build_empty_signed_execution_payload_header, +) +from eth2spec.test.helpers.forks import ( + is_post_altair, + is_post_bellatrix, + is_post_eip7441, + is_post_eip7732, + is_post_electra, +) +from eth2spec.test.helpers.keys import privkeys, whisk_ks_final, whisk_ks_initial +from eth2spec.utils import bls +from eth2spec.utils.bls import only_with_bls +from eth2spec.utils.ssz.ssz_impl import hash_tree_root PointProjective = Optimized_Point3D[Optimized_Field] @@ -38,11 +41,6 @@ def get_proposer_index_maybe(spec, state, slot, proposer_index=None): if slot == state.slot: proposer_index = spec.get_beacon_proposer_index(state) else: - if spec.compute_epoch_at_slot(state.slot) + 1 > spec.compute_epoch_at_slot(slot): - print( - "warning: block slot far away, and no proposer index manually given." - " Signing block is slow due to transition for proposer index calculation." 
- ) # use stub state to get proposer index of future slot stub_state = state.copy() if stub_state.slot < slot: diff --git a/tests/core/pyspec/eth2spec/test/helpers/bls_to_execution_changes.py b/tests/core/pyspec/eth2spec/test/helpers/bls_to_execution_changes.py index fe70e4b146..c2ad9f75b1 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/bls_to_execution_changes.py +++ b/tests/core/pyspec/eth2spec/test/helpers/bls_to_execution_changes.py @@ -1,5 +1,5 @@ +from eth2spec.test.helpers.keys import privkeys, pubkey_to_privkey, pubkeys from eth2spec.utils import bls -from eth2spec.test.helpers.keys import pubkeys, privkeys, pubkey_to_privkey def get_signed_address_change( diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py index 0ba083f5f1..aa937980ee 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -1,5 +1,4 @@ -from .typing import SpecForkName, PresetBaseName - +from .typing import PresetBaseName, SpecForkName # # SpecForkName @@ -24,7 +23,7 @@ # # The forks that are deployed on Mainnet -MAINNET_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB) +MAINNET_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA) LATEST_FORK = MAINNET_FORKS[-1] # The forks that pytest can run with. # Note: when adding a new fork here, all tests from previous forks with decorator `with_X_and_later` @@ -32,15 +31,15 @@ ALL_PHASES = ( # Formal forks *MAINNET_FORKS, - ELECTRA, FULU, # Experimental patches EIP7732, + EIP7805, ) # The forks that have light client specs -LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0], ELECTRA) +LIGHT_CLIENT_TESTING_FORKS = [item for item in MAINNET_FORKS if item != PHASE0] # The forks that output to the test vectors. 
-TESTGEN_FORKS = (*MAINNET_FORKS, ELECTRA, FULU, EIP7441, EIP7732) +TESTGEN_FORKS = (*MAINNET_FORKS, FULU, EIP7732, EIP7805) # Forks allowed in the test runner `--fork` flag, to fail fast in case of typos ALLOWED_TEST_RUNNER_FORKS = (*ALL_PHASES, EIP7441) @@ -68,6 +67,7 @@ BELLATRIX: CAPELLA, CAPELLA: DENEB, DENEB: ELECTRA, + ELECTRA: FULU, } ALL_PRE_POST_FORKS = POST_FORK_OF.items() diff --git a/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py b/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py index 123b16f707..43bed9ec10 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py +++ b/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py @@ -2,7 +2,6 @@ DENEB, ) - DENEB_FORK_TEST_META_TAGS = { "fork": DENEB, } diff --git a/tests/core/pyspec/eth2spec/test/helpers/deposits.py b/tests/core/pyspec/eth2spec/test/helpers/deposits.py index 9a168fe3ea..750724fc67 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/deposits.py +++ b/tests/core/pyspec/eth2spec/test/helpers/deposits.py @@ -1,10 +1,14 @@ from random import Random from eth2spec.test.context import expect_assertion_error +from eth2spec.test.helpers.epoch_processing import ( + run_epoch_processing_from, + run_epoch_processing_to, + run_process_slots_up_to_epoch_boundary, +) from eth2spec.test.helpers.forks import is_post_altair, is_post_electra -from eth2spec.test.helpers.keys import pubkeys, privkeys +from eth2spec.test.helpers.keys import privkeys, pubkeys from eth2spec.test.helpers.state import get_balance -from eth2spec.test.helpers.epoch_processing import run_epoch_processing_to from eth2spec.utils import bls from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof from eth2spec.utils.ssz.ssz_impl import hash_tree_root @@ -424,6 +428,8 @@ def run_pending_deposit_applying(spec, state, pending_deposit, validator_index, Enqueue ``pending_deposit`` and run epoch processing with ``process_pending_deposits``, yielding: - pre-state ('pre') - post-state ('post'). + - pre-epoch-state ('pre_epoch'), state before epoch transition + - post-epoch-state ('post_epoch'), state after epoch transition """ assert is_post_electra(spec) @@ -439,10 +445,6 @@ def run_pending_deposit_applying(spec, state, pending_deposit, validator_index, # append pending deposit state.pending_deposits.append(pending_deposit) - # run to the very beginning of the epoch processing to avoid - # any updates to the validator registry (e.g. 
ejections)
-    run_epoch_processing_to(spec, state, "process_justification_and_finalization")
-
     pre_validator_count = len(state.validators)
     pre_balance = 0
     pre_effective_balance = 0
@@ -453,12 +455,18 @@
         pre_balance = get_balance(state, validator_index)
         pre_effective_balance = state.validators[validator_index].effective_balance
-    yield "pre", state
+    run_process_slots_up_to_epoch_boundary(spec, state)
+    yield "pre_epoch", state
+    run_epoch_processing_to(spec, state, "process_pending_deposits", enable_slots_processing=False)
+    yield "pre", state
     spec.process_pending_deposits(state)
-
     yield "post", state
+    continue_state = state.copy()
+    run_epoch_processing_from(spec, continue_state, "process_pending_deposits")
+    yield "post_epoch", continue_state
+
     if effective:
         if is_top_up:
             # Top-ups don't add validators
diff --git a/tests/core/pyspec/eth2spec/test/helpers/eip7441.py b/tests/core/pyspec/eth2spec/test/helpers/eip7441.py
index 9a0ea6a78c..885c983f73 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/eip7441.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/eip7441.py
@@ -1,9 +1,10 @@
-from typing import Tuple, Optional
-from eth_typing import BLSPubkey
+from typing import Optional, Tuple
+
 from curdleproofs import GenerateWhiskTrackerProof, WhiskTracker
-from eth2spec.test.helpers.keys import whisk_ks_initial
+from eth_typing import BLSPubkey
 from py_arkworks_bls12381 import G1Point, Scalar
+from eth2spec.test.helpers.keys import whisk_ks_initial
 # Map of validator index to initial WhiskTracker (r = 1, k = index)
 whisk_initial_tracker_cache_by_index = {}
diff --git a/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py b/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py
index 40dd1d4184..b7274cd441 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py
@@ -2,7 +2,6 @@
     ELECTRA,
 )
-
 ELECTRA_FORK_TEST_META_TAGS = {
     "fork": ELECTRA,
 }
diff --git a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
index 24f3a5b5be..3cc791e5fe 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
@@ -37,10 +37,26 @@
 ]
-def run_epoch_processing_to(spec, state, process_name: str):
+def run_epoch_processing_to(spec, state, process_name: str, enable_slots_processing=True):
     """
     Processes to the next epoch transition, up to, but not including, the sub-transition named ``process_name``
     """
+    if enable_slots_processing:
+        run_process_slots_up_to_epoch_boundary(spec, state)
+
+    # process components of epoch transition before final-updates
+    for name in get_process_calls(spec):
+        if name == process_name:
+            break
+        # only run when present. Later phases introduce more to the epoch-processing.
+        if hasattr(spec, name):
+            getattr(spec, name)(state)
+
+
+def run_process_slots_up_to_epoch_boundary(spec, state):
+    """
+    Processes slots until the next epoch transition
+    """
     slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
     # transition state to slot before epoch state transition
@@ -50,12 +66,20 @@
     # start transitioning, do one slot update before the epoch itself.
     spec.process_slot(state)
-    # process components of epoch transition before final-updates
+
+def run_epoch_processing_from(spec, state, process_name: str):
+    """
+    Processes to the next epoch transition, from, but not including, the sub-transition named ``process_name``
+    """
+    assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0
+
+    processing = False
     for name in get_process_calls(spec):
         if name == process_name:
-            break
+            processing = True
+            continue
         # only run when present. Later phases introduce more to the epoch-processing.
-        if hasattr(spec, name):
+        if processing and hasattr(spec, name):
             getattr(spec, name)(state)
@@ -64,8 +88,16 @@
     Processes to the next epoch transition, up to and including the sub-transition named ``process_name``
     - pre-state ('pre'), state before calling ``process_name``
     - post-state ('post'), state after calling ``process_name``
+    - pre-epoch-state ('pre_epoch'), state before epoch transition
+    - post-epoch-state ('post_epoch'), state after epoch transition
+    The state passed by reference will be modified to be the ``process_name``post state.
     """
-    run_epoch_processing_to(spec, state, process_name)
+    run_process_slots_up_to_epoch_boundary(spec, state)
+    yield "pre_epoch", state
+    run_epoch_processing_to(spec, state, process_name, enable_slots_processing=False)
     yield "pre", state
     getattr(spec, process_name)(state)
     yield "post", state
+    continue_state = state.copy()
+    run_epoch_processing_from(spec, continue_state, process_name)
+    yield "post_epoch", continue_state
diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
index c355fa7df6..c1e6b2b4ab 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
@@ -1,19 +1,20 @@
-from eth_hash.auto import keccak
 from hashlib import sha256
-from trie import HexaryTrie
+
+from eth_hash.auto import keccak
 from rlp import encode
 from rlp.sedes import big_endian_int, Binary, List
+from trie import HexaryTrie
-from eth2spec.test.helpers.keys import privkeys
-from eth2spec.utils.ssz.ssz_impl import hash_tree_root
 from eth2spec.debug.random_value import get_random_bytes_list
-from eth2spec.test.helpers.withdrawals import get_expected_withdrawals
 from eth2spec.test.helpers.forks import (
     is_post_capella,
     is_post_deneb,
-    is_post_electra,
     is_post_eip7732,
+    is_post_electra,
 )
+from eth2spec.test.helpers.keys import privkeys
+from eth2spec.test.helpers.withdrawals import get_expected_withdrawals
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root
 def get_execution_payload_header(spec, state, execution_payload):
diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
index 2e2dbd9b5c..f440913306 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
@@ -1,13 +1,14 @@
-from typing import NamedTuple, Sequence, Any
+from typing import Any, NamedTuple, Sequence
 from eth_utils import encode_hex
+
 from eth2spec.test.exceptions import BlockNotFoundException
-from eth2spec.test.helpers.forks import is_post_eip7732
 from eth2spec.test.helpers.attestations import (
     next_epoch_with_attestations,
     next_slots_with_attestations,
     state_transition_with_full_block,
 )
+from eth2spec.test.helpers.forks import is_post_eip7732
 from eth2spec.test.helpers.state import (
payload_state_transition, payload_state_transition_no_store, diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py index 3579bd1fd7..83e0f38178 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py @@ -1,39 +1,44 @@ -from enum import Enum, auto +from enum import auto, Enum -from eth2spec.test.helpers.attester_slashings import ( - get_valid_attester_slashing_by_indices, -) from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, state_transition_with_full_block, ) +from eth2spec.test.helpers.attester_slashings import ( + get_valid_attester_slashing_by_indices, +) from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, build_empty_block, + build_empty_block_for_next_slot, sign_block, ) from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change +from eth2spec.test.helpers.consolidations import ( + prepare_switch_to_compounding_request, +) from eth2spec.test.helpers.constants import ( + DENEB, PHASE0, POST_FORK_OF, PREVIOUS_FORK_OF, - DENEB, ) from eth2spec.test.helpers.deposits import ( - prepare_state_and_deposit, prepare_deposit_request, + prepare_state_and_deposit, ) from eth2spec.test.helpers.execution_payload import ( + build_empty_execution_payload, + compute_el_block_hash, compute_el_block_hash_for_block, ) -from eth2spec.test.helpers.proposer_slashings import ( - get_valid_proposer_slashing, -) from eth2spec.test.helpers.forks import ( get_next_fork_transition, is_post_bellatrix, - is_post_electra, is_post_eip7732, + is_post_electra, +) +from eth2spec.test.helpers.proposer_slashings import ( + get_valid_proposer_slashing, ) from eth2spec.test.helpers.state import ( next_slot, @@ -46,13 +51,6 @@ from eth2spec.test.helpers.withdrawals import ( prepare_withdrawal_request, ) -from eth2spec.test.helpers.consolidations import ( - prepare_switch_to_compounding_request, -) -from eth2spec.test.helpers.execution_payload import ( - build_empty_execution_payload, - compute_el_block_hash, -) class OperationType(Enum): diff --git a/tests/core/pyspec/eth2spec/test/helpers/forks.py b/tests/core/pyspec/eth2spec/test/helpers/forks.py index b422f7f0e1..62eeb23ce5 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/forks.py +++ b/tests/core/pyspec/eth2spec/test/helpers/forks.py @@ -1,13 +1,13 @@ from .constants import ( - PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, - ELECTRA, - FULU, EIP7441, EIP7732, + ELECTRA, + FULU, + PHASE0, PREVIOUS_FORK_OF, ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/fulu/fork.py b/tests/core/pyspec/eth2spec/test/helpers/fulu/fork.py index eac4e25b01..0f2ad6c344 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fulu/fork.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fulu/fork.py @@ -2,7 +2,6 @@ FULU, ) - FULU_FORK_TEST_META_TAGS = { "fork": FULU, } diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py index 1382a6eb07..ab95b9a098 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py +++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py @@ -1,8 +1,13 @@ from hashlib import sha256 + from eth2spec.test.helpers.constants import ( PHASE0, PREVIOUS_FORK_OF, ) +from eth2spec.test.helpers.eip7441 import ( + compute_whisk_initial_k_commitment_cached, + compute_whisk_initial_tracker_cached, +) from eth2spec.test.helpers.execution_payload import ( 
compute_el_header_block_hash, ) @@ -11,15 +16,11 @@ is_post_bellatrix, is_post_capella, is_post_deneb, - is_post_electra, is_post_eip7441, is_post_eip7732, + is_post_electra, ) from eth2spec.test.helpers.keys import pubkeys -from eth2spec.test.helpers.eip7441 import ( - compute_whisk_initial_tracker_cached, - compute_whisk_initial_k_commitment_cached, -) def build_mock_validator(spec, i: int, balance: int): diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py index 101050522d..c15c45e06b 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py @@ -1,3 +1,5 @@ +from math import floor + from eth2spec.test.helpers.constants import ( CAPELLA, DENEB, @@ -11,7 +13,6 @@ compute_aggregate_sync_committee_signature, compute_committee_indices, ) -from math import floor def latest_finalized_root_gindex(spec): diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py index 125bff9df2..dbd08d4c62 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py @@ -1,7 +1,8 @@ -from typing import Any, Dict, List, Set from dataclasses import dataclass +from typing import Any, Dict, List, Set from eth_utils import encode_hex + from eth2spec.test.helpers.constants import ( ALTAIR, ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py index 0b0ff0811f..8a13fe34b3 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py @@ -1,6 +1,7 @@ from typing import Any, Dict, List from eth_utils import encode_hex + from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, state_transition_with_full_block, @@ -18,8 +19,8 @@ from eth2spec.test.helpers.light_client import ( get_sync_aggregate, upgrade_lc_bootstrap_to_new_spec, - upgrade_lc_update_to_new_spec, upgrade_lc_store_to_new_spec, + upgrade_lc_update_to_new_spec, ) from eth2spec.test.helpers.state import ( next_slots, diff --git a/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py b/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py index cdbeac2ae0..314ac6a3be 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/multi_operations.py @@ -1,22 +1,22 @@ from random import Random +from eth2spec.test.helpers.attestations import get_max_attestations, get_valid_attestation +from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing_by_indices +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change +from eth2spec.test.helpers.deposits import build_deposit, deposit_from_context from eth2spec.test.helpers.keys import privkeys, pubkeys +from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing from eth2spec.test.helpers.state import ( state_transition_and_sign_block, ) -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, -) from eth2spec.test.helpers.sync_committee import ( - compute_committee_indices, compute_aggregate_sync_committee_signature, + compute_committee_indices, ) -from 
eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing -from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing_by_indices -from eth2spec.test.helpers.attestations import get_valid_attestation, get_max_attestations -from eth2spec.test.helpers.deposits import build_deposit, deposit_from_context from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits -from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change def run_slash_and_exit(spec, state, slash_index, exit_index, valid=True): diff --git a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py index ad8cc20096..f18ee689dd 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py +++ b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py @@ -7,10 +7,10 @@ from eth_utils import encode_hex -from eth2spec.utils.ssz.ssz_typing import Bytes32 from eth2spec.test.helpers.fork_choice import ( add_block, ) +from eth2spec.utils.ssz.ssz_typing import Bytes32 class PayloadStatusV1StatusAlias(Enum): diff --git a/tests/core/pyspec/eth2spec/test/helpers/pow_block.py b/tests/core/pyspec/eth2spec/test/helpers/pow_block.py index 104730b536..6a92c7703c 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/pow_block.py +++ b/tests/core/pyspec/eth2spec/test/helpers/pow_block.py @@ -1,4 +1,5 @@ from random import Random + from eth2spec.utils.ssz.ssz_typing import uint256 diff --git a/tests/core/pyspec/eth2spec/test/helpers/rewards.py b/tests/core/pyspec/eth2spec/test/helpers/rewards.py index 6b33bf5b72..61728ad88d 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/rewards.py +++ b/tests/core/pyspec/eth2spec/test/helpers/rewards.py @@ -1,21 +1,22 @@ from random import Random + from lru import LRU from eth2spec.phase0.mainnet import VALIDATOR_REGISTRY_LIMIT # equal everywhere, fine to import -from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix -from eth2spec.test.helpers.state import ( - next_epoch, +from eth2spec.test.helpers.attestations import ( + cached_prepare_state_with_attestations, ) +from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix from eth2spec.test.helpers.random import ( - set_some_new_deposits, exit_random_validators, - slash_random_validators, randomize_state, + set_some_new_deposits, + slash_random_validators, ) -from eth2spec.test.helpers.attestations import ( - cached_prepare_state_with_attestations, +from eth2spec.test.helpers.state import ( + next_epoch, ) -from eth2spec.utils.ssz.ssz_typing import Container, uint64, List +from eth2spec.utils.ssz.ssz_typing import Container, List, uint64 class Deltas(Container): diff --git a/tests/core/pyspec/eth2spec/test/helpers/specs.py b/tests/core/pyspec/eth2spec/test/helpers/specs.py index 811926415a..0785f5d35e 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/specs.py +++ b/tests/core/pyspec/eth2spec/test/helpers/specs.py @@ -1,19 +1,19 @@ from typing import ( Dict, ) + from .constants import ( - MINIMAL, - MAINNET, ALL_PHASES, EIP7441, + MAINNET, + MINIMAL, ) from .typing import ( PresetBaseName, - SpecForkName, Spec, + SpecForkName, ) - # NOTE: special case like `ALLOWED_TEST_RUNNER_FORKS` ALL_EXECUTABLE_SPEC_NAMES = ALL_PHASES + (EIP7441,) diff --git a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py index c053714e3f..8237e0ee39 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py +++ 
b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py @@ -3,11 +3,11 @@ from eth2spec.test.context import ( expect_assertion_error, ) -from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) from eth2spec.test.helpers.block_processing import run_block_processing_to +from eth2spec.test.helpers.keys import privkeys from eth2spec.utils import bls diff --git a/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py b/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py index d68e1bf0cc..aa6a31885c 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py +++ b/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py @@ -1,8 +1,9 @@ from random import Random -from eth2spec.utils import bls + from eth2spec.test.context import expect_assertion_error from eth2spec.test.helpers.forks import is_post_deneb from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils import bls def prepare_signed_exits(spec, state, indices, fork_version=None): diff --git a/tests/core/pyspec/eth2spec/test/helpers/withdrawals.py b/tests/core/pyspec/eth2spec/test/helpers/withdrawals.py index ab9c7fcebe..c16a61868c 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/withdrawals.py +++ b/tests/core/pyspec/eth2spec/test/helpers/withdrawals.py @@ -1,5 +1,4 @@ -from eth2spec.test.helpers.forks import is_post_electra -from eth2spec.test.helpers.forks import is_post_eip7732 +from eth2spec.test.helpers.forks import is_post_eip7732, is_post_electra def get_expected_withdrawals(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py index 00bc783ce9..3546d18328 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py @@ -1,23 +1,23 @@ from eth2spec.test.context import ( - spec_state_test, always_bls, + low_balances, never_bls, - with_all_phases, + single_phase, + spec_state_test, spec_test, - low_balances, + with_all_phases, with_custom_state, - single_phase, ) from eth2spec.test.helpers.attestations import ( - run_attestation_processing, + compute_max_inclusion_slot, get_valid_attestation, + run_attestation_processing, sign_aggregate_attestation, sign_attestation, - compute_max_inclusion_slot, ) from eth2spec.test.helpers.state import ( - next_slots, next_epoch_via_block, + next_slots, transition_to_slot_via_block, ) from eth2spec.utils.ssz.ssz_typing import Bitlist diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py index 1d353354c4..70799da218 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py @@ -1,23 +1,23 @@ from random import Random from eth2spec.test.context import ( - spec_state_test, - expect_assertion_error, always_bls, - with_all_phases, - with_custom_state, - spec_test, - single_phase, + expect_assertion_error, low_balances, misc_balances, + single_phase, + spec_state_test, + spec_test, + with_all_phases, + with_custom_state, ) from eth2spec.test.helpers.attestations import sign_indexed_attestation from eth2spec.test.helpers.attester_slashings import ( + 
get_attestation_1_data, + get_attestation_2_data, + get_indexed_attestation_participants, get_valid_attester_slashing, get_valid_attester_slashing_by_indices, - get_indexed_attestation_participants, - get_attestation_2_data, - get_attestation_1_data, ) from eth2spec.test.helpers.proposer_slashings import ( get_min_slashing_penalty_quotient, diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_block_header.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_block_header.py index a5faf3da39..1098e100c1 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_block_header.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_block_header.py @@ -1,15 +1,14 @@ from copy import deepcopy -from eth2spec.test.context import spec_state_test, expect_assertion_error, with_all_phases +from eth2spec.test.context import expect_assertion_error, spec_state_test, with_all_phases from eth2spec.test.helpers.block import build_empty_block_for_next_slot -from eth2spec.test.helpers.execution_payload import compute_el_block_hash_for_block -from eth2spec.test.helpers.forks import is_post_bellatrix -from eth2spec.test.helpers.forks import is_post_eip7732 -from eth2spec.test.helpers.state import next_slot from eth2spec.test.helpers.execution_payload import ( build_empty_execution_payload, compute_el_block_hash, + compute_el_block_hash_for_block, ) +from eth2spec.test.helpers.forks import is_post_bellatrix, is_post_eip7732 +from eth2spec.test.helpers.state import next_slot def prepare_state_for_header_processing(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_deposit.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_deposit.py index c21a9596bc..5d1bde5bdd 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_deposit.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_deposit.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import spec_state_test, always_bls, with_all_phases +from eth2spec.test.context import always_bls, spec_state_test, with_all_phases from eth2spec.test.helpers.deposits import ( build_deposit, prepare_state_and_deposit, @@ -6,8 +6,8 @@ run_deposit_processing_with_specific_fork_version, sign_deposit_data, ) -from eth2spec.test.helpers.keys import privkeys, pubkeys from eth2spec.test.helpers.forks import is_post_electra +from eth2spec.test.helpers.keys import privkeys, pubkeys @with_all_phases diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_proposer_slashing.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_proposer_slashing.py index 269802a5cd..857bcd1849 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_proposer_slashing.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_proposer_slashing.py @@ -1,15 +1,15 @@ from eth2spec.test.context import ( - spec_state_test, - expect_assertion_error, always_bls, + expect_assertion_error, + spec_state_test, with_all_phases, ) from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.block_header import sign_block_header from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.proposer_slashings import ( - get_valid_proposer_slashing, check_proposer_slashing_effect, + get_valid_proposer_slashing, ) from eth2spec.test.helpers.state import next_epoch diff --git 
a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py index d7c7c8cb6d..e34bec23af 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py @@ -1,14 +1,14 @@ -from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( - spec_state_test, always_bls, - with_all_phases, - with_presets, - spec_test, + scaled_churn_balances_min_churn_limit, single_phase, + spec_state_test, + spec_test, + with_all_phases, with_custom_state, - scaled_churn_balances_min_churn_limit, + with_presets, ) +from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.voluntary_exits import ( run_voluntary_exit_processing, diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_effective_balance_updates.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_effective_balance_updates.py index 5eb5553313..9de298bb44 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_effective_balance_updates.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_effective_balance_updates.py @@ -1,9 +1,13 @@ from eth2spec.test.context import spec_state_test, with_all_phases -from eth2spec.test.helpers.epoch_processing import run_epoch_processing_to +from eth2spec.test.helpers.epoch_processing import ( + run_epoch_processing_from, + run_epoch_processing_to, + run_process_slots_up_to_epoch_boundary, +) +from eth2spec.test.helpers.forks import is_post_electra from eth2spec.test.helpers.withdrawals import ( set_compounding_withdrawal_credential, ) -from eth2spec.test.helpers.forks import is_post_electra @with_all_phases @@ -16,7 +20,11 @@ def run_test_effective_balance_hysteresis(spec, state, with_compounding_credenti assert is_post_electra(spec) or not with_compounding_credentials # Prepare state up to the final-updates. # Then overwrite the balances, we only want to focus to be on the hysteresis based changes. 
- run_epoch_processing_to(spec, state, "process_effective_balance_updates") + run_process_slots_up_to_epoch_boundary(spec, state) + yield "pre_epoch", state + run_epoch_processing_to( + spec, state, "process_effective_balance_updates", enable_slots_processing=False + ) # Set some edge cases for balances max = ( spec.MAX_EFFECTIVE_BALANCE_ELECTRA @@ -92,3 +100,6 @@ def run_test_effective_balance_hysteresis(spec, state, with_compounding_credenti for i, (_, _, post_eff, name) in enumerate(cases): assert state.validators[i].effective_balance == post_eff, name + + run_epoch_processing_from(spec, state, "process_effective_balance_updates") + yield "post_epoch", state diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py index ffac596043..f600fc65a7 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( - PHASE0, ALTAIR, BELLATRIX, + PHASE0, spec_state_test, with_phases, ) diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_justification_and_finalization.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_justification_and_finalization.py index aabd8e058d..521018eb7e 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_justification_and_finalization.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_justification_and_finalization.py @@ -1,10 +1,11 @@ from random import Random + from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with, ) from eth2spec.test.helpers.forks import is_post_altair -from eth2spec.test.helpers.state import transition_to, next_epoch_via_block, next_slot +from eth2spec.test.helpers.state import next_epoch_via_block, next_slot, transition_to from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py index 1c8e6c8939..4ea89722ae 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py @@ -1,17 +1,17 @@ -from eth2spec.test.helpers.deposits import mock_deposit -from eth2spec.test.helpers.state import next_epoch, next_slots -from eth2spec.test.helpers.forks import is_post_electra -from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.context import ( - spec_test, + scaled_churn_balances_min_churn_limit, + single_phase, spec_state_test, + spec_test, with_all_phases, - single_phase, with_custom_state, with_presets, - scaled_churn_balances_min_churn_limit, ) +from eth2spec.test.helpers.constants import MINIMAL +from eth2spec.test.helpers.deposits import mock_deposit from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with +from eth2spec.test.helpers.forks import is_post_electra +from eth2spec.test.helpers.state import next_epoch, next_slots def run_process_registry_updates(spec, state): diff --git 
a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py index b347dcada9..89f0eeea0c 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py @@ -1,32 +1,33 @@ +from random import Random + from eth2spec.test.context import ( + low_single_balance, + misc_balances, + PHASE0, + single_phase, spec_state_test, spec_test, with_all_phases, - single_phase, - with_phases, - PHASE0, with_custom_state, + with_phases, zero_activation_threshold, - misc_balances, - low_single_balance, -) -from eth2spec.test.helpers.forks import ( - is_post_altair, -) -from eth2spec.test.helpers.state import ( - next_epoch, - next_slot, ) from eth2spec.test.helpers.attestations import ( add_attestations_to_state, get_valid_attestation, - sign_attestation, prepare_state_with_attestations, + sign_attestation, ) -from eth2spec.test.helpers.rewards import leaking from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from random import Random +from eth2spec.test.helpers.forks import ( + is_post_altair, +) +from eth2spec.test.helpers.rewards import leaking +from eth2spec.test.helpers.state import ( + next_epoch, + next_slot, +) def run_process_rewards_and_penalties(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_slashings.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_slashings.py index 4c732e9bd0..7ffc355aff 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_slashings.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_slashings.py @@ -1,8 +1,9 @@ from random import Random + from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.epoch_processing import ( - run_epoch_processing_with, run_epoch_processing_to, + run_epoch_processing_with, ) from eth2spec.test.helpers.forks import ( is_post_altair, @@ -10,9 +11,8 @@ is_post_electra, ) from eth2spec.test.helpers.random import randomize_state -from eth2spec.test.helpers.state import has_active_balance_differential +from eth2spec.test.helpers.state import has_active_balance_differential, next_epoch from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators -from eth2spec.test.helpers.state import next_epoch def run_process_slashings(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/finality/test_finality.py b/tests/core/pyspec/eth2spec/test/phase0/finality/test_finality.py index 2b22340a5b..e7c962a1ce 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/finality/test_finality.py +++ b/tests/core/pyspec/eth2spec/test/phase0/finality/test_finality.py @@ -1,6 +1,6 @@ from eth2spec.test.context import spec_state_test, with_all_phases -from eth2spec.test.helpers.state import next_epoch_via_block from eth2spec.test.helpers.attestations import next_epoch_with_attestations +from eth2spec.test.helpers.state import next_epoch_via_block def check_finality( diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py index c39016b11b..67b85b0522 100644 --- 
a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( spec_state_test, + with_all_phases_from_except, with_altair_and_later, - with_altair_until_eip7732, with_presets, ) from eth2spec.test.helpers.attestations import ( @@ -11,19 +11,23 @@ from eth2spec.test.helpers.block import ( build_empty_block, ) -from eth2spec.test.helpers.constants import MAINNET -from eth2spec.test.helpers.forks import is_post_eip7732 +from eth2spec.test.helpers.constants import ( + ALTAIR, + EIP7732, + MAINNET, +) from eth2spec.test.helpers.fork_choice import ( + add_attestation, + add_block, check_head_against_root, get_genesis_forkchoice_store_and_block, on_tick_and_append_step, - add_attestation, - add_block, tick_and_add_block, ) +from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, payload_state_transition, + state_transition_and_sign_block, ) @@ -137,7 +141,7 @@ def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_ro # TODO(jtraglia): Investigate why this doesn't work with eip7732 -@with_altair_until_eip7732 +@with_all_phases_from_except(ALTAIR, [EIP7732]) @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state): @@ -373,7 +377,7 @@ def _filter_participant_set(participants): # TODO(jtraglia): Investigate why this doesn't work with eip7732 -@with_altair_until_eip7732 +@with_all_phases_from_except(ALTAIR, [EIP7732]) @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index cb13fd1da6..983783fe6a 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -1,8 +1,9 @@ import random + from eth2spec.test.context import ( spec_state_test, + with_all_phases_from_except, with_altair_and_later, - with_altair_until_eip7732, with_presets, ) from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations @@ -10,28 +11,32 @@ apply_empty_block, build_empty_block_for_next_slot, ) -from eth2spec.test.helpers.constants import MINIMAL +from eth2spec.test.helpers.constants import ( + ALTAIR, + EIP7732, + MINIMAL, +) from eth2spec.test.helpers.fork_choice import ( + add_attestation, add_attester_slashing, add_block, + apply_next_epoch_with_attestations, check_head_against_root, get_anchor_root, - get_genesis_forkchoice_store_and_block, get_formatted_head_output, + get_genesis_forkchoice_store_and_block, on_tick_and_append_step, - add_attestation, - tick_and_run_on_attestation, - tick_and_add_block, output_head_check, - apply_next_epoch_with_attestations, + tick_and_add_block, + tick_and_run_on_attestation, ) from eth2spec.test.helpers.forks import ( is_post_altair, is_post_eip7732, ) from eth2spec.test.helpers.state import ( - next_slots, next_epoch, + next_slots, payload_state_transition, state_transition_and_sign_block, ) @@ -264,7 +269,7 @@ def test_filtered_block_tree(spec, state): # This test is skipped in EIP-7732 because the block's slot decides first on weight ties -@with_altair_until_eip7732 
+@with_all_phases_from_except(ALTAIR, [EIP7732]) @spec_state_test def test_proposer_boost_correct_head(spec, state): test_steps = [] diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_proposer_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_proposer_head.py index 9abcee9ccc..b9cb93c927 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_proposer_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_proposer_head.py @@ -1,7 +1,8 @@ from eth_utils import encode_hex + from eth2spec.test.context import ( spec_state_test, - with_altair_until_eip7732, + with_all_phases_from_except, ) from eth2spec.test.helpers.attestations import ( get_valid_attestations_at_slot, @@ -9,6 +10,10 @@ from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) +from eth2spec.test.helpers.constants import ( + ALTAIR, + EIP7732, +) from eth2spec.test.helpers.fork_choice import ( apply_next_epoch_with_attestations, apply_next_slots_with_attestations, @@ -25,7 +30,7 @@ ) -@with_altair_until_eip7732 +@with_all_phases_from_except(ALTAIR, [EIP7732]) @spec_state_test def test_basic_is_head_root(spec, state): test_steps = [] @@ -67,7 +72,7 @@ def test_basic_is_head_root(spec, state): yield "steps", test_steps -@with_altair_until_eip7732 +@with_all_phases_from_except(ALTAIR, [EIP7732]) @spec_state_test def test_basic_is_parent_root(spec, state): test_steps = [] diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index 0d856f81c1..786c86ceb3 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -1,17 +1,17 @@ import random + from eth_utils import encode_hex -from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.test.context import MINIMAL, spec_state_test, with_altair_and_later, with_presets from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, next_slots_with_attestations, ) from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, build_empty_block, - transition_unsigned_block, + build_empty_block_for_next_slot, sign_block, + transition_unsigned_block, ) from eth2spec.test.helpers.execution_payload import ( build_empty_execution_payload, @@ -19,16 +19,16 @@ compute_el_block_hash_for_block, ) from eth2spec.test.helpers.fork_choice import ( + add_block, + apply_next_epoch_with_attestations, + apply_next_slots_with_attestations, check_head_against_root, + find_next_justifying_slot, get_genesis_forkchoice_store_and_block, get_store_full_state, + is_ready_to_justify, on_tick_and_append_step, - add_block, tick_and_add_block, - apply_next_epoch_with_attestations, - apply_next_slots_with_attestations, - is_ready_to_justify, - find_next_justifying_slot, ) from eth2spec.test.helpers.forks import ( is_post_bellatrix, @@ -40,6 +40,7 @@ payload_state_transition, state_transition_and_sign_block, ) +from eth2spec.utils.ssz.ssz_impl import hash_tree_root rng = random.Random(2020) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py index 6aceb0ec66..685bc1e313 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py @@ -3,40 +3,39 @@ with_altair_and_later, with_presets, ) -from 
eth2spec.test.helpers.constants import ( - MINIMAL, -) from eth2spec.test.helpers.attestations import ( - state_transition_with_full_block, get_valid_attestation, get_valid_attestations_at_slot, + state_transition_with_full_block, ) from eth2spec.test.helpers.block import ( build_empty_block, build_empty_block_for_next_slot, ) -from eth2spec.test.helpers.forks import is_post_eip7732 +from eth2spec.test.helpers.constants import ( + MINIMAL, +) from eth2spec.test.helpers.fork_choice import ( - check_head_against_root, - get_genesis_forkchoice_store_and_block, - get_store_full_state, - on_tick_and_append_step, add_attestations, - tick_and_add_block, apply_next_epoch_with_attestations, + check_head_against_root, find_next_justifying_slot, + get_genesis_forkchoice_store_and_block, + get_store_full_state, is_ready_to_justify, + on_tick_and_append_step, payload_state_transition, payload_state_transition_no_store, + tick_and_add_block, ) +from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, next_epoch, next_slot, + state_transition_and_sign_block, transition_to, ) - TESTING_PRESETS = [MINIMAL] diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py index ba8a7b4871..3468b0e336 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py @@ -3,33 +3,32 @@ with_altair_and_later, with_presets, ) -from eth2spec.test.helpers.constants import ( - MINIMAL, -) from eth2spec.test.helpers.attestations import ( state_transition_with_full_block, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) -from eth2spec.test.helpers.forks import is_post_eip7732 +from eth2spec.test.helpers.constants import ( + MINIMAL, +) from eth2spec.test.helpers.fork_choice import ( + apply_next_epoch_with_attestations, check_head_against_root, + find_next_justifying_slot, get_genesis_forkchoice_store_and_block, get_store_full_state, on_tick_and_append_step, payload_state_transition, payload_state_transition_no_store, tick_and_add_block, - apply_next_epoch_with_attestations, - find_next_justifying_slot, ) +from eth2spec.test.helpers.forks import is_post_eip7732 from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, next_epoch, + state_transition_and_sign_block, ) - TESTING_PRESETS = [MINIMAL] diff --git a/tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py b/tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py index 2c6741c8e1..fad3115ba2 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py +++ b/tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py @@ -2,8 +2,8 @@ PHASE0, single_phase, spec_test, - with_presets, with_phases, + with_presets, ) from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.deposits import ( diff --git a/tests/core/pyspec/eth2spec/test/phase0/genesis/test_validity.py b/tests/core/pyspec/eth2spec/test/phase0/genesis/test_validity.py index 4b93bab529..092adb1897 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/genesis/test_validity.py +++ b/tests/core/pyspec/eth2spec/test/phase0/genesis/test_validity.py @@ -1,9 +1,9 @@ from eth2spec.test.context import ( PHASE0, - spec_test, single_phase, - with_presets, + spec_test, with_phases, + with_presets, ) from 
eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.deposits import ( diff --git a/tests/core/pyspec/eth2spec/test/phase0/random/test_random.py b/tests/core/pyspec/eth2spec/test/phase0/random/test_random.py index 3b93c628e3..0df30d6acd 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/phase0/random/test_random.py @@ -4,19 +4,17 @@ See the README for that generator for more information. """ -from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.context import ( + always_bls, misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, only_generator, -) -from eth2spec.test.context import ( - always_bls, + single_phase, spec_test, with_custom_state, - single_phase, + with_phases, + zero_activation_threshold, ) +from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.utils.randomized_block_tests import ( run_generated_randomized_test, ) diff --git a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py index 32ace544f5..2fbfa2b697 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py +++ b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py @@ -1,6 +1,6 @@ -from eth2spec.test.context import with_all_phases, with_phases, spec_state_test -from eth2spec.test.helpers.constants import PHASE0 import eth2spec.test.helpers.rewards as rewards_helpers +from eth2spec.test.context import spec_state_test, with_all_phases, with_phases +from eth2spec.test.helpers.constants import PHASE0 @with_all_phases diff --git a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py index 145977d3da..b1fbe7b326 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py +++ b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py @@ -1,7 +1,7 @@ -from eth2spec.test.context import with_all_phases, with_phases, spec_state_test +import eth2spec.test.helpers.rewards as rewards_helpers +from eth2spec.test.context import spec_state_test, with_all_phases, with_phases from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.rewards import leaking -import eth2spec.test.helpers.rewards as rewards_helpers @with_all_phases diff --git a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py index cc97e39eea..7e1d5c1741 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py +++ b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py @@ -1,19 +1,19 @@ from random import Random +import eth2spec.test.helpers.rewards as rewards_helpers from eth2spec.test.context import ( - with_all_phases, - spec_test, - spec_state_test, - with_custom_state, - single_phase, low_balances, misc_balances, + single_phase, + spec_state_test, + spec_test, + with_all_phases, + with_custom_state, ) -import eth2spec.test.helpers.rewards as rewards_helpers from eth2spec.test.helpers.random import ( - randomize_state, patch_state_to_non_leaking, randomize_attestation_participation, + randomize_state, ) from eth2spec.test.helpers.state import has_active_balance_differential, next_epoch from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py index 
83ab1046d5..994b370b09 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py @@ -1,31 +1,32 @@ from random import Random -from eth2spec.utils import bls -from eth2spec.test.helpers.state import ( - get_balance, - state_transition_and_sign_block, - next_slot, - next_epoch, - next_epoch_via_block, -) -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, - build_empty_block, - sign_block, - transition_unsigned_block, +from eth2spec.test.context import ( + always_bls, + dump_skipping_message, + expect_assertion_error, + large_validator_set, + single_phase, + spec_state_test, + spec_test, + with_all_phases, + with_custom_state, + with_phases, + with_presets, ) -from eth2spec.test.helpers.keys import pubkeys +from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.attester_slashings import ( - get_valid_attester_slashing_by_indices, - get_valid_attester_slashing, get_indexed_attestation_participants, get_max_attester_slashings, + get_valid_attester_slashing, + get_valid_attester_slashing_by_indices, ) -from eth2spec.test.helpers.proposer_slashings import ( - get_valid_proposer_slashing, - check_proposer_slashing_effect, +from eth2spec.test.helpers.block import ( + build_empty_block, + build_empty_block_for_next_slot, + sign_block, + transition_unsigned_block, ) -from eth2spec.test.helpers.attestations import get_valid_attestation +from eth2spec.test.helpers.constants import MINIMAL, PHASE0 from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.execution_payload import ( build_empty_execution_payload, @@ -33,36 +34,35 @@ compute_el_block_hash, compute_el_block_hash_for_block, ) -from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits +from eth2spec.test.helpers.forks import ( + is_post_altair, + is_post_bellatrix, + is_post_capella, + is_post_eip7732, + is_post_electra, +) +from eth2spec.test.helpers.keys import pubkeys from eth2spec.test.helpers.multi_operations import ( run_slash_and_exit, run_test_full_random_operations, ) +from eth2spec.test.helpers.proposer_slashings import ( + check_proposer_slashing_effect, + get_valid_proposer_slashing, +) +from eth2spec.test.helpers.state import ( + get_balance, + next_epoch, + next_epoch_via_block, + next_slot, + state_transition_and_sign_block, +) from eth2spec.test.helpers.sync_committee import ( compute_committee_indices, compute_sync_committee_participant_reward_and_penalty, ) -from eth2spec.test.helpers.constants import PHASE0, MINIMAL -from eth2spec.test.helpers.forks import ( - is_post_altair, - is_post_bellatrix, - is_post_electra, - is_post_capella, - is_post_eip7732, -) -from eth2spec.test.context import ( - spec_test, - spec_state_test, - dump_skipping_message, - with_phases, - with_all_phases, - single_phase, - expect_assertion_error, - always_bls, - with_presets, - with_custom_state, - large_validator_set, -) +from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits +from eth2spec.utils import bls @with_all_phases diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_slots.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_slots.py index 7e1ab3e6d6..b5d317551b 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_slots.py +++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_slots.py @@ -1,10 +1,10 @@ -from eth2spec.test.helpers.forks import ( - is_post_capella, -) from eth2spec.test.context 
import ( spec_state_test, with_all_phases, ) +from eth2spec.test.helpers.forks import ( + is_post_capella, +) from eth2spec.test.helpers.state import get_state_root, next_epoch, next_slot, transition_to diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py index d7b0297992..866f27ede8 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py @@ -1,15 +1,15 @@ -from eth2spec.test.context import with_all_phases, spec_state_test -from eth2spec.test.helpers.block import build_empty_block_for_next_slot +from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation +from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.constants import ALL_PHASES -from eth2spec.test.helpers.forks import is_post_electra, is_post_eip7732 +from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store +from eth2spec.test.helpers.forks import is_post_eip7732, is_post_electra from eth2spec.test.helpers.state import ( - transition_to, - state_transition_and_sign_block, next_epoch, next_slot, + state_transition_and_sign_block, + transition_to, ) -from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store def run_on_attestation(spec, state, store, attestation, valid=True): diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py index e9f5c5903a..16fba7e717 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py @@ -1,8 +1,8 @@ -from eth2spec.test.context import with_all_phases, spec_state_test -from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store +from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) +from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store from eth2spec.test.helpers.state import ( next_epoch, state_transition_and_sign_block, diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/math/test_integer_squareroot.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/math/test_integer_squareroot.py index 159f3c9b94..43013c3654 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/math/test_integer_squareroot.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/math/test_integer_squareroot.py @@ -1,8 +1,9 @@ import random from math import isqrt + from eth2spec.test.context import ( - spec_test, single_phase, + spec_test, with_all_phases, ) diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py index e4b71251cb..c40d823aed 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py @@ -1,16 +1,16 @@ import random from eth2spec.test.context import ( + always_bls, single_phase, spec_state_test, spec_test, - always_bls, - with_phases, with_all_phases, + with_phases, ) 
-from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation from eth2spec.test.helpers.block import build_empty_block +from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.keys import privkeys, pubkeys from eth2spec.test.helpers.state import next_epoch @@ -36,7 +36,6 @@ def run_get_committee_assignment(spec, state, epoch, validator_index, valid=True assert committee == spec.get_beacon_committee(state, slot, committee_index) assert committee_index < spec.get_committee_count_per_slot(state, epoch) assert validator_index in committee - assert valid except AssertionError: assert not valid else: diff --git a/tests/core/pyspec/eth2spec/test/utils/__init__.py b/tests/core/pyspec/eth2spec/test/utils/__init__.py index abd79f9ede..cead6efbd7 100644 --- a/tests/core/pyspec/eth2spec/test/utils/__init__.py +++ b/tests/core/pyspec/eth2spec/test/utils/__init__.py @@ -3,7 +3,6 @@ with_meta_tags, ) - __all__ = [ # avoid "unused import" lint error "vector_test", "with_meta_tags", diff --git a/tests/core/pyspec/eth2spec/test/utils/kzg_tests.py b/tests/core/pyspec/eth2spec/test/utils/kzg_tests.py index 63828fbffb..d926b8a774 100644 --- a/tests/core/pyspec/eth2spec/test/utils/kzg_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/kzg_tests.py @@ -3,9 +3,8 @@ int_to_big_endian, ) -from eth2spec.utils import bls from eth2spec.fulu import spec - +from eth2spec.utils import bls ############################################################################### # Helper functions diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 27fc37a165..58c0e00b14 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -7,31 +7,31 @@ from random import Random from typing import Callable +from eth2spec.test.helpers.blob import ( + get_sample_blob_tx, +) from eth2spec.test.helpers.execution_payload import ( - compute_el_block_hash_for_block, build_randomized_execution_payload, + compute_el_block_hash_for_block, +) +from eth2spec.test.helpers.inactivity_scores import ( + randomize_inactivity_scores, ) from eth2spec.test.helpers.multi_operations import ( build_random_block_from_state_for_next_slot, get_random_bls_to_execution_changes, + get_random_execution_requests, get_random_sync_aggregate, prepare_state_and_get_random_deposits, - get_random_execution_requests, -) -from eth2spec.test.helpers.inactivity_scores import ( - randomize_inactivity_scores, ) from eth2spec.test.helpers.random import ( - randomize_state as randomize_state_helper, patch_state_to_non_leaking, -) -from eth2spec.test.helpers.blob import ( - get_sample_blob_tx, + randomize_state as randomize_state_helper, ) from eth2spec.test.helpers.state import ( - next_slot, - next_epoch, ensure_state_has_validators_across_lifecycle, + next_epoch, + next_slot, state_transition_and_sign_block, ) diff --git a/tests/core/pyspec/eth2spec/test/utils/utils.py b/tests/core/pyspec/eth2spec/test/utils/utils.py index 8273ff2d98..6dab2514ee 100644 --- a/tests/core/pyspec/eth2spec/test/utils/utils.py +++ b/tests/core/pyspec/eth2spec/test/utils/utils.py @@ -1,6 +1,7 @@ -from typing import Dict, Any -from eth2spec.utils.ssz.ssz_typing import View +from typing import Any, Dict + from eth2spec.utils.ssz.ssz_impl import 
serialize +from eth2spec.utils.ssz.ssz_typing import View def vector_test(description: str = None): diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index 5b8f7ca365..f080bf53cb 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -1,38 +1,35 @@ -from py_ecc.bls import G2ProofOfPossession as py_ecc_bls -from py_ecc.bls.g2_primitives import signature_to_G2 as _signature_to_G2 -from py_ecc.utils import prime_field_inv as py_ecc_prime_field_inv -from py_ecc.optimized_bls12_381 import ( # noqa: F401 - G1 as py_ecc_G1, - G2 as py_ecc_G2, - Z1 as py_ecc_Z1, - Z2 as py_ecc_Z2, - add as py_ecc_add, - multiply as py_ecc_mul, - neg as py_ecc_neg, - pairing as py_ecc_pairing, - final_exponentiate as py_ecc_final_exponentiate, - FQ12 as py_ecc_GT, - FQ, - FQ2, +import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option +import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option +from py_arkworks_bls12381 import ( + G1Point as arkworks_G1, + G2Point as arkworks_G2, + GT as arkworks_GT, + Scalar as arkworks_Scalar, ) +from py_ecc.bls import G2ProofOfPossession as py_ecc_bls from py_ecc.bls.g2_primitives import ( # noqa: F401 curve_order as BLS_MODULUS, G1_to_pubkey as py_ecc_G1_to_bytes48, - pubkey_to_G1 as py_ecc_bytes48_to_G1, G2_to_signature as py_ecc_G2_to_bytes96, + pubkey_to_G1 as py_ecc_bytes48_to_G1, + signature_to_G2 as _signature_to_G2, signature_to_G2 as py_ecc_bytes96_to_G2, ) -from py_arkworks_bls12381 import ( - G1Point as arkworks_G1, - G2Point as arkworks_G2, - Scalar as arkworks_Scalar, - GT as arkworks_GT, +from py_ecc.optimized_bls12_381 import ( # noqa: F401 + add as py_ecc_add, + final_exponentiate as py_ecc_final_exponentiate, + FQ, + FQ2, + FQ12 as py_ecc_GT, + G1 as py_ecc_G1, + G2 as py_ecc_G2, + multiply as py_ecc_mul, + neg as py_ecc_neg, + pairing as py_ecc_pairing, + Z1 as py_ecc_Z1, + Z2 as py_ecc_Z2, ) - - -import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option - -import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option +from py_ecc.utils import prime_field_inv as py_ecc_prime_field_inv class py_ecc_Scalar(FQ): diff --git a/tests/core/pyspec/eth2spec/utils/hash_function.py b/tests/core/pyspec/eth2spec/utils/hash_function.py index ba749db89c..329bc7c216 100644 --- a/tests/core/pyspec/eth2spec/utils/hash_function.py +++ b/tests/core/pyspec/eth2spec/utils/hash_function.py @@ -1,7 +1,8 @@ from hashlib import sha256 -from remerkleable.byte_arrays import Bytes32 from typing import Union +from remerkleable.byte_arrays import Bytes32 + ZERO_BYTES32 = b"\x00" * 32 diff --git a/tests/core/pyspec/eth2spec/utils/kzg.py b/tests/core/pyspec/eth2spec/utils/kzg.py index ea90a46b9b..4bd3407417 100644 --- a/tests/core/pyspec/eth2spec/utils/kzg.py +++ b/tests/core/pyspec/eth2spec/utils/kzg.py @@ -3,22 +3,22 @@ # - https://github.com/asn-d6/kzgverify import json import os +from pathlib import Path from typing import ( - Tuple, Sequence, + Tuple, ) -from pathlib import Path from eth_utils import encode_hex from py_ecc.typing import ( Optimized_Point3D, ) + from eth2spec.utils import bls from eth2spec.utils.bls import ( BLS_MODULUS, ) - PRIMITIVE_ROOT_OF_UNITY = 7 diff --git a/tests/core/pyspec/eth2spec/utils/merkle_minimal.py b/tests/core/pyspec/eth2spec/utils/merkle_minimal.py index 6a6491fb4d..1a889c02f0 100644 --- a/tests/core/pyspec/eth2spec/utils/merkle_minimal.py +++ 
b/tests/core/pyspec/eth2spec/utils/merkle_minimal.py @@ -1,6 +1,6 @@ -from eth2spec.utils.hash_function import hash from math import log2 +from eth2spec.utils.hash_function import hash ZERO_BYTES32 = b"\x00" * 32 diff --git a/tests/core/pyspec/eth2spec/utils/ssz/ssz_impl.py b/tests/core/pyspec/eth2spec/utils/ssz/ssz_impl.py index 807c302291..f15b54b2da 100644 --- a/tests/core/pyspec/eth2spec/utils/ssz/ssz_impl.py +++ b/tests/core/pyspec/eth2spec/utils/ssz/ssz_impl.py @@ -1,8 +1,8 @@ from typing import TypeVar from remerkleable.basic import uint -from remerkleable.core import Type, View from remerkleable.byte_arrays import Bytes32 +from remerkleable.core import Type, View def ssz_serialize(obj: View) -> bytes: diff --git a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py index a67fd642dd..4c0ae94b8e 100644 --- a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -1,10 +1,8 @@ -from remerkleable.complex import Container, Vector, List -from remerkleable.union import Union from remerkleable.basic import ( - boolean, bit, - uint, + boolean, byte, + uint, uint8, uint16, uint32, @@ -12,19 +10,20 @@ uint128, uint256, ) -from remerkleable.bitfields import Bitvector, Bitlist +from remerkleable.bitfields import Bitlist, Bitvector from remerkleable.byte_arrays import ( - ByteVector, + ByteList, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, - ByteList, + ByteVector, ) -from remerkleable.core import BasicView, View, Path - +from remerkleable.complex import Container, List, Vector +from remerkleable.core import BasicView, Path, View +from remerkleable.union import Union Bytes20 = ByteVector[20] # type: ignore Bytes31 = ByteVector[31] # type: ignore diff --git a/tests/core/pyspec/eth2spec/utils/test_merkle_minimal.py b/tests/core/pyspec/eth2spec/utils/test_merkle_minimal.py index d55562c342..97c56374a5 100644 --- a/tests/core/pyspec/eth2spec/utils/test_merkle_minimal.py +++ b/tests/core/pyspec/eth2spec/utils/test_merkle_minimal.py @@ -1,6 +1,7 @@ import pytest -from .merkle_minimal import zerohashes, merkleize_chunks, get_merkle_root + from .hash_function import hash +from .merkle_minimal import get_merkle_root, merkleize_chunks, zerohashes def h(a: bytes, b: bytes) -> bytes: diff --git a/tests/core/pyspec/eth2spec/utils/test_merkle_proof_util.py b/tests/core/pyspec/eth2spec/utils/test_merkle_proof_util.py index c72e9c3b2e..aca064632b 100644 --- a/tests/core/pyspec/eth2spec/utils/test_merkle_proof_util.py +++ b/tests/core/pyspec/eth2spec/utils/test_merkle_proof_util.py @@ -1,6 +1,5 @@ import pytest - # Note: these functions are extract from merkle-proofs.md (deprecated), # the tests are temporary to show correctness while the document is still there. diff --git a/tests/formats/epoch_processing/README.md b/tests/formats/epoch_processing/README.md index 652cae0e91..aeed83f8cf 100644 --- a/tests/formats/epoch_processing/README.md +++ b/tests/formats/epoch_processing/README.md @@ -23,6 +23,14 @@ An SSZ-snappy encoded `BeaconState`, the state before running the epoch sub-tran An SSZ-snappy encoded `BeaconState`, the state after applying the epoch sub-transition. No value if the sub-transition processing is aborted. +### `pre_epoch.ssz_snappy` + +An SSZ-snappy encoded `BeaconState`, the state before running the epoch transition. + +### `post_epoch.ssz_snappy` + +An SSZ-snappy encoded `BeaconState`, the state after applying the epoch transition. No value if the transition processing is aborted. 
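As an illustration of the new `pre_epoch.ssz_snappy` / `post_epoch.ssz_snappy` vectors and the alternative full-epoch condition described below, here is a minimal consumer-side sketch. It assumes a pyspec-like `spec` module exposing `BeaconState` and `process_epoch`, the `python-snappy` package, and a flat per-case directory layout; these are illustrative assumptions, not part of the test format itself.

```python
import os

import snappy  # python-snappy, used here to decompress *.ssz_snappy files


def load_state(spec, case_dir, name):
    # Returns None when the file is absent (e.g. no post_epoch for aborted transitions).
    path = os.path.join(case_dir, f"{name}.ssz_snappy")
    if not os.path.exists(path):
        return None
    with open(path, "rb") as f:
        return spec.BeaconState.decode_bytes(snappy.uncompress(f.read()))


def run_full_epoch_case(spec, case_dir):
    # Alternative condition: load the full pre-state, run process_epoch once,
    # and compare against the full post-state.
    pre = load_state(spec, case_dir, "pre_epoch")
    post = load_state(spec, case_dir, "post_epoch")
    state = pre.copy()
    try:
        spec.process_epoch(state)
    except Exception:
        assert post is None  # aborted transitions carry no post_epoch file
        return
    assert post is not None
    assert state.hash_tree_root() == post.hash_tree_root()
```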
+ ## Condition A handler of the `epoch_processing` test-runner should process these cases, @@ -50,3 +58,15 @@ Sub-transitions: - `pending_deposits` (>=Electra) The resulting state should match the expected `post` state. + +## Condition (alternative) + +Instead of having a different handler for each sub-transition, a single handler for all cases should load the `pre_epoch` state, call `process_epoch`, and then assert that the resulting state matches the `post_epoch` state. + +This has the following advantages: + +- Less code to maintain for the epoch processing handler. +- Works with single-pass epoch processing. +- Can detect bugs related to data dependencies between different sub-transitions. + +The disadvantage is that this condition takes more resources to compute, but only a constant amount per test vector. diff --git a/tests/generators/README.md b/tests/generators/README.md deleted file mode 100644 index db99c1693b..0000000000 --- a/tests/generators/README.md +++ /dev/null @@ -1,227 +0,0 @@ -# Consensus test generators - -This directory contains all the generators for tests, consumed by consensus-layer client implementations. - -Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, - here: [ethereum/consensus-spec-tests](https://github.com/ethereum/consensus-spec-tests). - -On releases, test generators are run by the release manager. Test-generation of mainnet tests can take a significant amount of time, and is better left out of a CI setup. - -An automated nightly tests release system, with a config filter applied, is being considered as implementation needs mature. - - - -- [How to run generators](#how-to-run-generators) - - [Cleaning](#cleaning) - - [Running all test generators](#running-all-test-generators) - - [Running a single generator](#running-a-single-generator) - - [Running generators for specific tests](#running-generators-for-specific-tests) -- [Developing a generator](#developing-a-generator) - [How to add a new test generator](#how-to-add-a-new-test-generator) - [How to remove a test generator](#how-to-remove-a-test-generator) - - - -## How to run generators - -Prerequisites: -- Python 3 installed -- PIP 3 -- GNU Make - -### Cleaning - -This removes the existing virtual environments (`/tests/generators//venv`) and generated tests (`../consensus-spec-tests/tests`). - -```bash -make clean && rm -rf ../consensus-spec-tests/tests -``` - -### Running all test generators - -This runs all of the generators. - -```bash -make -j 4 gen_all -``` - -The `-j N` flag makes the generators run in parallel, with `N` being the amount of cores. - -### Running a single generator - -The makefile auto-detects generators in the `tests/generators` directory and provides a tests-gen target (gen_) for each generator. See example: - -```bash -make gen_ssz_static -``` - -### Running generators for specific tests - -Arguments can be appended to configure exactly what tests should be generated (`k=`), on which forks (`fork=`), and which presets (`preset=`). The arguments can be used individually or altogether. See examples: - -```bash -make gen_operations k=invalid_committee_index -``` - -```bash -make gen_operations fork=fulu -``` - -```bash -make gen_operations preset=mainnet -``` - -```bash -make gen_operations k=invalid_committee_index fork=fulu preset=mainnet -``` - -The arguments also accept comma-separated lists to specify multiple values.
- -```bash -make gen_operations k=invalid_committee_index,invalid_too_many_committee_bits -``` - -The arguments can also be used for when running all test generators. - -```bash -make gen_all fork=fulu -``` - -## Developing a generator - -The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself in order to prevent code duplication and outdated tests. -Applying configurations to the spec is simple and enables you to create test suites with different contexts. - -*Note*: Make sure to run `make pyspec` from the root of the specs repository in order to build the pyspec requirement. - -Write your initial test generator, extending the base generator: - -Write a `main.py` file. The shuffling test generator is a good minimal starting point: - -```python -from eth2spec.phase0 import spec as spec -from eth_utils import to_tuple -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing -from preset_loader import loader -from typing import Iterable - - -def shuffling_case_fn(seed, count): - yield 'mapping', 'data', { - 'seed': '0x' + seed.hex(), - 'count': count, - 'mapping': [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)] - } - - -def shuffling_case(seed, count): - return f'shuffle_0x{seed.hex()}_{count}', lambda: shuffling_case_fn(seed, count) - - -@to_tuple -def shuffling_test_cases(): - for seed in [spec.hash(seed_init_value.to_bytes(length=4, byteorder='little')) for seed_init_value in range(30)]: - for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]: - yield shuffling_case(seed, count) - - -def create_provider(config_name: str) -> gen_typing.TestProvider: - - def prepare_fn(configs_path: str) -> str: - presets = loader.load_presets(configs_path, config_name) - spec.apply_constants_preset(presets) - return config_name - - def cases_fn() -> Iterable[gen_typing.TestCase]: - for (case_name, case_fn) in shuffling_test_cases(): - yield gen_typing.TestCase( - fork_name='phase0', - runner_name='shuffling', - handler_name='core', - suite_name='shuffle', - case_name=case_name, - case_fn=case_fn - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - gen_runner.run_generator("shuffling", [create_provider("minimal"), create_provider("mainnet")]) -``` - -This generator: -- builds off of `gen_runner.run_generator` to handle configuration / filter / output logic. -- parametrized the creation of a test-provider to support multiple configs. -- Iterates through tests cases. -- Each test case provides a `case_fn`, to be executed by the `gen_runner.run_generator` if the case needs to be generated. But skipped otherwise. - -To extend this, one could decide to parametrize the `shuffling_test_cases` function, and create test provider for any test-yielding function. 
- -Another example, to generate tests from pytests: - -```python -from eth2spec.phase0 import spec as spec_phase0 -from eth2spec.altair import spec as spec_altair -from eth2spec.test.helpers.constants import PHASE0, ALTAIR - -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators - - -specs = (spec_phase0, spec_altair) - - -if __name__ == "__main__": - phase_0_mods = {key: 'eth2spec.test.phase0.sanity.test_' + key for key in [ - 'blocks', - 'slots', - ]} - altair_mods = {**{key: 'eth2spec.test.altair.sanity.test_' + key for key in [ - 'blocks', - ]}, **phase_0_mods} # also run the previous phase 0 tests - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - } - check_mods(all_mods, "sanity") - - run_state_test_generators(runner_name="sanity", all_mods=all_mods) -``` - -Here multiple phases load the configuration, and the stream of test cases is derived from a pytest file using the `eth2spec.gen_helpers.gen_from_tests.gen.run_state_test_generators` utility. Note that this helper generates all available tests of `TESTGEN_FORKS` forks of `ALL_CONFIGS` configs of the given runner. - -Recommendations: -- You can have more than just one test provider. -- Your test provider is free to output any configuration and combination of runner/handler/fork/case name. -- You can split your test case generators into different Python files/packages; this is good for code organization. -- Use config `minimal` for performance and simplicity, but also implement a suite with the `mainnet` config where necessary. -- You may be able to write your test case provider in a way where it does not make assumptions on constants. - If so, you can generate test cases with different configurations for the same scenario (see example). -- See [`tests/core/gen_helpers/README.md`](../core/pyspec/eth2spec/gen_helpers/README.md) for command line options for generators. - -## How to add a new test generator - -To add a new test generator that builds `New Tests`: - -1. Create a new directory `new_tests` within the `tests/generators` directory. - Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later. -2. Your generator is assumed to have a `requirements.txt` file, - with any dependencies it may need. Leave it empty if your generator has none. -3. Your generator is assumed to have a `main.py` file in its root. - By adding the base generator to your requirements, you can make a generator really easily. See docs below. -4. Your generator is called with `-o some/file/path/for_testing/can/be_anything --preset-list mainnet minimal`. - The base generator helps you handle this; you only have to define test case providers. -5. Finally, add any linting or testing commands to the [`Makefile`](../../Makefile), if it can be run locally. - -*Note*: You do not have to change the makefile. -However, if necessary (e.g. not using Python, or mixing in other languages), submit an issue, and it can be a special case. -Do note that generators should be easy to maintain, lean, and based on the spec. - -## How to remove a test generator - -If a test generator is not needed anymore, undo the steps described above and make a new release: - -1. Remove the generator directory. -2. Remove the generated tests in the [`consensus-spec-tests`](https://github.com/ethereum/consensus-spec-tests) repository by opening a pull request there. -3. Make a new release. 
diff --git a/tests/generators/bls/README.md b/tests/generators/bls/README.md deleted file mode 100644 index 24013f88e7..0000000000 --- a/tests/generators/bls/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# BLS Test Generator - -The [BLS Signature APIs](../../../specs/phase0/beacon-chain.md#bls-signatures) - -Information on the format of the tests can be found in the [BLS test formats documentation](../../formats/bls/README.md). - -## Resources - -- [IETF BLS Signature Scheme](https://datatracker.ietf.org/doc/draft-irtf-cfrg-bls-signature/) -- [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf) -- Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py deleted file mode 100644 index 856e613f96..0000000000 --- a/tests/generators/bls/main.py +++ /dev/null @@ -1,559 +0,0 @@ -""" -BLS test vectors generator -""" - -from hashlib import sha256 -from typing import Tuple, Iterable, Any, Callable, Dict - -from eth_utils import ( - encode_hex, - int_to_big_endian, -) -import milagro_bls_binding as milagro_bls - -from eth2spec.utils import bls -from eth2spec.test.helpers.constants import PHASE0, ALTAIR -from eth2spec.test.helpers.typing import SpecForkName -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing -from eth2spec.altair import spec - - -def to_bytes(i): - return i.to_bytes(32, "big") - - -def hash(x): - return sha256(x).digest() - - -def int_to_hex(n: int, byte_length: int = None) -> str: - byte_value = int_to_big_endian(n) - if byte_length: - byte_value = byte_value.rjust(byte_length, b"\x00") - return encode_hex(byte_value) - - -def hex_to_int(x: str) -> int: - return int(x, 16) - - -MESSAGES = [ - bytes(b"\x00" * 32), - bytes(b"\x56" * 32), - bytes(b"\xab" * 32), -] -SAMPLE_MESSAGE = b"\x12" * 32 - -PRIVKEYS = [ - # Curve order is 256, so private keys use 32 bytes at most. - # Also, not all integers are valid private keys. Therefore, using pre-generated keys. 
- hex_to_int( - "0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3" - ), - hex_to_int( - "0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138" - ), - hex_to_int( - "0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216" - ), -] -PUBKEYS = [bls.SkToPk(privkey) for privkey in PRIVKEYS] - -ZERO_PUBKEY = b"\x00" * 48 -G1_POINT_AT_INFINITY = b"\xc0" + b"\x00" * 47 - -ZERO_SIGNATURE = b"\x00" * 96 -G2_POINT_AT_INFINITY = b"\xc0" + b"\x00" * 95 - -ZERO_PRIVKEY = 0 -ZERO_PRIVKEY_BYTES = b"\x00" * 32 - - -def expect_exception(func, *args): - try: - func(*args) - except Exception: - pass - else: - raise Exception("should have raised exception") - - -def case01_sign(): - # Valid cases - for privkey in PRIVKEYS: - for message in MESSAGES: - sig = bls.Sign(privkey, message) - assert sig == milagro_bls.Sign(to_bytes(privkey), message) # double-check with milagro - identifier = f"{int_to_hex(privkey)}_{encode_hex(message)}" - yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "privkey": int_to_hex(privkey), - "message": encode_hex(message), - }, - "output": encode_hex(sig), - } - # Edge case: privkey == 0 - expect_exception(bls.Sign, ZERO_PRIVKEY, message) - expect_exception(milagro_bls.Sign, ZERO_PRIVKEY_BYTES, message) - yield "sign_case_zero_privkey", { - "input": { - "privkey": encode_hex(ZERO_PRIVKEY_BYTES), - "message": encode_hex(message), - }, - "output": None, - } - - -def case02_verify(): - for i, privkey in enumerate(PRIVKEYS): - for message in MESSAGES: - # Valid signature - signature = bls.Sign(privkey, message) - pubkey = bls.SkToPk(privkey) - - assert milagro_bls.SkToPk(to_bytes(privkey)) == pubkey - assert milagro_bls.Sign(to_bytes(privkey), message) == signature - - identifier = f"{encode_hex(pubkey)}_{encode_hex(message)}" - - assert bls.Verify(pubkey, message, signature) - assert milagro_bls.Verify(pubkey, message, signature) - - yield f'verify_valid_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkey": encode_hex(pubkey), - "message": encode_hex(message), - "signature": encode_hex(signature), - }, - "output": True, - } - - # Invalid signatures -- wrong pubkey - wrong_pubkey = bls.SkToPk(PRIVKEYS[(i + 1) % len(PRIVKEYS)]) - identifier = f"{encode_hex(wrong_pubkey)}_{encode_hex(message)}" - assert not bls.Verify(wrong_pubkey, message, signature) - assert not milagro_bls.Verify(wrong_pubkey, message, signature) - yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkey": encode_hex(wrong_pubkey), - "message": encode_hex(message), - "signature": encode_hex(signature), - }, - "output": False, - } - - # Invalid signature -- tampered with signature - tampered_signature = signature[:-4] + b"\xff\xff\xff\xff" - identifier = f"{encode_hex(pubkey)}_{encode_hex(message)}" - assert not bls.Verify(pubkey, message, tampered_signature) - assert not milagro_bls.Verify(pubkey, message, tampered_signature) - yield f'verify_tampered_signature_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkey": encode_hex(pubkey), - "message": encode_hex(message), - "signature": encode_hex(tampered_signature), - }, - "output": False, - } - - # Invalid pubkey and signature with the point at infinity - assert not bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY) - assert not milagro_bls.Verify(G1_POINT_AT_INFINITY, 
SAMPLE_MESSAGE, G2_POINT_AT_INFINITY) - yield "verify_infinity_pubkey_and_infinity_signature", { - "input": { - "pubkey": encode_hex(G1_POINT_AT_INFINITY), - "message": encode_hex(SAMPLE_MESSAGE), - "signature": encode_hex(G2_POINT_AT_INFINITY), - }, - "output": False, - } - - -def case03_aggregate(): - for message in MESSAGES: - sigs = [bls.Sign(privkey, message) for privkey in PRIVKEYS] - aggregate_sig = bls.Aggregate(sigs) - assert aggregate_sig == milagro_bls.Aggregate(sigs) - yield f"aggregate_{encode_hex(message)}", { - "input": [encode_hex(sig) for sig in sigs], - "output": encode_hex(aggregate_sig), - } - - # Invalid pubkeys -- len(pubkeys) == 0 - expect_exception(bls.Aggregate, []) - # No signatures to aggregate. Follow IETF BLS spec, return `None` to represent INVALID. - # https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04#section-2.8 - yield "aggregate_na_signatures", { - "input": [], - "output": None, - } - - # Valid to aggregate G2 point at infinity - aggregate_sig = bls.Aggregate([G2_POINT_AT_INFINITY]) - assert aggregate_sig == milagro_bls.Aggregate([G2_POINT_AT_INFINITY]) == G2_POINT_AT_INFINITY - yield "aggregate_infinity_signature", { - "input": [encode_hex(G2_POINT_AT_INFINITY)], - "output": encode_hex(aggregate_sig), - } - - -def case04_fast_aggregate_verify(): - for i, message in enumerate(MESSAGES): - privkeys = PRIVKEYS[: i + 1] - sigs = [bls.Sign(privkey, message) for privkey in privkeys] - aggregate_signature = bls.Aggregate(sigs) - pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] - pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys] - - # Valid signature - identifier = f"{pubkeys_serial}_{encode_hex(message)}" - assert bls.FastAggregateVerify(pubkeys, message, aggregate_signature) - assert milagro_bls.FastAggregateVerify(pubkeys, message, aggregate_signature) - yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkeys": pubkeys_serial, - "message": encode_hex(message), - "signature": encode_hex(aggregate_signature), - }, - "output": True, - } - - # Invalid signature -- extra pubkey - pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])] - pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] - identifier = f"{pubkeys_extra_serial}_{encode_hex(message)}" - assert not bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature) - assert not milagro_bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature) - yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkeys": pubkeys_extra_serial, - "message": encode_hex(message), - "signature": encode_hex(aggregate_signature), - }, - "output": False, - } - - # Invalid signature -- tampered with signature - tampered_signature = aggregate_signature[:-4] + b"\xff\xff\xff\xff" - identifier = f"{pubkeys_serial}_{encode_hex(message)}" - assert not bls.FastAggregateVerify(pubkeys, message, tampered_signature) - assert not milagro_bls.FastAggregateVerify(pubkeys, message, tampered_signature) - yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkeys": pubkeys_serial, - "message": encode_hex(message), - "signature": encode_hex(tampered_signature), - }, - "output": False, - } - - # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE - assert not bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY) - assert not milagro_bls.FastAggregateVerify([], message, 
G2_POINT_AT_INFINITY) - yield "fast_aggregate_verify_na_pubkeys_and_infinity_signature", { - "input": { - "pubkeys": [], - "message": encode_hex(message), - "signature": encode_hex(G2_POINT_AT_INFINITY), - }, - "output": False, - } - - # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... - assert not bls.FastAggregateVerify([], message, ZERO_SIGNATURE) - assert not milagro_bls.FastAggregateVerify([], message, ZERO_SIGNATURE) - yield "fast_aggregate_verify_na_pubkeys_and_zero_signature", { - "input": { - "pubkeys": [], - "message": encode_hex(message), - "signature": encode_hex(ZERO_SIGNATURE), - }, - "output": False, - } - - # Invalid pubkeys and signature -- pubkeys contains point at infinity - pubkeys = PUBKEYS.copy() - pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] - signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] - aggregate_signature = bls.Aggregate(signatures) - assert not bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) - assert not milagro_bls.FastAggregateVerify( - pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature - ) - yield "fast_aggregate_verify_infinity_pubkey", { - "input": { - "pubkeys": [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], - "message": encode_hex(SAMPLE_MESSAGE), - "signature": encode_hex(aggregate_signature), - }, - "output": False, - } - - -def case05_aggregate_verify(): - pubkeys = [] - pubkeys_serial = [] - messages = [] - messages_serial = [] - sigs = [] - for privkey, message in zip(PRIVKEYS, MESSAGES): - sig = bls.Sign(privkey, message) - pubkey = bls.SkToPk(privkey) - pubkeys.append(pubkey) - pubkeys_serial.append(encode_hex(pubkey)) - messages.append(message) - messages_serial.append(encode_hex(message)) - sigs.append(sig) - - aggregate_signature = bls.Aggregate(sigs) - assert bls.AggregateVerify(pubkeys, messages, aggregate_signature) - assert milagro_bls.AggregateVerify(pubkeys, messages, aggregate_signature) - yield "aggregate_verify_valid", { - "input": { - "pubkeys": pubkeys_serial, - "messages": messages_serial, - "signature": encode_hex(aggregate_signature), - }, - "output": True, - } - - tampered_signature = aggregate_signature[:4] + b"\xff\xff\xff\xff" - assert not bls.AggregateVerify(pubkey, messages, tampered_signature) - assert not milagro_bls.AggregateVerify(pubkeys, messages, tampered_signature) - yield "aggregate_verify_tampered_signature", { - "input": { - "pubkeys": pubkeys_serial, - "messages": messages_serial, - "signature": encode_hex(tampered_signature), - }, - "output": False, - } - - # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE - assert not bls.AggregateVerify([], [], G2_POINT_AT_INFINITY) - assert not milagro_bls.AggregateVerify([], [], G2_POINT_AT_INFINITY) - yield "aggregate_verify_na_pubkeys_and_infinity_signature", { - "input": { - "pubkeys": [], - "messages": [], - "signature": encode_hex(G2_POINT_AT_INFINITY), - }, - "output": False, - } - - # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... 
- assert not bls.AggregateVerify([], [], ZERO_SIGNATURE) - assert not milagro_bls.AggregateVerify([], [], ZERO_SIGNATURE) - yield "aggregate_verify_na_pubkeys_and_zero_signature", { - "input": { - "pubkeys": [], - "messages": [], - "signature": encode_hex(ZERO_SIGNATURE), - }, - "output": False, - } - - # Invalid pubkeys and signature -- pubkeys contains point at infinity - pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] - messages_with_sample = messages + [SAMPLE_MESSAGE] - assert not bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature) - assert not milagro_bls.AggregateVerify( - pubkeys_with_infinity, messages_with_sample, aggregate_signature - ) - yield "aggregate_verify_infinity_pubkey", { - "input": { - "pubkeys": [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], - "messages": [encode_hex(message) for message in messages_with_sample], - "signature": encode_hex(aggregate_signature), - }, - "output": False, - } - - -def case06_eth_aggregate_pubkeys(): - for pubkey in PUBKEYS: - encoded_pubkey = encode_hex(pubkey) - aggregate_pubkey = spec.eth_aggregate_pubkeys([pubkey]) - # Should be unchanged - assert aggregate_pubkey == milagro_bls._AggregatePKs([pubkey]) == pubkey - # Valid pubkey - yield f'eth_aggregate_pubkeys_valid_{(hash(bytes(encoded_pubkey, "utf-8"))[:8]).hex()}', { - "input": [encode_hex(pubkey)], - "output": encode_hex(aggregate_pubkey), - } - - # Valid pubkeys - aggregate_pubkey = spec.eth_aggregate_pubkeys(PUBKEYS) - assert aggregate_pubkey == milagro_bls._AggregatePKs(PUBKEYS) - yield "eth_aggregate_pubkeys_valid_pubkeys", { - "input": [encode_hex(pubkey) for pubkey in PUBKEYS], - "output": encode_hex(aggregate_pubkey), - } - - # Invalid pubkeys -- len(pubkeys) == 0 - expect_exception(spec.eth_aggregate_pubkeys, []) - expect_exception(milagro_bls._AggregatePKs, []) - yield "eth_aggregate_pubkeys_empty_list", { - "input": [], - "output": None, - } - - # Invalid pubkeys -- [ZERO_PUBKEY] - expect_exception(spec.eth_aggregate_pubkeys, [ZERO_PUBKEY]) - expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY]) - yield "eth_aggregate_pubkeys_zero_pubkey", { - "input": [encode_hex(ZERO_PUBKEY)], - "output": None, - } - - # Invalid pubkeys -- G1 point at infinity - expect_exception(spec.eth_aggregate_pubkeys, [G1_POINT_AT_INFINITY]) - expect_exception(milagro_bls._AggregatePKs, [G1_POINT_AT_INFINITY]) - yield "eth_aggregate_pubkeys_infinity_pubkey", { - "input": [encode_hex(G1_POINT_AT_INFINITY)], - "output": None, - } - - # Invalid pubkeys -- b'\x40\x00\x00\x00....\x00' pubkey - x40_pubkey = b"\x40" + b"\00" * 47 - expect_exception(spec.eth_aggregate_pubkeys, [x40_pubkey]) - expect_exception(milagro_bls._AggregatePKs, [x40_pubkey]) - yield "eth_aggregate_pubkeys_x40_pubkey", { - "input": [encode_hex(x40_pubkey)], - "output": None, - } - - -def case07_eth_fast_aggregate_verify(): - """ - Similar to `case04_fast_aggregate_verify` except for the empty case - """ - for i, message in enumerate(MESSAGES): - privkeys = PRIVKEYS[: i + 1] - sigs = [bls.Sign(privkey, message) for privkey in privkeys] - aggregate_signature = bls.Aggregate(sigs) - pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] - pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys] - - # Valid signature - identifier = f"{pubkeys_serial}_{encode_hex(message)}" - assert spec.eth_fast_aggregate_verify(pubkeys, message, aggregate_signature) - yield f'eth_fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkeys": 
pubkeys_serial, - "message": encode_hex(message), - "signature": encode_hex(aggregate_signature), - }, - "output": True, - } - - # Invalid signature -- extra pubkey - pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])] - pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] - identifier = f"{pubkeys_extra_serial}_{encode_hex(message)}" - assert not spec.eth_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature) - yield f'eth_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkeys": pubkeys_extra_serial, - "message": encode_hex(message), - "signature": encode_hex(aggregate_signature), - }, - "output": False, - } - - # Invalid signature -- tampered with signature - tampered_signature = aggregate_signature[:-4] + b"\xff\xff\xff\xff" - identifier = f"{pubkeys_serial}_{encode_hex(message)}" - assert not spec.eth_fast_aggregate_verify(pubkeys, message, tampered_signature) - yield f'eth_fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { - "input": { - "pubkeys": pubkeys_serial, - "message": encode_hex(message), - "signature": encode_hex(tampered_signature), - }, - "output": False, - } - - # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY is VALID - assert spec.eth_fast_aggregate_verify([], message, G2_POINT_AT_INFINITY) - yield "eth_fast_aggregate_verify_na_pubkeys_and_infinity_signature", { - "input": { - "pubkeys": [], - "message": encode_hex(message), - "signature": encode_hex(G2_POINT_AT_INFINITY), - }, - "output": True, - } - - # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... - assert not spec.eth_fast_aggregate_verify([], message, ZERO_SIGNATURE) - yield "eth_fast_aggregate_verify_na_pubkeys_and_zero_signature", { - "input": { - "pubkeys": [], - "message": encode_hex(message), - "signature": encode_hex(ZERO_SIGNATURE), - }, - "output": False, - } - - # Invalid pubkeys and signature -- pubkeys contains point at infinity - pubkeys = PUBKEYS.copy() - pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] - signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] - aggregate_signature = bls.Aggregate(signatures) - assert not spec.eth_fast_aggregate_verify( - pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature - ) - yield "eth_fast_aggregate_verify_infinity_pubkey", { - "input": { - "pubkeys": [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], - "message": encode_hex(SAMPLE_MESSAGE), - "signature": encode_hex(aggregate_signature), - }, - "output": False, - } - - -def create_provider( - fork_name: SpecForkName, - handler_name: str, - test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]], -) -> gen_typing.TestProvider: - - def prepare_fn() -> None: - # Nothing to load / change in spec. Maybe in future forks. - # Put the tests into the general config category, to not require any particular configuration. 
- return - - def cases_fn() -> Iterable[gen_typing.TestCase]: - for data in test_case_fn(): - (case_name, case_content) = data - yield gen_typing.TestCase( - fork_name=fork_name, - preset_name="general", - runner_name="bls", - handler_name=handler_name, - suite_name="bls", - case_name=case_name, - case_fn=lambda: [("data", "data", case_content)], - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct. - gen_runner.run_generator( - "bls", - [ - # PHASE0 - create_provider(PHASE0, "sign", case01_sign), - create_provider(PHASE0, "verify", case02_verify), - create_provider(PHASE0, "aggregate", case03_aggregate), - create_provider(PHASE0, "fast_aggregate_verify", case04_fast_aggregate_verify), - create_provider(PHASE0, "aggregate_verify", case05_aggregate_verify), - # ALTAIR - create_provider(ALTAIR, "eth_aggregate_pubkeys", case06_eth_aggregate_pubkeys), - create_provider(ALTAIR, "eth_fast_aggregate_verify", case07_eth_fast_aggregate_verify), - ], - ) diff --git a/tests/generators/epoch_processing/README.md b/tests/generators/epoch_processing/README.md deleted file mode 100644 index 203f93ec10..0000000000 --- a/tests/generators/epoch_processing/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Epoch processing - -Epoch processing covers the sub-transitions during an epoch change. - -An epoch-processing test-runner can consume these sub-transition test-suites, - and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler. - -Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../formats/epoch_processing/README.md). diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py deleted file mode 100644 index 79c242f112..0000000000 --- a/tests/generators/epoch_processing/main.py +++ /dev/null @@ -1,88 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import ( - run_state_test_generators, - combine_mods, - check_mods, -) -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - phase_0_mods = { - key: "eth2spec.test.phase0.epoch_processing.test_process_" + key - for key in [ - "justification_and_finalization", - "rewards_and_penalties", - "registry_updates", - "slashings", - "eth1_data_reset", - "effective_balance_updates", - "slashings_reset", - "randao_mixes_reset", - "historical_roots_update", - "participation_record_updates", - ] - } - - _new_altair_mods = { - key: "eth2spec.test.altair.epoch_processing.test_process_" + key - for key in [ - "inactivity_updates", - "participation_flag_updates", - "sync_committee_updates", - ] - } - altair_mods = combine_mods(_new_altair_mods, phase_0_mods) - - # No epoch-processing changes in Bellatrix and previous testing repeats with new types, - # so no additional tests required. 
- bellatrix_mods = altair_mods - - _new_capella_mods = { - key: "eth2spec.test.capella.epoch_processing.test_process_" + key - for key in [ - "historical_summaries_update", - ] - } - capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - - _new_deneb_mods = { - key: "eth2spec.test.deneb.epoch_processing.test_process_" + key - for key in [ - "registry_updates", - ] - } - deneb_mods = combine_mods(_new_deneb_mods, capella_mods) - - _new_electra_mods_1 = { - key: "eth2spec.test.electra.epoch_processing.test_process_" + key - for key in [ - "effective_balance_updates", - "pending_consolidations", - "registry_updates", - ] - } - # This is a trick to allow tests be split into multiple files and use the same test format. - _new_electra_mods_2 = { - key: "eth2spec.test.electra.epoch_processing." + key - for key in [ - "pending_deposits", - ] - } - _new_electra_mods = {**_new_electra_mods_1, **_new_electra_mods_2} - electra_mods = combine_mods(_new_electra_mods, deneb_mods) - - # No additional Fulu specific epoch processing tests - fulu_mods = electra_mods - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "epoch_processing") - - run_state_test_generators(runner_name="epoch_processing", all_mods=all_mods) diff --git a/tests/generators/finality/README.md b/tests/generators/finality/README.md deleted file mode 100644 index dec5819c68..0000000000 --- a/tests/generators/finality/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Finality tests - -Finality tests cover regular state-transitions in a common block-list format to test finality rules. - -Information on the format of the tests can be found in the [finality test formats documentation](../../formats/finality/README.md). diff --git a/tests/generators/finality/main.py b/tests/generators/finality/main.py deleted file mode 100644 index 3bee1c33ae..0000000000 --- a/tests/generators/finality/main.py +++ /dev/null @@ -1,25 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - phase_0_mods = {"finality": "eth2spec.test.phase0.finality.test_finality"} - altair_mods = phase_0_mods # No additional Altair specific finality tests - bellatrix_mods = altair_mods # No additional Bellatrix specific finality tests - capella_mods = bellatrix_mods # No additional Capella specific finality tests - deneb_mods = capella_mods # No additional Deneb specific finality tests - electra_mods = deneb_mods # No additional Electra specific finality tests - fulu_mods = electra_mods # No additional Fulu specific finality tests - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "finality") - - run_state_test_generators(runner_name="finality", all_mods=all_mods) diff --git a/tests/generators/fork_choice/README.md b/tests/generators/fork_choice/README.md deleted file mode 100644 index e67b115ba1..0000000000 --- a/tests/generators/fork_choice/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Fork choice tests - -Fork choice tests cover the different forking cases with fork choice helper functions. 
- -Information on the format of the tests can be found in the [fork choice test formats documentation](../../formats/fork_choice/README.md). diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py deleted file mode 100644 index abe6453bc4..0000000000 --- a/tests/generators/fork_choice/main.py +++ /dev/null @@ -1,71 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import ( - run_state_test_generators, - combine_mods, - check_mods, -) -from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - # Note: Fork choice tests start from Altair - there are no fork choice test for phase 0 anymore - altair_mods = { - key: "eth2spec.test.phase0.fork_choice.test_" + key - for key in [ - "get_head", - "on_block", - "ex_ante", - "reorg", - "withholding", - "get_proposer_head", - ] - } - - # For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several - # PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function - _new_bellatrix_mods = { - key: "eth2spec.test.bellatrix.fork_choice.test_" + key - for key in [ - "on_merge_block", - "should_override_forkchoice_update", - ] - } - bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) - capella_mods = bellatrix_mods # No additional Capella specific fork choice tests - - # Deneb adds `is_data_available` tests - _new_deneb_mods = { - key: "eth2spec.test.deneb.fork_choice.test_" + key - for key in [ - "on_block", - ] - } - deneb_mods = combine_mods(_new_deneb_mods, capella_mods) - - _new_electra_mods = { - key: "eth2spec.test.electra.fork_choice.test_" + key - for key in [ - "deposit_with_reorg", - ] - } - electra_mods = combine_mods(_new_electra_mods, deneb_mods) - - # Fulu adds new `is_data_available` tests - _new_fulu_mods = { - key: "eth2spec.test.fulu.fork_choice.test_" + key - for key in [ - "on_block", - ] - } - fulu_mods = combine_mods(_new_fulu_mods, electra_mods) - - all_mods = { - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "fork_choice") - - run_state_test_generators(runner_name="fork_choice", all_mods=all_mods) diff --git a/tests/generators/forks/main.py b/tests/generators/forks/main.py deleted file mode 100644 index a324cc398c..0000000000 --- a/tests/generators/forks/main.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Iterable - -from eth2spec.test.helpers.constants import ( - PHASE0, - ALTAIR, - BELLATRIX, - CAPELLA, - DENEB, - ELECTRA, - FULU, - MINIMAL, - MAINNET, -) -from eth2spec.test.helpers.typing import SpecForkName, PresetBaseName -from eth2spec.test.altair.fork import test_altair_fork_basic, test_altair_fork_random -from eth2spec.test.bellatrix.fork import test_bellatrix_fork_basic, test_bellatrix_fork_random -from eth2spec.test.capella.fork import test_capella_fork_basic, test_capella_fork_random -from eth2spec.test.deneb.fork import test_deneb_fork_basic, test_deneb_fork_random -from eth2spec.test.electra.fork import test_electra_fork_basic, test_electra_fork_random -from eth2spec.test.fulu.fork import test_fulu_fork_basic, test_fulu_fork_random -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing -from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests - - -def create_provider( - tests_src, preset_name: PresetBaseName, phase: SpecForkName, fork_name: SpecForkName -) -> gen_typing.TestProvider: - - def prepare_fn() 
-> None: - return - - def cases_fn() -> Iterable[gen_typing.TestCase]: - return generate_from_tests( - runner_name="fork", - handler_name="fork", - src=tests_src, - fork_name=fork_name, - preset_name=preset_name, - phase=phase, - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -def _get_fork_tests_providers(): - for preset in [MINIMAL, MAINNET]: - yield create_provider(test_altair_fork_basic, preset, PHASE0, ALTAIR) - yield create_provider(test_altair_fork_random, preset, PHASE0, ALTAIR) - yield create_provider(test_bellatrix_fork_basic, preset, ALTAIR, BELLATRIX) - yield create_provider(test_bellatrix_fork_random, preset, ALTAIR, BELLATRIX) - yield create_provider(test_capella_fork_basic, preset, BELLATRIX, CAPELLA) - yield create_provider(test_capella_fork_random, preset, BELLATRIX, CAPELLA) - yield create_provider(test_deneb_fork_basic, preset, CAPELLA, DENEB) - yield create_provider(test_deneb_fork_random, preset, CAPELLA, DENEB) - yield create_provider(test_electra_fork_basic, preset, DENEB, ELECTRA) - yield create_provider(test_electra_fork_random, preset, DENEB, ELECTRA) - yield create_provider(test_fulu_fork_basic, preset, ELECTRA, FULU) - yield create_provider(test_fulu_fork_random, preset, ELECTRA, FULU) - - -if __name__ == "__main__": - gen_runner.run_generator("forks", list(_get_fork_tests_providers())) diff --git a/tests/generators/genesis/README.md b/tests/generators/genesis/README.md deleted file mode 100644 index e270f6e35e..0000000000 --- a/tests/generators/genesis/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Genesis test generator - -Genesis tests cover the initialization and validity-based launch trigger for the Beacon Chain genesis state. - -Information on the format of the tests can be found in the [genesis test formats documentation](../../formats/genesis/README.md). 
- diff --git a/tests/generators/genesis/main.py b/tests/generators/genesis/main.py deleted file mode 100644 index 493d8183e9..0000000000 --- a/tests/generators/genesis/main.py +++ /dev/null @@ -1,32 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - phase_0_mods = { - key: "eth2spec.test.phase0.genesis.test_" + key - for key in [ - "initialization", - "validity", - ] - } - - altair_mods = phase_0_mods # No additional Altair specific genesis tests - bellatrix_mods = altair_mods # No additional Bellatrix specific genesis tests - capella_mods = bellatrix_mods # No additional Capella specific genesis tests - deneb_mods = capella_mods # No additional Deneb specific genesis tests - electra_mods = deneb_mods # No additional Electra specific genesis tests - fulu_mods = electra_mods # No additional Fulu specific genesis tests - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "genesis") - - run_state_test_generators(runner_name="genesis", all_mods=all_mods) diff --git a/tests/generators/kzg_4844/README.md b/tests/generators/kzg_4844/README.md deleted file mode 100644 index ab81a85e86..0000000000 --- a/tests/generators/kzg_4844/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# KZG 4844 Test Generator - -These tests are specific to the KZG API required for implementing EIP-4844 \ No newline at end of file diff --git a/tests/generators/kzg_7594/README.md b/tests/generators/kzg_7594/README.md deleted file mode 100644 index 5336255ce0..0000000000 --- a/tests/generators/kzg_7594/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# KZG Test Generator for EIP-7594 - -These tests are specific to the API required for implementing PeerDAS polynomial commitment sampling. \ No newline at end of file diff --git a/tests/generators/light_client/README.md b/tests/generators/light_client/README.md deleted file mode 100644 index 7eabc2520c..0000000000 --- a/tests/generators/light_client/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Light client tests - -The purpose of this test-generator is to provide test-vectors for validating the correct implementation of the light client sync protocol. - -Test-format documentation can be found [here](../../formats/light_client/README.md). 
diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py deleted file mode 100644 index 02ab37041e..0000000000 --- a/tests/generators/light_client/main.py +++ /dev/null @@ -1,63 +0,0 @@ -from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU -from eth2spec.gen_helpers.gen_from_tests.gen import ( - combine_mods, - run_state_test_generators, - check_mods, -) - - -if __name__ == "__main__": - altair_mods = { - key: "eth2spec.test.altair.light_client.test_" + key - for key in [ - "data_collection", - "single_merkle_proof", - "sync", - "update_ranking", - ] - } - - _new_bellatrix_mods = { - key: "eth2spec.test.bellatrix.light_client.test_" + key - for key in [ - "data_collection", - "sync", - ] - } - bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) - - _new_capella_mods = { - key: "eth2spec.test.capella.light_client.test_" + key - for key in [ - "data_collection", - "single_merkle_proof", - "sync", - ] - } - capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - - _new_deneb_mods = { - key: "eth2spec.test.deneb.light_client.test_" + key - for key in [ - "sync", - ] - } - deneb_mods = combine_mods(_new_deneb_mods, capella_mods) - - # No additional Electra specific light client tests - electra_mods = deneb_mods - - # No additional Electra specific light client tests - fulu_mods = electra_mods - - all_mods = { - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "light_client") - - run_state_test_generators(runner_name="light_client", all_mods=all_mods) diff --git a/tests/generators/main.py b/tests/generators/main.py new file mode 100644 index 0000000000..a15e213eaa --- /dev/null +++ b/tests/generators/main.py @@ -0,0 +1,20 @@ +import importlib +import os + +from eth2spec.gen_helpers.gen_base import gen_runner + +if __name__ == "__main__": + current_dir = os.path.dirname(__file__) + runners_dir = os.path.join(current_dir, "runners") + + test_cases = [] + for filename in os.listdir(runners_dir): + if not filename.endswith(".py"): + continue + module_name = filename.replace(".py", "") + full_module = f"tests.generators.runners.{module_name}" + mod = importlib.import_module(full_module) + assert hasattr(mod, "get_test_cases"), full_module + test_cases.extend(mod.get_test_cases()) + + gen_runner.run_generator(test_cases) diff --git a/tests/generators/merkle_proof/README.md b/tests/generators/merkle_proof/README.md deleted file mode 100644 index fb4d05fda8..0000000000 --- a/tests/generators/merkle_proof/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Merkle proof tests - -The purpose of this test-generator is to provide test-vectors for validating the correct implementation of the Merkle proof verification. - -Test-format documentation can be found [here](../../formats/merkle_proof/README.md). 
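Editor's note: the new `tests/generators/main.py` added above replaces the per-directory main scripts with a single entry point that imports every module in `tests/generators/runners/` and collects its `get_test_cases()`. A minimal runner compatible with that loop might look like the sketch below; the module name `example.py`, the handler name, and the case data are made up for illustration, while the `TestCase` fields mirror the runners added elsewhere in this diff.

```python
# Hypothetical tests/generators/runners/example.py -- illustration only.
from typing import Iterable

from eth2spec.gen_helpers.gen_base.gen_typing import TestCase
from eth2spec.test.helpers.constants import PHASE0


def example_case_fn():
    # Case functions yield (part_name, part_kind, data) tuples, matching the
    # shuffling and BLS runners in this diff.
    yield "data", "data", {"value": 1}


def get_test_cases() -> Iterable[TestCase]:
    # main.py discovers this module by file name and calls get_test_cases().
    return [
        TestCase(
            fork_name=PHASE0,
            preset_name="general",
            runner_name="example",
            handler_name="core",
            suite_name="example",
            case_name="case_0",
            case_fn=example_case_fn,
        )
    ]
```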
diff --git a/tests/generators/merkle_proof/__init__.py b/tests/generators/merkle_proof/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/generators/merkle_proof/main.py b/tests/generators/merkle_proof/main.py deleted file mode 100644 index 54cfc6e599..0000000000 --- a/tests/generators/merkle_proof/main.py +++ /dev/null @@ -1,32 +0,0 @@ -from eth2spec.test.helpers.constants import DENEB, ELECTRA, FULU -from eth2spec.gen_helpers.gen_from_tests.gen import ( - run_state_test_generators, - combine_mods, - check_mods, -) - - -if __name__ == "__main__": - deneb_mods = { - key: "eth2spec.test.deneb.merkle_proof.test_" + key - for key in [ - "single_merkle_proof", - ] - } - electra_mods = deneb_mods - _new_fulu_mods = { - key: "eth2spec.test.fulu.merkle_proof.test_" + key - for key in [ - "single_merkle_proof", - ] - } - fulu_mods = combine_mods(_new_fulu_mods, electra_mods) - - all_mods = { - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "merkle_proof") - - run_state_test_generators(runner_name="merkle_proof", all_mods=all_mods) diff --git a/tests/generators/networking/README.md b/tests/generators/networking/README.md deleted file mode 100644 index 4b4fea74da..0000000000 --- a/tests/generators/networking/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Networking tests - -The purpose of this test-generator is to provide test-vectors for validating the correct implementation of the networking protocol. - -Test-format documentation can be found [here](../../formats/networking/README.md). diff --git a/tests/generators/networking/__init__.py b/tests/generators/networking/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/generators/networking/main.py b/tests/generators/networking/main.py deleted file mode 100644 index 2e52594ab0..0000000000 --- a/tests/generators/networking/main.py +++ /dev/null @@ -1,16 +0,0 @@ -from eth2spec.test.helpers.constants import FULU -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods - - -if __name__ == "__main__": - fulu_mods = { - key: "eth2spec.test.fulu.networking.test_" + key - for key in [ - "compute_columns_for_custody_group", - "get_custody_groups", - ] - } - all_mods = {FULU: fulu_mods} - check_mods(all_mods, "networking") - - run_state_test_generators(runner_name="networking", all_mods=all_mods) diff --git a/tests/generators/operations/README.md b/tests/generators/operations/README.md deleted file mode 100644 index 29f64295e0..0000000000 --- a/tests/generators/operations/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Operations - -Operations (or "transactions" in previous spec iterations), - are atomic changes to the state, introduced by embedding in blocks. - -An operation test-runner can consume these operation test-suites, - and handle different kinds of operations by processing the cases using the specified test handler. - -Information on the format of the tests can be found in the [operations test formats documentation](../../formats/operations/README.md). 
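Editor's note: the generator mains deleted above (and the operations main deleted just below) keep explicit handler-to-module dictionaries; the new runners instead recover the handler name from the test module's name through a `handler_name_fn` hook, as in `runners/operations.py` and `runners/sanity.py` later in this diff. The sketch below mirrors the operations hook and checks it against a module path taken from the deleted operations main.

```python
# Same shape as the handler_name_fn hooks added later in this diff.
def handler_name_fn(mod: str) -> str:
    handler_name = mod.split(".")[-1]
    if handler_name == "test_process_sync_aggregate_random":
        # Fold the randomized variant into the regular sync_aggregate handler.
        return "sync_aggregate"
    return handler_name.replace("test_process_", "")


assert handler_name_fn(
    "eth2spec.test.phase0.block_processing.test_process_attestation"
) == "attestation"
```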
diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py deleted file mode 100644 index 0804f31ae5..0000000000 --- a/tests/generators/operations/main.py +++ /dev/null @@ -1,94 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import ( - run_state_test_generators, - combine_mods, - check_mods, -) -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - phase_0_mods = { - key: "eth2spec.test.phase0.block_processing.test_process_" + key - for key in [ - "attestation", - "attester_slashing", - "block_header", - "deposit", - "proposer_slashing", - "voluntary_exit", - ] - } - _new_altair_mods = { - **{ - "sync_aggregate": [ - "eth2spec.test.altair.block_processing.sync_aggregate.test_process_" + key - for key in ["sync_aggregate", "sync_aggregate_random"] - ] - }, - **{ - key: "eth2spec.test.altair.block_processing.test_process_" + key - for key in [ - "deposit", - ] - }, - } - altair_mods = combine_mods(_new_altair_mods, phase_0_mods) - - _new_bellatrix_mods = { - key: "eth2spec.test.bellatrix.block_processing.test_process_" + key - for key in [ - "deposit", - "execution_payload", - "voluntary_exit", - ] - } - bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) - - _new_capella_mods = { - key: "eth2spec.test.capella.block_processing.test_process_" + key - for key in [ - "bls_to_execution_change", - "deposit", - "execution_payload", - "withdrawals", - ] - } - capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - - _new_deneb_mods = { - key: "eth2spec.test.deneb.block_processing.test_process_" + key - for key in [ - "execution_payload", - "voluntary_exit", - ] - } - deneb_mods = combine_mods(_new_deneb_mods, capella_mods) - - _new_electra_mods = { - key: "eth2spec.test.electra.block_processing.test_process_" + key - for key in [ - "attestation", - "consolidation_request", - "deposit_request", - "voluntary_exit", - "withdrawal_request", - "withdrawals", - ] - } - electra_mods = combine_mods(_new_electra_mods, deneb_mods) - - # No additional Fulu specific block processing tests - fulu_mods = electra_mods - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "block_processing") - - run_state_test_generators(runner_name="operations", all_mods=all_mods) diff --git a/tests/generators/random/Makefile b/tests/generators/random/Makefile deleted file mode 100644 index 2ec4b6ee03..0000000000 --- a/tests/generators/random/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -all: - if ! test -d venv; then python3 -m venv venv; fi; - . 
./venv/bin/activate - pip3 install -r requirements.txt - rm -f ../../core/pyspec/eth2spec/test/phase0/random/test_random.py - rm -f ../../core/pyspec/eth2spec/test/altair/random/test_random.py - rm -f ../../core/pyspec/eth2spec/test/bellatrix/random/test_random.py - rm -f ../../core/pyspec/eth2spec/test/capella/random/test_random.py - rm -f ../../core/pyspec/eth2spec/test/deneb/random/test_random.py - rm -f ../../core/pyspec/eth2spec/test/electra/random/test_random.py - python3 generate.py phase0 > ../../core/pyspec/eth2spec/test/phase0/random/test_random.py - python3 generate.py altair > ../../core/pyspec/eth2spec/test/altair/random/test_random.py - python3 generate.py bellatrix > ../../core/pyspec/eth2spec/test/bellatrix/random/test_random.py - python3 generate.py capella > ../../core/pyspec/eth2spec/test/capella/random/test_random.py - python3 generate.py deneb > ../../core/pyspec/eth2spec/test/deneb/random/test_random.py - python3 generate.py electra > ../../core/pyspec/eth2spec/test/electra/random/test_random.py diff --git a/tests/generators/random/README.md b/tests/generators/random/README.md deleted file mode 100644 index fd17284412..0000000000 --- a/tests/generators/random/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Randomized tests - -Randomized tests in the format of `sanity` blocks tests, with randomized operations. - -Information on the format of the tests can be found in the [sanity test formats documentation](../../formats/sanity/README.md). - -# To generate test sources - -```bash -$ make -``` - -The necessary commands are in the `Makefile`, as the only target. - -The generated files are committed to the repo so you should not need to do this. - -# To run tests - -Each of the generated test does produce a `pytest` test instance but by default is -currently skipped. Running the test via the generator (see next) will trigger any errors -that would arise during the running of `pytest`. - -# To generate spec tests (from the generated files) - -Run the test generator in the usual way. - -E.g. from the root of this repo, you can run: - -```bash -$ make gen_random -``` diff --git a/tests/generators/random/generate.py b/tests/generators/random/generate.py deleted file mode 100644 index ea9e4480c0..0000000000 --- a/tests/generators/random/generate.py +++ /dev/null @@ -1,297 +0,0 @@ -""" -This test format currently uses code generation to assemble the tests -as the current test infra does not have a facility to dynamically -generate tests that can be seen by ``pytest``. - -This will likely change in future releases of the testing infra. - -NOTE: To add additional scenarios, add test cases below in ``_generate_randomized_scenarios``. 
-""" - -import sys -import random -import warnings -from typing import Callable -import itertools - -from eth2spec.test.utils.randomized_block_tests import ( - no_block, - no_op_validation, - randomize_state, - randomize_state_altair, - randomize_state_bellatrix, - randomize_state_capella, - randomize_state_deneb, - randomize_state_electra, - randomize_state_fulu, - random_block, - random_block_altair_with_cycling_sync_committee_participation, - random_block_bellatrix, - random_block_capella, - random_block_deneb, - random_block_electra, - random_block_fulu, - last_slot_in_epoch, - random_slot_in_epoch, - penultimate_slot_in_epoch, - epoch_transition, - slot_transition, - transition_with_random_block, - transition_to_leaking, - transition_without_leak, -) -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -# Ensure this many blocks are present in *each* randomized scenario -BLOCK_TRANSITIONS_COUNT = 2 - - -def _normalize_transition(transition): - """ - Provide "empty" or "no op" sub-transitions - to a given transition. - """ - if isinstance(transition, Callable): - transition = transition() - if "epochs_to_skip" not in transition: - transition["epochs_to_skip"] = 0 - if "slots_to_skip" not in transition: - transition["slots_to_skip"] = 0 - if "block_producer" not in transition: - transition["block_producer"] = no_block - if "validation" not in transition: - transition["validation"] = no_op_validation - return transition - - -def _normalize_scenarios(scenarios): - """ - "Normalize" a "scenario" so that a producer of a test case - does not need to provide every expected key/value. - """ - for scenario in scenarios: - transitions = scenario["transitions"] - for i, transition in enumerate(transitions): - transitions[i] = _normalize_transition(transition) - - -def _flatten(t): - leak_transition = t[0] - result = [leak_transition] - for transition_batch in t[1]: - for transition in transition_batch: - if isinstance(transition, tuple): - for subtransition in transition: - result.append(subtransition) - else: - result.append(transition) - return result - - -def _generate_randomized_scenarios(block_randomizer): - """ - Generates a set of randomized testing scenarios. - Return a sequence of "scenarios" where each scenario: - 1. Provides some setup - 2. Provides a sequence of transitions that mutate the state in some way, - possibly yielding blocks along the way - NOTE: scenarios are "normalized" with empty/no-op elements before returning - to the test generation to facilitate brevity when writing scenarios by hand. - NOTE: the main block driver builds a block for the **next** slot, so - the slot transitions are offset by -1 to target certain boundaries. - """ - # go forward 0 or 1 epochs - epochs_set = ( - epoch_transition(n=0), - epoch_transition(n=1), - ) - # within those epochs, go forward to: - slots_set = ( - # the first slot in an epoch (see note in docstring about offsets...) - slot_transition(last_slot_in_epoch), - # the second slot in an epoch - slot_transition(n=0), - # some random number of slots, but not at epoch boundaries - slot_transition(random_slot_in_epoch), - # the last slot in an epoch (see note in docstring about offsets...) - slot_transition(penultimate_slot_in_epoch), - ) - # and produce a block... 
- blocks_set = (transition_with_random_block(block_randomizer),) - - rng = random.Random(1447) - all_skips = list(itertools.product(epochs_set, slots_set)) - randomized_skips = ( - rng.sample(all_skips, len(all_skips)) for _ in range(BLOCK_TRANSITIONS_COUNT) - ) - - # build a set of block transitions from combinations of sub-transitions - transitions_generator = (itertools.product(prefix, blocks_set) for prefix in randomized_skips) - block_transitions = zip(*transitions_generator) - - # and preface each set of block transitions with the possible leak transitions - leak_transitions = ( - transition_without_leak, - transition_to_leaking, - ) - scenarios = [ - {"transitions": _flatten(t)} for t in itertools.product(leak_transitions, block_transitions) - ] - _normalize_scenarios(scenarios) - return scenarios - - -def _id_from_scenario(test_description): - """ - Construct a name for the scenario based its data. - """ - - def _to_id_part(prefix, x): - suffix = str(x) - if isinstance(x, Callable): - suffix = x.__name__ - return f"{prefix}{suffix}" - - def _id_from_transition(transition): - return ",".join( - ( - _to_id_part("epochs:", transition["epochs_to_skip"]), - _to_id_part("slots:", transition["slots_to_skip"]), - _to_id_part("with-block:", transition["block_producer"]), - ) - ) - - return "|".join(map(_id_from_transition, test_description["transitions"])) - - -test_imports_template = """\"\"\" -This module is generated from the ``random`` test generator. -Please do not edit this file manually. -See the README for that generator for more information. -\"\"\" - -from eth2spec.test.helpers.constants import {phase} -from eth2spec.test.context import ( - misc_balances_in_default_range_with_many_validators, - with_phases, - zero_activation_threshold, - only_generator, -) -from eth2spec.test.context import ( - always_bls, - spec_test, - with_custom_state, - single_phase, -) -from eth2spec.test.utils.randomized_block_tests import ( - run_generated_randomized_test, -)""" - -test_template = """ -@only_generator(\"randomized test for broad coverage, not point-to-point CI\") -@with_phases([{phase}]) -@with_custom_state( - balances_fn=misc_balances_in_default_range_with_many_validators, - threshold_fn=zero_activation_threshold -) -@spec_test -@single_phase -@always_bls -def test_randomized_{index}(spec, state): - # scenario as high-level, informal text: -{name_as_comment} - scenario = {scenario} # noqa: E501 - yield from run_generated_randomized_test( - spec, - state, - scenario, - )""" - - -def _to_comment(name, indent_level): - parts = name.split("|") - indentation = " " * indent_level - parts = [indentation + "# " + part for part in parts] - return "\n".join(parts) - - -def run_generate_tests_to_std_out(phase, state_randomizer, block_randomizer): - scenarios = _generate_randomized_scenarios(block_randomizer) - test_content = {"phase": phase.upper()} - test_imports = test_imports_template.format(**test_content) - test_file = [test_imports] - for index, scenario in enumerate(scenarios): - # required for setup phase - scenario["state_randomizer"] = state_randomizer.__name__ - - # need to pass name, rather than function reference... 
- transitions = scenario["transitions"] - for transition in transitions: - for name, value in transition.items(): - if isinstance(value, Callable): - transition[name] = value.__name__ - - test_content = test_content.copy() - name = _id_from_scenario(scenario) - test_content["name_as_comment"] = _to_comment(name, 1) - test_content["index"] = index - test_content["scenario"] = scenario - test_instance = test_template.format(**test_content) - test_file.append(test_instance) - print("\n\n".join(test_file)) - - -if __name__ == "__main__": - did_generate = False - if PHASE0 in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - PHASE0, - state_randomizer=randomize_state, - block_randomizer=random_block, - ) - if ALTAIR in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - ALTAIR, - state_randomizer=randomize_state_altair, - block_randomizer=random_block_altair_with_cycling_sync_committee_participation, - ) - if BELLATRIX in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - BELLATRIX, - state_randomizer=randomize_state_bellatrix, - block_randomizer=random_block_bellatrix, - ) - if CAPELLA in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - CAPELLA, - state_randomizer=randomize_state_capella, - block_randomizer=random_block_capella, - ) - if DENEB in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - DENEB, - state_randomizer=randomize_state_deneb, - block_randomizer=random_block_deneb, - ) - if ELECTRA in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - ELECTRA, - state_randomizer=randomize_state_electra, - block_randomizer=random_block_electra, - ) - if FULU in sys.argv: - did_generate = True - run_generate_tests_to_std_out( - FULU, - state_randomizer=randomize_state_fulu, - block_randomizer=random_block_fulu, - ) - if not did_generate: - warnings.warn("no phase given for test generation") diff --git a/tests/generators/random/main.py b/tests/generators/random/main.py deleted file mode 100644 index 3a383d61d9..0000000000 --- a/tests/generators/random/main.py +++ /dev/null @@ -1,60 +0,0 @@ -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods - - -if __name__ == "__main__": - phase_0_mods = { - key: "eth2spec.test.phase0.random.test_" + key - for key in [ - "random", - ] - } - altair_mods = { - key: "eth2spec.test.altair.random.test_" + key - for key in [ - "random", - ] - } - bellatrix_mods = { - key: "eth2spec.test.bellatrix.random.test_" + key - for key in [ - "random", - ] - } - capella_mods = { - key: "eth2spec.test.capella.random.test_" + key - for key in [ - "random", - ] - } - deneb_mods = { - key: "eth2spec.test.deneb.random.test_" + key - for key in [ - "random", - ] - } - electra_mods = { - key: "eth2spec.test.electra.random.test_" + key - for key in [ - "random", - ] - } - fulu_mods = { - key: "eth2spec.test.fulu.random.test_" + key - for key in [ - "random", - ] - } - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "random") - - run_state_test_generators(runner_name="random", all_mods=all_mods) diff --git a/tests/generators/rewards/README.md b/tests/generators/rewards/README.md deleted file mode 100644 index 60f106836a..0000000000 --- a/tests/generators/rewards/README.md +++ /dev/null @@ -1,8 
+0,0 @@ -# Rewards - -Rewards covers the sub-functions of `process_rewards_and_penalties` for granular testing of components of the rewards function. - -A rewards test-runner can consume these sub-transition test-suites, - and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler. - -Information on the format of the tests can be found in the [rewards test formats documentation](../../formats/rewards/README.md). diff --git a/tests/generators/rewards/main.py b/tests/generators/rewards/main.py deleted file mode 100644 index 3d9065c7ac..0000000000 --- a/tests/generators/rewards/main.py +++ /dev/null @@ -1,37 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - phase_0_mods = { - key: "eth2spec.test.phase0.rewards.test_" + key - for key in [ - "basic", - "leak", - "random", - ] - } - # No additional Altair specific rewards tests, yet. - altair_mods = phase_0_mods - - # No additional Bellatrix specific rewards tests, yet. - # Note: Block rewards are non-epoch rewards and are tested as part of block processing tests. - # Transaction fees are part of the execution-layer. - bellatrix_mods = altair_mods - capella_mods = bellatrix_mods - deneb_mods = capella_mods - electra_mods = deneb_mods - fulu_mods = electra_mods - - all_mods = { - PHASE0: phase_0_mods, - ALTAIR: altair_mods, - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "rewards") - - run_state_test_generators(runner_name="rewards", all_mods=all_mods) diff --git a/tests/generators/runners/bls.py b/tests/generators/runners/bls.py new file mode 100644 index 0000000000..a38158f997 --- /dev/null +++ b/tests/generators/runners/bls.py @@ -0,0 +1,274 @@ +""" +BLS test vectors generator +""" + +from typing import Iterable + +import milagro_bls_binding as milagro_bls +from eth_utils import encode_hex + +from eth2spec.altair import spec +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.test.helpers.constants import ALTAIR +from eth2spec.utils import bls + +############################################################################### +# Helper functions +############################################################################### + + +def hex_to_int(x: str) -> int: + return int(x, 16) + + +def expect_exception(func, *args): + try: + func(*args) + except Exception: + pass + else: + raise Exception("should have raised exception") + + +############################################################################### +# Precomputed constants +############################################################################### + + +MESSAGES = [ + bytes(b"\x00" * 32), + bytes(b"\x56" * 32), + bytes(b"\xab" * 32), +] +SAMPLE_MESSAGE = b"\x12" * 32 + +PRIVKEYS = [ + # Curve order is 256, so private keys use 32 bytes at most. + # Also, not all integers are valid private keys. Therefore, using pre-generated keys. 
+ hex_to_int( + "0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3" + ), + hex_to_int( + "0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138" + ), + hex_to_int( + "0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216" + ), +] + +ZERO_PUBKEY = b"\x00" * 48 +G1_POINT_AT_INFINITY = b"\xc0" + b"\x00" * 47 + +ZERO_SIGNATURE = b"\x00" * 96 +G2_POINT_AT_INFINITY = b"\xc0" + b"\x00" * 95 + +ZERO_PRIVKEY = 0 +ZERO_PRIVKEY_BYTES = b"\x00" * 32 + + +############################################################################### +# Test cases for eth_aggregate_pubkeys +############################################################################### + + +def case_eth_aggregate_pubkeys(): + def get_test_runner(input_getter): + def _runner(): + pubkeys = input_getter() + try: + aggregate_pubkey = None + aggregate_pubkey = spec.eth_aggregate_pubkeys(pubkeys) + except: + expect_exception(milagro_bls._AggregatePKs, pubkeys) + if aggregate_pubkey is not None: + assert aggregate_pubkey == milagro_bls._AggregatePKs(pubkeys) + return [ + ( + "data", + "data", + { + "input": [encode_hex(pubkey) for pubkey in pubkeys], + "output": ( + encode_hex(aggregate_pubkey) if aggregate_pubkey is not None else None + ), + }, + ) + ] + + return _runner + + # Valid pubkey + for i, privkey in enumerate(PRIVKEYS): + + def get_inputs(privkey=privkey): + return [bls.SkToPk(privkey)] + + yield f"eth_aggregate_pubkeys_valid_{i}", get_test_runner(get_inputs) + + # Valid pubkeys + if True: + + def get_inputs(): + return [bls.SkToPk(privkey) for privkey in PRIVKEYS] + + yield "eth_aggregate_pubkeys_valid_pubkeys", get_test_runner(get_inputs) + + # Invalid pubkeys -- len(pubkeys) == 0 + if True: + + def get_inputs(): + return [] + + yield "eth_aggregate_pubkeys_empty_list", get_test_runner(get_inputs) + + # Invalid pubkeys -- [ZERO_PUBKEY] + if True: + + def get_inputs(): + return [ZERO_PUBKEY] + + yield "eth_aggregate_pubkeys_zero_pubkey", get_test_runner(get_inputs) + + # Invalid pubkeys -- G1 point at infinity + if True: + + def get_inputs(): + return [G1_POINT_AT_INFINITY] + + yield "eth_aggregate_pubkeys_infinity_pubkey", get_test_runner(get_inputs) + + # Invalid pubkeys -- b'\x40\x00\x00\x00....\x00' pubkey + if True: + + def get_inputs(): + return [b"\x40" + b"\00" * 47] + + yield "eth_aggregate_pubkeys_x40_pubkey", get_test_runner(get_inputs) + + +############################################################################### +# Test cases for eth_fast_aggregate_verify +############################################################################### + + +def case_eth_fast_aggregate_verify(): + def get_test_runner(input_getter): + def _runner(): + pubkeys, message, aggregate_signature = input_getter() + try: + ok = None + ok = spec.eth_fast_aggregate_verify(pubkeys, message, aggregate_signature) + except: + pass + return [ + ( + "data", + "data", + { + "input": { + "pubkeys": [encode_hex(pubkey) for pubkey in pubkeys], + "message": encode_hex(message), + "signature": encode_hex(aggregate_signature), + }, + "output": ok if ok is not None else None, + }, + ) + ] + + return _runner + + # Valid signature + for i, message in enumerate(MESSAGES): + + def get_inputs(i=i, message=message): + privkeys = PRIVKEYS[: i + 1] + sigs = [bls.Sign(privkey, message) for privkey in privkeys] + aggregate_signature = bls.Aggregate(sigs) + pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] + return pubkeys, 
message, aggregate_signature + + yield f"eth_fast_aggregate_verify_valid_{i}", get_test_runner(get_inputs) + + # Invalid signature -- extra pubkey + for i, message in enumerate(MESSAGES): + + def get_inputs(i=i, message=message): + privkeys = PRIVKEYS[: i + 1] + sigs = [bls.Sign(privkey, message) for privkey in privkeys] + aggregate_signature = bls.Aggregate(sigs) + # Add an extra pubkey to the end + pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] + [bls.SkToPk(PRIVKEYS[-1])] + return pubkeys, message, aggregate_signature + + yield f"eth_fast_aggregate_verify_extra_pubkey_{i}", get_test_runner(get_inputs) + + # Invalid signature -- tampered with signature + for i, message in enumerate(MESSAGES): + + def get_inputs(i=i, message=message): + privkeys = PRIVKEYS[: i + 1] + sigs = [bls.Sign(privkey, message) for privkey in privkeys] + aggregate_signature = bls.Aggregate(sigs) + pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] + # Tamper with the signature + tampered_signature = aggregate_signature[:-4] + b"\xff\xff\xff\xff" + return pubkeys, message, tampered_signature + + yield f"eth_fast_aggregate_verify_tampered_signature_{i}", get_test_runner(get_inputs) + + # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY is VALID + if True: + + def get_inputs(): + return [], MESSAGES[-1], G2_POINT_AT_INFINITY + + yield "eth_fast_aggregate_verify_na_pubkeys_and_infinity_signature", get_test_runner( + get_inputs + ) + + # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... + if True: + + def get_inputs(): + return [], MESSAGES[-1], ZERO_SIGNATURE + + yield "eth_fast_aggregate_verify_na_pubkeys_and_zero_signature", get_test_runner(get_inputs) + + # Invalid pubkeys and signature -- pubkeys contains point at infinity + if True: + + def get_inputs(): + pubkeys = [bls.SkToPk(privkey) for privkey in PRIVKEYS] + pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] + signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] + aggregate_signature = bls.Aggregate(signatures) + return pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature + + yield "eth_fast_aggregate_verify_infinity_pubkey", get_test_runner(get_inputs) + + +############################################################################### +# Main logic +############################################################################### + + +def get_test_cases() -> Iterable[TestCase]: + test_cases = [] + handlers = { + "eth_aggregate_pubkeys": case_eth_aggregate_pubkeys, + "eth_fast_aggregate_verify": case_eth_fast_aggregate_verify, + } + for method, fn in handlers.items(): + for case_name, case_fn in fn(): + test_cases.append( + TestCase( + fork_name=ALTAIR, + preset_name="general", + runner_name="bls", + handler_name=method, + suite_name="bls", + case_name=case_name, + case_fn=case_fn, + ) + ) + return test_cases diff --git a/tests/generators/runners/epoch_processing.py b/tests/generators/runners/epoch_processing.py new file mode 100644 index 0000000000..6c4a9b0543 --- /dev/null +++ b/tests/generators/runners/epoch_processing.py @@ -0,0 +1,17 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def handler_name_fn(mod): + handler_name = mod.split(".")[-1] + if handler_name == "test_apply_pending_deposit": + return "pending_deposits" + handler_name = handler_name.replace("test_process_", "") + handler_name = 
handler_name.replace("test_apply_", "") + return handler_name + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("epoch_processing", handler_name_fn=handler_name_fn) diff --git a/tests/generators/runners/finality.py b/tests/generators/runners/finality.py new file mode 100644 index 0000000000..644ebb7e90 --- /dev/null +++ b/tests/generators/runners/finality.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("finality") diff --git a/tests/generators/runners/fork_choice.py b/tests/generators/runners/fork_choice.py new file mode 100644 index 0000000000..b8bfeaff23 --- /dev/null +++ b/tests/generators/runners/fork_choice.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("fork_choice") diff --git a/tests/generators/runners/forks.py b/tests/generators/runners/forks.py new file mode 100644 index 0000000000..d2d1504985 --- /dev/null +++ b/tests/generators/runners/forks.py @@ -0,0 +1,25 @@ +from importlib import import_module +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests, get_expected_modules +from eth2spec.test.helpers.constants import ALL_PRESETS, POST_FORK_OF + + +def get_test_cases() -> Iterable[TestCase]: + test_cases = [] + for preset in ALL_PRESETS: + for prefork, postfork in POST_FORK_OF.items(): + for mod in get_expected_modules("fork"): + tests_src = import_module(mod) + test_cases.extend( + generate_from_tests( + runner_name="fork", + handler_name="fork", + src=tests_src, + fork_name=postfork, + preset_name=preset, + phase=prefork, + ) + ) + return test_cases diff --git a/tests/generators/runners/genesis.py b/tests/generators/runners/genesis.py new file mode 100644 index 0000000000..33983cfda4 --- /dev/null +++ b/tests/generators/runners/genesis.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("genesis") diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/runners/kzg_4844.py similarity index 90% rename from tests/generators/kzg_4844/main.py rename to tests/generators/runners/kzg_4844.py index bf2c34fde4..0905c66df7 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/runners/kzg_4844.py @@ -3,29 +3,26 @@ """ from functools import lru_cache -from typing import Tuple, Iterable, Any, Callable, Dict +from typing import Iterable from eth_utils import encode_hex from eth2spec.deneb import spec -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase from eth2spec.test.helpers.constants import DENEB -from eth2spec.test.helpers.typing import SpecForkName from eth2spec.test.utils.kzg_tests import ( BLOB_ALL_TWOS, BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, + bls_add_one, + encode_hex_list, G1, INVALID_BLOBS, INVALID_FIELD_ELEMENTS, INVALID_G1_POINTS, VALID_BLOBS, VALID_FIELD_ELEMENTS, - bls_add_one, - encode_hex_list, ) -from 
eth2spec.utils import bls - ############################################################################### # Test helpers @@ -158,7 +155,7 @@ def _runner(): for i, blob in enumerate(VALID_BLOBS): for j, z in enumerate(VALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(blob=blob, z=z): proof, y = spec.compute_kzg_proof(blob, z) commitment = cached_blob_to_kzg_commitment(blob) return commitment, z, y, proof @@ -169,7 +166,7 @@ def get_inputs(): for i, blob in enumerate(VALID_BLOBS): for j, z in enumerate(VALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(blob=blob, z=z): proof_orig, y = spec.compute_kzg_proof(blob, z) proof = bls_add_one(proof_orig) commitment = cached_blob_to_kzg_commitment(blob) @@ -180,7 +177,7 @@ def get_inputs(): # Incorrect `G1_POINT_AT_INFINITY` proof for index, z in enumerate(VALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(z=z): blob = BLOB_RANDOM_VALID1 _, y = spec.compute_kzg_proof(blob, z) commitment = cached_blob_to_kzg_commitment(blob) @@ -194,7 +191,7 @@ def get_inputs(): # Correct `G1_POINT_AT_INFINITY` proof for zero poly for index, z in enumerate(VALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(z=z): blob = BLOB_ALL_ZEROS _, y = spec.compute_kzg_proof(blob, z) commitment = cached_blob_to_kzg_commitment(blob) @@ -208,7 +205,7 @@ def get_inputs(): # Correct `G1_POINT_AT_INFINITY` proof for poly of all twos for index, z in enumerate(VALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(z=z): blob = BLOB_ALL_TWOS _, y = spec.compute_kzg_proof(blob, z) commitment = cached_blob_to_kzg_commitment(blob) @@ -222,7 +219,7 @@ def get_inputs(): # Edge case: Invalid commitment for index, commitment in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(commitment=commitment): blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, z) return commitment, z, y, proof @@ -232,7 +229,7 @@ def get_inputs(): # Edge case: Invalid z for index, z in enumerate(INVALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(z=z): blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, validz) commitment = cached_blob_to_kzg_commitment(blob) @@ -243,7 +240,7 @@ def get_inputs(): # Edge case: Invalid y for index, y in enumerate(INVALID_FIELD_ELEMENTS): - def get_inputs(): + def get_inputs(y=y): blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, _ = spec.compute_kzg_proof(blob, z) commitment = cached_blob_to_kzg_commitment(blob) @@ -254,7 +251,7 @@ def get_inputs(): # Edge case: Invalid proof for index, proof in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(proof=proof): blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[1] _, y = spec.compute_kzg_proof(blob, z) commitment = cached_blob_to_kzg_commitment(blob) @@ -296,7 +293,7 @@ def _runner(): # Valid cases for index, blob in enumerate(VALID_BLOBS): - def get_inputs(): + def get_inputs(blob=blob): commitment = cached_blob_to_kzg_commitment(blob) return blob, commitment @@ -305,7 +302,7 @@ def get_inputs(): # Edge case: Invalid blob for index, blob in enumerate(INVALID_BLOBS): - def get_inputs(): + def get_inputs(blob=blob): commitment = G1 return blob, commitment @@ -314,7 +311,7 @@ def get_inputs(): # Edge case: Invalid commitment for index, commitment in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(commitment=commitment): blob = VALID_BLOBS[1] return blob, commitment @@ -355,7 +352,7 @@ def _runner(): # Valid cases for index, blob in enumerate(VALID_BLOBS): 
- def get_inputs(): + def get_inputs(blob=blob): commitment = cached_blob_to_kzg_commitment(blob) proof = cached_compute_blob_kzg_proof(blob, commitment) return blob, commitment, proof @@ -365,7 +362,7 @@ def get_inputs(): # Incorrect proofs for index, blob in enumerate(VALID_BLOBS): - def get_inputs(): + def get_inputs(blob=blob): commitment = cached_blob_to_kzg_commitment(blob) proof = bls_add_one(cached_compute_blob_kzg_proof(blob, commitment)) return blob, commitment, proof @@ -414,7 +411,7 @@ def get_inputs(): # Edge case: Invalid blob for index, blob in enumerate(INVALID_BLOBS): - def get_inputs(): + def get_inputs(blob=blob): proof = G1 commitment = G1 return blob, commitment, proof @@ -424,7 +421,7 @@ def get_inputs(): # Edge case: Invalid commitment for index, commitment in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(commitment=commitment): blob = VALID_BLOBS[1] proof = G1 return blob, commitment, proof @@ -434,7 +431,7 @@ def get_inputs(): # Edge case: Invalid proof for index, proof in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(proof=proof): blob = VALID_BLOBS[1] commitment = G1 return blob, commitment, proof @@ -476,7 +473,7 @@ def _runner(): # Valid cases for length in range(len(VALID_BLOBS)): - def get_inputs(): + def get_inputs(length=length): blobs = VALID_BLOBS[:length] commitments = [cached_blob_to_kzg_commitment(blob) for blob in blobs] proofs = [ @@ -520,7 +517,7 @@ def get_inputs(): # Edge case: Invalid blobs for index, blob in enumerate(INVALID_BLOBS): - def get_inputs(): + def get_inputs(blob=blob): blobs = VALID_BLOBS commitments = [cached_blob_to_kzg_commitment(blob) for blob in blobs] proofs = [ @@ -535,7 +532,7 @@ def get_inputs(): # Edge case: Invalid commitment for index, commitment in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(commitment=commitment): blobs = VALID_BLOBS commitments = [cached_blob_to_kzg_commitment(blob) for blob in blobs] proofs = [ @@ -552,7 +549,7 @@ def get_inputs(): # Edge case: Invalid proof for index, proof in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(proof=proof): blobs = VALID_BLOBS commitments = [cached_blob_to_kzg_commitment(blob) for blob in blobs] proofs = [ @@ -617,42 +614,28 @@ def get_inputs(): ############################################################################### -def create_provider( - fork_name: SpecForkName, - handler_name: str, - test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]], -) -> gen_typing.TestProvider: +def get_test_cases() -> Iterable[TestCase]: + test_case_fns = [ + ("blob_to_kzg_commitment", case_blob_to_kzg_commitment), + ("compute_kzg_proof", case_compute_kzg_proof), + ("verify_kzg_proof", case_verify_kzg_proof), + ("compute_blob_kzg_proof", case_compute_blob_kzg_proof), + ("verify_blob_kzg_proof", case_verify_blob_kzg_proof), + ("verify_blob_kzg_proof_batch", case_verify_blob_kzg_proof_batch), + ] - def prepare_fn() -> None: - # Nothing to load / change in spec. Maybe in future forks. - # Put the tests into the general config category, to not require any particular configuration. 
- return - - def cases_fn() -> Iterable[gen_typing.TestCase]: + test_cases = [] + for handler_name, test_case_fn in test_case_fns: for case_name, case_fn in test_case_fn(): - yield gen_typing.TestCase( - fork_name=fork_name, - preset_name="general", - runner_name="kzg", - handler_name=handler_name, - suite_name="kzg-mainnet", - case_name=case_name, - case_fn=case_fn, + test_cases.append( + TestCase( + fork_name=DENEB, + preset_name="general", + runner_name="kzg", + handler_name=handler_name, + suite_name="kzg-mainnet", + case_name=case_name, + case_fn=case_fn, + ) ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - bls.use_arkworks() - gen_runner.run_generator( - "kzg", - [ - create_provider(DENEB, "blob_to_kzg_commitment", case_blob_to_kzg_commitment), - create_provider(DENEB, "compute_kzg_proof", case_compute_kzg_proof), - create_provider(DENEB, "verify_kzg_proof", case_verify_kzg_proof), - create_provider(DENEB, "compute_blob_kzg_proof", case_compute_blob_kzg_proof), - create_provider(DENEB, "verify_blob_kzg_proof", case_verify_blob_kzg_proof), - create_provider(DENEB, "verify_blob_kzg_proof_batch", case_verify_blob_kzg_proof_batch), - ], - ) + return test_cases diff --git a/tests/generators/kzg_7594/main.py b/tests/generators/runners/kzg_7594.py similarity index 92% rename from tests/generators/kzg_7594/main.py rename to tests/generators/runners/kzg_7594.py index d7097554fc..086dcec624 100644 --- a/tests/generators/kzg_7594/main.py +++ b/tests/generators/runners/kzg_7594.py @@ -3,26 +3,23 @@ """ from functools import lru_cache -from typing import Tuple, Iterable, Any, Callable, Dict +from typing import Iterable from eth_utils import encode_hex from eth2spec.fulu import spec -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase from eth2spec.test.helpers.constants import FULU -from eth2spec.test.helpers.typing import SpecForkName from eth2spec.test.utils.kzg_tests import ( + bls_add_one, CELL_RANDOM_VALID1, CELL_RANDOM_VALID2, + encode_hex_list, INVALID_BLOBS, INVALID_G1_POINTS, INVALID_INDIVIDUAL_CELL_BYTES, VALID_BLOBS, - bls_add_one, - encode_hex_list, ) -from eth2spec.utils import bls - ############################################################################### # Test helpers @@ -148,7 +145,7 @@ def _runner(): # Valid cases for index, blob in enumerate(VALID_BLOBS): - def get_inputs(): + def get_inputs(blob=blob): cells, proofs = cached_compute_cells_and_kzg_proofs(blob) commitments = [cached_blob_to_kzg_commitment(blob) for _ in cells] cell_indices = list(range(spec.CELLS_PER_EXT_BLOB)) @@ -240,7 +237,7 @@ def get_inputs(): # Edge case: Invalid commitment for index, commitment in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(index=index, commitment=commitment): cells, proofs = cached_compute_cells_and_kzg_proofs( VALID_BLOBS[index % len(INVALID_G1_POINTS)] ) @@ -271,7 +268,7 @@ def get_inputs(): # Edge case: Invalid cell for index, cell in enumerate(INVALID_INDIVIDUAL_CELL_BYTES): - def get_inputs(): + def get_inputs(index=index, cell=cell): cells, proofs = cached_compute_cells_and_kzg_proofs( VALID_BLOBS[index % len(INVALID_INDIVIDUAL_CELL_BYTES)] ) @@ -291,7 +288,7 @@ def get_inputs(): # Edge case: Invalid proof for index, proof in enumerate(INVALID_G1_POINTS): - def get_inputs(): + def get_inputs(index=index, proof=proof): cells, proofs = cached_compute_cells_and_kzg_proofs( VALID_BLOBS[index % len(INVALID_G1_POINTS)] ) 
@@ -511,7 +508,7 @@ def get_inputs(): # Edge case: Invalid cell for index, cell in enumerate(INVALID_INDIVIDUAL_CELL_BYTES): - def get_inputs(): + def get_inputs(cell=cell): cells, _ = cached_compute_cells_and_kzg_proofs(VALID_BLOBS[6]) cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2)) partial_cells = [cells[cell_index] for cell_index in cell_indices] @@ -577,43 +574,26 @@ def get_inputs(): ############################################################################### -def create_provider( - fork_name: SpecForkName, - handler_name: str, - test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]], -) -> gen_typing.TestProvider: - def prepare_fn() -> None: - # Nothing to load / change in spec. Maybe in future forks. - # Put the tests into the general config category, to not require any particular configuration. - return +def get_test_cases() -> Iterable[TestCase]: + test_case_fns = [ + ("compute_cells", case_compute_cells), + ("compute_cells_and_kzg_proofs", case_compute_cells_and_kzg_proofs), + ("verify_cell_kzg_proof_batch", case_verify_cell_kzg_proof_batch), + ("recover_cells_and_kzg_proofs", case_recover_cells_and_kzg_proofs), + ] - def cases_fn() -> Iterable[gen_typing.TestCase]: + test_cases = [] + for handler_name, test_case_fn in test_case_fns: for case_name, case_fn in test_case_fn(): - yield gen_typing.TestCase( - fork_name=fork_name, - preset_name="general", - runner_name="kzg", - handler_name=handler_name, - suite_name="kzg-mainnet", - case_name=case_name, - case_fn=case_fn, + test_cases.append( + TestCase( + fork_name=FULU, + preset_name="general", + runner_name="kzg", + handler_name=handler_name, + suite_name="kzg-mainnet", + case_name=case_name, + case_fn=case_fn, + ) ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - bls.use_arkworks() - gen_runner.run_generator( - "kzg_7594", - [ - create_provider(FULU, "compute_cells", case_compute_cells), - create_provider( - FULU, "compute_cells_and_kzg_proofs", case_compute_cells_and_kzg_proofs - ), - create_provider(FULU, "verify_cell_kzg_proof_batch", case_verify_cell_kzg_proof_batch), - create_provider( - FULU, "recover_cells_and_kzg_proofs", case_recover_cells_and_kzg_proofs - ), - ], - ) + return test_cases diff --git a/tests/generators/runners/light_client.py b/tests/generators/runners/light_client.py new file mode 100644 index 0000000000..bd4fa54d63 --- /dev/null +++ b/tests/generators/runners/light_client.py @@ -0,0 +1,15 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def handler_name_fn(mod): + handler_name = mod.split(".")[-1] + if handler_name == "test_sync_protocol": + return "sync" + return handler_name.replace("test_", "") + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("light_client", handler_name_fn=handler_name_fn) diff --git a/tests/generators/runners/merkle_proof.py b/tests/generators/runners/merkle_proof.py new file mode 100644 index 0000000000..1c467f9ddc --- /dev/null +++ b/tests/generators/runners/merkle_proof.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("merkle_proof") diff --git a/tests/generators/runners/networking.py b/tests/generators/runners/networking.py new file mode 
100644 index 0000000000..294bba427e --- /dev/null +++ b/tests/generators/runners/networking.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("networking") diff --git a/tests/generators/runners/operations.py b/tests/generators/runners/operations.py new file mode 100644 index 0000000000..42cbae3643 --- /dev/null +++ b/tests/generators/runners/operations.py @@ -0,0 +1,15 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def handler_name_fn(mod): + handler_name = mod.split(".")[-1] + if handler_name == "test_process_sync_aggregate_random": + return "sync_aggregate" + return handler_name.replace("test_process_", "") + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("operations", pkg="block_processing", handler_name_fn=handler_name_fn) diff --git a/tests/generators/runners/random.py b/tests/generators/runners/random.py new file mode 100644 index 0000000000..7f9eff145f --- /dev/null +++ b/tests/generators/runners/random.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("random") diff --git a/tests/generators/runners/rewards.py b/tests/generators/runners/rewards.py new file mode 100644 index 0000000000..642d0a402b --- /dev/null +++ b/tests/generators/runners/rewards.py @@ -0,0 +1,8 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("rewards") diff --git a/tests/generators/runners/sanity.py b/tests/generators/runners/sanity.py new file mode 100644 index 0000000000..2b029609d0 --- /dev/null +++ b/tests/generators/runners/sanity.py @@ -0,0 +1,15 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for + + +def handler_name_fn(mod): + handler_name = mod.split(".")[-1] + if handler_name == "test_deposit_transition": + return "blocks" + return handler_name.replace("test_", "") + + +def get_test_cases() -> Iterable[TestCase]: + return get_test_cases_for("sanity", handler_name_fn=handler_name_fn) diff --git a/tests/generators/runners/shuffling.py b/tests/generators/runners/shuffling.py new file mode 100644 index 0000000000..151f0a5b4c --- /dev/null +++ b/tests/generators/runners/shuffling.py @@ -0,0 +1,52 @@ +import random +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.phase0 import mainnet as spec_mainnet, minimal as spec_minimal +from eth2spec.test.helpers.constants import ALL_PRESETS, MAINNET, MINIMAL, PHASE0 + + +def generate_random_bytes(rng=random.Random(5566)): + random_bytes = bytes(rng.randint(0, 255) for _ in range(32)) + return random_bytes + + +def shuffling_case_fn(spec, seed, count): + yield "mapping", "data", { + "seed": "0x" + seed.hex(), + "count": count, + "mapping": [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)], + } + + +def 
shuffling_case(spec, seed, count): + return f"shuffle_0x{seed.hex()}_{count}", lambda: shuffling_case_fn(spec, seed, count) + + +def shuffling_test_cases(spec): + # NOTE: somehow the random.Random generated seeds do not have pickle issue. + rng = random.Random(1234) + seeds = [generate_random_bytes(rng) for i in range(30)] + for seed in seeds: + for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]: + yield shuffling_case(spec, seed, count) + + +def get_test_cases() -> Iterable[TestCase]: + test_cases = [] + + for preset in ALL_PRESETS: + spec = {MAINNET: spec_mainnet, MINIMAL: spec_minimal}[preset] + for case_name, case_fn in shuffling_test_cases(spec): + test_cases.append( + TestCase( + fork_name=PHASE0, + preset_name=preset, + runner_name="shuffling", + handler_name="core", + suite_name="shuffle", + case_name=case_name, + case_fn=case_fn, + ) + ) + return test_cases diff --git a/tests/generators/runners/ssz_generic.py b/tests/generators/runners/ssz_generic.py new file mode 100644 index 0000000000..72f575784b --- /dev/null +++ b/tests/generators/runners/ssz_generic.py @@ -0,0 +1,46 @@ +from typing import Iterable + +from eth2spec.gen_helpers.gen_base.gen_typing import TestCase +from eth2spec.test.helpers.constants import PHASE0 + +from .ssz_generic_cases import ( + ssz_basic_vector, + ssz_bitlist, + ssz_bitvector, + ssz_boolean, + ssz_container, + ssz_uints, +) + + +def get_test_cases() -> Iterable[TestCase]: + test_case_fns = [ + ("basic_vector", "valid", ssz_basic_vector.valid_cases), + ("basic_vector", "invalid", ssz_basic_vector.invalid_cases), + ("bitlist", "valid", ssz_bitlist.valid_cases), + ("bitlist", "invalid", ssz_bitlist.invalid_cases), + ("bitvector", "valid", ssz_bitvector.valid_cases), + ("bitvector", "invalid", ssz_bitvector.invalid_cases), + ("boolean", "valid", ssz_boolean.valid_cases), + ("boolean", "invalid", ssz_boolean.invalid_cases), + ("uints", "valid", ssz_uints.valid_cases), + ("uints", "invalid", ssz_uints.invalid_cases), + ("containers", "valid", ssz_container.valid_cases), + ("containers", "invalid", ssz_container.invalid_cases), + ] + + test_cases = [] + for handler_name, suite_name, test_case_fn in test_case_fns: + for case_name, case_fn in test_case_fn(): + test_cases.append( + TestCase( + fork_name=PHASE0, + preset_name="general", + runner_name="ssz_generic", + handler_name=handler_name, + suite_name=suite_name, + case_name=case_name, + case_fn=case_fn, + ) + ) + return test_cases diff --git a/tests/generators/light_client/__init__.py b/tests/generators/runners/ssz_generic_cases/__init__.py similarity index 100% rename from tests/generators/light_client/__init__.py rename to tests/generators/runners/ssz_generic_cases/__init__.py diff --git a/tests/generators/ssz_generic/ssz_basic_vector.py b/tests/generators/runners/ssz_generic_cases/ssz_basic_vector.py similarity index 72% rename from tests/generators/ssz_generic/ssz_basic_vector.py rename to tests/generators/runners/ssz_generic_cases/ssz_basic_vector.py index 3d2ecb3282..bc34471e57 100644 --- a/tests/generators/ssz_generic/ssz_basic_vector.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_basic_vector.py @@ -1,5 +1,10 @@ -from ssz_test_case import invalid_test_case, valid_test_case +from random import Random +from typing import Dict, Type + +from eth2spec.debug.random_value import get_random_ssz_object, RandomizationMode +from eth2spec.utils.ssz.ssz_impl import serialize from eth2spec.utils.ssz.ssz_typing import ( + BasicView, boolean, uint8, uint16, @@ -8,12 +13,9 @@ uint128, uint256, Vector, - 
BasicView, ) -from eth2spec.utils.ssz.ssz_impl import serialize -from random import Random -from typing import Dict, Type -from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object + +from .ssz_test_case import invalid_test_case, valid_test_case def basic_vector_case_fn( @@ -49,7 +51,9 @@ def valid_cases(): for length in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]: for mode in random_modes: yield f"vec_{name}_{length}_{mode.to_name()}", valid_test_case( - lambda: basic_vector_case_fn(rng, mode, typ, length) + lambda rng=rng, mode=mode, typ=typ, length=length: basic_vector_case_fn( + rng, mode, typ, length + ) ) @@ -73,15 +77,23 @@ def invalid_cases(): ) else: yield f"vec_{name}_{length}_{mode.to_name()}_one_less", invalid_test_case( - lambda: serialize(basic_vector_case_fn(rng, mode, typ, length - 1)) + lambda rng=rng, mode=mode, typ=typ, length=length: serialize( + basic_vector_case_fn(rng, mode, typ, length - 1) + ) ) yield f"vec_{name}_{length}_{mode.to_name()}_one_more", invalid_test_case( - lambda: serialize(basic_vector_case_fn(rng, mode, typ, length + 1)) + lambda rng=rng, mode=mode, typ=typ, length=length: serialize( + basic_vector_case_fn(rng, mode, typ, length + 1) + ) ) yield f"vec_{name}_{length}_{mode.to_name()}_one_byte_less", invalid_test_case( - lambda: serialize(basic_vector_case_fn(rng, mode, typ, length))[:-1] + lambda rng=rng, mode=mode, typ=typ, length=length: serialize( + basic_vector_case_fn(rng, mode, typ, length) + )[:-1] ) yield f"vec_{name}_{length}_{mode.to_name()}_one_byte_more", invalid_test_case( - lambda: serialize(basic_vector_case_fn(rng, mode, typ, length)) + lambda rng=rng, mode=mode, typ=typ, length=length: serialize( + basic_vector_case_fn(rng, mode, typ, length) + ) + serialize(basic_vector_case_fn(rng, mode, uint8, 1)) ) diff --git a/tests/generators/ssz_generic/ssz_bitlist.py b/tests/generators/runners/ssz_generic_cases/ssz_bitlist.py similarity index 79% rename from tests/generators/ssz_generic/ssz_bitlist.py rename to tests/generators/runners/ssz_generic_cases/ssz_bitlist.py index e876f4636a..4c3c54c5a4 100644 --- a/tests/generators/ssz_generic/ssz_bitlist.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_bitlist.py @@ -1,8 +1,10 @@ -from ssz_test_case import invalid_test_case, valid_test_case -from eth2spec.utils.ssz.ssz_typing import Bitlist -from eth2spec.utils.ssz.ssz_impl import serialize from random import Random -from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object + +from eth2spec.debug.random_value import get_random_ssz_object, RandomizationMode +from eth2spec.utils.ssz.ssz_impl import serialize +from eth2spec.utils.ssz.ssz_typing import Bitlist + +from .ssz_test_case import invalid_test_case, valid_test_case def bitlist_case_fn(rng: Random, mode: RandomizationMode, limit: int): @@ -28,7 +30,7 @@ def valid_cases(): RandomizationMode.mode_max, ]: yield f"bitlist_{size}_{mode.to_name()}_{variation}", valid_test_case( - lambda: bitlist_case_fn(rng, mode, size) + lambda rng=rng, mode=mode, size=size: bitlist_case_fn(rng, mode, size) ) @@ -51,5 +53,7 @@ def invalid_cases(): (512, 513), ]: yield f"bitlist_{typ_limit}_but_{test_limit}", invalid_test_case( - lambda: serialize(bitlist_case_fn(rng, RandomizationMode.mode_max_count, test_limit)) + lambda rng=rng, test_limit=test_limit: serialize( + bitlist_case_fn(rng, RandomizationMode.mode_max_count, test_limit) + ) ) diff --git a/tests/generators/ssz_generic/ssz_bitvector.py b/tests/generators/runners/ssz_generic_cases/ssz_bitvector.py similarity 
index 86% rename from tests/generators/ssz_generic/ssz_bitvector.py rename to tests/generators/runners/ssz_generic_cases/ssz_bitvector.py index 9ce34dc84c..428ad9e814 100644 --- a/tests/generators/ssz_generic/ssz_bitvector.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_bitvector.py @@ -1,8 +1,10 @@ -from ssz_test_case import invalid_test_case, valid_test_case -from eth2spec.utils.ssz.ssz_typing import Bitvector -from eth2spec.utils.ssz.ssz_impl import serialize from random import Random -from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object + +from eth2spec.debug.random_value import get_random_ssz_object, RandomizationMode +from eth2spec.utils.ssz.ssz_impl import serialize +from eth2spec.utils.ssz.ssz_typing import Bitvector + +from .ssz_test_case import invalid_test_case, valid_test_case def bitvector_case_fn( @@ -35,7 +37,7 @@ def valid_cases(): RandomizationMode.mode_max, ]: yield f"bitvec_{size}_{mode.to_name()}", valid_test_case( - lambda: bitvector_case_fn(rng, mode, size) + lambda rng=rng, mode=mode, size=size: bitvector_case_fn(rng, mode, size) ) @@ -64,7 +66,7 @@ def invalid_cases(): RandomizationMode.mode_max, ]: yield f"bitvec_{typ_size}_{mode.to_name()}_{test_size}", invalid_test_case( - lambda: serialize( + lambda rng=rng, mode=mode, test_size=test_size, typ_size=typ_size: serialize( bitvector_case_fn(rng, mode, test_size, invalid_making_pos=typ_size) ) ) diff --git a/tests/generators/ssz_generic/ssz_boolean.py b/tests/generators/runners/ssz_generic_cases/ssz_boolean.py similarity index 87% rename from tests/generators/ssz_generic/ssz_boolean.py rename to tests/generators/runners/ssz_generic_cases/ssz_boolean.py index 1598b6c4a4..f42868ce80 100644 --- a/tests/generators/ssz_generic/ssz_boolean.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_boolean.py @@ -1,6 +1,7 @@ -from ssz_test_case import valid_test_case, invalid_test_case from eth2spec.utils.ssz.ssz_typing import boolean +from .ssz_test_case import invalid_test_case, valid_test_case + def valid_cases(): yield "true", valid_test_case(lambda: boolean(True)) diff --git a/tests/generators/ssz_generic/ssz_container.py b/tests/generators/runners/ssz_generic_cases/ssz_container.py similarity index 83% rename from tests/generators/ssz_generic/ssz_container.py rename to tests/generators/runners/ssz_generic_cases/ssz_container.py index 055dc1caad..2083958829 100644 --- a/tests/generators/ssz_generic/ssz_container.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_container.py @@ -1,22 +1,24 @@ -from ssz_test_case import invalid_test_case, valid_test_case +from random import Random +from typing import Callable, Dict, Sequence, Tuple, Type + +from eth2spec.debug.random_value import get_random_ssz_object, RandomizationMode +from eth2spec.utils.ssz.ssz_impl import serialize from eth2spec.utils.ssz.ssz_typing import ( - View, - Container, + Bitlist, + Bitvector, byte, + ByteList, + Container, + List, uint8, uint16, uint32, uint64, - List, - ByteList, Vector, - Bitvector, - Bitlist, + View, ) -from eth2spec.utils.ssz.ssz_impl import serialize -from random import Random -from typing import Dict, Tuple, Sequence, Callable, Type -from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object + +from .ssz_test_case import invalid_test_case, valid_test_case class SingleFieldTestStruct(Container): @@ -79,7 +81,7 @@ def valid_cases(): for name, (typ, offsets) in PRESET_CONTAINERS.items(): for mode in [RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield 
f"{name}_{mode.to_name()}", valid_test_case( - lambda: container_case_fn(rng, mode, typ) + lambda rng=rng, mode=mode, typ=typ: container_case_fn(rng, mode, typ) ) if len(offsets) == 0: @@ -94,7 +96,9 @@ def valid_cases(): for mode in modes: for variation in range(3): yield f"{name}_{mode.to_name()}_chaos_{variation}", valid_test_case( - lambda: container_case_fn(rng, mode, typ, chaos=True) + lambda rng=rng, mode=mode, typ=typ: container_case_fn( + rng, mode, typ, chaos=True + ) ) # Notes: Below is the second wave of iteration, and only the random mode is selected # for container without offset since ``RandomizationMode.mode_zero`` and ``RandomizationMode.mode_max`` @@ -103,7 +107,7 @@ def valid_cases(): for mode in modes: for variation in range(10): yield f"{name}_{mode.to_name()}_{variation}", valid_test_case( - lambda: container_case_fn(rng, mode, typ) + lambda rng=rng, mode=mode, typ=typ: container_case_fn(rng, mode, typ) ) @@ -123,7 +127,9 @@ def invalid_cases(): for name, (typ, offsets) in PRESET_CONTAINERS.items(): # using mode_max_count, so that the extra byte cannot be picked up as normal list content yield f"{name}_extra_byte", invalid_test_case( - lambda: serialize(container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + lambda rng=rng, typ=typ: serialize( + container_case_fn(rng, RandomizationMode.mode_max_count, typ) + ) + b"\xff" ) @@ -138,14 +144,14 @@ def invalid_cases(): ]: for index, offset_index in enumerate(offsets): yield f"{name}_{mode.to_name()}_offset_{offset_index}_plus_one", invalid_test_case( - lambda: mod_offset( + lambda rng=rng, mode=mode, typ=typ, offset_index=offset_index: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x + 1, ) ) yield f"{name}_{mode.to_name()}_offset_{offset_index}_zeroed", invalid_test_case( - lambda: mod_offset( + lambda rng=rng, mode=mode, typ=typ, offset_index=offset_index: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: 0, @@ -153,7 +159,7 @@ def invalid_cases(): ) if index == 0: yield f"{name}_{mode.to_name()}_offset_{offset_index}_minus_one", invalid_test_case( - lambda: mod_offset( + lambda rng=rng, mode=mode, typ=typ, offset_index=offset_index: mod_offset( b=serialize(container_case_fn(rng, mode, typ)), offset_index=offset_index, change=lambda x: x - 1, @@ -163,11 +169,11 @@ def invalid_cases(): serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[:2] yield f"{name}_{mode.to_name()}_last_offset_{offset_index}_overflow", invalid_test_case( - lambda: serialized + lambda serialized=serialized: serialized ) if mode == RandomizationMode.mode_one_count: serialized = serialize(container_case_fn(rng, mode, typ)) serialized = serialized + serialized[:1] yield f"{name}_{mode.to_name()}_last_offset_{offset_index}_wrong_byte_length", invalid_test_case( - lambda: serialized + lambda serialized=serialized: serialized ) diff --git a/tests/generators/ssz_generic/ssz_test_case.py b/tests/generators/runners/ssz_generic_cases/ssz_test_case.py similarity index 88% rename from tests/generators/ssz_generic/ssz_test_case.py rename to tests/generators/runners/ssz_generic_cases/ssz_test_case.py index 264fe56954..5450b7431f 100644 --- a/tests/generators/ssz_generic/ssz_test_case.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_test_case.py @@ -1,7 +1,8 @@ -from eth2spec.utils.ssz.ssz_impl import serialize, hash_tree_root +from typing import Callable + from eth2spec.debug.encode import encode 
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize from eth2spec.utils.ssz.ssz_typing import View -from typing import Callable def valid_test_case(value_fn: Callable[[], View]): diff --git a/tests/generators/ssz_generic/ssz_uints.py b/tests/generators/runners/ssz_generic_cases/ssz_uints.py similarity index 66% rename from tests/generators/ssz_generic/ssz_uints.py rename to tests/generators/runners/ssz_generic_cases/ssz_uints.py index 816c1faa97..8c6cf8e264 100644 --- a/tests/generators/ssz_generic/ssz_uints.py +++ b/tests/generators/runners/ssz_generic_cases/ssz_uints.py @@ -1,8 +1,10 @@ -from ssz_test_case import invalid_test_case, valid_test_case -from eth2spec.utils.ssz.ssz_typing import BasicView, uint8, uint16, uint32, uint64, uint128, uint256 from random import Random from typing import Type -from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object + +from eth2spec.debug.random_value import get_random_ssz_object, RandomizationMode +from eth2spec.utils.ssz.ssz_typing import BasicView, uint8, uint16, uint32, uint64, uint128, uint256 + +from .ssz_test_case import invalid_test_case, valid_test_case def uint_case_fn(rng: Random, mode: RandomizationMode, typ: Type[BasicView]): @@ -20,15 +22,17 @@ def valid_cases(): mode = RandomizationMode.mode_random byte_len = uint_type.type_byte_length() yield f"uint_{byte_len * 8}_last_byte_empty", valid_test_case( - lambda: uint_type((2 ** ((byte_len - 1) * 8)) - 1) + lambda uint_type=uint_type, byte_len=byte_len: uint_type( + (2 ** ((byte_len - 1) * 8)) - 1 + ) ) for variation in range(5): yield f"uint_{byte_len * 8}_{mode.to_name()}_{variation}", valid_test_case( - lambda: uint_case_fn(rng, mode, uint_type) + lambda rng=rng, mode=mode, uint_type=uint_type: uint_case_fn(rng, mode, uint_type) ) for mode in [RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f"uint_{byte_len * 8}_{mode.to_name()}", valid_test_case( - lambda: uint_case_fn(rng, mode, uint_type) + lambda rng=rng, mode=mode, uint_type=uint_type: uint_case_fn(rng, mode, uint_type) ) @@ -36,15 +40,17 @@ def invalid_cases(): for uint_type in UINT_TYPES: byte_len = uint_type.type_byte_length() yield f"uint_{byte_len * 8}_one_too_high", invalid_test_case( - lambda: (2 ** (byte_len * 8)).to_bytes(byte_len + 1, "little") + lambda byte_len=byte_len: (2 ** (byte_len * 8)).to_bytes(byte_len + 1, "little") ) for uint_type in [uint8, uint16, uint32, uint64, uint128, uint256]: byte_len = uint_type.type_byte_length() yield f"uint_{byte_len * 8}_one_byte_longer", invalid_test_case( - lambda: (2 ** (byte_len * 8) - 1).to_bytes(byte_len + 1, "little") + lambda byte_len=byte_len: (2 ** (byte_len * 8) - 1).to_bytes(byte_len + 1, "little") ) for uint_type in [uint8, uint16, uint32, uint64, uint128, uint256]: byte_len = uint_type.type_byte_length() yield f"uint_{byte_len * 8}_one_byte_shorter", invalid_test_case( - lambda: (2 ** ((byte_len - 1) * 8) - 1).to_bytes(byte_len - 1, "little") + lambda byte_len=byte_len: (2 ** ((byte_len - 1) * 8) - 1).to_bytes( + byte_len - 1, "little" + ) ) diff --git a/tests/generators/ssz_generic/uint_test_cases.py b/tests/generators/runners/ssz_generic_cases/uint_test_cases.py similarity index 100% rename from tests/generators/ssz_generic/uint_test_cases.py rename to tests/generators/runners/ssz_generic_cases/uint_test_cases.py index 82704ea420..cf444d4eea 100644 --- a/tests/generators/ssz_generic/uint_test_cases.py +++ b/tests/generators/runners/ssz_generic_cases/uint_test_cases.py @@ -3,14 +3,14 @@ from eth_utils import ( 
     to_tuple,
 )
-
-import ssz
-from ssz.sedes import (
-    UInt,
-)
 from renderers import (
     render_test_case,
 )
+from ssz.sedes import (
+    UInt,
+)
+
+import ssz
 
 random.seed(0)
 
diff --git a/tests/generators/runners/ssz_static.py b/tests/generators/runners/ssz_static.py
new file mode 100644
index 0000000000..e9983e1196
--- /dev/null
+++ b/tests/generators/runners/ssz_static.py
@@ -0,0 +1,104 @@
+import hashlib
+from inspect import getmembers, isclass
+from random import Random
+from typing import Iterable
+
+from eth2spec.debug import encode, random_value
+from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestCasePart
+from eth2spec.test.context import spec_targets
+from eth2spec.test.helpers.constants import MAINNET, MINIMAL, TESTGEN_FORKS
+from eth2spec.utils.ssz.ssz_impl import (
+    hash_tree_root,
+    serialize,
+)
+from eth2spec.utils.ssz.ssz_typing import Container
+
+MAX_BYTES_LENGTH = 1000
+MAX_LIST_LENGTH = 10
+
+
+def create_test_case(
+    seed: int, typ, mode: random_value.RandomizationMode, chaos: bool
+) -> Iterable[TestCasePart]:
+    rng = Random(seed)
+    value = random_value.get_random_ssz_object(
+        rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos
+    )
+    yield "value", "data", encode.encode(value)
+    yield "serialized", "ssz", serialize(value)
+    roots_data = {"root": "0x" + hash_tree_root(value).hex()}
+    yield "roots", "data", roots_data
+
+
+def get_spec_ssz_types(spec):
+    return [
+        (name, value)
+        for (name, value) in getmembers(spec, isclass)
+        if issubclass(value, Container)
+        and value != Container  # only the subclasses, not the imported base class
+    ]
+
+
+def deterministic_seed(**kwargs) -> int:
+    """Need this since hash() is not deterministic between runs."""
+    m = hashlib.sha256()
+    for k, v in sorted(kwargs.items()):
+        m.update(f"{k}={v}".encode("utf-8"))
+    return int.from_bytes(m.digest()[:8], "little")
+
+
+def ssz_static_cases(
+    fork_name: str,
+    preset_name: str,
+    name,
+    ssz_type,
+    mode: random_value.RandomizationMode,
+    chaos: bool,
+    count: int,
+):
+    random_mode_name = mode.to_name()
+    for i in range(count):
+        seed = deterministic_seed(
+            fork_name=fork_name,
+            preset_name=preset_name,
+            name=name,
+            ssz_type_name=ssz_type.__name__,
+            random_mode_name=random_mode_name,
+            chaos=chaos,
+            count=count,
+            i=i,
+        )
+
+        def case_fn(seed=seed):
+            """Need to bind to seed value."""
+            return create_test_case(seed, ssz_type, mode, chaos)
+
+        yield TestCase(
+            fork_name=fork_name,
+            preset_name=preset_name,
+            runner_name="ssz_static",
+            handler_name=name,
+            suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}",
+            case_name=f"case_{i}",
+            case_fn=case_fn,
+        )
+
+
+def get_test_cases() -> Iterable[TestCase]:
+    settings = []
+    for mode in random_value.RandomizationMode:
+        settings.append((MINIMAL, mode, False, 30))
+    settings.append((MINIMAL, random_value.RandomizationMode.mode_random, True, 30))
+    settings.append((MAINNET, random_value.RandomizationMode.mode_random, False, 5))
+
+    test_cases = []
+    for fork in TESTGEN_FORKS:
+        for preset, mode, chaos, cases_if_random in settings:
+            count = cases_if_random if chaos or mode.is_changing() else 1
+            spec = spec_targets[preset][fork]
+            for name, ssz_type in get_spec_ssz_types(spec):
+                test_cases.extend(
+                    ssz_static_cases(fork, preset, name, ssz_type, mode, chaos, count)
+                )
+
+    return test_cases
diff --git a/tests/generators/runners/sync.py b/tests/generators/runners/sync.py
new file mode 100644
index 0000000000..d05790bc08
--- /dev/null
+++ b/tests/generators/runners/sync.py
@@ -0,0 +1,8 @@
+from typing import Iterable
+
+from eth2spec.gen_helpers.gen_base.gen_typing import TestCase
+from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for
+
+
+def get_test_cases() -> Iterable[TestCase]:
+    return get_test_cases_for("sync")
diff --git a/tests/generators/runners/transition.py b/tests/generators/runners/transition.py
new file mode 100644
index 0000000000..76a30f3b79
--- /dev/null
+++ b/tests/generators/runners/transition.py
@@ -0,0 +1,25 @@
+from importlib import import_module
+from typing import Iterable
+
+from eth2spec.gen_helpers.gen_base.gen_typing import TestCase
+from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests, get_expected_modules
+from eth2spec.test.helpers.constants import ALL_PRESETS, POST_FORK_OF
+
+
+def get_test_cases() -> Iterable[TestCase]:
+    test_cases = []
+    for preset in ALL_PRESETS:
+        for prefork, postfork in POST_FORK_OF.items():
+            for mod in get_expected_modules("transition"):
+                tests_src = import_module(mod)
+                test_cases.extend(
+                    generate_from_tests(
+                        runner_name="transition",
+                        handler_name="core",
+                        src=tests_src,
+                        fork_name=postfork,
+                        preset_name=preset,
+                        phase=prefork,
+                    )
+                )
+    return test_cases
diff --git a/tests/generators/sanity/README.md b/tests/generators/sanity/README.md
deleted file mode 100644
index 9a5f5b25d6..0000000000
--- a/tests/generators/sanity/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Sanity tests
-
-Sanity tests cover regular state-transitions in a common block-list format, to ensure the basics work.
-
-Information on the format of the tests can be found in the [sanity test formats documentation](../../formats/sanity/README.md).
diff --git a/tests/generators/sanity/main.py b/tests/generators/sanity/main.py
deleted file mode 100644
index 33e39cbf18..0000000000
--- a/tests/generators/sanity/main.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU
-from eth2spec.gen_helpers.gen_from_tests.gen import (
-    run_state_test_generators,
-    combine_mods,
-    check_mods,
-)
-
-
-if __name__ == "__main__":
-    phase_0_mods = {
-        key: "eth2spec.test.phase0.sanity.test_" + key
-        for key in [
-            "blocks",
-            "slots",
-        ]
-    }
-
-    _new_altair_mods = {
-        key: "eth2spec.test.altair.sanity.test_" + key
-        for key in [
-            "blocks",
-        ]
-    }
-    altair_mods = combine_mods(_new_altair_mods, phase_0_mods)
-
-    _new_bellatrix_mods = {
-        key: "eth2spec.test.bellatrix.sanity.test_" + key
-        for key in [
-            "blocks",
-        ]
-    }
-    bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods)
-
-    _new_capella_mods = {
-        key: "eth2spec.test.capella.sanity.test_" + key
-        for key in [
-            "blocks",
-        ]
-    }
-    capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)
-
-    _new_deneb_mods = {
-        key: "eth2spec.test.deneb.sanity.test_" + key
-        for key in [
-            "blocks",
-        ]
-    }
-    deneb_mods = combine_mods(_new_deneb_mods, capella_mods)
-
-    # This is a "hack" which allows other test files (e.g., test_deposit_transition.py)
-    # to reuse the sanity/block test format. If a new test file is added or removed,
-    # do not forget to update sanity/block/__init__.py accordingly.
-    _new_electra_mods_1 = {
-        key: "eth2spec.test.electra.sanity." + key
-        for key in [
-            "blocks",
-        ]
-    }
-    _new_electra_mods_2 = {
-        key: "eth2spec.test.electra.sanity.test_" + key
-        for key in [
-            "slots",
-        ]
-    }
-    _new_electra_mods = {**_new_electra_mods_1, **_new_electra_mods_2}
-    electra_mods = combine_mods(_new_electra_mods, deneb_mods)
-
-    # No additional Fulu specific sanity tests
-    fulu_mods = electra_mods
-
-    all_mods = {
-        PHASE0: phase_0_mods,
-        ALTAIR: altair_mods,
-        BELLATRIX: bellatrix_mods,
-        CAPELLA: capella_mods,
-        DENEB: deneb_mods,
-        ELECTRA: electra_mods,
-        FULU: fulu_mods,
-    }
-    check_mods(all_mods, "sanity")
-
-    run_state_test_generators(runner_name="sanity", all_mods=all_mods)
diff --git a/tests/generators/shuffling/README.md b/tests/generators/shuffling/README.md
deleted file mode 100644
index 81ddaba15f..0000000000
--- a/tests/generators/shuffling/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Shuffling Tests
-
-Tests for the swap-or-not shuffling in the beacon chain.
-
-Tips for initial shuffling write:
-- run with `round_count = 1` first, do the same with pyspec.
-- start with permute index
-- optimized shuffling implementations:
-  - vitalik, Python: https://github.com/ethereum/consensus-specs/pull/576#issue-250741806
-  - protolambda, Go: https://github.com/protolambda/eth2-shuffle
diff --git a/tests/generators/shuffling/main.py b/tests/generators/shuffling/main.py
deleted file mode 100644
index a96b3eece6..0000000000
--- a/tests/generators/shuffling/main.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from typing import Iterable
-import random
-
-from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
-from eth2spec.test.helpers.typing import PresetBaseName
-
-from eth2spec.phase0 import mainnet as spec_mainnet, minimal as spec_minimal
-from eth2spec.test.helpers.constants import PHASE0, MINIMAL, MAINNET
-
-
-def generate_random_bytes(rng=random.Random(5566)):
-    random_bytes = bytes(rng.randint(0, 255) for _ in range(32))
-    return random_bytes
-
-
-# NOTE: somehow the random.Random generated seeds do not have pickle issue.
-rng = random.Random(1234) -seeds = [generate_random_bytes(rng) for i in range(30)] - - -def shuffling_case_fn(spec, seed, count): - yield "mapping", "data", { - "seed": "0x" + seed.hex(), - "count": count, - "mapping": [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)], - } - - -def shuffling_case(spec, seed, count): - return f"shuffle_0x{seed.hex()}_{count}", lambda: shuffling_case_fn(spec, seed, count) - - -def shuffling_test_cases(spec): - for seed in seeds: - for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]: - yield shuffling_case(spec, seed, count) - - -def create_provider(preset_name: PresetBaseName) -> gen_typing.TestProvider: - - def prepare_fn() -> None: - return - - def cases_fn() -> Iterable[gen_typing.TestCase]: - if preset_name == MAINNET: - spec = spec_mainnet - elif preset_name == MINIMAL: - spec = spec_minimal - else: - raise Exception(f"unrecognized preset: {preset_name}") - for case_name, case_fn in shuffling_test_cases(spec): - yield gen_typing.TestCase( - fork_name=PHASE0, - preset_name=preset_name, - runner_name="shuffling", - handler_name="core", - suite_name="shuffle", - case_name=case_name, - case_fn=case_fn, - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - gen_runner.run_generator("shuffling", [create_provider(MINIMAL), create_provider(MAINNET)]) diff --git a/tests/generators/ssz_generic/__init__.py b/tests/generators/ssz_generic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/generators/ssz_generic/main.py b/tests/generators/ssz_generic/main.py deleted file mode 100644 index 124bc20573..0000000000 --- a/tests/generators/ssz_generic/main.py +++ /dev/null @@ -1,49 +0,0 @@ -from typing import Iterable -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing -import ssz_basic_vector -import ssz_bitlist -import ssz_bitvector -import ssz_boolean -import ssz_uints -import ssz_container -from eth2spec.test.helpers.constants import PHASE0 - - -def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider: - - def prepare_fn() -> None: - return - - def cases_fn() -> Iterable[gen_typing.TestCase]: - for case_name, case_fn in case_maker(): - yield gen_typing.TestCase( - fork_name=PHASE0, - preset_name="general", - runner_name="ssz_generic", - handler_name=handler_name, - suite_name=suite_name, - case_name=case_name, - case_fn=case_fn, - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - gen_runner.run_generator( - "ssz_generic", - [ - create_provider("basic_vector", "valid", ssz_basic_vector.valid_cases), - create_provider("basic_vector", "invalid", ssz_basic_vector.invalid_cases), - create_provider("bitlist", "valid", ssz_bitlist.valid_cases), - create_provider("bitlist", "invalid", ssz_bitlist.invalid_cases), - create_provider("bitvector", "valid", ssz_bitvector.valid_cases), - create_provider("bitvector", "invalid", ssz_bitvector.invalid_cases), - create_provider("boolean", "valid", ssz_boolean.valid_cases), - create_provider("boolean", "invalid", ssz_boolean.invalid_cases), - create_provider("uints", "valid", ssz_uints.valid_cases), - create_provider("uints", "invalid", ssz_uints.invalid_cases), - create_provider("containers", "valid", ssz_container.valid_cases), - create_provider("containers", "invalid", ssz_container.invalid_cases), - ], - ) diff --git a/tests/generators/ssz_static/README.md b/tests/generators/ssz_static/README.md deleted 
file mode 100644 index 3434fe174b..0000000000 --- a/tests/generators/ssz_static/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# SSZ-static - -The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: - the serialization and hashing of Ethereum data type. - -Test-format documentation can be found [here](../../formats/ssz_static/README.md). diff --git a/tests/generators/ssz_static/__init__.py b/tests/generators/ssz_static/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/generators/ssz_static/main.py b/tests/generators/ssz_static/main.py deleted file mode 100644 index e95e49ad98..0000000000 --- a/tests/generators/ssz_static/main.py +++ /dev/null @@ -1,110 +0,0 @@ -from random import Random -from typing import Iterable -from inspect import getmembers, isclass - -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing - -from eth2spec.debug import random_value, encode -from eth2spec.test.helpers.constants import TESTGEN_FORKS, MINIMAL, MAINNET -from eth2spec.test.context import spec_targets -from eth2spec.utils.ssz.ssz_typing import Container -from eth2spec.utils.ssz.ssz_impl import ( - hash_tree_root, - serialize, -) - - -MAX_BYTES_LENGTH = 1000 -MAX_LIST_LENGTH = 10 - - -def create_test_case( - rng: Random, typ, mode: random_value.RandomizationMode, chaos: bool -) -> Iterable[gen_typing.TestCasePart]: - value = random_value.get_random_ssz_object( - rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos - ) - yield "value", "data", encode.encode(value) - yield "serialized", "ssz", serialize(value) - roots_data = {"root": "0x" + hash_tree_root(value).hex()} - yield "roots", "data", roots_data - - -def get_spec_ssz_types(spec): - return [ - (name, value) - for (name, value) in getmembers(spec, isclass) - if issubclass(value, Container) - and value != Container # only the subclasses, not the imported base class - ] - - -def ssz_static_cases( - fork_name: str, - preset_name: str, - seed: int, - name, - ssz_type, - mode: random_value.RandomizationMode, - chaos: bool, - count: int, -): - random_mode_name = mode.to_name() - - # Reproducible RNG - rng = Random(seed) - - for i in range(count): - yield gen_typing.TestCase( - fork_name=fork_name, - preset_name=preset_name, - runner_name="ssz_static", - handler_name=name, - suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}", - case_name=f"case_{i}", - case_fn=lambda: create_test_case(rng, ssz_type, mode, chaos), - ) - - -def create_provider( - fork_name, - preset_name: str, - seed: int, - mode: random_value.RandomizationMode, - chaos: bool, - cases_if_random: int, -) -> gen_typing.TestProvider: - def prepare_fn() -> None: - return - - def cases_fn() -> Iterable[gen_typing.TestCase]: - count = cases_if_random if chaos or mode.is_changing() else 1 - spec = spec_targets[preset_name][fork_name] - - for i, (name, ssz_type) in enumerate(get_spec_ssz_types(spec)): - yield from ssz_static_cases( - fork_name, preset_name, seed * 1000 + i, name, ssz_type, mode, chaos, count - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - # [(seed, config name, randomization mode, chaos on/off, cases_if_random)] - settings = [] - seed = 1 - for mode in random_value.RandomizationMode: - settings.append((seed, MINIMAL, mode, False, 30)) - seed += 1 - settings.append((seed, MINIMAL, random_value.RandomizationMode.mode_random, True, 30)) - seed += 1 - settings.append((seed, MAINNET, 
random_value.RandomizationMode.mode_random, False, 5)) - seed += 1 - for fork in TESTGEN_FORKS: - gen_runner.run_generator( - "ssz_static", - [ - create_provider(fork, preset_name, seed, mode, chaos, cases_if_random) - for (seed, preset_name, mode, chaos, cases_if_random) in settings - ], - ) diff --git a/tests/generators/sync/main.py b/tests/generators/sync/main.py deleted file mode 100644 index 415c95ad4c..0000000000 --- a/tests/generators/sync/main.py +++ /dev/null @@ -1,26 +0,0 @@ -from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods -from eth2spec.test.helpers.constants import BELLATRIX, CAPELLA, DENEB, ELECTRA, FULU - - -if __name__ == "__main__": - bellatrix_mods = { - key: "eth2spec.test.bellatrix.sync.test_" + key - for key in [ - "optimistic", - ] - } - capella_mods = bellatrix_mods - deneb_mods = capella_mods - electra_mods = deneb_mods - fulu_mods = electra_mods - - all_mods = { - BELLATRIX: bellatrix_mods, - CAPELLA: capella_mods, - DENEB: deneb_mods, - ELECTRA: electra_mods, - FULU: fulu_mods, - } - check_mods(all_mods, "sync") - - run_state_test_generators(runner_name="sync", all_mods=all_mods) diff --git a/tests/generators/transition/main.py b/tests/generators/transition/main.py deleted file mode 100644 index 4a6b19b0fa..0000000000 --- a/tests/generators/transition/main.py +++ /dev/null @@ -1,67 +0,0 @@ -from typing import Iterable - -from eth2spec.test.helpers.constants import ( - MINIMAL, - MAINNET, - ALL_PRE_POST_FORKS, -) -from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing -from eth2spec.gen_helpers.gen_from_tests.gen import ( - generate_from_tests, -) -from eth2spec.test.altair.transition import ( - test_transition as test_altair_transition, - test_activations_and_exits as test_altair_activations_and_exits, - test_leaking as test_altair_leaking, - test_slashing as test_altair_slashing, - test_operations as test_altair_operations, -) -from eth2spec.test.deneb.transition import ( - test_operations as test_deneb_operations, - test_transition as test_deneb_transition, -) -from eth2spec.test.electra.transition import ( - test_operations as test_electra_operations, -) - - -def create_provider( - tests_src, preset_name: str, pre_fork_name: str, post_fork_name: str -) -> gen_typing.TestProvider: - - def prepare_fn() -> None: - return - - def cases_fn() -> Iterable[gen_typing.TestCase]: - return generate_from_tests( - runner_name="transition", - handler_name="core", - src=tests_src, - fork_name=post_fork_name, - phase=pre_fork_name, - preset_name=preset_name, - ) - - return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) - - -if __name__ == "__main__": - all_tests = ( - test_altair_transition, - test_altair_activations_and_exits, - test_altair_leaking, - test_altair_slashing, - test_altair_operations, - test_deneb_operations, - test_deneb_transition, - test_electra_operations, - ) - for transition_test_module in all_tests: - for pre_fork, post_fork in ALL_PRE_POST_FORKS: - gen_runner.run_generator( - "transition", - [ - create_provider(transition_test_module, MINIMAL, pre_fork, post_fork), - create_provider(transition_test_module, MAINNET, pre_fork, post_fork), - ], - )
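
A note on the recurring `lambda rng=rng, mode=mode, ...` rewrites in the ssz_generic case modules above: Python closures look up free variables when the lambda is called, not when it is defined, so lambdas built inside a loop would otherwise all observe the final loop values. Binding each loop variable as a default argument captures its value at definition time; the inner `case_fn(seed=seed)` in the new ssz_static runner relies on the same idiom. A minimal standalone sketch of the pitfall and the fix, illustrative only and not part of the patch:

# Illustrative sketch, not part of the patch: why default arguments are used
# to bind loop variables when building test-case callables in a loop.

def broken():
    # All three lambdas close over the same `i`, which is 2 once the loop ends.
    return [lambda: i for i in range(3)]


def fixed():
    # `i=i` evaluates `i` at definition time, giving each lambda its own value.
    return [lambda i=i: i for i in range(3)]


assert [f() for f in broken()] == [2, 2, 2]
assert [f() for f in fixed()] == [0, 1, 2]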
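
Each ssz_static case above yields three parts: the decoded value ("value", "data"), the SSZ encoding ("serialized", "ssz"), and the hash tree root ("roots", "data"). The following small worked example produces the same three parts for a toy container, using only helpers the runner already imports; the `Toy` type is invented here purely for illustration:

# The `Toy` container is a made-up example type, not part of the spec.
from eth2spec.debug import encode
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize
from eth2spec.utils.ssz.ssz_typing import Container, uint64


class Toy(Container):
    a: uint64
    b: uint64


value = Toy(a=1, b=2)
parts = [
    ("value", "data", encode.encode(value)),  # plain-data representation for YAML output
    ("serialized", "ssz", serialize(value)),  # raw SSZ bytes
    ("roots", "data", {"root": "0x" + hash_tree_root(value).hex()}),
]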
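
The new ssz_static runner derives per-case seeds with `deterministic_seed`, a sha256 digest over the sorted keyword arguments, rather than Python's built-in `hash()`, because string hashing is randomized per interpreter process (PYTHONHASHSEED) and would make the generated vectors unreproducible. A standalone sketch of the same idea; the name `stable_seed` is only for illustration:

import hashlib


def stable_seed(**kwargs) -> int:
    # sha256 over sorted key=value pairs is identical across runs and machines,
    # unlike hash(), which is randomized per process for str and bytes inputs.
    m = hashlib.sha256()
    for k, v in sorted(kwargs.items()):
        m.update(f"{k}={v}".encode("utf-8"))
    return int.from_bytes(m.digest()[:8], "little")


# The same inputs always map to the same 64-bit seed.
assert stable_seed(fork_name="phase0", i=3) == stable_seed(fork_name="phase0", i=3)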
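
The deleted per-generator main.py entry points are replaced by modules under tests/generators/runners/ that each expose `get_test_cases() -> Iterable[TestCase]`. Runners whose cases come directly from pyspec test modules reduce to a one-line delegation, as the new sync.py shows. Below is a hypothetical runner following the same pattern; the runner name "example" is made up for illustration and would have to correspond to a real set of test modules:

# Hypothetical runner module modeled on the new sync.py; "example" is a
# placeholder runner name, not a runner introduced by this patch.
from typing import Iterable

from eth2spec.gen_helpers.gen_base.gen_typing import TestCase
from eth2spec.gen_helpers.gen_from_tests.gen import get_test_cases_for


def get_test_cases() -> Iterable[TestCase]:
    return get_test_cases_for("example")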
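
The transition runner is the exception to simple delegation: it expands every preset against every (pre-fork, post-fork) pair from POST_FORK_OF and regenerates each transition test module per pair, passing the pre-fork as `phase` and the post-fork as `fork_name`. A tiny sketch of how such a mapping expands into work items; the literals below are stand-ins, the real values come from eth2spec.test.helpers.constants:

# Stand-in values for illustration only; the real POST_FORK_OF and preset list
# live in eth2spec.test.helpers.constants.
POST_FORK_OF_STANDIN = {"phase0": "altair", "altair": "bellatrix"}
PRESETS_STANDIN = ["minimal", "mainnet"]

work_items = [
    (preset, prefork, postfork)
    for preset in PRESETS_STANDIN
    for prefork, postfork in POST_FORK_OF_STANDIN.items()
]
# Each preset is paired with each fork transition: 2 presets x 2 pairs = 4 items.
assert len(work_items) == 4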