diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index ff52b55ae4..55f62754ed 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -105,13 +105,35 @@ jobs: - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 with: submodules: recursive + fetch-depth: 0 # Fetch full history for commit comparison - name: Setup Python uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 with: python-version: "3.11" - uses: ./.github/actions/setup-env + - name: Get changed files and save to disk + id: get-changed-files + run: | + if [ "${{ github.event_name }}" = "pull_request" ]; then + BASE_SHA=$(git merge-base "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}") + HEAD_SHA="${{ github.event.pull_request.head.sha }}" + else + # On push or force push to the feature branch + BASE_SHA=$(git merge-base "${{ github.event.before }}" "${{ github.sha }}") + HEAD_SHA="${{ github.sha }}" + fi + + echo "Diffing commits: $BASE_SHA..$HEAD_SHA" + + # Get changed files and save to disk + FILE_LIST="changed_files.txt" + git diff --name-only "$BASE_SHA" "$HEAD_SHA" > "$FILE_LIST" + echo "Changed files saved to $FILE_LIST" + echo "file_list=$FILE_LIST" >> $GITHUB_OUTPUT + echo "List of files changed in the PR" + cat $FILE_LIST - name: Run json infra tests - run: tox -e json_infra + run: tox -e json_infra -- --file-list="${{ steps.get-changed-files.outputs.file_list }}" - name: Upload coverage reports to Codecov uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 with: diff --git a/src/ethereum_spec_tools/evm_tools/statetest/__init__.py b/src/ethereum_spec_tools/evm_tools/statetest/__init__.py index cd58cb3028..8015d43672 100644 --- a/src/ethereum_spec_tools/evm_tools/statetest/__init__.py +++ b/src/ethereum_spec_tools/evm_tools/statetest/__init__.py @@ -9,7 +9,7 @@ from copy import deepcopy from dataclasses import dataclass from io import StringIO -from typing import Any, 
Dict, Iterable, List, Optional, TextIO +from typing import Any, Dict, Generator, Iterable, List, Optional, TextIO from ethereum.utils.hexadecimal import hex_to_bytes @@ -35,6 +35,41 @@ class TestCase: transaction: Dict +def read_test_case( + test_file_path: str, key: str, test: Dict[str, Any] +) -> Generator[TestCase, None, None]: + """ + Given a key and a value, return a `TestCase` object. + """ + env = test["env"] + if not isinstance(env, dict): + raise TypeError("env not dict") + + pre = test["pre"] + if not isinstance(pre, dict): + raise TypeError("pre not dict") + + transaction = test["transaction"] + if not isinstance(transaction, dict): + raise TypeError("transaction not dict") + + for fork_name, content in test["post"].items(): + for idx, post in enumerate(content): + if not isinstance(post, dict): + raise TypeError(f'post["{fork_name}"] not dict') + + yield TestCase( + path=test_file_path, + key=key, + index=idx, + fork_name=fork_name, + post=post, + env=env, + pre=pre, + transaction=transaction, + ) + + def read_test_cases(test_file_path: str) -> Iterable[TestCase]: """ Given a path to a filled state test in JSON format, return all the @@ -44,33 +79,7 @@ def read_test_cases(test_file_path: str) -> Iterable[TestCase]: tests = json.load(test_file) for key, test in tests.items(): - env = test["env"] - if not isinstance(env, dict): - raise TypeError("env not dict") - - pre = test["pre"] - if not isinstance(pre, dict): - raise TypeError("pre not dict") - - transaction = test["transaction"] - if not isinstance(transaction, dict): - raise TypeError("transaction not dict") - - for fork_name, content in test["post"].items(): - for idx, post in enumerate(content): - if not isinstance(post, dict): - raise TypeError(f'post["{fork_name}"] not dict') - - yield TestCase( - path=test_file_path, - key=key, - index=idx, - fork_name=fork_name, - post=post, - env=env, - pre=pre, - transaction=transaction, - ) + yield from read_test_case(test_file_path, key, test) def 
run_test_case( diff --git a/tests/json_infra/__init__.py b/tests/json_infra/__init__.py index 0b43585434..fd2db5553f 100644 --- a/tests/json_infra/__init__.py +++ b/tests/json_infra/__init__.py @@ -1,9 +1,11 @@ """Tests related to json infrastructure.""" -from typing import Dict, Optional, TypedDict +from typing import Dict, TypedDict from typing_extensions import NotRequired +from .hardfork import TestHardfork + class _FixtureSource(TypedDict): url: str @@ -31,113 +33,6 @@ class _FixtureSource(TypedDict): } -def _get_fixture_path(key: str) -> str: - return TEST_FIXTURES[key]["fixture_path"] - - -def _build_ethereum_test_paths( - base_path: str, legacy_fork: Optional[str] = None -) -> tuple: - if legacy_fork: - bc_path = f"{base_path}/LegacyTests/{legacy_fork}/BlockchainTests/" - state_path = ( - f"{base_path}/LegacyTests/{legacy_fork}/GeneralStateTests/" - ) - else: - bc_path = f"{base_path}/BlockchainTests/" - state_path = f"{base_path}/GeneralStateTests/" - return bc_path, state_path - - -def _build_eest_test_paths(base_path: str) -> tuple: - bc_path = f"{base_path}/fixtures/blockchain_tests/" - state_path = f"{base_path}/fixtures/state_tests/" - return bc_path, state_path - - -# Base paths -ETHEREUM_TESTS_BASE = _get_fixture_path("ethereum_tests") -EEST_TESTS_BASE = _get_fixture_path("latest_fork_tests") - -# Ethereum test paths -( - PRE_CONSTANTINOPLE_BC_ETHEREUM_TESTS, - PRE_CONSTANTINOPLE_STATE_ETHEREUM_TESTS, -) = _build_ethereum_test_paths(ETHEREUM_TESTS_BASE, "Constantinople") -( - PRE_CANCUN_BC_ETHEREUM_TESTS, - PRE_CANCUN_STATE_ETHEREUM_TESTS, -) = _build_ethereum_test_paths(ETHEREUM_TESTS_BASE, "Cancun") -BC_ETHEREUM_TESTS, STATE_ETHEREUM_TESTS = _build_ethereum_test_paths( - ETHEREUM_TESTS_BASE -) - -# EEST test paths -EEST_BC_TESTS, EEST_STATE_TESTS = _build_eest_test_paths(EEST_TESTS_BASE) - -ForkConfig = TypedDict( - "ForkConfig", - { - "eels_fork": str, - "blockchain_test_dirs": list[str], - "state_test_dirs": list[str], - }, -) - - -def 
_create_fork_config( - eels_fork: str, bc_dirs: list, state_dirs: list -) -> ForkConfig: - return { - "eels_fork": eels_fork, - "blockchain_test_dirs": bc_dirs, - "state_test_dirs": state_dirs, - } - - -PRE_CONSTANTINOPLE_DIRS = ( - [PRE_CONSTANTINOPLE_BC_ETHEREUM_TESTS, EEST_BC_TESTS], - [PRE_CONSTANTINOPLE_STATE_ETHEREUM_TESTS, EEST_STATE_TESTS], -) - -PRE_CANCUN_DIRS = ( - [PRE_CANCUN_BC_ETHEREUM_TESTS, EEST_BC_TESTS], - [PRE_CANCUN_STATE_ETHEREUM_TESTS, EEST_STATE_TESTS], -) - -CURRENT_DIRS = ( - [BC_ETHEREUM_TESTS, EEST_BC_TESTS], - [STATE_ETHEREUM_TESTS, EEST_STATE_TESTS], -) - -FORKS: Dict[str, ForkConfig] = { - **{ - json_fork: _create_fork_config(eels_fork, *PRE_CONSTANTINOPLE_DIRS) - for json_fork, eels_fork in [ - ("Frontier", "frontier"), - ("Homestead", "homestead"), - ("EIP150", "tangerine_whistle"), - ("EIP158", "spurious_dragon"), - ("Byzantium", "byzantium"), - ("ConstantinopleFix", "constantinople"), - ] - }, - **{ - json_fork: _create_fork_config(eels_fork, *PRE_CANCUN_DIRS) - for json_fork, eels_fork in [ - ("Istanbul", "istanbul"), - ("Berlin", "berlin"), - ("London", "london"), - ("Paris", "paris"), - ("Shanghai", "shanghai"), - ] - }, - **{ - json_fork: _create_fork_config(eels_fork, *CURRENT_DIRS) - for json_fork, eels_fork in [ - ("Cancun", "cancun"), - ("Prague", "prague"), - ("Osaka", "osaka"), - ] - }, +FORKS: Dict[str, TestHardfork] = { + fork.json_test_name: fork for fork in TestHardfork.discover() } diff --git a/tests/json_infra/conftest.py b/tests/json_infra/conftest.py index ee19715578..7c3bae1171 100644 --- a/tests/json_infra/conftest.py +++ b/tests/json_infra/conftest.py @@ -3,22 +3,29 @@ import os import shutil import tarfile +from glob import glob from pathlib import Path -from typing import Callable, Final, Optional, Set +from typing import ( + Callable, + Final, + Self, + Set, +) import git import requests_cache -from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.nodes import Item from 
filelock import FileLock from git.exc import GitCommandError, InvalidGitRepositoryError -from pytest import Session, StashKey, fixture +from pytest import Collector, Config, Session, fixture from requests_cache import CachedSession from requests_cache.backends.sqlite import SQLiteCache -from typing_extensions import Self -from . import TEST_FIXTURES +from . import FORKS, TEST_FIXTURES +from .helpers import FixturesFile, FixtureTestItem +from .helpers.select_tests import extract_affected_forks +from .stash_keys import desired_forks_key, fixture_lock try: from xdist import get_xdist_worker_id @@ -45,7 +52,7 @@ def _(path: str | Path) -> Path: def pytest_addoption(parser: Parser) -> None: """ - Accept --evm-trace option in pytest. + Accept custom options in pytest. """ parser.addoption( "--optimized", @@ -66,10 +73,37 @@ def pytest_addoption(parser: Parser) -> None: ) parser.addoption( - "--fork", - dest="fork", + "--from", + action="store", + dest="forks_from", + default="", + type=str, + help="Run tests from and including the specified fork.", + ) + parser.addoption( + "--until", + action="store", + dest="forks_until", + default="", type=str, - help="Run tests for this fork only (e.g., --fork Osaka)", + help="Run tests until and including the specified fork.", + ) + parser.addoption( + "--fork", + action="store", + dest="single_fork", + default="", + help="Only run tests for the specified fork.", + ) + parser.addoption( + "--file-list", + action="store", + dest="file_list", + help=( + "Only run tests relevant to a list of file paths in the " + "repository. This option specifies the path to a file which " + "contains a list of relevant paths." 
+ ), ) @@ -91,38 +125,54 @@ def pytest_configure(config: Config) -> None: # Replace the function in the module ethereum.trace.set_evm_trace(Eip3155Tracer()) + # Process fork range options + desired_fork = config.getoption("single_fork", "") + forks_from = config.getoption("forks_from", "") + forks_until = config.getoption("forks_until", "") + file_list = config.getoption("file_list", None) + + desired_forks = [] + all_forks = list(FORKS.keys()) + if desired_fork: + if desired_fork not in all_forks: + raise ValueError(f"Unknown fork: {desired_fork}") + desired_forks.append(desired_fork) + elif forks_from or forks_until: + # Determine start and end indices + start_idx = 0 + end_idx = len(all_forks) + + if forks_from: + try: + start_idx = all_forks.index(forks_from) + except ValueError as e: + raise ValueError(f"Unknown fork: {forks_from}") from e -def pytest_collection_modifyitems(config: Config, items: list[Item]) -> None: - """Filter test items based on the specified fork option.""" - desired_fork = config.getoption("fork", None) - if not desired_fork: - return + if forks_until: + try: + # +1 to include the until fork + end_idx = all_forks.index(forks_until) + 1 + except ValueError as e: + raise ValueError(f"Unknown fork: {forks_until}") from e - selected = [] - deselected = [] - - for item in items: - forks_of_test = [m.args[0] for m in item.iter_markers(name="fork")] - if forks_of_test and desired_fork not in forks_of_test: - deselected.append(item) - # Check if the test has a vm test marker - elif any(item.iter_markers(name="vm_test")): - callspec = getattr(item, "callspec", None) - if not callspec or "fork" not in getattr(callspec, "params", {}): - # no fork param on this test. 
We keep the test - selected.append(item) - continue - fork_param = callspec.params["fork"] - if fork_param[0] == desired_fork: - selected.append(item) - else: - deselected.append(item) - else: - selected.append(item) + # Validate the fork range + if start_idx >= end_idx: + raise ValueError(f"{forks_until} is before {forks_from}") - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = selected # keep only what matches + # Extract the fork range + desired_forks = all_forks[start_idx:end_idx] + elif file_list: + desired_forks = extract_affected_forks(config.rootpath, file_list) + else: + desired_forks = all_forks + + if not any(desired_forks): + print("No fork specific tests will be run!!!") + else: + fork_list_str = ", ".join(desired_forks) + print(f"Running tests for the following forks: {fork_list_str}") + + config.stash[desired_forks_key] = desired_forks class _FixturesDownloader: @@ -228,9 +278,6 @@ def __exit__( self.keep_cache_keys.clear() -fixture_lock = StashKey[Optional[FileLock]]() - - def pytest_sessionstart(session: Session) -> None: """Initialize test fixtures and file locking at session start.""" if get_xdist_worker_id(session) != "master": @@ -260,6 +307,17 @@ def pytest_sessionstart(session: Session) -> None: fixture_path, ) + # Remove any python files in the downloaded files to avoid + # importing them. + for python_file in glob( + os.path.join(fixture_path, "**/*.py"), recursive=True + ): + try: + os.unlink(python_file) + except FileNotFoundError: + # Not breaking error, another process deleted it first + pass + def pytest_sessionfinish(session: Session, exitstatus: int) -> None: """Clean up file locks at session finish.""" @@ -272,3 +330,28 @@ def pytest_sessionfinish(session: Session, exitstatus: int) -> None: assert lock_file is not None lock_file.release() + + +def pytest_collect_file( + file_path: Path, parent: Collector +) -> Collector | None: + """ + Pytest hook that collects test cases from fixture JSON files. 
+ """ + if file_path.suffix == ".json": + return FixturesFile.from_parent(parent, path=file_path) + return None + + +def pytest_runtest_teardown(item: Item, nextitem: Item) -> None: + """ + Drop cache from a `FixtureTestItem` if the next one is not of the + same type or does not belong to the same fixtures file. + """ + if isinstance(item, FixtureTestItem): + if ( + nextitem is None + or not isinstance(nextitem, FixtureTestItem) + or item.fixtures_file != nextitem.fixtures_file + ): + item.fixtures_file.clear_data_cache() diff --git a/tests/json_infra/hardfork.py b/tests/json_infra/hardfork.py new file mode 100644 index 0000000000..4be5d67d1b --- /dev/null +++ b/tests/json_infra/hardfork.py @@ -0,0 +1,31 @@ +""" +Test-specific Hardfork subclass. + +Extends the base Hardfork class with test infrastructure properties. +""" + +from ethereum_spec_tools.forks import Hardfork + + +class TestHardfork(Hardfork): + """ + Hardfork subclass with test-specific properties. + + This class extends the base Hardfork class with properties needed + for test infrastructure, keeping test-specific concerns separated + from the core fork metadata. + """ + + @property + def json_test_name(self) -> str: + """ + Name of the hard fork in the test json fixtures. 
+ """ + if self.title_case_name == "Tangerine Whistle": + return "EIP150" + elif self.title_case_name == "Spurious Dragon": + return "EIP158" + elif self.title_case_name == "Constantinople": + return "ConstantinopleFix" + else: + return self.title_case_name diff --git a/tests/json_infra/helpers/__init__.py b/tests/json_infra/helpers/__init__.py index 3214c2cc14..2980c854e2 100644 --- a/tests/json_infra/helpers/__init__.py +++ b/tests/json_infra/helpers/__init__.py @@ -1 +1,10 @@ """Helpers to load tests from JSON files.""" + +from .fixtures import ALL_FIXTURE_TYPES, Fixture, FixturesFile, FixtureTestItem +from .load_blockchain_tests import BlockchainTestFixture +from .load_state_tests import StateTestFixture + +ALL_FIXTURE_TYPES.append(BlockchainTestFixture) +ALL_FIXTURE_TYPES.append(StateTestFixture) + +__all__ = ["ALL_FIXTURE_TYPES", "Fixture", "FixturesFile", "FixtureTestItem"] diff --git a/tests/json_infra/helpers/exceptional_test_patterns.py b/tests/json_infra/helpers/exceptional_test_patterns.py index cebb1a9ef5..658f32d581 100644 --- a/tests/json_infra/helpers/exceptional_test_patterns.py +++ b/tests/json_infra/helpers/exceptional_test_patterns.py @@ -68,6 +68,8 @@ def exceptional_blockchain_test_patterns( f"tests/{ef}/eip2537_bls_12_381_precompiles/test_bls12_pairing\\.py::test_valid\\[fork_{jf}-blockchain_test-inf_pair-\\]", f"tests/{ef}/eip2537_bls_12_381_precompiles/test_bls12_pairing\\.py::test_valid\\[fork_{jf}-blockchain_test-multi_inf_pair-\\]", f"tests/{ef}/eip2935_historical_block_hashes_from_state/test_block_hashes\\.py::test_block_hashes_history\\[fork_{jf}-blockchain_test-full_history_plus_one_check_blockhash_first\\]", + # Static tests from EEST + "tests/json_infra/fixtures/latest_fork_tests/fixtures/blockchain_tests/static", ) # These are tests that are considered to be incorrect, @@ -129,6 +131,8 @@ def exceptional_state_test_patterns( 
f"tests/{ef}/eip2537_bls_12_381_precompiles/test_bls12_pairing\\.py::test_valid\\[fork_{jf}-state_test-bls_pairing_e(aG1,bG2)=e(G1,abG2)-\\]", f"tests/{ef}/eip2537_bls_12_381_precompiles/test_bls12_pairing\\.py::test_valid\\[fork_{jf}-state_test-inf_pair-\\]", f"tests/{ef}/eip2537_bls_12_381_precompiles/test_bls12_pairing\\.py::test_valid\\[fork_{jf}-state_test-multi_inf_pair-\\]", + # Static tests from EEST + "tests/json_infra/fixtures/latest_fork_tests/fixtures/state_tests/static", ) return TestPatterns( diff --git a/tests/json_infra/helpers/fixtures.py b/tests/json_infra/helpers/fixtures.py new file mode 100644 index 0000000000..38c6b2f308 --- /dev/null +++ b/tests/json_infra/helpers/fixtures.py @@ -0,0 +1,119 @@ +"""Base class for all fixture loaders.""" + +import json +from abc import ABC, abstractmethod +from functools import cached_property +from typing import Any, Dict, Generator, List, Self, Type + +from _pytest.nodes import Node +from pytest import Collector, Config, File, Item + + +class FixtureTestItem(Item): + """ + Test item that comes from a fixture file. + """ + + @property + def fixtures_file(self) -> "FixturesFile": + """Return the fixtures file from which the test was extracted.""" + raise NotImplementedError() + + +class Fixture(ABC): + """ + Single fixture from a JSON file. + + It can be subclassed in combination with Item or Collector to create a + fixture that can be collected by pytest. 
+ """ + + test_file: str + test_key: str + + def __init__( + self, + *args: Any, + test_file: str, + test_key: str, + **kwargs: Any, + ): + super().__init__(*args, **kwargs) + self.test_file = test_file + self.test_key = test_key + + @classmethod + def from_parent( + cls, + parent: Node, + **kwargs: Any, + ) -> Self: + """Pytest hook that returns a fixture from a JSON file.""" + return super().from_parent( # type: ignore[misc] + parent=parent, **kwargs + ) + + @classmethod + @abstractmethod + def is_format(cls, test_dict: Dict[str, Any]) -> bool: + """Return true if the object can be parsed as the fixture type.""" + pass + + @classmethod + @abstractmethod + def has_desired_fork( + cls, test_dict: Dict[str, Any], config: Config + ) -> bool: + """ + Check if the fork(s) relevant to this item/ + collector are in the desired forks list. + """ + pass + + +ALL_FIXTURE_TYPES: List[Type[Fixture]] = [] + + +class FixturesFile(File): + """Single JSON file containing fixtures.""" + + @cached_property + def data(self) -> Dict[str, Any]: + """Return the JSON data of the full file.""" + # loaded once per worker per file (thanks to cached_property) + with self.fspath.open("r", encoding="utf-8") as f: + return json.load(f) + + def clear_data_cache(self) -> None: + """Drop the data cache.""" + if hasattr(self, "data"): + del self.data + + def collect( + self: Self, + ) -> Generator[Item | Collector, None, None]: + """Collect test cases from a single JSON fixtures file.""" + try: + loaded_file = self.data + except Exception: + return # Skip *.json files that are unreadable. 
+ if isinstance(loaded_file, dict): + for key, test_dict in loaded_file.items(): + if not isinstance(test_dict, dict): + continue + for fixture_type in ALL_FIXTURE_TYPES: + if not fixture_type.is_format(test_dict): + continue + # Check if we should collect this test + if not fixture_type.has_desired_fork( + test_dict, self.config + ): + continue + yield fixture_type.from_parent( # type: ignore + parent=self, + name=key, + test_file=str(self.path), + test_key=key, + ) + # Make sure we don't keep anything from collection in memory. + self.clear_data_cache() diff --git a/tests/json_infra/helpers/load_blockchain_tests.py b/tests/json_infra/helpers/load_blockchain_tests.py index 990e941a35..90c0a68cda 100644 --- a/tests/json_infra/helpers/load_blockchain_tests.py +++ b/tests/json_infra/helpers/load_blockchain_tests.py @@ -1,14 +1,12 @@ """Helpers to load and run blockchain tests from JSON files.""" import importlib -import json -import os.path -from glob import glob -from typing import Any, Dict, Generator +from pathlib import Path +from typing import Any, Dict, Tuple from unittest.mock import call, patch import pytest -from _pytest.mark.structures import ParameterSet +from _pytest.config import Config from ethereum_rlp import rlp from ethereum_rlp.exceptions import RLPException from ethereum_types.numeric import U64 @@ -19,7 +17,9 @@ from ethereum_spec_tools.evm_tools.loaders.fixture_loader import Load from .. 
import FORKS +from ..stash_keys import desired_forks_key from .exceptional_test_patterns import exceptional_blockchain_test_patterns +from .fixtures import Fixture, FixturesFile, FixtureTestItem class NoTestsFoundError(Exception): @@ -29,79 +29,6 @@ class NoTestsFoundError(Exception): """ -def run_blockchain_st_test(test_case: Dict, load: Load) -> None: - """Run a blockchain state test from JSON test case data.""" - test_file = test_case["test_file"] - test_key = test_case["test_key"] - - with open(test_file, "r") as fp: - data = json.load(fp) - - json_data = data[test_key] - - if "postState" not in json_data: - pytest.xfail(f"{test_case} doesn't have post state") - - genesis_header = load.json_to_header(json_data["genesisBlockHeader"]) - parameters = [ - genesis_header, - (), - (), - ] - if hasattr(genesis_header, "withdrawals_root"): - parameters.append(()) - - if hasattr(genesis_header, "requests_root"): - parameters.append(()) - - genesis_block = load.fork.Block(*parameters) - - genesis_header_hash = hex_to_bytes(json_data["genesisBlockHeader"]["hash"]) - assert keccak256(rlp.encode(genesis_header)) == genesis_header_hash - genesis_rlp = hex_to_bytes(json_data["genesisRLP"]) - assert rlp.encode(genesis_block) == genesis_rlp - - try: - state = load.json_to_state(json_data["pre"]) - except StateWithEmptyAccount as e: - pytest.xfail(str(e)) - - chain = load.fork.BlockChain( - blocks=[genesis_block], - state=state, - chain_id=U64(json_data["genesisBlockHeader"].get("chainId", 1)), - ) - - mock_pow = ( - json_data["sealEngine"] == "NoProof" and not load.fork.proof_of_stake - ) - - for json_block in json_data["blocks"]: - block_exception = None - for key, value in json_block.items(): - if key.startswith("expectException"): - block_exception = value - break - - if block_exception: - # TODO: Once all the specific exception types are thrown, - # only `pytest.raises` the correct exception type instead of - # all of them. 
- with pytest.raises((EthereumException, RLPException)): - add_block_to_chain(chain, json_block, load, mock_pow) - return - else: - add_block_to_chain(chain, json_block, load, mock_pow) - - last_block_hash = hex_to_bytes(json_data["lastblockhash"]) - assert keccak256(rlp.encode(chain.blocks[-1].header)) == last_block_hash - - expected_post_state = load.json_to_state(json_data["postState"]) - assert chain.state == expected_post_state - load.fork.close_state(chain.state) - load.fork.close_state(expected_post_state) - - def add_block_to_chain( chain: Any, json_block: Any, load: Load, mock_pow: bool ) -> None: @@ -133,96 +60,170 @@ def add_block_to_chain( ) -# Functions that fetch individual test cases -def load_json_fixture(test_file: str, json_fork: str) -> Generator: - """Load test cases from a JSON fixture file for the specified fork.""" - # Extract the pure basename of the file without the path to the file. - # Ex: Extract "world.json" from "path/to/file/world.json" - # Extract the filename without the extension. 
Ex: Extract "world" from - # "world.json" - with open(test_file, "r") as fp: - data = json.load(fp) - - # Search tests by looking at the `network` attribute - found_keys = [] - for key, test in data.items(): - if "network" not in test: - continue - - if test["network"] == json_fork: - found_keys.append(key) - - if not any(found_keys): - raise NoTestsFoundError - - for _key in found_keys: - yield { - "test_file": test_file, - "test_key": _key, - "json_fork": json_fork, - } - - -def fetch_blockchain_tests( - json_fork: str, -) -> Generator[Dict | ParameterSet, None, None]: - """Fetch all blockchain test cases for the specified JSON fork.""" - # Filter FORKS based on fork_option parameter - eels_fork = FORKS[json_fork]["eels_fork"] - test_dirs = FORKS[json_fork]["blockchain_test_dirs"] - - test_patterns = exceptional_blockchain_test_patterns(json_fork, eels_fork) - - # Get all the files to iterate over from both eest_tests_path - # and ethereum_tests_path - all_jsons = [] - for test_dir in test_dirs: - all_jsons.extend( - glob(os.path.join(test_dir, "**/*.json"), recursive=True) +class BlockchainTestFixture(Fixture, FixtureTestItem): + """Single blockchain test fixture from a JSON file.""" + + fork_name: str + + def __init__( + self, + *args: Any, + **kwargs: Any, + ) -> None: + """Initialize a single blockchain test fixture from a JSON file.""" + super().__init__(*args, **kwargs) + self.fork_name = self.test_dict["network"] + self.add_marker(pytest.mark.fork(self.fork_name)) + self.add_marker("json_blockchain_tests") + self.eels_fork = FORKS[self.fork_name].short_name + + # Mark tests with exceptional markers + test_patterns = exceptional_blockchain_test_patterns( + self.fork_name, self.eels_fork + ) + if any(x.search(self.nodeid) for x in test_patterns.expected_fail): + self.add_marker(pytest.mark.skip("Expected to fail")) + if any(x.search(self.nodeid) for x in test_patterns.slow): + self.add_marker("slow") + if any(x.search(self.nodeid) for x in 
test_patterns.big_memory): + self.add_marker("bigmem") + + @property + def fixtures_file(self) -> FixturesFile: + """Fixtures file from which the test fixture was collected.""" + parent = self.parent + assert parent is not None + assert isinstance(parent, FixturesFile) + return parent + + @property + def test_dict(self) -> Dict[str, Any]: + """Load test from disk.""" + loaded_file = self.fixtures_file.data + return loaded_file[self.test_key] + + def runtest(self) -> None: + """Run a blockchain state test from JSON test case data.""" + json_data = self.test_dict + if "postState" not in json_data: + pytest.xfail( + f"{self.test_file}[{self.test_key}] doesn't have post state" + ) + + # Currently, there are 5 tests in the ethereum/tests fixtures + # where we have non block specific exceptions. + # For example: All the blocks process correctly but the final + # block hash provided in the test is not correct. Or all the + # blocks process correctly but the post state provided is not + # right. 
Since these tests do not directly have anything to do + # with the state transition itself, we skip these + # See src/BlockchainTestsFiller/InvalidBlocks/bcExpectSection + # in ethereum/tests + if "exceptions" in json_data: + pytest.xfail( + f"{self.test_file}[{self.test_key}] has unrelated exceptions" + ) + + load = Load( + self.fork_name, + self.eels_fork, ) - files_to_iterate = [] - for full_path in all_jsons: - if not any(x.search(full_path) for x in test_patterns.expected_fail): - # If a file or folder is marked for ignore, - # it can already be dropped at this stage - files_to_iterate.append(full_path) + genesis_header = load.json_to_header(json_data["genesisBlockHeader"]) + parameters = [ + genesis_header, + (), + (), + ] + if hasattr(genesis_header, "withdrawals_root"): + parameters.append(()) + + if hasattr(genesis_header, "requests_root"): + parameters.append(()) + + genesis_block = load.fork.Block(*parameters) + + genesis_header_hash = hex_to_bytes( + json_data["genesisBlockHeader"]["hash"] + ) + assert keccak256(rlp.encode(genesis_header)) == genesis_header_hash + genesis_rlp = hex_to_bytes(json_data["genesisRLP"]) + assert rlp.encode(genesis_block) == genesis_rlp - # Start yielding individual test cases from the file list - for _test_file in files_to_iterate: try: - for _test_case in load_json_fixture(_test_file, json_fork): - # _identifier could identify files, folders through test_file - # individual cases through test_key - _identifier = ( - "(" - + _test_case["test_file"] - + "|" - + _test_case["test_key"] - + ")" - ) - _test_case["eels_fork"] = eels_fork - if any( - x.search(_identifier) for x in test_patterns.expected_fail - ): - continue - elif any(x.search(_identifier) for x in test_patterns.slow): - yield pytest.param(_test_case, marks=pytest.mark.slow) - elif any( - x.search(_identifier) for x in test_patterns.big_memory - ): - yield pytest.param(_test_case, marks=pytest.mark.bigmem) - else: - yield _test_case - except NoTestsFoundError: - # 
file doesn't contain tests for the given fork - continue - - -# Test case Identifier -def idfn(test_case: Dict) -> str: - """Generate test case identifier from test case dictionary.""" - if isinstance(test_case, dict): - folder_name = test_case["test_file"].split("/")[-2] - # Assign Folder name and test_key to identify tests in output - return folder_name + " - " + test_case["test_key"] + state = load.json_to_state(json_data["pre"]) + except StateWithEmptyAccount as e: + pytest.xfail(str(e)) + + chain = load.fork.BlockChain( + blocks=[genesis_block], + state=state, + chain_id=U64(json_data["genesisBlockHeader"].get("chainId", 1)), + ) + + mock_pow = ( + json_data["sealEngine"] == "NoProof" + and not load.fork.proof_of_stake + ) + + for json_block in json_data["blocks"]: + block_exception = None + for key, value in json_block.items(): + if key.startswith("expectException"): + block_exception = value + break + if key == "exceptions": + block_exception = value + break + + if block_exception: + # TODO: Once all the specific exception types are thrown, + # only `pytest.raises` the correct exception type instead + # of all of them. 
+ with pytest.raises((EthereumException, RLPException)): + add_block_to_chain(chain, json_block, load, mock_pow) + load.fork.close_state(chain.state) + return + else: + add_block_to_chain(chain, json_block, load, mock_pow) + + last_block_hash = hex_to_bytes(json_data["lastblockhash"]) + assert ( + keccak256(rlp.encode(chain.blocks[-1].header)) == last_block_hash + ) + + expected_post_state = load.json_to_state(json_data["postState"]) + assert chain.state == expected_post_state + load.fork.close_state(chain.state) + load.fork.close_state(expected_post_state) + + def reportinfo(self) -> Tuple[Path, int, str]: + """Return information for test reporting.""" + return self.path, 1, self.name + + @classmethod + def is_format(cls, test_dict: Dict[str, Any]) -> bool: + """Return true if the object can be parsed as the fixture type.""" + if "genesisBlockHeader" not in test_dict: + return False + if "blocks" not in test_dict: + return False + if "engineNewPayloads" in test_dict: + return False + if "preHash" in test_dict: + return False + if "network" not in test_dict: + return False + return True + + @classmethod + def has_desired_fork( + cls, test_dict: Dict[str, Any], config: Config + ) -> bool: + """ + Check if the item fork is in the desired forks list. 
+ """ + desired_forks = config.stash.get(desired_forks_key, None) + if desired_forks is None or test_dict["network"] in desired_forks: + return True + return False diff --git a/tests/json_infra/helpers/load_state_tests.py b/tests/json_infra/helpers/load_state_tests.py index 37e6813402..cefed17182 100644 --- a/tests/json_infra/helpers/load_state_tests.py +++ b/tests/json_infra/helpers/load_state_tests.py @@ -1,148 +1,228 @@ """Helper functions to load and run general state tests for Ethereum forks.""" import json -import os import sys -from glob import glob from io import StringIO -from typing import Dict, Generator +from typing import Any, Dict, Iterable, List import pytest +from _pytest.config import Config +from _pytest.nodes import Item +from pytest import Collector from ethereum.exceptions import StateWithEmptyAccount from ethereum.utils.hexadecimal import hex_to_bytes from ethereum_spec_tools.evm_tools import create_parser -from ethereum_spec_tools.evm_tools.statetest import read_test_cases +from ethereum_spec_tools.evm_tools.statetest import read_test_case from ethereum_spec_tools.evm_tools.t8n import T8N from .. import FORKS -from .exceptional_test_patterns import exceptional_state_test_patterns +from ..stash_keys import desired_forks_key +from .exceptional_test_patterns import ( + exceptional_state_test_patterns, +) +from .fixtures import Fixture, FixturesFile, FixtureTestItem parser = create_parser() -def fetch_state_tests(json_fork: str) -> Generator: - """ - Fetches all the general state tests from the given directory. 
- """ - # Filter FORKS based on fork_option parameter - eels_fork = FORKS[json_fork]["eels_fork"] - test_dirs = FORKS[json_fork]["state_test_dirs"] - - test_patterns = exceptional_state_test_patterns(json_fork, eels_fork) - - # Get all the files to iterate over from both eest_tests_path - # and ethereum_tests_path - all_jsons = [] - for test_dir in test_dirs: - all_jsons.extend( - glob(os.path.join(test_dir, "**/*.json"), recursive=True) +class StateTest(FixtureTestItem): + """Single state test case item.""" + + index: int + fork_name: str + + def __init__( + self, + *args: Any, + index: int, + fork_name: str, + **kwargs: Any, + ) -> None: + """Initialize a single test case item.""" + super().__init__(*args, **kwargs) + self.index = index + self.fork_name = fork_name + self.add_marker(pytest.mark.fork(self.fork_name)) + self.add_marker("evm_tools") + self.add_marker("json_state_tests") + eels_fork = FORKS[fork_name].short_name + + # Mark tests with exceptional markers + test_patterns = exceptional_state_test_patterns(fork_name, eels_fork) + if any(x.search(self.nodeid) for x in test_patterns.slow): + self.add_marker("slow") + + @property + def state_test_fixture(self) -> "StateTestFixture": + """Return the state test fixture this test belongs to.""" + parent = self.parent + assert parent is not None + assert isinstance(parent, StateTestFixture) + return parent + + @property + def test_key(self) -> str: + """Return the key of the state test fixture in the fixture file.""" + return self.state_test_fixture.test_key + + @property + def fixtures_file(self) -> FixturesFile: + """Fixtures file from which the test fixture was collected.""" + return self.state_test_fixture.fixtures_file + + @property + def test_dict(self) -> Dict[str, Any]: + """Load test from disk.""" + loaded_file = self.fixtures_file.data + return loaded_file[self.test_key] + + def runtest(self) -> None: + """ + Runs a single general state test. 
+ """ + json_fork = self.fork_name + test_dict = self.test_dict + + env = test_dict["env"] + try: + env["blockHashes"] = {"0": env["previousHash"]} + except KeyError: + env["blockHashes"] = {} + env["withdrawals"] = [] + + alloc = test_dict["pre"] + + post = test_dict["post"][self.fork_name][self.index] + post_hash = post["hash"] + d = post["indexes"]["data"] + g = post["indexes"]["gas"] + v = post["indexes"]["value"] + + tx = {} + for k, value in test_dict["transaction"].items(): + if k == "data": + tx["input"] = value[d] + elif k == "gasLimit": + tx["gas"] = value[g] + elif k == "value": + tx[k] = value[v] + elif k == "accessLists": + if value[d] is not None: + tx["accessList"] = value[d] + else: + tx[k] = value + + txs = [tx] + + in_stream = StringIO( + json.dumps( + { + "env": env, + "alloc": alloc, + "txs": txs, + } + ) ) - for test_file_path in all_jsons: - test_cases = read_test_cases(test_file_path) - - for test_case in test_cases: - if test_case.fork_name != json_fork: - continue - - test_case_dict = { - "test_file": test_case.path, - "test_key": test_case.key, - "index": test_case.index, - "json_fork": json_fork, - } + # Run the t8n tool + t8n_args = [ + "t8n", + "--input.alloc", + "stdin", + "--input.env", + "stdin", + "--input.txs", + "stdin", + "--state.fork", + f"{json_fork}", + "--state-test", + ] + t8n_options = parser.parse_args(t8n_args) - if any(x.search(test_case.key) for x in test_patterns.slow): - yield pytest.param(test_case_dict, marks=pytest.mark.slow) - else: - yield test_case_dict + try: + t8n = T8N(t8n_options, sys.stdout, in_stream) + except StateWithEmptyAccount as e: + pytest.xfail(str(e)) + t8n.run_state_test() -def idfn(test_case: Dict) -> str: - """ - Identify the test case. 
- """ - if isinstance(test_case, dict): - folder_name = test_case["test_file"].split("/")[-2] - test_key = test_case["test_key"] - index = test_case["index"] + if "expectException" in post: + assert 0 in t8n.txs.rejected_txs + return - return f"{folder_name} - {test_key} - {index}" + assert hex_to_bytes(post_hash) == t8n.result.state_root -def run_state_test(test_case: Dict[str, str]) -> None: +class StateTestFixture(Fixture, Collector): """ - Runs a single general state test. + State test fixture from a JSON file that can contain multiple test + cases. """ - test_file = test_case["test_file"] - test_key = test_case["test_key"] - index = test_case["index"] - json_fork = test_case["json_fork"] - with open(test_file) as f: - tests = json.load(f) - - env = tests[test_key]["env"] - try: - env["blockHashes"] = {"0": env["previousHash"]} - except KeyError: - env["blockHashes"] = {} - env["withdrawals"] = [] - - alloc = tests[test_key]["pre"] - - post = tests[test_key]["post"][json_fork][index] - post_hash = post["hash"] - d = post["indexes"]["data"] - g = post["indexes"]["gas"] - v = post["indexes"]["value"] - - tx = {} - for k, value in tests[test_key]["transaction"].items(): - if k == "data": - tx["input"] = value[d] - elif k == "gasLimit": - tx["gas"] = value[g] - elif k == "value": - tx[k] = value[v] - elif k == "accessLists": - if value[d] is not None: - tx["accessList"] = value[d] - else: - tx[k] = value - - txs = [tx] - - in_stream = StringIO( - json.dumps( - { - "env": env, - "alloc": alloc, - "txs": txs, - } - ) - ) - - # Run the t8n tool - t8n_args = [ - "t8n", - "--input.alloc", - "stdin", - "--input.env", - "stdin", - "--input.txs", - "stdin", - "--state.fork", - f"{json_fork}", - "--state-test", - ] - t8n_options = parser.parse_args(t8n_args) - - try: - t8n = T8N(t8n_options, sys.stdout, in_stream) - except StateWithEmptyAccount as e: - pytest.xfail(str(e)) - - t8n.run_state_test() - - assert hex_to_bytes(post_hash) == t8n.result.state_root + + @classmethod 
+ def is_format(cls, test_dict: Dict[str, Any]) -> bool: + """Return true if the object can be parsed as the fixture type.""" + if "env" not in test_dict: + return False + if "pre" not in test_dict: + return False + if "transaction" not in test_dict: + return False + if "post" not in test_dict: + return False + return True + + @property + def fixtures_file(self) -> FixturesFile: + """Fixtures file from which the test fixture was collected.""" + parent = self.parent + assert parent is not None + assert isinstance(parent, FixturesFile) + return parent + + @property + def test_dict(self) -> Dict[str, Any]: + """Load test from disk.""" + loaded_file = self.fixtures_file.data + return loaded_file[self.test_key] + + def collect(self) -> Iterable[Item | Collector]: + """Collect state test cases inside of this fixture.""" + desired_forks: List[str] = self.config.stash.get(desired_forks_key, []) + for test_case in read_test_case( + test_file_path=self.test_file, + key=self.test_key, + test=self.test_dict, + ): + # The has_desired_fork method is used to skip the entire + # fixture file if it does not feature any of the desired + # forks. The below check is performed on the individual + # test cases within a fixture file in order to keep + # nothing other than the desired forks. + if test_case.fork_name not in desired_forks: + continue + name = f"{test_case.fork_name}::{test_case.index}" + yield StateTest.from_parent( + parent=self, + name=name, + index=test_case.index, + fork_name=test_case.fork_name, + ) + + @classmethod + def has_desired_fork( + cls, test_dict: Dict[str, Any], config: Config + ) -> bool: + """ + Check if the collector fork list has at least + one fork in the desired fork list. 
+ """ + desired_forks = config.stash.get(desired_forks_key, None) + if desired_forks is None: + return True + + for network in test_dict["post"].keys(): + if network in desired_forks: + return True + return False diff --git a/tests/json_infra/helpers/select_tests.py b/tests/json_infra/helpers/select_tests.py new file mode 100644 index 0000000000..5ff1c25c25 --- /dev/null +++ b/tests/json_infra/helpers/select_tests.py @@ -0,0 +1,88 @@ +""" +Targeted test selection based on changed files. + +This module reads a list of changed files and determines which fork +folders have been modified, then provides functions to generate targeted +pytest commands. +""" + +from pathlib import Path +from typing import List + +from .. import TestHardfork + +FORK_MAPPING = { + fork.short_name: fork.json_test_name for fork in TestHardfork.discover() +} + + +def extract_affected_forks(repo_root: Path, files_path: str) -> List[str]: + """ + Extract fork names from changed file paths read from disk. + + Args: + repo_root: Root directory of the repository config. 
+ files_path: Path to file containing changed file paths + (one per line) + + Returns: + List of fork json_test_names that have been affected + + """ + all_forks = [fork.json_test_name for fork in TestHardfork.discover()] + # Read changed files from disk + changed_files_file = Path(files_path) + if not changed_files_file.exists(): + print(f"File list file {files_path} does not exist or is empty!!") + return all_forks + + with open(changed_files_file, "r") as f: + changed_files = [line.strip() for line in f if line.strip()] + + # Extract affected forks + affected_forks = set() + + for file_path_str in changed_files: + if not file_path_str or file_path_str.startswith("#"): + # Skip empty lines and comments + continue + + try: + # Normalize the path + file_path = Path(file_path_str) + + # Convert absolute paths to relative + if file_path.is_absolute(): + try: + file_path = file_path.relative_to(repo_root) + except ValueError: + # Path is outside repo, skip it + continue + + except (TypeError, ValueError, OSError): + # Skip invalid paths + continue + + if file_path.is_relative_to("tests/json_infra/"): + # Run all forks if something changes in the test + # framework + return all_forks + if file_path.is_relative_to("src/ethereum_spec_tools/evm_tools"): + # Run all forks if something changes in the evm + # tools + return all_forks + + if file_path.is_relative_to("src/ethereum/"): + parts = Path(file_path).parts + if len(parts) < 4 or parts[2] != "forks": + # Run all tests if something changes in the + # non fork-specific part of src/ethereum + return all_forks + + # Run tests for specific forks + fork_short_name = parts[3] + fork_json_name = FORK_MAPPING.get(fork_short_name) + if fork_json_name: + affected_forks.add(fork_json_name) + + return list(affected_forks) diff --git a/tests/json_infra/stash_keys.py b/tests/json_infra/stash_keys.py new file mode 100644 index 0000000000..021d9a83b5 --- /dev/null +++ b/tests/json_infra/stash_keys.py @@ -0,0 +1,9 @@ +"""Shared 
StashKey definitions for json_infra tests.""" + +from typing import Optional + +from filelock import FileLock +from pytest import StashKey + +desired_forks_key = StashKey[list[str]]() +fixture_lock = StashKey[Optional[FileLock]]() diff --git a/tests/json_infra/test_blockchain_tests.py b/tests/json_infra/test_blockchain_tests.py deleted file mode 100644 index 9e19a361cf..0000000000 --- a/tests/json_infra/test_blockchain_tests.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Run the blockchain tests from json fixtures.""" - -from typing import Callable, Dict - -import pytest - -from . import FORKS -from .helpers.load_blockchain_tests import ( - Load, - fetch_blockchain_tests, - idfn, - run_blockchain_st_test, -) - - -def _generate_test_function(fork_name: str) -> Callable: - """Generates a test function for blockchain tests for a specific fork.""" - - @pytest.mark.fork(fork_name) - @pytest.mark.json_blockchain_tests - @pytest.mark.parametrize( - "blockchain_test_case", - fetch_blockchain_tests(fork_name), - ids=idfn, - ) - def test_func(blockchain_test_case: Dict) -> None: - load = Load( - blockchain_test_case["json_fork"], - blockchain_test_case["eels_fork"], - ) - run_blockchain_st_test(blockchain_test_case, load=load) - - test_func.__name__ = f"test_blockchain_tests_{fork_name.lower()}" - return test_func - - -for fork_name in FORKS.keys(): - locals()[f"test_blockchain_tests_{fork_name.lower()}"] = ( - _generate_test_function(fork_name) - ) diff --git a/tests/json_infra/test_ethash.py b/tests/json_infra/test_ethash.py index 86b52432b2..ec2368083c 100644 --- a/tests/json_infra/test_ethash.py +++ b/tests/json_infra/test_ethash.py @@ -44,7 +44,7 @@ @pytest.mark.parametrize("json_fork", POW_FORKS) def test_ethtest_fixtures(json_fork: str) -> None: """Tests ethash proof-of-work validation against ethereum test fixtures.""" - eels_fork = FORKS[json_fork]["eels_fork"] + eels_fork = FORKS[json_fork].short_name fork_module = importlib.import_module(f"ethereum.forks.{eels_fork}.fork") 
ethereum_tests = load_pow_test_fixtures(json_fork) @@ -79,7 +79,7 @@ def load_pow_test_fixtures(json_fork: str) -> List[Dict[str, Any]]: Loads proof-of-work test fixtures for a specific fork from JSON files. """ - eels_fork = FORKS[json_fork]["eels_fork"] + eels_fork = FORKS[json_fork].short_name header = importlib.import_module( f"ethereum.forks.{eels_fork}.blocks" ).Header @@ -122,7 +122,7 @@ def test_pow_validation_block_headers( Tests proof-of-work validation on real block headers for specific forks. """ - eels_fork = FORKS[json_fork]["eels_fork"] + eels_fork = FORKS[json_fork].short_name fork_module = importlib.import_module(f"ethereum.forks.{eels_fork}.fork") block_str_data = cast( diff --git a/tests/json_infra/test_state_tests.py b/tests/json_infra/test_state_tests.py deleted file mode 100644 index 20bb578654..0000000000 --- a/tests/json_infra/test_state_tests.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Run the state tests from json fixtures.""" - -from typing import Callable, Dict - -import pytest - -from . 
import FORKS -from .helpers.load_state_tests import fetch_state_tests, idfn, run_state_test - - -def _generate_test_function(fork_name: str) -> Callable: - """Generates a test function for state tests for a specific fork.""" - - @pytest.mark.fork(fork_name) - @pytest.mark.evm_tools - @pytest.mark.json_state_tests - @pytest.mark.parametrize( - "state_test_case", - fetch_state_tests(fork_name), - ids=idfn, - ) - def test_func(state_test_case: Dict) -> None: - run_state_test(state_test_case) - - test_func.__name__ = f"test_state_tests_{fork_name.lower()}" - return test_func - - -for fork_name in FORKS.keys(): - locals()[f"test_state_tests_{fork_name.lower()}"] = ( - _generate_test_function(fork_name) - ) diff --git a/tests/json_infra/test_trie.py b/tests/json_infra/test_trie.py index c0de3f1709..e950fbf486 100644 --- a/tests/json_infra/test_trie.py +++ b/tests/json_infra/test_trie.py @@ -31,7 +31,7 @@ def test_trie_secure_hex(fork: str) -> None: """Tests secure trie implementation with hex-encoded test data.""" tests = load_tests("hex_encoded_securetrie_test.json") - eels_fork = FORKS[fork]["eels_fork"] + eels_fork = FORKS[fork].short_name trie_module = importlib.import_module(f"ethereum.forks.{eels_fork}.trie") for name, test in tests.items(): @@ -48,7 +48,7 @@ def test_trie_secure(fork: str) -> None: """Tests secure trie implementation with standard test data.""" tests = load_tests("trietest_secureTrie.json") - eels_fork = FORKS[fork]["eels_fork"] + eels_fork = FORKS[fork].short_name trie_module = importlib.import_module(f"ethereum.forks.{eels_fork}.trie") for name, test in tests.items(): @@ -65,7 +65,7 @@ def test_trie_secure_any_order(fork: str) -> None: """Tests secure trie implementation with any-order test data.""" tests = load_tests("trieanyorder_secureTrie.json") - eels_fork = FORKS[fork]["eels_fork"] + eels_fork = FORKS[fork].short_name trie_module = importlib.import_module(f"ethereum.forks.{eels_fork}.trie") for name, test in tests.items(): @@ -82,7 +82,7 
@@ def test_trie(fork: str) -> None: """Tests non-secure trie implementation with standard test data.""" tests = load_tests("trietest.json") - eels_fork = FORKS[fork]["eels_fork"] + eels_fork = FORKS[fork].short_name trie_module = importlib.import_module(f"ethereum.forks.{eels_fork}.trie") for name, test in tests.items(): @@ -99,7 +99,7 @@ def test_trie_any_order(fork: str) -> None: """Tests non-secure trie implementation with any-order test data.""" tests = load_tests("trieanyorder.json") - eels_fork = FORKS[fork]["eels_fork"] + eels_fork = FORKS[fork].short_name trie_module = importlib.import_module(f"ethereum.forks.{eels_fork}.trie") for name, test in tests.items(): diff --git a/tox.ini b/tox.ini index e52433e286..0208dda6df 100644 --- a/tox.ini +++ b/tox.ini @@ -48,15 +48,15 @@ commands = commands = pytest \ -m "not slow" \ - -n auto --maxprocesses 6 \ + -n auto --maxprocesses 6 --dist=loadfile \ --cov-config=pyproject.toml \ --cov=ethereum \ --cov-report=term \ --cov-report "xml:{toxworkdir}/coverage.xml" \ --no-cov-on-fail \ --cov-branch \ - --ignore-glob='tests/json_infra/fixtures/*' \ --basetemp="{temp_dir}/pytest" \ + {posargs} \ tests/json_infra [testenv:py3] @@ -99,8 +99,7 @@ passenv = commands = pytest \ -m "not slow and not evm_tools" \ - -n auto --maxprocesses 5 \ - --ignore-glob='tests/json_infra/fixtures/*' \ + -n auto --maxprocesses 5 --dist=loadfile \ --ignore-glob='tests/test_t8n.py' \ --ignore-glob='eest_tests/*' \ --basetemp="{temp_dir}/pytest" \