diff --git a/.gitignore b/.gitignore
index 5829ce2876..9e409446ae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,9 @@
 # AI
 .claude/
 
+# Benchmark fixed opcode counts
+.fixed_opcode_counts.json
+
 # C extensions
 *.so
 
diff --git a/packages/testing/pyproject.toml b/packages/testing/pyproject.toml
index 891ec2fe9a..669163764e 100644
--- a/packages/testing/pyproject.toml
+++ b/packages/testing/pyproject.toml
@@ -101,6 +101,7 @@ extract_config = "execution_testing.cli.extract_config:extract_config"
 compare_fixtures = "execution_testing.cli.compare_fixtures:main"
 modify_static_test_gas_limits = "execution_testing.cli.modify_static_test_gas_limits:main"
 validate_changelog = "execution_testing.cli.tox_helpers:validate_changelog"
+benchmark_parser = "execution_testing.cli.benchmark_parser:main"
 
 [tool.setuptools.packages.find]
 where = ["src"]
diff --git a/packages/testing/src/execution_testing/cli/benchmark_parser.py b/packages/testing/src/execution_testing/cli/benchmark_parser.py
new file mode 100644
index 0000000000..e188239c93
--- /dev/null
+++ b/packages/testing/src/execution_testing/cli/benchmark_parser.py
@@ -0,0 +1,364 @@
+"""
+Parser to analyze benchmark tests and maintain the opcode counts mapping.
+
+This script uses Python's AST to analyze benchmark tests and generate/update
+the scenario configs in `.fixed_opcode_counts.json`.
+
+Usage:
+    uv run benchmark_parser          # Update `.fixed_opcode_counts.json`
+    uv run benchmark_parser --check  # Check for new/missing entries (CI)
+"""
+
+import argparse
+import ast
+import json
+import sys
+from pathlib import Path
+
+
+def get_repo_root() -> Path:
+    """Get the repository root directory."""
+    current = Path.cwd()
+    while current != current.parent:
+        if (current / "tests" / "benchmark").exists():
+            return current
+        current = current.parent
+    raise FileNotFoundError("Could not find repository root")
+
+
+def get_benchmark_dir() -> Path:
+    """Get the benchmark tests directory."""
+    return get_repo_root() / "tests" / "benchmark"
+
+
+def get_config_file() -> Path:
+    """Get the .fixed_opcode_counts.json config file path."""
+    return get_repo_root() / ".fixed_opcode_counts.json"
+
+
+class OpcodeExtractor(ast.NodeVisitor):
+    """Extract opcode parametrizations from benchmark test functions."""
+
+    def __init__(self, source_code: str):
+        self.source_code = source_code
+        self.patterns: list[str] = []
+
+    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
+        """Visit function definitions and extract opcode patterns."""
+        if not node.name.startswith("test_"):
+            return
+
+        # Check if function has benchmark_test parameter
+        if not self._has_benchmark_test_param(node):
+            return
+
+        # Filter for code generator usage (required for fixed-opcode-count mode)
+        if not self._uses_code_generator(node):
+            return
+
+        # Extract opcode parametrizations
+        test_name = node.name
+        opcodes = self._extract_opcodes(node)
+
+        if opcodes:
+            # Test parametrizes on opcodes - create pattern for each
+            for opcode in opcodes:
+                pattern = f"{test_name}.*{opcode}.*"
+                self.patterns.append(pattern)
+        else:
+            # Test doesn't parametrize on opcodes - use test name only
+            pattern = f"{test_name}.*"
+            self.patterns.append(pattern)
+
+    def _has_benchmark_test_param(self, node: ast.FunctionDef) -> bool:
+        """Check if function has benchmark_test parameter."""
+        return any(arg.arg == "benchmark_test" for arg in node.args.args)
+
+    def _uses_code_generator(self, node: ast.FunctionDef) -> bool:
+        """Check if function body uses code_generator parameter."""
+        func_start = node.lineno - 1
+        func_end = node.end_lineno
+        if func_end is None:
+            return False
+        func_source = "\n".join(
+            self.source_code.splitlines()[func_start:func_end]
+        )
+        return "code_generator=" in func_source
+
+    def _extract_opcodes(self, node: ast.FunctionDef) -> list[str]:
+        """Extract opcode values from @pytest.mark.parametrize decorators."""
+        opcodes: list[str] = []
+
+        for decorator in node.decorator_list:
+            if not self._is_parametrize_decorator(decorator):
+                continue
+
+            if not isinstance(decorator, ast.Call) or len(decorator.args) < 2:
+                continue
+
+            # Get parameter names (first arg)
+            param_names = decorator.args[0]
+            if isinstance(param_names, ast.Constant):
+                param_str = str(param_names.value).lower()
+            else:
+                continue
+
+            # Check if "opcode" is in parameter names
+            if "opcode" not in param_str:
+                continue
+
+            # Extract opcode values from second arg (the list)
+            param_values = decorator.args[1]
+            opcodes.extend(self._parse_opcode_values(param_values))
+
+        return opcodes
+
+    def _is_parametrize_decorator(self, decorator: ast.expr) -> bool:
+        """Check if decorator is @pytest.mark.parametrize."""
+        if isinstance(decorator, ast.Call):
+            if isinstance(decorator.func, ast.Attribute):
+                if (
+                    isinstance(decorator.func.value, ast.Attribute)
+                    and decorator.func.value.attr == "mark"
+                    and decorator.func.attr == "parametrize"
+                ):
+                    return True
+        return False
+
+    def _parse_opcode_values(self, values_node: ast.expr) -> list[str]:
+        """Parse opcode values from the parametrize list."""
+        opcodes: list[str] = []
+
+        if not isinstance(values_node, (ast.List, ast.Tuple)):
+            return opcodes
+
+        for element in values_node.elts:
+            opcode_name = self._extract_opcode_name(element)
+            if opcode_name:
+                opcodes.append(opcode_name)
+
+        return opcodes
+
+    def _extract_opcode_name(self, node: ast.expr) -> str | None:
+        """
+        Extract opcode name from various AST node types.
+
+        Supported patterns (opcode must be first element):
+
+        Case 1 - Direct opcode reference:
+            @pytest.mark.parametrize("opcode", [Op.ADD, Op.MUL])
+            Result: ["ADD", "MUL"]
+
+        Case 2a - pytest.param with direct opcode:
+            @pytest.mark.parametrize("opcode", [pytest.param(Op.ADD, id="add")])
+            Result: ["ADD"]
+
+        Case 2b - pytest.param with tuple (opcode first):
+            @pytest.mark.parametrize("opcode,arg", [pytest.param((Op.ADD, 123))])
+            Result: ["ADD"]
+
+        Case 3 - Plain tuple (opcode first):
+            @pytest.mark.parametrize("opcode,arg", [(Op.ADD, 123), (Op.MUL, 456)])
+            Result: ["ADD", "MUL"]
+        """
+        # Case 1: Direct opcode - Op.ADD
+        if isinstance(node, ast.Attribute):
+            return node.attr
+
+        # Case 2: pytest.param(Op.ADD, ...) or pytest.param((Op.ADD, x), ...)
+        if isinstance(node, ast.Call):
+            if len(node.args) > 0:
+                first_arg = node.args[0]
+                # Case 2a: pytest.param(Op.ADD, ...)
+                if isinstance(first_arg, ast.Attribute):
+                    return first_arg.attr
+                # Case 2b: pytest.param((Op.ADD, x), ...)
+                elif isinstance(first_arg, ast.Tuple) and first_arg.elts:
+                    first_elem = first_arg.elts[0]
+                    if isinstance(first_elem, ast.Attribute):
+                        return first_elem.attr
+
+        # Case 3: Plain tuple - (Op.ADD, args)
+        if isinstance(node, ast.Tuple) and node.elts:
+            first_elem = node.elts[0]
+            if isinstance(first_elem, ast.Attribute):
+                return first_elem.attr
+
+        return None
+
+
+def scan_benchmark_tests(
+    base_path: Path,
+) -> tuple[dict[str, list[int]], dict[str, Path]]:
+    """
+    Scan benchmark test files and extract opcode patterns.
+
+    Returns:
+        Tuple of (config, pattern_sources) where:
+        - config: mapping of pattern -> opcode counts
+        - pattern_sources: mapping of pattern -> source file path
+    """
+    config: dict[str, list[int]] = {}
+    pattern_sources: dict[str, Path] = {}
+    default_counts = [1]
+
+    test_files = [
+        f
+        for f in base_path.rglob("test_*.py")
+        if "configs" not in str(f) and "stateful" not in str(f)
+    ]
+
+    for test_file in test_files:
+        try:
+            source = test_file.read_text()
+            tree = ast.parse(source)
+
+            extractor = OpcodeExtractor(source)
+            extractor.visit(tree)
+
+            for pattern in extractor.patterns:
+                if pattern not in config:
+                    config[pattern] = default_counts
+                    pattern_sources[pattern] = test_file
+        except Exception as e:
+            print(f"Warning: Failed to parse {test_file}: {e}")
+            continue
+
+    return config, pattern_sources
+
+
+def load_existing_config(config_file: Path) -> dict[str, list[int]]:
+    """Load existing config from .fixed_opcode_counts.json."""
+    if not config_file.exists():
+        return {}
+
+    try:
+        data = json.loads(config_file.read_text())
+        return data.get("scenario_configs", {})
+    except (json.JSONDecodeError, KeyError):
+        return {}
+
+
+def categorize_patterns(
+    config: dict[str, list[int]], pattern_sources: dict[str, Path]
+) -> dict[str, list[str]]:
+    """
+    Categorize patterns by deriving category from source file name.
+
+    Example: test_arithmetic.py -> ARITHMETIC
+    """
+    categories: dict[str, list[str]] = {}
+
+    for pattern in config.keys():
+        if pattern in pattern_sources:
+            source_file = pattern_sources[pattern]
+            file_name = source_file.stem
+            if file_name.startswith("test_"):
+                category = file_name[5:].upper()  # Remove "test_" prefix
+            else:
+                category = "OTHER"
+        else:
+            category = "OTHER"
+
+        if category not in categories:
+            categories[category] = []
+        categories[category].append(pattern)
+
+    return {k: sorted(v) for k, v in sorted(categories.items())}
+
+
+def generate_config_json(
+    config: dict[str, list[int]],
+    pattern_sources: dict[str, Path],
+) -> str:
+    """Generate the JSON config file content."""
+    categories = categorize_patterns(config, pattern_sources)
+
+    scenario_configs: dict[str, list[int]] = {}
+    for _, patterns in categories.items():
+        for pattern in patterns:
+            scenario_configs[pattern] = config[pattern]
+
+    output = {"scenario_configs": scenario_configs}
+
+    return json.dumps(output, indent=2) + "\n"
+
+
+def main() -> int:
+    """Main entry point."""
+    parser = argparse.ArgumentParser(
+        description="Analyze benchmark tests and maintain opcode count mapping"
+    )
+    parser.add_argument(
+        "--check",
+        action="store_true",
+        help="Check for new/missing entries (CI mode, exits 1 if out of sync)",
+    )
+    args = parser.parse_args()
+
+    try:
+        benchmark_dir = get_benchmark_dir()
+        config_file = get_config_file()
+    except FileNotFoundError as e:
+        print(f"Error: {e}", file=sys.stderr)
+        return 1
+
+    print(f"Scanning benchmark tests in {benchmark_dir}...")
+    detected, pattern_sources = scan_benchmark_tests(benchmark_dir)
+    print(f"Detected {len(detected)} opcode patterns")
+
+    existing = load_existing_config(config_file)
+    print(f"Loaded {len(existing)} existing entries")
+
+    detected_keys = set(detected.keys())
+    existing_keys = set(existing.keys())
+    new_patterns = sorted(detected_keys - existing_keys)
+    obsolete_patterns = sorted(existing_keys - detected_keys)
+
+    merged = detected.copy()
+    for pattern, counts in existing.items():
+        if pattern in detected_keys:
+            merged[pattern] = counts
+
+    print("\n" + "=" * 60)
+    print(f"Detected {len(detected)} patterns in tests")
tests") + print(f"Existing entries: {len(existing)}") + + if new_patterns: + print(f"\n+ Found {len(new_patterns)} NEW patterns:") + for p in new_patterns[:15]: + print(f" {p}") + if len(new_patterns) > 15: + print(f" ... and {len(new_patterns) - 15} more") + + if obsolete_patterns: + print(f"\n- Found {len(obsolete_patterns)} OBSOLETE patterns:") + for p in obsolete_patterns[:15]: + print(f" {p}") + if len(obsolete_patterns) > 15: + print(f" ... and {len(obsolete_patterns) - 15} more") + + if not new_patterns and not obsolete_patterns: + print("\nConfiguration is up to date!") + + print("=" * 60) + + if args.check: + if new_patterns or obsolete_patterns: + print("\nRun 'uv run benchmark_parser' (without --check) to sync.") + return 1 + return 0 + + for pattern in obsolete_patterns: + print(f"Removing obsolete: {pattern}") + if pattern in merged: + del merged[pattern] + + content = generate_config_json(merged, pattern_sources) + config_file.write_text(content) + print(f"\nUpdated {config_file}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/tests/test_benchmarking.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/tests/test_benchmarking.py index cf4f553780..82077315fd 100644 --- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/tests/test_benchmarking.py +++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/tests/test_benchmarking.py @@ -2,6 +2,7 @@ import textwrap from pathlib import Path +from typing import List import pytest @@ -30,6 +31,25 @@ def test_dummy_no_benchmark_test(state_test) -> None: """ ) +test_module_with_repricing = textwrap.dedent( + """\ + import pytest + + from execution_testing import Environment + + @pytest.mark.valid_at("Prague") + @pytest.mark.benchmark + @pytest.mark.repricing + def test_benchmark_with_repricing(state_test, gas_benchmark_value) -> None: + state_test(env=env, pre={}, post={}, tx=None) + + @pytest.mark.valid_at("Prague") + @pytest.mark.benchmark + def test_benchmark_without_repricing(state_test, gas_benchmark_value) -> None: + state_test(env=env, pre={}, post={}, tx=None) + """ +) + def setup_test_directory_structure( pytester: pytest.Pytester, test_content: str, test_filename: str @@ -72,7 +92,7 @@ def test_gas_benchmark_option_added(pytester: pytest.Pytester) -> None: assert result.ret == 0 assert any("--gas-benchmark-values" in line for line in result.outlines) assert any( - "Specify gas benchmark values for tests" in line + "Gas limits (in millions) for benchmark tests" in line for line in result.outlines ) @@ -143,3 +163,60 @@ def test_benchmarking_mode_not_configured_without_option( assert not any( "benchmark-gas-value_30M" in line for line in result.outlines ) + + +@pytest.mark.parametrize( + "benchmark_option,benchmark_args", + [ + pytest.param( + "--gas-benchmark-values", + ["10"], + id="gas-benchmark-values", + ), + pytest.param( + "--fixed-opcode-count", + ["1"], + id="fixed-opcode-count", + ), + ], +) +def test_repricing_marker_filter_with_benchmark_options( + pytester: pytest.Pytester, + benchmark_option: str, + benchmark_args: List[str], +) -> None: + """ + Test that -m repricing filter works with both --gas-benchmark-values and + --fixed-opcode-count options. + + When -m repricing is specified along with a benchmark option, only tests + with the repricing marker should be collected. 
+ """ + setup_test_directory_structure( + pytester, test_module_with_repricing, "test_repricing_filter.py" + ) + + # Test with -m repricing filter - should only collect repricing-marked tests + result = pytester.runpytest( + "-c", + "pytest-fill.ini", + "--fork", + "Prague", + benchmark_option, + *benchmark_args, + "-m", + "repricing", + "tests/istanbul/dummy_test_module/", + "--collect-only", + "-q", + ) + + assert result.ret == 0 + # The repricing test should be collected + assert any( + "test_benchmark_with_repricing" in line for line in result.outlines + ) + # The non-repricing test should NOT be collected + assert not any( + "test_benchmark_without_repricing" in line for line in result.outlines + ) diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/help/help.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/help/help.py index 7fd815f3da..33d0d9c939 100644 --- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/help/help.py +++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/help/help.py @@ -92,6 +92,7 @@ def pytest_configure(config: pytest.Config) -> None: "pre-allocation behavior during test filling", "ported", "witness", + "benchmark", ], ) elif config.getoption("show_consume_help"): @@ -113,6 +114,7 @@ def pytest_configure(config: pytest.Config) -> None: "sender key fixtures", "remote seed sender", "chain configuration", + "benchmark", ], ) elif config.getoption("show_execute_hive_help"): @@ -126,6 +128,7 @@ def pytest_configure(config: pytest.Config) -> None: "sender key fixtures", "remote seed sender", "chain configuration", + "benchmark", ], ) elif config.getoption("show_execute_recover_help"): diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py index d5c1e62883..acc29794ae 100644 --- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py +++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py @@ -1,5 +1,11 @@ """The module contains the pytest hooks for the gas benchmark values.""" +import json +import re +import warnings +from pathlib import Path +from typing import Any + import pytest from execution_testing.test_types import Environment, EnvironmentDefaults @@ -8,25 +14,36 @@ def pytest_addoption(parser: pytest.Parser) -> None: - """Add command line options for gas benchmark values.""" - evm_group = parser.getgroup( - "evm", "Arguments defining evm executable behavior" + """Add command line options for benchmark tests.""" + benchmark_group = parser.getgroup( + "benchmarking", "Arguments for benchmark test execution" ) - evm_group.addoption( + benchmark_group.addoption( "--gas-benchmark-values", action="store", dest="gas_benchmark_value", type=str, default=None, - help="Specify gas benchmark values for tests as a comma-separated list.", + help=( + "Gas limits (in millions) for benchmark tests. " + "Example: '100,500' runs tests with 100M and 500M gas. " + "Cannot be used with --fixed-opcode-count." + ), ) - evm_group.addoption( + benchmark_group.addoption( "--fixed-opcode-count", action="store", dest="fixed_opcode_count", type=str, default=None, - help="Specify fixed opcode counts (in thousands) for benchmark tests as a comma-separated list.", + nargs="?", + const="", + help=( + "Opcode counts (in thousands) for benchmark tests. 
" + "Example: '1,10,100' runs tests with 1K, 10K, 100K opcodes. " + "Without value, uses .fixed_opcode_counts.json config. " + "Cannot be used with --gas-benchmark-values." + ), ) @@ -41,16 +58,81 @@ def pytest_configure(config: pytest.Config) -> None: config.op_mode = OpMode.BENCHMARKING # type: ignore[attr-defined] +def load_opcode_counts_config( + config: pytest.Config, +) -> dict[str, Any] | None: + """ + Load the opcode counts configuration from `.fixed_opcode_counts.json`. + + Returns dictionary with scenario_configs and default_counts, or None + if not found. + """ + config_file = Path(config.rootpath) / ".fixed_opcode_counts.json" + + if not config_file.exists(): + return None + + try: + data = json.loads(config_file.read_text()) + return { + "scenario_configs": data.get("scenario_configs", {}), + "default_counts": [1], + } + except (json.JSONDecodeError, KeyError): + return None + + +def get_opcode_counts_for_test( + test_name: str, + scenario_configs: dict[str, list[int]], + default_counts: list[int], +) -> list[int]: + """ + Get opcode counts for a test using regex pattern matching. + """ + # Try exact match first (faster) + if test_name in scenario_configs: + return scenario_configs[test_name] + + # Try regex patterns + for pattern, counts in scenario_configs.items(): + if pattern == test_name: + continue + try: + if re.search(pattern, test_name): + return counts + except re.error: + continue + + return default_counts + + def pytest_collection_modifyitems( config: pytest.Config, items: list[pytest.Item] ) -> None: - """Filter tests based on repricing marker""" + """Filter tests based on repricing marker when benchmark options are used.""" gas_benchmark_value = config.getoption("gas_benchmark_value") fixed_opcode_count = config.getoption("fixed_opcode_count") - if not gas_benchmark_value and not fixed_opcode_count: + # Only filter if either benchmark option is provided + if not gas_benchmark_value and fixed_opcode_count is None: return + # Load config data if --fixed-opcode-count flag provided without value + if fixed_opcode_count == "": + config_data = load_opcode_counts_config(config) + if config_data: + config._opcode_counts_config = config_data # type: ignore[attr-defined] + else: + warnings.warn( + "--fixed-opcode-count was provided without a value, but " + ".fixed_opcode_counts.json was not found. " + "Run 'uv run benchmark_parser' to generate it, or provide " + "explicit values (e.g., --fixed-opcode-count 1,10,100).", + UserWarning, + stacklevel=1, + ) + # Check if -m repricing marker filter was specified markexpr = config.getoption("markexpr", "") if "repricing" not in markexpr or "not repricing" in markexpr: @@ -85,10 +167,10 @@ def pytest_collection_modifyitems( def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: """Generate tests for the gas benchmark values and fixed opcode counts.""" gas_benchmark_values = metafunc.config.getoption("gas_benchmark_value") - fixed_opcode_counts = metafunc.config.getoption("fixed_opcode_count") + fixed_opcode_counts_cli = metafunc.config.getoption("fixed_opcode_count") # Ensure mutual exclusivity - if gas_benchmark_values and fixed_opcode_counts: + if gas_benchmark_values and fixed_opcode_counts_cli: raise pytest.UsageError( "--gas-benchmark-values and --fixed-opcode-count are mutually exclusive. " "Use only one at a time." 
@@ -111,16 +193,47 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: ) if "fixed_opcode_count" in metafunc.fixturenames: - if fixed_opcode_counts: - opcode_counts = [ - int(x.strip()) for x in fixed_opcode_counts.split(",") + # Parametrize for any benchmark test when --fixed-opcode-count is provided + if fixed_opcode_counts_cli is None: + return + + opcode_counts_to_use = None + + if fixed_opcode_counts_cli: + # CLI flag with value takes precedence + opcode_counts_to_use = [ + int(x.strip()) for x in fixed_opcode_counts_cli.split(",") ] + else: + # Flag provided without value - load from config file + # Check if config data was already loaded in pytest_collection_modifyitems + config_data = getattr( + metafunc.config, "_opcode_counts_config", None + ) + + # If not loaded yet (pytest_generate_tests runs first), load it now + if config_data is None: + config_data = load_opcode_counts_config(metafunc.config) + if config_data: + metafunc.config._opcode_counts_config = config_data # type: ignore[attr-defined] + + if config_data: + # Look up opcode counts using regex pattern matching + test_name = metafunc.function.__name__ + opcode_counts_to_use = get_opcode_counts_for_test( + test_name, + config_data.get("scenario_configs", {}), + config_data.get("default_counts", [1]), + ) + + # Parametrize if we have counts to use + if opcode_counts_to_use: opcode_count_parameters = [ pytest.param( opcode_count, id=f"opcount_{opcode_count}K", ) - for opcode_count in opcode_counts + for opcode_count in opcode_counts_to_use ] metafunc.parametrize( "fixed_opcode_count", @@ -135,8 +248,9 @@ def gas_benchmark_value(request: pytest.FixtureRequest) -> int: if hasattr(request, "param"): return request.param - # If --fixed-opcode-count is specified, use high gas limit to avoid gas constraints - if request.config.getoption("fixed_opcode_count"): + # Only use high gas limit if --fixed-opcode-count flag was provided + fixed_opcode_count = request.config.getoption("fixed_opcode_count") + if fixed_opcode_count is not None: return HIGH_GAS_LIMIT return EnvironmentDefaults.gas_limit diff --git a/tests/benchmark/compute/instruction/test_arithmetic.py b/tests/benchmark/compute/instruction/test_arithmetic.py index 60797c824f..c7a517ca48 100644 --- a/tests/benchmark/compute/instruction/test_arithmetic.py +++ b/tests/benchmark/compute/instruction/test_arithmetic.py @@ -160,11 +160,11 @@ def test_arithmetic( @pytest.mark.repricing(mod_bits=255) @pytest.mark.parametrize("mod_bits", [255, 191, 127, 63]) -@pytest.mark.parametrize("op", [Op.MOD, Op.SMOD]) +@pytest.mark.parametrize("opcode", [Op.MOD, Op.SMOD]) def test_mod( benchmark_test: BenchmarkTestFiller, mod_bits: int, - op: Op, + opcode: Op, ) -> None: """ Benchmark MOD instructions. @@ -185,7 +185,7 @@ def test_mod( # just the SMOD implementation will have to additionally handle the # sign bits. # The result stays negative. - should_negate = op == Op.SMOD + should_negate = opcode == Op.SMOD num_numerators = 15 numerator_bits = 256 if not should_negate else 255 @@ -200,7 +200,7 @@ def test_mod( # Select the random seed giving the longest found MOD chain. You can look # for a longer one by increasing the numerators_min_len. This will activate # the while loop below. 
- match op, mod_bits: + match opcode, mod_bits: case Op.MOD, 255: seed = 20393 numerators_min_len = 750 @@ -226,7 +226,7 @@ def test_mod( seed = 7562 numerators_min_len = 720 case _: - raise ValueError(f"{mod_bits}-bit {op} not supported.") + raise ValueError(f"{mod_bits}-bit {opcode} not supported.") while True: rng = random.Random(seed) @@ -263,7 +263,7 @@ def test_mod( setup = sum((Op.PUSH32[n] for n in numerators), Bytecode()) attack_block = ( Op.CALLDATALOAD(0) - + sum(make_dup(len(numerators) - i) + op for i in indexes) + + sum(make_dup(len(numerators) - i) + opcode for i in indexes) + Op.POP ) @@ -279,13 +279,13 @@ def test_mod( @pytest.mark.repricing(mod_bits=255) @pytest.mark.parametrize("mod_bits", [255, 191, 127, 63]) -@pytest.mark.parametrize("op", [Op.ADDMOD, Op.MULMOD]) +@pytest.mark.parametrize("opcode", [Op.ADDMOD, Op.MULMOD]) def test_mod_arithmetic( benchmark_test: BenchmarkTestFiller, pre: Alloc, fork: Fork, mod_bits: int, - op: Op, + opcode: Op, gas_benchmark_value: int, ) -> None: """ @@ -316,7 +316,7 @@ def test_mod_arithmetic( # for a longer one by increasing the op_chain_len. This will activate the # while loop below. op_chain_len = 666 - match op, mod_bits: + match opcode, mod_bits: case Op.ADDMOD, 255: seed = 4 case Op.ADDMOD, 191: @@ -337,7 +337,7 @@ def test_mod_arithmetic( seed = 4193 op_chain_len = 600 case _: - raise ValueError(f"{mod_bits}-bit {op} not supported.") + raise ValueError(f"{mod_bits}-bit {opcode} not supported.") while True: rng = random.Random(seed) @@ -345,7 +345,7 @@ def test_mod_arithmetic( initial_mod = rng.randint(2 ** (mod_bits - 1), 2**mod_bits - 1) # Evaluate the op chain and collect the order of accessing numerators. - op_fn = operator.add if op == Op.ADDMOD else operator.mul + op_fn = operator.add if opcode == Op.ADDMOD else operator.mul mod = initial_mod indexes: list[int] = [] while mod >= mod_min and len(indexes) < op_chain_len: @@ -366,7 +366,7 @@ def test_mod_arithmetic( code_segment = ( Op.CALLDATALOAD(0) + sum( - make_dup(len(args) - i) + Op.PUSH32[fixed_arg] + op + make_dup(len(args) - i) + Op.PUSH32[fixed_arg] + opcode for i in indexes ) + Op.POP
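Editor's note (not part of the patch): a minimal Python sketch of the data flow this PR introduces, shown outside the diff so the patch itself is unchanged. The dictionary below illustrates the shape of the `scenario_configs` mapping that `benchmark_parser` writes to `.fixed_opcode_counts.json` (pattern keys of the form `test_name.*OPCODE.*`, counts in thousands of opcodes, defaulting to `[1]`); the `lookup` function mirrors, in simplified form, the exact-match/regex/default resolution performed by `get_opcode_counts_for_test()` in the benchmarking plugin. The test names passed to `lookup` are made up for illustration.

# Illustrative sketch only; mirrors the plugin's lookup logic rather than importing it.
import json
import re

# Approximate shape of .fixed_opcode_counts.json after `uv run benchmark_parser`,
# with patterns as the parser would derive them from test_arithmetic.py.
config = {
    "scenario_configs": {
        "test_mod.*MOD.*": [1],
        "test_mod.*SMOD.*": [1],
        "test_mod_arithmetic.*ADDMOD.*": [1],
        "test_mod_arithmetic.*MULMOD.*": [1],
    }
}
print(json.dumps(config, indent=2))


def lookup(test_name: str, scenario_configs: dict, default: list) -> list:
    """Exact key match first, then regex search, then the default counts."""
    if test_name in scenario_configs:
        return scenario_configs[test_name]
    for pattern, counts in scenario_configs.items():
        if re.search(pattern, test_name):
            return counts
    return default


# A made-up name containing an opcode matches one of the test_mod patterns...
print(lookup("test_mod_SMOD_255", config["scenario_configs"], [1]))
# ...while an unknown test falls back to the default of 1K opcodes.
print(lookup("test_unknown", config["scenario_configs"], [1]))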