diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/execute/execute.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/execute/execute.py
index 4b08311533..12696a4870 100644
--- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/execute/execute.py
+++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/execute/execute.py
@@ -372,6 +372,8 @@ def base_test_parametrizer_func(
     eth_rpc: EthRPC,
     engine_rpc: EngineRPC | None,
     collector: Collector,
+    gas_benchmark_value: int,
+    fixed_opcode_count: int | None,
 ) -> Type[BaseTest]:
     """
     Fixture used to instantiate an auto-fillable BaseTest object from
@@ -404,9 +406,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
             )
             # Set default for expected_benchmark_gas_used
             if "expected_benchmark_gas_used" not in kwargs:
-                kwargs["expected_benchmark_gas_used"] = (
-                    request.getfixturevalue("gas_benchmark_value")
-                )
+                kwargs["expected_benchmark_gas_used"] = gas_benchmark_value
             kwargs["fork"] = fork
             kwargs |= {
                 p: request.getfixturevalue(p)
diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/filler.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/filler.py
index dfde4d1304..341ee2e3b3 100644
--- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/filler.py
+++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/filler/filler.py
@@ -1321,6 +1321,7 @@ def base_test_parametrizer_func(
     test_case_description: str,
     fixture_source_url: str,
     gas_benchmark_value: int,
+    fixed_opcode_count: int | None,
     witness_generator: Any,
 ) -> Any:
     """
diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py
index 6680ff969a..f7f071d488 100644
--- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py
+++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/benchmarking.py
@@ -20,19 +20,75 @@ def pytest_addoption(parser: pytest.Parser) -> None:
         default=None,
         help="Specify gas benchmark values for tests as a comma-separated list.",
     )
+    evm_group.addoption(
+        "--fixed-opcode-count",
+        action="store",
+        dest="fixed_opcode_count",
+        type=str,
+        default=None,
+        help="Specify fixed opcode counts (in thousands) for benchmark tests as a comma-separated list.",
+    )
 
 
 @pytest.hookimpl(tryfirst=True)
 def pytest_configure(config: pytest.Config) -> None:
     """Configure the fill and execute mode to benchmarking."""
+    config.addinivalue_line(
+        "markers",
+        "repricing: Mark test as reference test for gas repricing analysis",
+    )
     if config.getoption("gas_benchmark_value"):
         config.op_mode = OpMode.BENCHMARKING  # type: ignore[attr-defined]
 
 
+def pytest_collection_modifyitems(
+    config: pytest.Config, items: list[pytest.Item]
+) -> None:
+    """Remove non-repricing tests when --fixed-opcode-count is specified."""
+    fixed_opcode_count = config.getoption("fixed_opcode_count")
+    if not fixed_opcode_count:
+        # If --fixed-opcode-count is not specified, don't filter anything
+        return
+
+    filtered = []
+    for item in items:
+        if not item.get_closest_marker("benchmark"):
+            continue
+
+        repricing_marker = item.get_closest_marker("repricing")
+        if not repricing_marker:
+            continue
+
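+        # A repricing marker with no kwargs keeps every parametrization of
+        # the test; kwargs restrict it to the single named combination.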
+        if not repricing_marker.kwargs:
+            filtered.append(item)
+            continue
+
+        if hasattr(item, "callspec") and all(
+            item.callspec.params.get(key) == value
+            for key, value in repricing_marker.kwargs.items()
+        ):
+            filtered.append(item)
+
+    items[:] = filtered
+
+
 def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
-    """Generate tests for the gas benchmark values."""
+    """Generate tests for the gas benchmark values and fixed opcode counts."""
+    gas_benchmark_values = metafunc.config.getoption("gas_benchmark_value")
+    fixed_opcode_counts = metafunc.config.getoption("fixed_opcode_count")
+
+    # Ensure mutual exclusivity
+    if gas_benchmark_values and fixed_opcode_counts:
+        raise pytest.UsageError(
+            "--gas-benchmark-values and --fixed-opcode-count are mutually exclusive. "
+            "Use only one at a time."
+        )
+
     if "gas_benchmark_value" in metafunc.fixturenames:
-        gas_benchmark_values = metafunc.config.getoption("gas_benchmark_value")
         if gas_benchmark_values:
             gas_values = [
                 int(x.strip()) for x in gas_benchmark_values.split(",")
@@ -48,6 +104,29 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
                 "gas_benchmark_value", gas_parameters, scope="function"
             )
 
+    if "fixed_opcode_count" in metafunc.fixturenames:
+        # Only parametrize if test has repricing marker
+        has_repricing = (
+            metafunc.definition.get_closest_marker("repricing") is not None
+        )
+        if has_repricing:
+            if fixed_opcode_counts:
+                opcode_counts = [
+                    int(x.strip()) for x in fixed_opcode_counts.split(",")
+                ]
+                opcode_count_parameters = [
+                    pytest.param(
+                        opcode_count,
+                        id=f"opcount_{opcode_count}K",
+                    )
+                    for opcode_count in opcode_counts
+                ]
+                metafunc.parametrize(
+                    "fixed_opcode_count",
+                    opcode_count_parameters,
+                    scope="function",
+                )
+
 
 @pytest.fixture(scope="function")
 def gas_benchmark_value(request: pytest.FixtureRequest) -> int:
@@ -55,10 +134,24 @@ def gas_benchmark_value(request: pytest.FixtureRequest) -> int:
     if hasattr(request, "param"):
         return request.param
 
+    # If --fixed-opcode-count is specified, use high gas limit to avoid gas constraints
+    if request.config.getoption("fixed_opcode_count"):
+        return HIGH_GAS_LIMIT
+
     return EnvironmentDefaults.gas_limit
 
 
+@pytest.fixture(scope="function")
+def fixed_opcode_count(request: pytest.FixtureRequest) -> int | None:
+    """Return a fixed opcode count for the current test, or None if not set."""
+    if hasattr(request, "param"):
+        return request.param
+
+    return None
+
+
 BENCHMARKING_MAX_GAS = 1_000_000_000_000
+HIGH_GAS_LIMIT = 1_000_000_000
 
 
 @pytest.fixture
diff --git a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/execute_fill.py b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/execute_fill.py
index fc4ab0101e..88a329944f 100644
--- a/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/execute_fill.py
+++ b/packages/testing/src/execution_testing/cli/pytest_commands/plugins/shared/execute_fill.py
@@ -19,6 +19,7 @@
 ALL_FIXTURE_PARAMETERS = {
     "gas_benchmark_value",
+    "fixed_opcode_count",
     "genesis_environment",
     "env",
 }
diff --git a/packages/testing/src/execution_testing/specs/benchmark.py b/packages/testing/src/execution_testing/specs/benchmark.py
index 00078d6866..4d1f4d7f42 100644
--- a/packages/testing/src/execution_testing/specs/benchmark.py
+++ b/packages/testing/src/execution_testing/specs/benchmark.py
@@ -53,6 +53,7 @@ class BenchmarkCodeGenerator(ABC):
     setup: Bytecode = field(default_factory=Bytecode)
    cleanup: Bytecode = field(default_factory=Bytecode)
     tx_kwargs: Dict[str, Any] = field(default_factory=dict)
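+    # When set, generators emit a bounded number of attack-block repetitions
+    # driven by an outer loop instead of sizing the workload by the gas limit.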
+    fixed_opcode_count: int | None = None
     code_padding_opcode: Op | None = None
 
     _contract_address: Address | None = None
@@ -61,6 +62,50 @@ def deploy_contracts(self, *, pre: Alloc, fork: Fork) -> Address:
         """Deploy any contracts needed for the benchmark."""
         ...
 
+    def deploy_fix_count_contracts(self, *, pre: Alloc, fork: Fork) -> Address:
+        """Deploy the contracts used for a fixed-opcode-count benchmark."""
+        code = self.generate_repeated_code(
+            repeated_code=self.attack_block,
+            setup=self.setup,
+            cleanup=self.cleanup,
+            fork=fork,
+        )
+        self._target_contract_address = pre.deploy_contract(code=code)
+
+        iterations = self.fixed_opcode_count
+        assert iterations is not None, "fixed_opcode_count is not set"
+
+        prefix = Op.CALLDATACOPY(
+            Op.PUSH0, Op.PUSH0, Op.CALLDATASIZE
+        ) + Op.PUSH4(iterations)
+        opcode = (
+            prefix
+            + Op.JUMPDEST
+            + Op.POP(
+                Op.STATICCALL(
+                    gas=Op.GAS,
+                    address=self._target_contract_address,
+                    args_offset=0,
+                    args_size=Op.CALLDATASIZE,
+                    ret_offset=0,
+                    ret_size=0,
+                )
+            )
+            + Op.PUSH1(1)
+            + Op.SWAP1
+            + Op.SUB
+            + Op.DUP1
+            + Op.ISZERO
+            + Op.ISZERO
+            + Op.PUSH1(len(prefix))
+            + Op.JUMPI
+            + Op.STOP
+        )
+        self._validate_code_size(opcode, fork)
+
+        self._contract_address = pre.deploy_contract(code=opcode)
+        return self._contract_address
+
     def generate_transaction(
         self, *, pre: Alloc, gas_benchmark_value: int
     ) -> Transaction:
@@ -102,9 +147,18 @@ def generate_repeated_code(
         available_space = max_code_size - overhead
         max_iterations = available_space // len(repeated_code)
 
+        # In fixed-opcode-count mode, cap the inlined repetitions at 1,000 so
+        # that each call from the outer driver contract executes a known
+        # number of attack-block iterations.
+        if self.fixed_opcode_count is not None:
+            max_iterations = min(max_iterations, 1000)
+
         # TODO: Unify the PUSH0 and PUSH1 usage.
-        code = setup + Op.JUMPDEST + repeated_code * max_iterations + cleanup
-        code += Op.JUMP(len(setup)) if len(setup) > 0 else Op.PUSH0 + Op.JUMP
+        code = setup + Op.JUMPDEST + repeated_code * max_iterations
+        if self.fixed_opcode_count is None:
+            code += cleanup + (
+                Op.JUMP(len(setup)) if len(setup) > 0 else Op.PUSH0 + Op.JUMP
+            )
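+        # In fixed-opcode-count mode there is no cleanup or back-jump: the
+        # outer driver contract built by deploy_fix_count_contracts performs
+        # the looping, so this contract simply falls through after its
+        # repetitions.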
 
         # Pad the code to the maximum code size.
         if self.code_padding_opcode is not None:
             code += self.code_padding_opcode * (max_code_size - len(code))
@@ -142,6 +196,7 @@ class BenchmarkTest(BaseTest):
     gas_benchmark_value: int = Field(
         default_factory=lambda: int(Environment().gas_limit)
     )
+    fixed_opcode_count: int | None = None
     code_generator: BenchmarkCodeGenerator | None = None
 
     supported_fixture_formats: ClassVar[
@@ -163,6 +218,7 @@ class BenchmarkTest(BaseTest):
     supported_markers: ClassVar[Dict[str, str]] = {
         "blockchain_test_engine_only": "Only generate a blockchain test engine fixture",
         "blockchain_test_only": "Only generate a blockchain test fixture",
+        "repricing": "Mark test as reference test for gas repricing analysis",
     }
 
     def model_post_init(self, __context: Any, /) -> None:
@@ -193,7 +249,18 @@ def model_post_init(self, __context: Any, /) -> None:
         blocks: List[Block] = self.setup_blocks
 
         if self.code_generator is not None:
-            generated_blocks = self.generate_blocks_from_code_generator()
+            # Inject fixed_opcode_count into the code generator if provided
+            self.code_generator.fixed_opcode_count = self.fixed_opcode_count
+
+            # In fixed opcode count mode, skip gas validation since we're
+            # measuring performance by operation count, not gas usage
+            if self.fixed_opcode_count is not None:
+                self.skip_gas_used_validation = True
+                generated_blocks = (
+                    self.generate_fixed_opcode_count_transactions()
+                )
+            else:
+                generated_blocks = self.generate_blocks_from_code_generator()
             blocks += generated_blocks
 
         elif self.blocks is not None:
@@ -294,6 +361,22 @@ def generate_blocks_from_code_generator(self) -> List[Block]:
 
         return [execution_block]
 
+    def generate_fixed_opcode_count_transactions(self) -> List[Block]:
+        """Generate transactions with a fixed opcode count."""
+        if self.code_generator is None:
+            raise Exception("Code generator is not set")
+        self.code_generator.deploy_fix_count_contracts(
+            pre=self.pre, fork=self.fork
+        )
+        gas_limit = (
+            self.fork.transaction_gas_limit_cap() or self.gas_benchmark_value
+        )
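+        # The transaction gas limit only needs to be generous: the work done
+        # is bounded by the fixed opcode count, and gas-used validation is
+        # skipped for this mode in model_post_init.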
+        benchmark_tx = self.code_generator.generate_transaction(
+            pre=self.pre, gas_benchmark_value=gas_limit
+        )
+        execution_block = Block(txs=[benchmark_tx])
+        return [execution_block]
+
     def generate_blockchain_test(self) -> BlockchainTest:
         """Create a BlockchainTest from this BenchmarkTest."""
         return BlockchainTest.from_test(
diff --git a/tests/benchmark/compute/instruction/test_account_query.py b/tests/benchmark/compute/instruction/test_account_query.py
index 5d008cf9b8..fe278de97a 100644
--- a/tests/benchmark/compute/instruction/test_account_query.py
+++ b/tests/benchmark/compute/instruction/test_account_query.py
@@ -40,6 +40,7 @@
 )
 
 
+@pytest.mark.repricing(contract_balance=0)
 @pytest.mark.parametrize("contract_balance", [0, 1])
 def test_selfbalance(
     benchmark_test: BenchmarkTestFiller,
@@ -54,6 +55,7 @@
     )
 
 
+@pytest.mark.repricing
 def test_codesize(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
@@ -345,6 +347,7 @@ def test_extcodecopy_warm(
     benchmark_test(tx=tx)
 
 
+@pytest.mark.repricing(absent_target=False)
 @pytest.mark.parametrize(
     "opcode",
     [
diff --git a/tests/benchmark/compute/instruction/test_arithmetic.py b/tests/benchmark/compute/instruction/test_arithmetic.py
index d3e81899d0..60797c824f 100644
--- a/tests/benchmark/compute/instruction/test_arithmetic.py
+++ b/tests/benchmark/compute/instruction/test_arithmetic.py
@@ -36,20 +36,23 @@
 @pytest.mark.parametrize(
     "opcode,opcode_args",
     [
-        (
+        pytest.param(
             Op.ADD,
             DEFAULT_BINOP_ARGS,
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             Op.MUL,
             DEFAULT_BINOP_ARGS,
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             # After every 2 SUB operations, values return to initial.
             Op.SUB,
             DEFAULT_BINOP_ARGS,
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             # This has the cycle of 2:
             # v[0] = a // b
             # v[1] = a // v[0] = a // (a // b) = b
@@ -62,8 +65,9 @@
                 # optimized paths for division by 1 and 2 words.
                 0x100000000000000000000000000000033,
             ),
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             # This has the cycle of 2, see above.
             Op.DIV,
             (
@@ -74,7 +78,7 @@
                 0x10000000000000033,
             ),
         ),
-        (
+        pytest.param(
             # Same as DIV-0
             # But the numerator made positive, and the divisor made negative.
             Op.SDIV,
             (
                 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
                 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFCD,
             ),
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             # Same as DIV-1
             # But the numerator made positive, and the divisor made negative.
             Op.SDIV,
             (
@@ -92,17 +97,19 @@
                 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFCD,
             ),
         ),
-        (
+        pytest.param(
             # Not suitable for MOD, as values quickly become zero.
             Op.MOD,
             DEFAULT_BINOP_ARGS,
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             # Not suitable for SMOD, as values quickly become zero.
             Op.SMOD,
             DEFAULT_BINOP_ARGS,
+            marks=pytest.mark.repricing,
        ),
-        (
+        pytest.param(
             # This keeps the values unchanged
             # pow(2**256-1, 2**256-1, 2**256) == 2**256-1.
             Op.EXP,
             (
@@ -110,14 +117,16 @@
                 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
                 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
             ),
+            marks=pytest.mark.repricing,
         ),
-        (
+        pytest.param(
             # Not great, as we always sign-extend the 4 bytes.
             Op.SIGNEXTEND,
             (
                 3,
                 0xFFDADADA,  # Negative to have more work.
             ),
+            marks=pytest.mark.repricing,
         ),
     ],
     ids=lambda param: "" if isinstance(param, tuple) else param,
 )
@@ -149,6 +158,7 @@ def test_arithmetic(
     )
 
 
+@pytest.mark.repricing(mod_bits=255)
 @pytest.mark.parametrize("mod_bits", [255, 191, 127, 63])
 @pytest.mark.parametrize("op", [Op.MOD, Op.SMOD])
 def test_mod(
@@ -267,6 +277,7 @@ def test_mod(
     )
 
 
+@pytest.mark.repricing(mod_bits=255)
 @pytest.mark.parametrize("mod_bits", [255, 191, 127, 63])
 @pytest.mark.parametrize("op", [Op.ADDMOD, Op.MULMOD])
 def test_mod_arithmetic(
diff --git a/tests/benchmark/compute/instruction/test_bitwise.py b/tests/benchmark/compute/instruction/test_bitwise.py
index 2cdb6f96b0..01750454ae 100644
--- a/tests/benchmark/compute/instruction/test_bitwise.py
+++ b/tests/benchmark/compute/instruction/test_bitwise.py
@@ -36,6 +36,7 @@
 )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode,opcode_args",
     [
@@ -109,6 +110,7 @@ def test_bitwise(
     )
 
 
+@pytest.mark.repricing
 def test_not_op(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
@@ -193,6 +195,7 @@ def select_shift_amount(
     benchmark_test(tx=tx)
 
 
+@pytest.mark.repricing
 @pytest.mark.valid_from("Osaka")
 def test_clz_same(benchmark_test: BenchmarkTestFiller) -> None:
     """Benchmark CLZ instruction with same input."""
diff --git a/tests/benchmark/compute/instruction/test_block_context.py b/tests/benchmark/compute/instruction/test_block_context.py
index 6c02ac1459..ebb56e4f3a 100644
--- a/tests/benchmark/compute/instruction/test_block_context.py
+++ b/tests/benchmark/compute/instruction/test_block_context.py
@@ -22,6 +22,7 @@
 )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode",
     [
@@ -45,6 +46,7 @@ def test_block_context_ops(
     )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "index",
     [
diff --git a/tests/benchmark/compute/instruction/test_call_context.py b/tests/benchmark/compute/instruction/test_call_context.py
index 564a8b63cb..b4ca1bc40b 100644
--- a/tests/benchmark/compute/instruction/test_call_context.py
+++ b/tests/benchmark/compute/instruction/test_call_context.py
@@ -31,6 +31,7 @@
 )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode",
     [
@@ -48,6 +49,7 @@ def test_call_frame_context_ops(
     )
 
 
+@pytest.mark.repricing(calldata_length=1_000)
 @pytest.mark.parametrize("calldata_length", [0, 1_000, 10_000])
 def test_calldatasize(
     benchmark_test: BenchmarkTestFiller,
@@ -62,6 +64,7 @@ def test_calldatasize(
     )
 
 
+@pytest.mark.repricing(non_zero_value=True, from_origin=True)
 @pytest.mark.parametrize("non_zero_value", [True, False])
 @pytest.mark.parametrize("from_origin", [True, False])
 def test_callvalue(
@@ -101,6 +104,7 @@ def test_callvalue(
     benchmark_test(tx=tx)
 
 
+@pytest.mark.repricing(calldata=b"")
 @pytest.mark.parametrize(
     "calldata",
     [
@@ -229,6 +233,7 @@ def test_calldatacopy(
     benchmark_test(tx=tx)
 
 
+@pytest.mark.repricing(
+    returned_size=1,
+    return_data_style=ReturnDataStyle.RETURN,
+)
 @pytest.mark.parametrize(
     "return_data_style",
     [
@@ -272,6 +280,7 @@ def test_returndatasize_nonzero(
     )
 
 
+@pytest.mark.repricing
 def test_returndatasize_zero(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
@@ -281,6 +290,7 @@ def test_returndatasize_zero(
     )
 
 
+@pytest.mark.repricing(size=10 * 1024, fixed_dst=True)
 @pytest.mark.parametrize(
     "size",
     [
diff --git a/tests/benchmark/compute/instruction/test_comparison.py b/tests/benchmark/compute/instruction/test_comparison.py
index 5ccf213b84..bb93444100 100644
--- a/tests/benchmark/compute/instruction/test_comparison.py
+++ b/tests/benchmark/compute/instruction/test_comparison.py
@@ -18,6 +18,7 @@
 )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode,opcode_args",
     [
@@ -79,6 +80,7 @@ def test_comparison(
     )
 
 
+@pytest.mark.repricing
 def test_iszero(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
diff --git a/tests/benchmark/compute/instruction/test_control_flow.py b/tests/benchmark/compute/instruction/test_control_flow.py
index ac89b3196a..ce8e0e4f52 100644
--- a/tests/benchmark/compute/instruction/test_control_flow.py
+++ b/tests/benchmark/compute/instruction/test_control_flow.py
@@ -10,6 +10,7 @@
 - JUMPDEST
 """
 
+import pytest
 from execution_testing import (
     Alloc,
     BenchmarkTestFiller,
@@ -23,6 +24,7 @@
 
 
 # STOP, JUMP, JUMPI, PC, GAS, JUMPDEST
+@pytest.mark.repricing
 def test_gas_op(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
@@ -54,6 +56,7 @@ def test_jumps(
     benchmark_test(tx=tx)
 
 
+@pytest.mark.repricing
 def test_jumpi_fallthrough(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
@@ -80,6 +83,7 @@ def test_jumpis(
     benchmark_test(tx=tx)
 
 
+@pytest.mark.repricing
 def test_jumpdests(
     benchmark_test: BenchmarkTestFiller,
 ) -> None:
diff --git a/tests/benchmark/compute/instruction/test_keccak.py b/tests/benchmark/compute/instruction/test_keccak.py
index c49897b6ac..4cefa708a4 100644
--- a/tests/benchmark/compute/instruction/test_keccak.py
+++ b/tests/benchmark/compute/instruction/test_keccak.py
@@ -18,6 +18,7 @@
 KECCAK_RATE = 136
 
 
+@pytest.mark.repricing
 def test_keccak_max_permutations(
     benchmark_test: BenchmarkTestFiller,
     fork: Fork,
diff --git a/tests/benchmark/compute/instruction/test_log.py b/tests/benchmark/compute/instruction/test_log.py
index 29f13813ce..eaccaf5700 100644
--- a/tests/benchmark/compute/instruction/test_log.py
+++ b/tests/benchmark/compute/instruction/test_log.py
@@ -18,6 +18,12 @@
 )
 
 
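+# Repricing reference case: the heaviest LOG parametrization (1 MiB of
+# non-zero data, non-zero topic values, and a fixed offset).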
+@pytest.mark.repricing(
+    size=1024 * 1024,
+    non_zero_data=True,
+    zeros_topic=False,
+    fixed_offset=True,
+)
 @pytest.mark.parametrize(
     "opcode",
     [
diff --git a/tests/benchmark/compute/instruction/test_memory.py b/tests/benchmark/compute/instruction/test_memory.py
index a3afc5e548..e4a9decb8e 100644
--- a/tests/benchmark/compute/instruction/test_memory.py
+++ b/tests/benchmark/compute/instruction/test_memory.py
@@ -19,6 +19,7 @@
 )
 
 
+@pytest.mark.repricing(mem_size=1_000)
 @pytest.mark.parametrize("mem_size", [0, 1, 1_000, 100_000, 1_000_000])
 def test_msize(
     benchmark_test: BenchmarkTestFiller,
@@ -34,6 +35,11 @@ def test_msize(
     )
 
 
+@pytest.mark.repricing(
+    offset=31,
+    offset_initialized=True,
+    big_memory_expansion=True,
+)
 @pytest.mark.parametrize("opcode", [Op.MLOAD, Op.MSTORE, Op.MSTORE8])
 @pytest.mark.parametrize("offset", [0, 1, 31])
 @pytest.mark.parametrize("offset_initialized", [True, False])
@@ -67,6 +73,7 @@ def test_memory_access(
     )
 
 
+@pytest.mark.repricing(size=10 * 1024, fixed_src_dst=True)
 @pytest.mark.parametrize(
     "size",
     [
diff --git a/tests/benchmark/compute/instruction/test_stack.py b/tests/benchmark/compute/instruction/test_stack.py
index 01b5588bf9..0dd197b026 100644
--- a/tests/benchmark/compute/instruction/test_stack.py
+++ b/tests/benchmark/compute/instruction/test_stack.py
@@ -17,6 +17,7 @@
 )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode",
     [
@@ -50,6 +51,7 @@ def test_swap(
     )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode",
     [
@@ -85,6 +87,7 @@ def test_dup(
     )
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode",
     [
diff --git a/tests/benchmark/compute/instruction/test_storage.py b/tests/benchmark/compute/instruction/test_storage.py
index 0b9864a20e..9d68c52e6c 100644
--- a/tests/benchmark/compute/instruction/test_storage.py
+++ b/tests/benchmark/compute/instruction/test_storage.py
@@ -28,6 +28,7 @@
 from tests.benchmark.compute.helpers import StorageAction, TransactionResult
 
 
+@pytest.mark.repricing(fixed_key=False, fixed_value=False)
 @pytest.mark.parametrize("fixed_key", [True, False])
 @pytest.mark.parametrize("fixed_value", [True, False])
 def test_tload(
@@ -59,6 +60,7 @@ def test_tload(
     )
 
 
+@pytest.mark.repricing(fixed_key=False, fixed_value=False)
 @pytest.mark.parametrize("fixed_key", [True, False])
 @pytest.mark.parametrize("fixed_value", [True, False])
 def test_tstore(
diff --git a/tests/benchmark/compute/instruction/test_system.py b/tests/benchmark/compute/instruction/test_system.py
index 0bb379dd91..e13321c864 100644
--- a/tests/benchmark/compute/instruction/test_system.py
+++ b/tests/benchmark/compute/instruction/test_system.py
@@ -424,6 +424,7 @@ def test_creates_collisions(
     )
 
 
+@pytest.mark.repricing(return_size=1024, return_non_zero_data=True)
 @pytest.mark.parametrize(
     "opcode",
     [Op.RETURN, Op.REVERT],
diff --git a/tests/benchmark/compute/instruction/test_tx_context.py b/tests/benchmark/compute/instruction/test_tx_context.py
index 68fb855df0..12cc1e2acf 100644
--- a/tests/benchmark/compute/instruction/test_tx_context.py
+++ b/tests/benchmark/compute/instruction/test_tx_context.py
@@ -22,6 +22,7 @@
 from tests.cancun.eip4844_blobs.spec import Spec as BlobsSpec
 
 
+@pytest.mark.repricing
 @pytest.mark.parametrize(
     "opcode",
     [
@@ -39,6 +40,7 @@ def test_call_frame_context_ops(
     )
 
 
+@pytest.mark.repricing(blob_index=0, blobs_present=0)
 @pytest.mark.parametrize(
     "blob_index, blobs_present",
     [