From 8118fff79c2411433ea5e08a1bc94160455d6ec7 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Fri, 29 Nov 2024 09:49:45 +0800 Subject: [PATCH 001/111] use CallContract to detect tx failure early if gasLimit is specified (#13117) --- op-service/txmgr/txmgr.go | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index e633a74c288..a73e84c9273 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -363,26 +363,32 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* } } + // Calculate the intrinsic gas for the transaction + callMsg := ethereum.CallMsg{ + From: m.cfg.From, + To: candidate.To, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Data: candidate.TxData, + Value: candidate.Value, + } + if len(blobHashes) > 0 { + callMsg.BlobGasFeeCap = blobBaseFee + callMsg.BlobHashes = blobHashes + } // If the gas limit is set, we can use that as the gas if gasLimit == 0 { - // Calculate the intrinsic gas for the transaction - callMsg := ethereum.CallMsg{ - From: m.cfg.From, - To: candidate.To, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Data: candidate.TxData, - Value: candidate.Value, - } - if len(blobHashes) > 0 { - callMsg.BlobGasFeeCap = blobBaseFee - callMsg.BlobHashes = blobHashes - } gas, err := m.backend.EstimateGas(ctx, callMsg) if err != nil { return nil, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) } gasLimit = gas + } else { + callMsg.Gas = gasLimit + _, err := m.backend.CallContract(ctx, callMsg, nil) + if err != nil { + return nil, fmt.Errorf("failed to call: %w", errutil.TryAddRevertReason(err)) + } } var txMessage types.TxData From 9f84a4e3ac73fadafed4f61889357115632cdf8f Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Fri, 29 Nov 2024 03:36:06 +0100 Subject: [PATCH 002/111] fix mip2 test stack too deep error (#13138) --- 
.../contracts-bedrock/test/cannon/MIPS2.t.sol | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index 2cc0519ea5b..d38ab89ff55 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -789,8 +789,9 @@ contract MIPS2_Test is CommonTest { vm.expectRevert(InvalidMemoryProof.selector); mips.step(encodeState(state), bytes.concat(threadWitness, invalidInsnAndMemProof, memProof2), 0); - (, bytes memory invalidMemProof2) = - ffi.getCannonMemoryProof2(pc, insn, timespecAddr, secs + 1, timespecAddr + 4); + uint32 _secs = secs + 1; + uint32 _timespecAddr = timespecAddr + 4; + (, bytes memory invalidMemProof2) = ffi.getCannonMemoryProof2(pc, insn, timespecAddr, _secs, _timespecAddr); vm.expectRevert(InvalidSecondMemoryProof.selector); mips.step(encodeState(state), bytes.concat(threadWitness, insnAndMemProof, invalidMemProof2), 0); } @@ -2766,31 +2767,21 @@ contract MIPS2_Test is CommonTest { } function encodeState(IMIPS2.State memory _state) internal pure returns (bytes memory) { - // Split up encoding to get around stack-too-deep error - return abi.encodePacked(encodeStateA(_state), encodeStateB(_state)); - } - - function encodeStateA(IMIPS2.State memory _state) internal pure returns (bytes memory) { - return abi.encodePacked( + bytes memory a = abi.encodePacked( _state.memRoot, _state.preimageKey, _state.preimageOffset, _state.heap, _state.llReservationStatus, - _state.llAddress, - _state.llOwnerThread, - _state.exitCode, - _state.exited, - _state.step, - _state.stepsSinceLastContextSwitch, - _state.wakeup, - _state.traverseRight, - _state.leftThreadStack + _state.llAddress ); - } - - function encodeStateB(IMIPS2.State memory _state) internal pure returns (bytes memory) { - return abi.encodePacked(_state.rightThreadStack, _state.nextThreadID); + bytes memory b = 
abi.encodePacked( + _state.llOwnerThread, _state.exitCode, _state.exited, _state.step, _state.stepsSinceLastContextSwitch + ); + bytes memory c = abi.encodePacked( + _state.wakeup, _state.traverseRight, _state.leftThreadStack, _state.rightThreadStack, _state.nextThreadID + ); + return abi.encodePacked(a, b, c); } function copyState(IMIPS2.State memory _state) internal pure returns (IMIPS2.State memory out_) { From b2f662873a97eb6748b62295ce35700f59a3db19 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Sat, 30 Nov 2024 16:51:41 +0100 Subject: [PATCH 003/111] make safecall test support forge coverage (#13147) --- .../test/libraries/SafeCall.t.sol | 53 ++++++++++++++++--- 1 file changed, 47 insertions(+), 6 deletions(-) diff --git a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol index ffdf07b09b8..ef1f8876efb 100644 --- a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol +++ b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; // Testing utilities import { Test } from "forge-std/Test.sol"; +import { VmSafe } from "forge-std/Vm.sol"; import { StdCheatsSafe } from "forge-std/StdCheats.sol"; // Target contract @@ -122,9 +123,29 @@ contract SafeCall_Test is Test { for (uint64 i = 40_000; i < 100_000; i++) { uint256 snapshot = vm.snapshot(); - // 65_922 is the exact amount of gas required to make the safe call - // successfully. - if (i < 65_922) { + // The values below are best gotten by setting the value to a high number and running the test with a + // verbosity of `-vvv` then setting the value to the value (gas arg) of the failed assertion. + // A faster way to do this for forge coverage cases, is to comment out the optimizer and optimizer runs in + // the foundry.toml file and then run forge test. This is faster because forge test only compiles modified + // contracts unlike forge coverage. 
+ uint256 expected; + + // Because forge coverage always runs with the optimizer disabled, + // if forge coverage is run before testing this with forge test or forge snapshot, forge clean should be + // run first so that it recompiles the contracts using the foundry.toml optimizer settings. + if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + // 66_290 is the exact amount of gas required to make the safe call + // successfully with the optimizer disabled (ran via forge coverage) + expected = 66_290; + } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { + // 65_922 is the exact amount of gas required to make the safe call + // successfully with the foundry.toml optimizer settings. + expected = 65_922; + } else { + revert("SafeCall_Test: unknown context"); + } + + if (i < expected) { assertFalse(caller.makeSafeCall(i, 25_000)); } else { vm.expectCallMinGas(address(caller), 0, 25_000, abi.encodeCall(caller.setA, (1))); @@ -142,9 +163,29 @@ contract SafeCall_Test is Test { for (uint64 i = 15_200_000; i < 15_300_000; i++) { uint256 snapshot = vm.snapshot(); - // 15_278_621 is the exact amount of gas required to make the safe call - // successfully. - if (i < 15_278_621) { + // The values below are best gotten by setting the value to a high number and running the test with a + // verbosity of `-vvv` then setting the value to the value (gas arg) of the failed assertion. + // A faster way to do this for forge coverage cases, is to comment out the optimizer and optimizer runs in + // the foundry.toml file and then run forge test. This is faster because forge test only compiles modified + // contracts unlike forge coverage. + uint256 expected; + + // Because forge coverage always runs with the optimizer disabled, + // if forge coverage is run before testing this with forge test or forge snapshot, forge clean should be + // run first so that it recompiles the contracts using the foundry.toml optimizer settings. 
+ if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + // 15_278_989 is the exact amount of gas required to make the safe call + // successfully with the optimizer disabled (ran via forge coverage) + expected = 15_278_989; + } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { + // 15_278_621 is the exact amount of gas required to make the safe call + // successfully with the foundry.toml optimizer settings. + expected = 15_278_621; + } else { + revert("SafeCall_Test: unknown context"); + } + + if (i < expected) { assertFalse(caller.makeSafeCall(i, 15_000_000)); } else { vm.expectCallMinGas(address(caller), 0, 15_000_000, abi.encodeCall(caller.setA, (1))); From 4c0387b5bdc1cecfc4530a2692f1bd8542d3a652 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Sun, 1 Dec 2024 20:00:35 -0500 Subject: [PATCH 004/111] feat: allow arguments to contracts test recipe (#13152) Adds the ability to supply arguments to the "just test" recipe in the contracts package. Developers frequently need to add arguments to test specific contracts and running "just test" as the unified testing command is better than flipping to "forge test". --- packages/contracts-bedrock/justfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index fe6f4c0834c..b82717c4a26 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -54,8 +54,8 @@ clean: ######################################################## # Runs standard contract tests. -test: build-go-ffi - forge test +test *ARGS: build-go-ffi + forge test {{ARGS}} # Runs standard contract tests with rerun flag. 
test-rerun: build-go-ffi From b6c28d5dc5b48b1df0e5ba003704c3e700fb8eb7 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 2 Dec 2024 17:51:18 +0800 Subject: [PATCH 005/111] estimate gas correctly for blob tx in increaseGasPrice (#13116) --- op-service/txmgr/txmgr.go | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index a73e84c9273..353951af633 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -803,14 +803,30 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa } // Re-estimate gaslimit in case things have changed or a previous gaslimit estimate was wrong - gas, err := m.backend.EstimateGas(ctx, ethereum.CallMsg{ + callMsg := ethereum.CallMsg{ From: m.cfg.From, To: tx.To(), GasTipCap: bumpedTip, GasFeeCap: bumpedFee, Data: tx.Data(), Value: tx.Value(), - }) + } + var bumpedBlobFee *big.Int + if tx.Type() == types.BlobTxType { + // Blob transactions have an additional blob gas price we must specify, so we must make sure it is + // getting bumped appropriately. + bumpedBlobFee = calcThresholdValue(tx.BlobGasFeeCap(), true) + if bumpedBlobFee.Cmp(blobBaseFee) < 0 { + bumpedBlobFee = blobBaseFee + } + if err := m.checkBlobFeeLimits(blobBaseFee, bumpedBlobFee); err != nil { + return nil, err + } + + callMsg.BlobGasFeeCap = bumpedBlobFee + callMsg.BlobHashes = tx.BlobHashes() + } + gas, err := m.backend.EstimateGas(ctx, callMsg) if err != nil { // If this is a transaction resubmission, we sometimes see this outcome because the // original tx can get included in a block just before the above call. 
In this case the @@ -836,15 +852,6 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa var newTx *types.Transaction if tx.Type() == types.BlobTxType { - // Blob transactions have an additional blob gas price we must specify, so we must make sure it is - // getting bumped appropriately. - bumpedBlobFee := calcThresholdValue(tx.BlobGasFeeCap(), true) - if bumpedBlobFee.Cmp(blobBaseFee) < 0 { - bumpedBlobFee = blobBaseFee - } - if err := m.checkBlobFeeLimits(blobBaseFee, bumpedBlobFee); err != nil { - return nil, err - } message := &types.BlobTx{ Nonce: tx.Nonce(), To: *tx.To(), From 4ee839ae8996c2d421a2d85fd5471897840014fa Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 2 Dec 2024 03:37:28 -0700 Subject: [PATCH 006/111] op-batcher: Fix test flake due to log (#13145) There was one other spot where the batcher would log after tests exit, causing a panic. --- op-batcher/batcher/driver.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 58e533becff..d52a31bba60 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -779,12 +779,14 @@ func (l *BatchSubmitter) publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[t if err != nil { // Don't log context cancelled events because they are expected, // and can happen after tests complete which causes a panic. 
- if !errors.Is(err, context.Canceled) { + if errors.Is(err, context.Canceled) { + l.recordFailedDARequest(txdata.ID(), nil) + } else { l.Log.Error("Failed to post input to Alt DA", "error", err) + // requeue frame if we fail to post to the DA Provider so it can be retried + // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs + l.recordFailedDARequest(txdata.ID(), err) } - // requeue frame if we fail to post to the DA Provider so it can be retried - // note: this assumes that the da server caches requests, otherwise it might lead to resubmissions of the blobs - l.recordFailedDARequest(txdata.ID(), err) return nil } l.Log.Info("Set altda input", "commitment", comm, "tx", txdata.ID()) From d1a2198ba195e3b7b32e228c8ad4ee953294126d Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:37:33 +0200 Subject: [PATCH 007/111] Fix typos in `op-conductor/README.md` documentation (#13128) * Update README.md * Update README.md * Update README.md --- op-conductor/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/op-conductor/README.md b/op-conductor/README.md index 497156b8eee..c436d0248ac 100644 --- a/op-conductor/README.md +++ b/op-conductor/README.md @@ -1,3 +1,4 @@ + # op-conductor op-conductor is an auxiliary service designed to enhance the reliability and availability of a sequencer in @@ -20,7 +21,7 @@ For configuration and runbook, please refer to [RUNBOOK.md](./RUNBOOK.md) ### Architecture Typically you can setup a 3 nodes sequencer cluster, each one with op-conductor running alongside the sequencer in different regions / AZs. -Below diagram showcaes how conductor interacts with relevant op-stack components. +Below diagram showcases how conductor interacts with relevant op-stack components. ![op-conductor setup](./assets/setup.svg) @@ -93,6 +94,6 @@ There are 2 situations we need to consider. 1. 
Leadership transfer triggered by raft consensus protocol (network partition, etc) 1. In this case, a new leader will be elected regardless of its sync status, it could be behind for a few blocks - 2. The solution is to simple, wait until the elected leader catch up to tip (same as the FSM tip) + 2. The solution is to simply wait until the elected leader catches up to tip (same as the FSM tip) 2. Leadership transfer triggered by us (Conductor detected unhealthy sequencer) 1. In this case, we have the choice to determine which node to transfer leadership to, we can simply query the latest block from candidates within the network and transfer directly to the one with the most up to date blocks. From cdae7f2b6a47e226bb5ec7506d3f3f3d9c367814 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 2 Dec 2024 11:11:13 -0500 Subject: [PATCH 008/111] fix: stack too deep in PermissionedDisputeGame (#13141) Fixes the stack-too-deep error in the PermissionedDisputeGame by updating the game to use a struct as the constructor parameter. 
--- .../scripts/deploy/Deploy.s.sol | 81 ++++------- .../scripts/deploy/DeployDisputeGame.s.sol | 48 +------ .../upgrades/holocene/DeployUpgrade.s.sol | 44 +++--- .../snapshots/abi/FaultDisputeGame.json | 103 +++++++------- .../abi/PermissionedDisputeGame.json | 103 +++++++------- .../snapshots/semver-lock.json | 4 +- .../src/dispute/FaultDisputeGame.sol | 80 ++++++----- .../src/dispute/PermissionedDisputeGame.sol | 42 +----- .../dispute/interfaces/IFaultDisputeGame.sol | 27 ++-- .../interfaces/IPermissionedDisputeGame.sol | 14 +- .../test/dispute/FaultDisputeGame.t.sol | 132 ++++++++++-------- .../dispute/PermissionedDisputeGame.t.sol | 23 +-- 12 files changed, 317 insertions(+), 384 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 9e688f29ae6..56e6b6feba3 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -64,20 +64,6 @@ import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; contract Deploy is Deployer { using stdJson for string; - /// @notice FaultDisputeGameParams is a struct that contains the parameters necessary to call - /// the function _setFaultGameImplementation. This struct exists because the EVM needs - /// to finally adopt PUSHN and get rid of stack too deep once and for all. - /// Someday we will look back and laugh about stack too deep, today is not that day. - struct FaultDisputeGameParams { - IAnchorStateRegistry anchorStateRegistry; - IDelayedWETH weth; - GameType gameType; - Claim absolutePrestate; - IBigStepper faultVm; - uint256 maxGameDepth; - Duration maxClockDuration; - } - //////////////////////////////////////////////////////////////// // Modifiers // //////////////////////////////////////////////////////////////// @@ -871,14 +857,17 @@ contract Deploy is Deployer { // Set the Cannon FaultDisputeGame implementation in the factory. 
_setFaultGameImplementation({ _factory: factory, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - weth: weth, + _params: IFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.CANNON, absolutePrestate: loadMipsAbsolutePrestate(), - faultVm: IBigStepper(mustGetAddress("Mips")), maxGameDepth: cfg.faultGameMaxDepth(), - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), + vm: IBigStepper(mustGetAddress("Mips")), + weth: weth, + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + l2ChainId: cfg.l2ChainID() }) }); } @@ -892,15 +881,18 @@ contract Deploy is Deployer { Claim outputAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())); _setFaultGameImplementation({ _factory: factory, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - weth: weth, + _params: IFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.ALPHABET, absolutePrestate: outputAbsolutePrestate, - faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, IPreimageOracle(mustGetAddress("PreimageOracle")))), // The max depth for the alphabet trace is always 3. Add 1 because split depth is fully inclusive. 
maxGameDepth: cfg.faultGameSplitDepth() + 3 + 1, - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), + vm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, IPreimageOracle(mustGetAddress("PreimageOracle")))), + weth: weth, + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + l2ChainId: cfg.l2ChainID() }) }); } @@ -925,23 +917,26 @@ contract Deploy is Deployer { ); _setFaultGameImplementation({ _factory: factory, - _params: FaultDisputeGameParams({ - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - weth: weth, + _params: IFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.FAST, absolutePrestate: outputAbsolutePrestate, - faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, fastOracle)), // The max depth for the alphabet trace is always 3. Add 1 because split depth is fully inclusive. maxGameDepth: cfg.faultGameSplitDepth() + 3 + 1, - maxClockDuration: Duration.wrap(0) // Resolvable immediately - }) + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(0), // Resolvable immediately + vm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, fastOracle)), + weth: weth, + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + l2ChainId: cfg.l2ChainID() + }) }); } /// @notice Sets the implementation for the given fault game type in the `DisputeGameFactory`. 
function _setFaultGameImplementation( IDisputeGameFactory _factory, - FaultDisputeGameParams memory _params + IFaultDisputeGame.GameConstructorParams memory _params ) internal { @@ -954,37 +949,19 @@ contract Deploy is Deployer { } uint32 rawGameType = GameType.unwrap(_params.gameType); - - // Redefine _param variable to avoid stack too deep error during compilation - FaultDisputeGameParams memory _params_ = _params; require( rawGameType != GameTypes.PERMISSIONED_CANNON.raw(), "Deploy: Permissioned Game should be deployed by OPCM" ); + _factory.setImplementation( - _params_.gameType, + _params.gameType, IDisputeGame( DeployUtils.create2AndSave({ _save: this, _salt: _implSalt(), _name: "FaultDisputeGame", _nick: string.concat("FaultDisputeGame_", vm.toString(rawGameType)), - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IFaultDisputeGame.__constructor__, - ( - _params_.gameType, - _params_.absolutePrestate, - _params_.maxGameDepth, - cfg.faultGameSplitDepth(), - Duration.wrap(uint64(cfg.faultGameClockExtension())), - _params_.maxClockDuration, - _params_.faultVm, - _params_.weth, - IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - cfg.l2ChainID() - ) - ) - ) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGame.__constructor__, (_params))) }) ) ); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol index 153e4b65fdb..67893d2cb13 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol @@ -270,19 +270,17 @@ contract DeployDisputeGame is Script { function deployDisputeGameImpl(DeployDisputeGameInput _dgi, DeployDisputeGameOutput _dgo) internal { // Shove the arguments into a struct to avoid stack-too-deep errors. 
- DisputeGameConstructorArgs memory args = DisputeGameConstructorArgs({ + IFaultDisputeGame.GameConstructorParams memory args = IFaultDisputeGame.GameConstructorParams({ gameType: GameType.wrap(uint32(_dgi.gameType())), absolutePrestate: Claim.wrap(_dgi.absolutePrestate()), maxGameDepth: _dgi.maxGameDepth(), splitDepth: _dgi.splitDepth(), clockExtension: Duration.wrap(uint64(_dgi.clockExtension())), maxClockDuration: Duration.wrap(uint64(_dgi.maxClockDuration())), - gameVm: IBigStepper(address(_dgi.vmAddress())), - delayedWethProxy: _dgi.delayedWethProxy(), - anchorStateRegistryProxy: _dgi.anchorStateRegistryProxy(), - l2ChainId: _dgi.l2ChainId(), - proposer: _dgi.proposer(), - challenger: _dgi.challenger() + vm: IBigStepper(address(_dgi.vmAddress())), + weth: _dgi.delayedWethProxy(), + anchorStateRegistry: _dgi.anchorStateRegistryProxy(), + l2ChainId: _dgi.l2ChainId() }); // PermissionedDisputeGame is used as the type here because it is a superset of @@ -294,23 +292,7 @@ contract DeployDisputeGame is Script { impl = IPermissionedDisputeGame( DeployUtils.create1({ _name: "FaultDisputeGame", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IFaultDisputeGame.__constructor__, - ( - args.gameType, - args.absolutePrestate, - args.maxGameDepth, - args.splitDepth, - args.clockExtension, - args.maxClockDuration, - args.gameVm, - args.delayedWethProxy, - args.anchorStateRegistryProxy, - args.l2ChainId - ) - ) - ) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGame.__constructor__, (args))) }) ); } else { @@ -318,23 +300,7 @@ contract DeployDisputeGame is Script { DeployUtils.create1({ _name: "PermissionedDisputeGame", _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IPermissionedDisputeGame.__constructor__, - ( - args.gameType, - args.absolutePrestate, - args.maxGameDepth, - args.splitDepth, - args.clockExtension, - args.maxClockDuration, - args.gameVm, - args.delayedWethProxy, - args.anchorStateRegistryProxy, - args.l2ChainId, - 
args.proposer, - args.challenger - ) - ) + abi.encodeCall(IPermissionedDisputeGame.__constructor__, (args, _dgi.proposer(), _dgi.challenger())) ) }) ); diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol b/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol index 8203e42b060..680c4bea127 100644 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol +++ b/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol @@ -138,16 +138,18 @@ contract DeployUpgrade is Deployer { bytes memory constructorInput = abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GameTypes.CANNON, - Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), - cfg.faultGameMaxDepth(), - cfg.faultGameSplitDepth(), - Duration.wrap(uint64(cfg.faultGameClockExtension())), - Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), - IBigStepper(mustGetAddress("MIPS")), - IDelayedWETH(payable(mustGetAddress("DelayedWETHProxyFDG"))), - IAnchorStateRegistry(mustGetAddress("AnchorStateRegistry")), - cfg.l2ChainID() + IFaultDisputeGame.GameConstructorParams({ + gameType: GameTypes.CANNON, + absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), + maxGameDepth: cfg.faultGameMaxDepth(), + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), + vm: IBigStepper(mustGetAddress("MIPS")), + weth: IDelayedWETH(payable(mustGetAddress("DelayedWETHProxyFDG"))), + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistry")), + l2ChainId: cfg.l2ChainID() + }) ) ); @@ -197,16 +199,18 @@ contract DeployUpgrade is Deployer { bytes memory constructorInput = abi.encodeCall( IPermissionedDisputeGame.__constructor__, ( - GameTypes.PERMISSIONED_CANNON, - Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), - cfg.faultGameMaxDepth(), - cfg.faultGameSplitDepth(), - 
Duration.wrap(uint64(cfg.faultGameClockExtension())), - Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), - IBigStepper(mustGetAddress("MIPS")), - IDelayedWETH(payable(mustGetAddress("DelayedWETHProxyPDG"))), - IAnchorStateRegistry(mustGetAddress("AnchorStateRegistry")), - cfg.l2ChainID(), + IFaultDisputeGame.GameConstructorParams({ + gameType: GameTypes.PERMISSIONED_CANNON, + absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), + maxGameDepth: cfg.faultGameMaxDepth(), + splitDepth: cfg.faultGameSplitDepth(), + clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), + maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), + vm: IBigStepper(mustGetAddress("MIPS")), + weth: IDelayedWETH(payable(mustGetAddress("DelayedWETHProxyPDG"))), + anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistry")), + l2ChainId: cfg.l2ChainID() + }), cfg.l2OutputOracleProposer(), cfg.l2OutputOracleChallenger() ) diff --git a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json index e1e59c38701..a2f02cce13b 100644 --- a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json @@ -2,54 +2,61 @@ { "inputs": [ { - "internalType": "GameType", - "name": "_gameType", - "type": "uint32" - }, - { - "internalType": "Claim", - "name": "_absolutePrestate", - "type": "bytes32" - }, - { - "internalType": "uint256", - "name": "_maxGameDepth", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_splitDepth", - "type": "uint256" - }, - { - "internalType": "Duration", - "name": "_clockExtension", - "type": "uint64" - }, - { - "internalType": "Duration", - "name": "_maxClockDuration", - "type": "uint64" - }, - { - "internalType": "contract IBigStepper", - "name": "_vm", - "type": "address" - }, - { - "internalType": "contract IDelayedWETH", - "name": "_weth", - 
"type": "address" - }, - { - "internalType": "contract IAnchorStateRegistry", - "name": "_anchorStateRegistry", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_l2ChainId", - "type": "uint256" + "components": [ + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "maxClockDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "uint256", + "name": "l2ChainId", + "type": "uint256" + } + ], + "internalType": "struct FaultDisputeGame.GameConstructorParams", + "name": "_params", + "type": "tuple" } ], "stateMutability": "nonpayable", diff --git a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json index fd9737cc584..eebc4adf16e 100644 --- a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json @@ -2,54 +2,61 @@ { "inputs": [ { - "internalType": "GameType", - "name": "_gameType", - "type": "uint32" - }, - { - "internalType": "Claim", - "name": "_absolutePrestate", - "type": "bytes32" - }, - { - "internalType": "uint256", - "name": "_maxGameDepth", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_splitDepth", - "type": "uint256" - }, - { - "internalType": 
"Duration", - "name": "_clockExtension", - "type": "uint64" - }, - { - "internalType": "Duration", - "name": "_maxClockDuration", - "type": "uint64" - }, - { - "internalType": "contract IBigStepper", - "name": "_vm", - "type": "address" - }, - { - "internalType": "contract IDelayedWETH", - "name": "_weth", - "type": "address" - }, - { - "internalType": "contract IAnchorStateRegistry", - "name": "_anchorStateRegistry", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_l2ChainId", - "type": "uint256" + "components": [ + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "absolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "maxClockDuration", + "type": "uint64" + }, + { + "internalType": "contract IBigStepper", + "name": "vm", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "weth", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "uint256", + "name": "l2ChainId", + "type": "uint256" + } + ], + "internalType": "struct FaultDisputeGame.GameConstructorParams", + "name": "_params", + "type": "tuple" }, { "internalType": "address", diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 8bd5ea33f77..32de3eeb7f3 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -164,8 +164,8 @@ "sourceCodeHash": "0x9cb0851b6e471461f2bb369bd72eef4cffe8a0d1345546608a2aa6795540211d" }, "src/dispute/FaultDisputeGame.sol": { - 
"initCodeHash": "0xa352179f5055232764aac6b66a3ff5a6b3bfae2101d20c077f714b0ed7e40eef", - "sourceCodeHash": "0x730eff9147294c115a0a53e7e75771bcc4a517beb48457140ab929a8d1510893" + "initCodeHash": "0x7441e418d3b4229f519c8c027f3fd7a5487206b833110b794cca104a1a2c73fe", + "sourceCodeHash": "0x8f4bf662fe8d56e9aabaa7742033880f0900cd6221c7711c1dbefe985a841104" }, "src/legacy/DeployerWhitelist.sol": { "initCodeHash": "0xf232863fde5cd65368bcb4a79b41b5a4a09c59ede5070f82fd3f13f681bea7d8", diff --git a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol index 2bd5ec67e96..6aad60e4283 100644 --- a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol @@ -86,6 +86,21 @@ contract FaultDisputeGame is Clone, ISemver { address counteredBy; } + /// @notice Parameters for creating a new FaultDisputeGame. We place this into a struct to + /// avoid stack-too-deep errors when compiling without the optimizer enabled. + struct GameConstructorParams { + GameType gameType; + Claim absolutePrestate; + uint256 maxGameDepth; + uint256 splitDepth; + Duration clockExtension; + Duration maxClockDuration; + IBigStepper vm; + IDelayedWETH weth; + IAnchorStateRegistry anchorStateRegistry; + uint256 l2ChainId; + } + //////////////////////////////////////////////////////////////// // Events // //////////////////////////////////////////////////////////////// @@ -146,8 +161,8 @@ contract FaultDisputeGame is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. 
- /// @custom:semver 1.3.1-beta.7 - string public constant version = "1.3.1-beta.7"; + /// @custom:semver 1.3.1-beta.8 + string public constant version = "1.3.1-beta.8"; /// @notice The starting timestamp of the game Timestamp public createdAt; @@ -189,69 +204,52 @@ contract FaultDisputeGame is Clone, ISemver { /// @notice The latest finalized output root, serving as the anchor for output bisection. OutputRoot public startingOutputRoot; - /// @param _gameType The type ID of the game. - /// @param _absolutePrestate The absolute prestate of the instruction trace. - /// @param _maxGameDepth The maximum depth of bisection. - /// @param _splitDepth The final depth of the output bisection portion of the game. - /// @param _clockExtension The clock extension to perform when the remaining duration is less than the extension. - /// @param _maxClockDuration The maximum amount of time that may accumulate on a team's chess clock. - /// @param _vm An onchain VM that performs single instruction steps on an FPP trace. - /// @param _weth WETH contract for holding ETH. - /// @param _anchorStateRegistry The contract that stores the anchor state for each game type. - /// @param _l2ChainId Chain ID of the L2 network this contract argues about. - constructor( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId - ) { + /// @param _params Parameters for creating a new FaultDisputeGame. + constructor(GameConstructorParams memory _params) { // The max game depth may not be greater than `LibPosition.MAX_POSITION_BITLEN - 1`. - if (_maxGameDepth > LibPosition.MAX_POSITION_BITLEN - 1) revert MaxDepthTooLarge(); + if (_params.maxGameDepth > LibPosition.MAX_POSITION_BITLEN - 1) revert MaxDepthTooLarge(); // The split depth plus one cannot be greater than or equal to the max game depth. 
We add // an additional depth to the split depth to avoid a bug in trace ancestor lookup. We know // that the case where the split depth is the max value for uint256 is equivalent to the // second check though we do need to check it explicitly to avoid an overflow. - if (_splitDepth == type(uint256).max || _splitDepth + 1 >= _maxGameDepth) revert InvalidSplitDepth(); + if (_params.splitDepth == type(uint256).max || _params.splitDepth + 1 >= _params.maxGameDepth) { + revert InvalidSplitDepth(); + } // The split depth cannot be 0 or 1 to stay in bounds of clock extension arithmetic. - if (_splitDepth < 2) revert InvalidSplitDepth(); + if (_params.splitDepth < 2) revert InvalidSplitDepth(); // The PreimageOracle challenge period must fit into uint64 so we can safely use it here. // Runtime check was added instead of changing the ABI since the contract is already // deployed in production. We perform the same check within the PreimageOracle for the // benefit of developers but also perform this check here defensively. - if (_vm.oracle().challengePeriod() > type(uint64).max) revert InvalidChallengePeriod(); + if (_params.vm.oracle().challengePeriod() > type(uint64).max) revert InvalidChallengePeriod(); // Determine the maximum clock extension which is either the split depth extension or the // maximum game depth extension depending on the configuration of these contracts. - uint256 splitDepthExtension = uint256(_clockExtension.raw()) * 2; - uint256 maxGameDepthExtension = uint256(_clockExtension.raw()) + uint256(_vm.oracle().challengePeriod()); + uint256 splitDepthExtension = uint256(_params.clockExtension.raw()) * 2; + uint256 maxGameDepthExtension = + uint256(_params.clockExtension.raw()) + uint256(_params.vm.oracle().challengePeriod()); uint256 maxClockExtension = Math.max(splitDepthExtension, maxGameDepthExtension); // The maximum clock extension must fit into a uint64. 
if (maxClockExtension > type(uint64).max) revert InvalidClockExtension(); // The maximum clock extension may not be greater than the maximum clock duration. - if (uint64(maxClockExtension) > _maxClockDuration.raw()) revert InvalidClockExtension(); + if (uint64(maxClockExtension) > _params.maxClockDuration.raw()) revert InvalidClockExtension(); // Set up initial game state. - GAME_TYPE = _gameType; - ABSOLUTE_PRESTATE = _absolutePrestate; - MAX_GAME_DEPTH = _maxGameDepth; - SPLIT_DEPTH = _splitDepth; - CLOCK_EXTENSION = _clockExtension; - MAX_CLOCK_DURATION = _maxClockDuration; - VM = _vm; - WETH = _weth; - ANCHOR_STATE_REGISTRY = _anchorStateRegistry; - L2_CHAIN_ID = _l2ChainId; + GAME_TYPE = _params.gameType; + ABSOLUTE_PRESTATE = _params.absolutePrestate; + MAX_GAME_DEPTH = _params.maxGameDepth; + SPLIT_DEPTH = _params.splitDepth; + CLOCK_EXTENSION = _params.clockExtension; + MAX_CLOCK_DURATION = _params.maxClockDuration; + VM = _params.vm; + WETH = _params.weth; + ANCHOR_STATE_REGISTRY = _params.anchorStateRegistry; + L2_CHAIN_ID = _params.l2ChainId; } /// @notice Initializes the contract. 
diff --git a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol index 373498f55bf..de907695d00 100644 --- a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol @@ -5,14 +5,9 @@ pragma solidity 0.8.15; import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; // Libraries -import { GameType, Claim, Duration } from "src/dispute/lib/Types.sol"; +import { Claim } from "src/dispute/lib/Types.sol"; import { BadAuth } from "src/dispute/lib/Errors.sol"; -// Interfaces -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; - /// @title PermissionedDisputeGame /// @notice PermissionedDisputeGame is a contract that inherits from `FaultDisputeGame`, and contains two roles: /// - The `challenger` role, which is allowed to challenge a dispute. @@ -36,44 +31,15 @@ contract PermissionedDisputeGame is FaultDisputeGame { _; } - /// @param _gameType The type ID of the game. - /// @param _absolutePrestate The absolute prestate of the instruction trace. - /// @param _maxGameDepth The maximum depth of bisection. - /// @param _splitDepth The final depth of the output bisection portion of the game. - /// @param _clockExtension The clock extension to perform when the remaining duration is less than the extension. - /// @param _maxClockDuration The maximum amount of time that may accumulate on a team's chess clock. - /// @param _vm An onchain VM that performs single instruction steps on an FPP trace. - /// @param _weth WETH contract for holding ETH. - /// @param _anchorStateRegistry The contract that stores the anchor state for each game type. - /// @param _l2ChainId Chain ID of the L2 network this contract argues about. 
+ /// @param _params Parameters for creating a new FaultDisputeGame. /// @param _proposer Address that is allowed to create instances of this contract. /// @param _challenger Address that is allowed to challenge instances of this contract. constructor( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId, + GameConstructorParams memory _params, address _proposer, address _challenger ) - FaultDisputeGame( - _gameType, - _absolutePrestate, - _maxGameDepth, - _splitDepth, - _clockExtension, - _maxClockDuration, - _vm, - _weth, - _anchorStateRegistry, - _l2ChainId - ) + FaultDisputeGame(_params) { PROPOSER = _proposer; CHALLENGER = _challenger; diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol index 8c5bac02e9b..a188063de54 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol @@ -26,6 +26,19 @@ interface IFaultDisputeGame is IDisputeGame { address counteredBy; } + struct GameConstructorParams { + GameType gameType; + Claim absolutePrestate; + uint256 maxGameDepth; + uint256 splitDepth; + Duration clockExtension; + Duration maxClockDuration; + IBigStepper vm; + IDelayedWETH weth; + IAnchorStateRegistry anchorStateRegistry; + uint256 l2ChainId; + } + error AlreadyInitialized(); error AnchorRootNotFound(); error BlockNumberMatches(); @@ -113,17 +126,5 @@ interface IFaultDisputeGame is IDisputeGame { function vm() external view returns (IBigStepper vm_); function weth() external view returns (IDelayedWETH weth_); - function __constructor__( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration 
_clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId - ) - external; + function __constructor__(GameConstructorParams memory _params) external; } diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol index c5a5a187ec1..08c9cd5aa2b 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol @@ -2,12 +2,13 @@ pragma solidity ^0.8.0; import { Types } from "src/libraries/Types.sol"; -import { GameType, Claim, Position, Clock, Hash, Duration } from "src/dispute/lib/Types.sol"; +import { Claim, Position, Clock, Hash, Duration } from "src/dispute/lib/Types.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; interface IPermissionedDisputeGame is IDisputeGame { struct ClaimData { @@ -120,16 +121,7 @@ interface IPermissionedDisputeGame is IDisputeGame { function challenger() external view returns (address challenger_); function __constructor__( - GameType _gameType, - Claim _absolutePrestate, - uint256 _maxGameDepth, - uint256 _splitDepth, - Duration _clockExtension, - Duration _maxClockDuration, - IBigStepper _vm, - IDelayedWETH _weth, - IAnchorStateRegistry _anchorStateRegistry, - uint256 _l2ChainId, + IFaultDisputeGame.GameConstructorParams memory _params, address _proposer, address _challenger ) diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol 
b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index 70aad007e40..df820bcb3cc 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -73,16 +73,18 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - 2 ** 2, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - _vm, - delayedWeth, - anchorStateRegistry, - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: _vm, + weth: delayedWeth, + anchorStateRegistry: anchorStateRegistry, + l2ChainId: 10 + }) ) ) ) @@ -154,16 +156,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - _maxGameDepth, - _maxGameDepth + 1, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: _maxGameDepth, + splitDepth: _maxGameDepth + 1, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -196,16 +200,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - 2 ** 2, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + 
gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -234,16 +240,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - maxGameDepth, - _splitDepth, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: maxGameDepth, + splitDepth: _splitDepth, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -272,16 +280,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - _splitDepth, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: _splitDepth, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) @@ -318,16 +328,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { abi.encodeCall( IFaultDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 16, - 8, - 
Duration.wrap(_clockExtension), - Duration.wrap(_maxClockDuration), - alphabetVM, - IDelayedWETH(payable(address(0))), - IAnchorStateRegistry(address(0)), - 10 + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 16, + splitDepth: 8, + clockExtension: Duration.wrap(_clockExtension), + maxClockDuration: Duration.wrap(_maxClockDuration), + vm: alphabetVM, + weth: IDelayedWETH(payable(address(0))), + anchorStateRegistry: IAnchorStateRegistry(address(0)), + l2ChainId: 10 + }) ) ) ) diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index 99b70b9c6df..20c45bf44de 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -17,6 +17,7 @@ import "src/dispute/lib/Errors.sol"; import { IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { /// @dev The type of the game being tested. 
@@ -67,16 +68,18 @@ contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { abi.encodeCall( IPermissionedDisputeGame.__constructor__, ( - GAME_TYPE, - absolutePrestate, - 2 ** 3, - 2 ** 2, - Duration.wrap(3 hours), - Duration.wrap(3.5 days), - _vm, - _weth, - anchorStateRegistry, - 10, + IFaultDisputeGame.GameConstructorParams({ + gameType: GAME_TYPE, + absolutePrestate: absolutePrestate, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days), + vm: _vm, + weth: _weth, + anchorStateRegistry: anchorStateRegistry, + l2ChainId: 10 + }), PROPOSER, CHALLENGER ) From 6e799a6cc621cb8031b42a00eeda7dd5618aed72 Mon Sep 17 00:00:00 2001 From: George Knee Date: Mon, 2 Dec 2024 16:31:02 +0000 Subject: [PATCH 009/111] txmgr/Queue: add additional assertions to test to check for tx ordering (#13124) * add additional assertions to test to check for tx ordering * enhance test to handle tx confirmation ID ordering * increase size of 0th tx to expose race condition * change approach since the backend does not order txs by nonce * tidy * clarify * Update op-service/txmgr/queue_test.go Co-authored-by: Sebastian Stammler --------- Co-authored-by: Sebastian Stammler --- op-service/txmgr/queue_test.go | 47 +++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/op-service/txmgr/queue_test.go b/op-service/txmgr/queue_test.go index 00219b913ad..27dce154bcc 100644 --- a/op-service/txmgr/queue_test.go +++ b/op-service/txmgr/queue_test.go @@ -63,6 +63,10 @@ func TestQueue_Send(t *testing.T) { calls []queueCall // calls to the queue txs []testTx // txs to generate from the factory (and potentially error in send) nonces []uint64 // expected sent tx nonces after all calls are made + // With Holocene, it is important that transactions are included on chain in the same order as they are sent. 
+ // The txmgr.Queue.Send() method should ensure nonces are determined _synchronously_ even if transactions + // are otherwise launched asynchronously. + confirmedIds []uint // expected tx Ids after all calls are made }{ { name: "success", @@ -75,7 +79,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1}, + nonces: []uint64{0, 1}, + confirmedIds: []uint{0, 1}, }, { name: "no limit", @@ -88,7 +93,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1}, + nonces: []uint64{0, 1}, + confirmedIds: []uint{0, 1}, }, { name: "single threaded", @@ -101,7 +107,8 @@ func TestQueue_Send(t *testing.T) { txs: []testTx{ {}, }, - nonces: []uint64{0}, + nonces: []uint64{0}, + confirmedIds: []uint{0}, }, { name: "single threaded blocking", @@ -117,7 +124,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1, 2}, + nonces: []uint64{0, 1, 2}, + confirmedIds: []uint{0, 2, 3}, }, { name: "dual threaded blocking", @@ -137,7 +145,8 @@ func TestQueue_Send(t *testing.T) { {}, {}, }, - nonces: []uint64{0, 1, 2, 3, 4}, + nonces: []uint64{0, 1, 2, 3, 4}, + confirmedIds: []uint{0, 1, 3, 4, 5}, }, { name: "subsequent txs fail after tx failure", @@ -152,7 +161,8 @@ func TestQueue_Send(t *testing.T) { {sendErr: true}, {}, }, - nonces: []uint64{0, 1}, + nonces: []uint64{0, 1}, + confirmedIds: []uint{0}, }, } for _, test := range testCases { @@ -176,9 +186,11 @@ func TestQueue_Send(t *testing.T) { // track the nonces, and return any expected errors from tx sending var ( - nonces []uint64 - nonceMu sync.Mutex + nonces []uint64 + nonceForTxId map[uint]uint64 // maps from txid to nonce + nonceMu sync.Mutex ) + nonceForTxId = make(map[uint]uint64) sendTx := func(ctx context.Context, tx *types.Transaction) error { index := int(tx.Data()[0]) nonceMu.Lock() @@ -191,8 +203,12 @@ func TestQueue_Send(t *testing.T) { if testTx != nil && testTx.sendErr { return core.ErrNonceTooLow } + txHash := tx.Hash() + nonceMu.Lock() backend.mine(&txHash, 
tx.GasFeeCap(), nil) + nonceForTxId[uint(index)] = tx.Nonce() + nonceMu.Unlock() return nil } backend.setTxSender(sendTx) @@ -209,15 +225,28 @@ func TestQueue_Send(t *testing.T) { TxData: []byte{byte(i)}, To: &common.Address{}, } + if i == 0 { + // Make the first tx much larger to expose + // any race conditions in the queue + candidate.TxData = make([]byte, 100_000) + } receiptChs[i] = make(chan TxReceipt[int], 1) queued := c.call(i, candidate, receiptChs[i], queue) require.Equal(t, c.queued, queued, msg) } // wait for the queue to drain (all txs complete or failed) _ = queue.Wait() - // check that the nonces match + + // NOTE the backend in this test does not order transactions based on the nonce + // So what we want to check is that the txs match expectations when they are ordered + // in the same way as the nonces. slices.Sort(nonces) require.Equal(t, test.nonces, nonces, "expected nonces do not match") + for i, id := range test.confirmedIds { + require.Equal(t, nonces[i], nonceForTxId[id], + "nonce for tx id %d was %d instead of %d", id, nonceForTxId[id], nonces[i]) + } + // check receipts for i, c := range test.calls { if !c.queued { From 8f31e601eafaffa50a8718d23b54846b1d740e77 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Tue, 3 Dec 2024 00:32:40 +0800 Subject: [PATCH 010/111] SendDepositTx: make `applyL2Opts` nil-able (#13158) * SendDepositTx: make applyL2Opts nil-able * move maxBlobs closer to used place * fix typo --- op-e2e/actions/batcher/l2_batcher_test.go | 2 +- op-e2e/system/da/brotli_batcher_test.go | 2 +- op-e2e/system/da/eip4844_test.go | 6 +++--- op-e2e/system/helpers/tx_helper.go | 4 +++- op-e2e/system/verifier/basic_test.go | 2 +- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/op-e2e/actions/batcher/l2_batcher_test.go b/op-e2e/actions/batcher/l2_batcher_test.go index 8906dcbed4e..3fec73db4f7 100644 --- a/op-e2e/actions/batcher/l2_batcher_test.go +++ b/op-e2e/actions/batcher/l2_batcher_test.go @@ -408,7 
+408,7 @@ func ExtendedTimeWithoutL1Batches(gt *testing.T, deltaTimeOffset *hexutil.Uint64 // - Fill 40 L2 blocks to near max-capacity, with txs of 120 KB each // - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary // (just before crossing the max RLP channel size) -// - Limit the data-tx size to 40 KB, to force data to be split across multiple datat-txs +// - Limit the data-tx size to 40 KB, to force data to be split across multiple data-txs // - Defer all data-tx inclusion till the end // - Fill L1 blocks with data-txs until we have processed them all // - Run the verifier, and check if it derives the same L2 chain as was created by the sequencer. diff --git a/op-e2e/system/da/brotli_batcher_test.go b/op-e2e/system/da/brotli_batcher_test.go index fe9b4a9fab9..fd44c6365ea 100644 --- a/op-e2e/system/da/brotli_batcher_test.go +++ b/op-e2e/system/da/brotli_batcher_test.go @@ -43,7 +43,7 @@ func setupAliceAccount(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System require.NoError(t, err) mintAmount := big.NewInt(1_000_000_000_000) opts.Value = mintAmount - helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *helpers.DepositTxOpts) {}) + helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, nil) // Confirm balance ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) diff --git a/op-e2e/system/da/eip4844_test.go b/op-e2e/system/da/eip4844_test.go index f3cf8fc7f03..e1b6468378e 100644 --- a/op-e2e/system/da/eip4844_test.go +++ b/op-e2e/system/da/eip4844_test.go @@ -57,7 +57,6 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva cfg.BatcherBatchType = derive.SpanBatchType cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(7000)) - const maxBlobs = eth.MaxBlobsPerBlobTx var maxL1TxSize int if multiBlob { cfg.BatcherTargetNumFrames = eth.MaxBlobsPerBlobTx @@ -120,7 +119,7 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, 
daType batcherFlags.DataAva require.NoError(t, err) mintAmount := big.NewInt(1_000_000_000_000) opts.Value = mintAmount - helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *helpers.DepositTxOpts) {}) + helpers.SendDepositTx(t, cfg, l1Client, l2Verif, opts, nil) // Confirm balance ctx2, cancel2 := context.WithTimeout(context.Background(), 20*time.Second) @@ -214,7 +213,8 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva if !multiBlob { require.NotZero(t, numBlobs, "single-blob: expected to find L1 blob tx") } else { - require.Equal(t, maxBlobs, numBlobs, fmt.Sprintf("multi-blob: expected to find L1 blob tx with %d blobs", eth.MaxBlobsPerBlobTx)) + const maxBlobs = eth.MaxBlobsPerBlobTx + require.Equal(t, maxBlobs, numBlobs, fmt.Sprintf("multi-blob: expected to find L1 blob tx with %d blobs", maxBlobs)) // blob tx should have filled up all but last blob bcl := sys.L1BeaconHTTPClient() hashes := toIndexedBlobHashes(blobTx.BlobHashes()...) diff --git a/op-e2e/system/helpers/tx_helper.go b/op-e2e/system/helpers/tx_helper.go index f5cb11aa8a1..10c16c0e746 100644 --- a/op-e2e/system/helpers/tx_helper.go +++ b/op-e2e/system/helpers/tx_helper.go @@ -28,7 +28,9 @@ import ( // Returns the receipt of the L2 transaction func SendDepositTx(t *testing.T, cfg e2esys.SystemConfig, l1Client *ethclient.Client, l2Client *ethclient.Client, l1Opts *bind.TransactOpts, applyL2Opts DepositTxOptsFn) *types.Receipt { l2Opts := defaultDepositTxOpts(l1Opts) - applyL2Opts(l2Opts) + if applyL2Opts != nil { + applyL2Opts(l2Opts) + } // Find deposit contract depositContract, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) diff --git a/op-e2e/system/verifier/basic_test.go b/op-e2e/system/verifier/basic_test.go index effe1f41214..d0791d5ec48 100644 --- a/op-e2e/system/verifier/basic_test.go +++ b/op-e2e/system/verifier/basic_test.go @@ -71,7 +71,7 @@ func runE2ESystemTest(t *testing.T, sys *e2esys.System) { 
require.Nil(t, err) mintAmount := big.NewInt(1_000_000_000_000) opts.Value = mintAmount - helpers.SendDepositTx(t, sys.Cfg, l1Client, l2Verif, opts, func(l2Opts *helpers.DepositTxOpts) {}) + helpers.SendDepositTx(t, sys.Cfg, l1Client, l2Verif, opts, nil) // Confirm balance ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) From 1a1ab6ece629521f40c09ab85f6cfb4bdc2f56fa Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 2 Dec 2024 09:48:48 -0700 Subject: [PATCH 011/111] op-e2e: Expose context in supersystem op-e2e (#13135) When any of the setup transactions in `TestInterop_EmitLogs` fail to be mined, the test hangs until it times out. See [here](https://app.circleci.com/pipelines/github/ethereum-optimism/optimism/73118/workflows/b48debaa-55bc-4138-a419-a87c236cbc58/jobs/2994655/artifacts) for an example. This PR updates the SuperSystem API to take a context from the test itself to allow for timeouts. --- op-e2e/interop/interop_test.go | 12 +++++++++--- op-e2e/interop/supersystem.go | 13 +++++++------ 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/op-e2e/interop/interop_test.go b/op-e2e/interop/interop_test.go index 302413dd98e..81f09c125aa 100644 --- a/op-e2e/interop/interop_test.go +++ b/op-e2e/interop/interop_test.go @@ -121,7 +121,9 @@ func TestInterop_EmitLogs(t *testing.T) { var emitParallel sync.WaitGroup emitOn := func(chainID string) { for i := 0; i < numEmits; i++ { - s2.EmitData(chainID, "Alice", payload1) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + s2.EmitData(ctx, chainID, "Alice", payload1) + cancel() } emitParallel.Done() } @@ -218,7 +220,9 @@ func TestInteropBlockBuilding(t *testing.T) { // Add chain A as dependency to chain B, // such that we can execute a message on B that was initiated on A. 
- depRec := s2.AddDependency(chainB, s2.ChainID(chainA)) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + depRec := s2.AddDependency(ctx, chainB, s2.ChainID(chainA)) + cancel() t.Logf("Dependency set in L1 block %d", depRec.BlockNumber) rollupClA, err := dial.DialRollupClientWithTimeout(context.Background(), time.Second*15, logger, s2.OpNode(chainA).UserRPC().RPC()) @@ -233,7 +237,9 @@ func TestInteropBlockBuilding(t *testing.T) { t.Log("Dependency information has been processed in L2 block") // emit log on chain A - emitRec := s2.EmitData(chainA, "Alice", "hello world") + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + emitRec := s2.EmitData(ctx, chainA, "Alice", "hello world") + cancel() t.Logf("Emitted a log event in block %d", emitRec.BlockNumber.Uint64()) // Wait for initiating side to become cross-unsafe diff --git a/op-e2e/interop/supersystem.go b/op-e2e/interop/supersystem.go index de7e25bb9c7..8fc663f377a 100644 --- a/op-e2e/interop/supersystem.go +++ b/op-e2e/interop/supersystem.go @@ -103,9 +103,9 @@ type SuperSystem interface { // Deploy the Emitter Contract, which emits Event Logs DeployEmitterContract(network string, username string) common.Address // Use the Emitter Contract to emit an Event Log - EmitData(network string, username string, data string) *types.Receipt + EmitData(ctx context.Context, network string, username string, data string) *types.Receipt // AddDependency adds a dependency (by chain ID) to the given chain - AddDependency(network string, dep *big.Int) *types.Receipt + AddDependency(ctx context.Context, network string, dep *big.Int) *types.Receipt // ExecuteMessage calls the CrossL2Inbox executeMessage function ExecuteMessage( ctx context.Context, @@ -767,7 +767,7 @@ func (s *interopE2ESystem) ExecuteMessage( return bind.WaitMined(ctx, s.L2GethClient(id), tx) } -func (s *interopE2ESystem) AddDependency(id string, dep *big.Int) *types.Receipt { +func (s *interopE2ESystem) 
AddDependency(ctx context.Context, id string, dep *big.Int) *types.Receipt { // There is a note in OPContractsManagerInterop that the proxy-admin is used for now, // even though it should be a separate dependency-set-manager address. secret, err := s.hdWallet.Secret(devkeys.ChainOperatorKey{ @@ -779,7 +779,7 @@ func (s *interopE2ESystem) AddDependency(id string, dep *big.Int) *types.Receipt auth, err := bind.NewKeyedTransactorWithChainID(secret, s.worldOutput.L1.Genesis.Config.ChainID) require.NoError(s.t, err) - balance, err := s.l1GethClient.BalanceAt(context.Background(), crypto.PubkeyToAddress(secret.PublicKey), nil) + balance, err := s.l1GethClient.BalanceAt(ctx, crypto.PubkeyToAddress(secret.PublicKey), nil) require.NoError(s.t, err) require.False(s.t, balance.Sign() == 0, "system config owner needs a balance") @@ -790,7 +790,7 @@ func (s *interopE2ESystem) AddDependency(id string, dep *big.Int) *types.Receipt tx, err := contract.SystemconfigTransactor.AddDependency(auth, dep) require.NoError(s.t, err) - receipt, err := wait.ForReceiptOK(context.Background(), s.L1GethClient(), tx.Hash()) + receipt, err := wait.ForReceiptOK(ctx, s.L1GethClient(), tx.Hash()) require.NoError(s.t, err) return receipt } @@ -813,6 +813,7 @@ func (s *interopE2ESystem) DeployEmitterContract( } func (s *interopE2ESystem) EmitData( + ctx context.Context, id string, sender string, data string, @@ -828,7 +829,7 @@ func (s *interopE2ESystem) EmitData( contract := s.Contract(id, "emitter").(*emit.Emit) tx, err := contract.EmitTransactor.EmitData(auth, []byte(data)) require.NoError(s.t, err) - receipt, err := bind.WaitMined(context.Background(), s.L2GethClient(id), tx) + receipt, err := bind.WaitMined(ctx, s.L2GethClient(id), tx) require.NoError(s.t, err) return receipt } From a486daec4b1b4449e12d8863df7e70502707c2b5 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Mon, 2 Dec 2024 17:59:05 +0100 Subject: [PATCH 012/111] fix mips contracts stack too deep errors (#13137) --- 
.../snapshots/semver-lock.json | 12 ++--- .../contracts-bedrock/src/cannon/MIPS.sol | 7 +-- .../contracts-bedrock/src/cannon/MIPS2.sol | 7 +-- .../contracts-bedrock/src/cannon/MIPS64.sol | 7 +-- .../src/cannon/libraries/MIPS64Syscalls.sol | 53 ++++++++++--------- .../src/cannon/libraries/MIPSSyscalls.sol | 53 ++++++++++--------- 6 files changed, 76 insertions(+), 63 deletions(-) diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 32de3eeb7f3..7c7cadb614b 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -136,16 +136,16 @@ "sourceCodeHash": "0x0fa0633a769e73f5937514c0003ba7947a1c275bbe5b85d78879c42f0ed8895b" }, "src/cannon/MIPS.sol": { - "initCodeHash": "0xb4aec227019dacd6194d6aeb9ca68c23c60b95618d18a4ebc09243514aeb1f05", - "sourceCodeHash": "0x4d43b3f2918486aa76d2d59ac42e4f6aa2f58538c7e95a5cb99b63c9588b5f1c" + "initCodeHash": "0x2ad89ddfd9b6091af7fa4f337f6f0134b946838b53a056995eea5982e4b903c3", + "sourceCodeHash": "0xcd5f9762e641e4a1f6b07420ea3b2bfcaac5656da10c66a0f367ee4d4af37e9c" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0xe3879b5772820d837bc1c77c32a1200eb26cf901d9302dff9f0e9759331e380e", - "sourceCodeHash": "0x1c45a8f4c8c9ded7043d63965cb114d17f801c6cd4d8233cb16838c5f9a02675" + "initCodeHash": "0x013e6005f56eae789a48ec9766d17c0be950d765ada3eb936c9a90337567c940", + "sourceCodeHash": "0xdd7684d0b8efd0bbee4abae76195c649b325ecd3e09f41b687cf3a091639c887" }, "src/cannon/MIPS64.sol": { - "initCodeHash": "0xa4a761f480a26ec1926c5a8b4831440211c0441bd41d503b0aad189e030d35dc", - "sourceCodeHash": "0x7ddcf8584f9bd92abd1eb45bc198f5b0ec54acaf292f60e919d674cc56fb8abc" + "initCodeHash": "0xa5c2105b0d6f90d6f5ab1782f6d7acda505674a3c11355e0b401d66250406c0c", + "sourceCodeHash": "0x7cb189f5081c4aae84a7b23329309d944cb54f075ec18b03240d1725c03adc3b" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": 
"0x2bef439027c37c65dd8e7d9a987ff14e1dba94ee5fe5f316a77ecf46a8db4b3f", diff --git a/packages/contracts-bedrock/src/cannon/MIPS.sol b/packages/contracts-bedrock/src/cannon/MIPS.sol index 62b3084f74a..93ee98b070e 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS.sol @@ -47,8 +47,8 @@ contract MIPS is ISemver { } /// @notice The semantic version of the MIPS contract. - /// @custom:semver 1.2.1-beta.8 - string public constant version = "1.2.1-beta.8"; + /// @custom:semver 1.2.1-beta.9 + string public constant version = "1.2.1-beta.9"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; @@ -183,7 +183,7 @@ contract MIPS is ISemver { }); (v0, v1, state.preimageOffset, state.memRoot,,) = sys.handleSysRead(args); } else if (syscall_no == sys.SYS_WRITE) { - (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ + sys.SysWriteParams memory args = sys.SysWriteParams({ _a0: a0, _a1: a1, _a2: a2, @@ -192,6 +192,7 @@ contract MIPS is ISemver { _proofOffset: MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 1), _memRoot: state.memRoot }); + (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite(args); } else if (syscall_no == sys.SYS_FCNTL) { (v0, v1) = sys.handleSysFcntl(a0, a1); } diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index d28238980f3..8090c5e3d8b 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -63,8 +63,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.23 - string public constant version = "1.0.0-beta.23"; + /// @custom:semver 1.0.0-beta.24 + string public constant version = "1.0.0-beta.24"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -462,7 +462,7 @@ contract MIPS2 is ISemver { // Encapsulate execution to avoid stack-too-deep error (v0, v1) = execSysRead(state, args); } else if (syscall_no == sys.SYS_WRITE) { - (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ + sys.SysWriteParams memory args = sys.SysWriteParams({ _a0: a0, _a1: a1, _a2: a2, @@ -471,6 +471,7 @@ contract MIPS2 is ISemver { _proofOffset: MIPSMemory.memoryProofOffset(MEM_PROOF_OFFSET, 1), _memRoot: state.memRoot }); + (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite(args); } else if (syscall_no == sys.SYS_FCNTL) { (v0, v1) = sys.handleSysFcntl(a0, a1); } else if (syscall_no == sys.SYS_GETTID) { diff --git a/packages/contracts-bedrock/src/cannon/MIPS64.sol b/packages/contracts-bedrock/src/cannon/MIPS64.sol index bb2f4369c2a..91269251c36 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS64.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS64.sol @@ -67,8 +67,8 @@ contract MIPS64 is ISemver { } /// @notice The semantic version of the MIPS64 contract. - /// @custom:semver 1.0.0-beta.5 - string public constant version = "1.0.0-beta.5"; + /// @custom:semver 1.0.0-beta.6 + string public constant version = "1.0.0-beta.6"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -504,7 +504,7 @@ contract MIPS64 is ISemver { // Encapsulate execution to avoid stack-too-deep error (v0, v1) = execSysRead(state, args); } else if (syscall_no == sys.SYS_WRITE) { - (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ + sys.SysWriteParams memory args = sys.SysWriteParams({ _a0: a0, _a1: a1, _a2: a2, @@ -513,6 +513,7 @@ contract MIPS64 is ISemver { _proofOffset: MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1), _memRoot: state.memRoot }); + (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite(args); } else if (syscall_no == sys.SYS_FCNTL) { (v0, v1) = sys.handleSysFcntl(a0, a1); } else if (syscall_no == sys.SYS_GETTID) { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol index d4b706da4b5..68cca83c4bf 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol @@ -30,6 +30,23 @@ library MIPS64Syscalls { bytes32 memRoot; } + /// @custom:field _a0 The file descriptor. + /// @custom:field _a1 The memory address to read from. + /// @custom:field _a2 The number of bytes to read. + /// @custom:field _preimageKey The current preimaageKey. + /// @custom:field _preimageOffset The current preimageOffset. + /// @custom:field _proofOffset The offset of the memory proof in calldata. + /// @custom:field _memRoot The current memory root. + struct SysWriteParams { + uint64 _a0; + uint64 _a1; + uint64 _a2; + bytes32 _preimageKey; + uint64 _preimageOffset; + uint256 _proofOffset; + bytes32 _memRoot; + } + uint64 internal constant U64_MASK = 0xFFffFFffFFffFFff; uint64 internal constant PAGE_ADDR_MASK = 4095; uint64 internal constant PAGE_SIZE = 4096; @@ -309,26 +326,11 @@ library MIPS64Syscalls { } /// @notice Like a Linux write syscall. Splits unaligned writes into aligned writes. 
- /// @param _a0 The file descriptor. - /// @param _a1 The memory address to read from. - /// @param _a2 The number of bytes to read. - /// @param _preimageKey The current preimaageKey. - /// @param _preimageOffset The current preimageOffset. - /// @param _proofOffset The offset of the memory proof in calldata. - /// @param _memRoot The current memory root. /// @return v0_ The number of bytes written, or -1 on error. /// @return v1_ The error code, or 0 if empty. /// @return newPreimageKey_ The new preimageKey. /// @return newPreimageOffset_ The new preimageOffset. - function handleSysWrite( - uint64 _a0, - uint64 _a1, - uint64 _a2, - bytes32 _preimageKey, - uint64 _preimageOffset, - uint256 _proofOffset, - bytes32 _memRoot - ) + function handleSysWrite(SysWriteParams memory _args) internal pure returns (uint64 v0_, uint64 v1_, bytes32 newPreimageKey_, uint64 newPreimageOffset_) @@ -338,20 +340,22 @@ library MIPS64Syscalls { // returns: v0_ = written, v1_ = err code v0_ = uint64(0); v1_ = uint64(0); - newPreimageKey_ = _preimageKey; - newPreimageOffset_ = _preimageOffset; + newPreimageKey_ = _args._preimageKey; + newPreimageOffset_ = _args._preimageOffset; - if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_HINT_WRITE) { - v0_ = _a2; // tell program we have written everything + if (_args._a0 == FD_STDOUT || _args._a0 == FD_STDERR || _args._a0 == FD_HINT_WRITE) { + v0_ = _args._a2; // tell program we have written everything } // pre-image oracle - else if (_a0 == FD_PREIMAGE_WRITE) { + else if (_args._a0 == FD_PREIMAGE_WRITE) { // mask the addr to align it to 4 bytes - uint64 mem = MIPS64Memory.readMem(_memRoot, _a1 & arch.ADDRESS_MASK, _proofOffset); - bytes32 key = _preimageKey; + uint64 mem = MIPS64Memory.readMem(_args._memRoot, _args._a1 & arch.ADDRESS_MASK, _args._proofOffset); + bytes32 key = _args._preimageKey; // Construct pre-image key from memory // We use assembly for more precise ops, and no var count limit + uint64 _a1 = _args._a1; + uint64 _a2 = 
_args._a2; assembly { let alignment := and(_a1, EXT_MASK) // the read might not start at an aligned address let space := sub(WORD_SIZE_BYTES, alignment) // remaining space in memory word @@ -361,11 +365,12 @@ library MIPS64Syscalls { mem := and(shr(mul(sub(space, _a2), 8), mem), mask) // align value to right, mask it key := or(key, mem) // insert into key } + _args._a2 = _a2; // Write pre-image key to oracle newPreimageKey_ = key; newPreimageOffset_ = 0; // reset offset, to read new pre-image data from the start - v0_ = _a2; + v0_ = _args._a2; } else { v0_ = U64_MASK; v1_ = EBADF; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index 8e3516c0c1e..57206ea3701 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -29,6 +29,23 @@ library MIPSSyscalls { bytes32 memRoot; } + /// @custom:field _a0 The file descriptor. + /// @custom:field _a1 The memory address to read from. + /// @custom:field _a2 The number of bytes to read. + /// @custom:field _preimageKey The current preimaageKey. + /// @custom:field _preimageOffset The current preimageOffset. + /// @custom:field _proofOffset The offset of the memory proof in calldata. + /// @custom:field _memRoot The current memory root. + struct SysWriteParams { + uint32 _a0; + uint32 _a1; + uint32 _a2; + bytes32 _preimageKey; + uint32 _preimageOffset; + uint256 _proofOffset; + bytes32 _memRoot; + } + uint32 internal constant SYS_MMAP = 4090; uint32 internal constant SYS_BRK = 4045; uint32 internal constant SYS_CLONE = 4120; @@ -299,26 +316,11 @@ library MIPSSyscalls { } /// @notice Like a Linux write syscall. Splits unaligned writes into aligned writes. - /// @param _a0 The file descriptor. - /// @param _a1 The memory address to read from. - /// @param _a2 The number of bytes to read. - /// @param _preimageKey The current preimaageKey. 
- /// @param _preimageOffset The current preimageOffset. - /// @param _proofOffset The offset of the memory proof in calldata. - /// @param _memRoot The current memory root. /// @return v0_ The number of bytes written, or -1 on error. /// @return v1_ The error code, or 0 if empty. /// @return newPreimageKey_ The new preimageKey. /// @return newPreimageOffset_ The new preimageOffset. - function handleSysWrite( - uint32 _a0, - uint32 _a1, - uint32 _a2, - bytes32 _preimageKey, - uint32 _preimageOffset, - uint256 _proofOffset, - bytes32 _memRoot - ) + function handleSysWrite(SysWriteParams memory _args) internal pure returns (uint32 v0_, uint32 v1_, bytes32 newPreimageKey_, uint32 newPreimageOffset_) @@ -328,20 +330,22 @@ library MIPSSyscalls { // returns: v0_ = written, v1_ = err code v0_ = uint32(0); v1_ = uint32(0); - newPreimageKey_ = _preimageKey; - newPreimageOffset_ = _preimageOffset; + newPreimageKey_ = _args._preimageKey; + newPreimageOffset_ = _args._preimageOffset; - if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_HINT_WRITE) { - v0_ = _a2; // tell program we have written everything + if (_args._a0 == FD_STDOUT || _args._a0 == FD_STDERR || _args._a0 == FD_HINT_WRITE) { + v0_ = _args._a2; // tell program we have written everything } // pre-image oracle - else if (_a0 == FD_PREIMAGE_WRITE) { + else if (_args._a0 == FD_PREIMAGE_WRITE) { // mask the addr to align it to 4 bytes - uint32 mem = MIPSMemory.readMem(_memRoot, _a1 & 0xFFffFFfc, _proofOffset); - bytes32 key = _preimageKey; + uint32 mem = MIPSMemory.readMem(_args._memRoot, _args._a1 & 0xFFffFFfc, _args._proofOffset); + bytes32 key = _args._preimageKey; // Construct pre-image key from memory // We use assembly for more precise ops, and no var count limit + uint32 _a1 = _args._a1; + uint32 _a2 = _args._a2; assembly { let alignment := and(_a1, 3) // the read might not start at an aligned address let space := sub(4, alignment) // remaining space in memory word @@ -351,11 +355,12 @@ library MIPSSyscalls 
{ mem := and(shr(mul(sub(space, _a2), 8), mem), mask) // align value to right, mask it key := or(key, mem) // insert into key } + _args._a2 = _a2; // Write pre-image key to oracle newPreimageKey_ = key; newPreimageOffset_ = 0; // reset offset, to read new pre-image data from the start - v0_ = _a2; + v0_ = _args._a2; } else { v0_ = 0xFFffFFff; v1_ = EBADF; From 4777cdf506cfd9e1c70dd6f0e00e102034e3c261 Mon Sep 17 00:00:00 2001 From: clabby Date: Mon, 2 Dec 2024 12:20:11 -0500 Subject: [PATCH 013/111] chore: Bump `kona` in `proofs-tools` image (#13170) --- docker-bake.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index 53fd8e865be..150b7db0b08 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -207,7 +207,7 @@ target "proofs-tools" { context = "." args = { CHALLENGER_VERSION="b46bffed42db3442d7484f089278d59f51503049" - KONA_VERSION="kona-client-v0.1.0-beta.1" + KONA_VERSION="kona-client-v0.1.0-beta.3" } target="proofs-tools" platforms = split(",", PLATFORMS) From 9345537aca29c2783a1791573410588265555751 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Mon, 2 Dec 2024 19:08:00 +0100 Subject: [PATCH 014/111] Sc/disable ci forge coverage (#13174) * disable forge coverage in ci * disable forge coverage in ci * disable forge coverage in ci --- .circleci/config.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1467e9e24bf..a50ad6c2269 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1320,7 +1320,10 @@ workflows: test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' test_timeout: 1h test_profile: ciheavy - - contracts-bedrock-coverage + - contracts-bedrock-coverage: + filters: + branches: + ignore: /.*/ - contracts-bedrock-checks: requires: - contracts-bedrock-build From 23a65e8199bb00313774e2d14fed4b429f627bf8 Mon Sep 17 00:00:00 2001 From: 
protolambda Date: Mon, 2 Dec 2024 20:03:30 +0100 Subject: [PATCH 015/111] op-chain-ops: add readme (#13166) --- op-chain-ops/README.md | 80 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 op-chain-ops/README.md diff --git a/op-chain-ops/README.md b/op-chain-ops/README.md new file mode 100644 index 00000000000..98d0d974722 --- /dev/null +++ b/op-chain-ops/README.md @@ -0,0 +1,80 @@ +# `op-chain-ops` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-chain-ops) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-chain-ops) + +This is an OP Stack utils package for chain operations, +ranging from EVM tooling to chain generation. + +Packages: +- `clients`: utils for chain checker tools. +- `cmd`: upgrade validation tools, debug tools, attributes formatting tools. +- `crossdomain`: utils to interact with L1 <> L2 cross-domain messages. +- `devkeys`: generate OP-Stack development keys from a common source. +- `foundry`: utils to read foundry artifacts. +- `genesis`: OP Stack genesis-configs generation, pre OPCM. +- `interopgen`: interop test-chain genesis config generation. +- `script`: foundry-like solidity scripting environment in Go. +- `solc`: utils to read solidity compiler artifacts data. +- `srcmap`: utils for solidity source-maps loaded from foundry-artifacts. + +## Usage + +Upgrade checks and chain utilities can be found in `./cmd`: +these are not officially published in OP-Stack monorepo releases, +but can be built from source. 
+ +Utils: +```text +cmd/ +├── check-canyon - Checks for Canyon network upgrade +├── check-delta - Checks for Delta network upgrade +├── check-deploy-config - Checks of the (legacy) Deploy Config +├── check-derivation - Check that transactions can be confirmed and safety can be consolidated +├── check-ecotone - Checks for Ecotone network upgrade +├── check-fjord - Checks for Fjord network upgrade +├── deposit-hash - Determine the L2 deposit tx hash, based on log event(s) emitted by a L1 tx. +├── ecotone-scalar - Translate between serialized and human-readable L1 fee scalars (introduced in Ecotone upgrade). +├── op-simulate - Simulate a remote transaction in a local Geth EVM for block-processing debugging. +├── protocol-version - Translate between serialized and human-readable protocol versions. +├── receipt-reference-builder - Receipt data collector for pre-Canyon deposit-nonce metadata. +└── unclaimed-credits - Utilitiy to inspect credits of resolved fault-proof games. +``` + +## Product + +### Optimization target + +Provide tools for chain-setup and inspection tools for deployment, upgrades, and testing. +This includes `op-deployer`, OP-Contracts-Manager (OPCM), upgrade-check scripts, and `op-e2e` testing. + +### Vision + +- Upgrade checking scripts should become more extensible, and maybe be bundled in a single check-script CLI tool. +- Serve chain inspection/processing building-blocks for test setups and tooling like op-deployer. +- `interopgen` is meant to be temporary, and consolidate with `op-deployer`. + This change depends largely on the future of `op-e2e`, + where system tests may be replaced in favor of tests set up by `op-e2e`. +- `script` is a Go version of `forge` script, with hooks and customization options, + for better integration into tooling such as `op-deployer`. + This package should evolve to serve testing and `op-deployer` as best as possible, + it is not a full `forge` replacement. 
+- `genesis` will shrink over time, as more of the genesis responsibilities are automated away into + the protocol through system-transactions, and tooling such as `op-deployer` and OPCM. + +## Design principles + +- Provide high-quality bindings to accelerate testing and tooling development. +- Minimal introspection into fragile solidity details. + +There is a trade-off here in how minimal the tooling is: +generally we aim to provide dedicated functionality in Go for better integration, +if the target tool is significant Go service of its own. +If not, then `op-chain-ops` should not be extended, and the design of the target tool should be adjusted instead. + +## Testing + +- Upgrade checks are tested against live devnet/testnet upgrades, before testing against mainnet. + Testing here is aimed to expand to end-to-end testing, for better integrated test feedback of these tools. +- Utils have unit-test coverage of their own, and are used widely in end-to-end testing itself. From ebd2cfd6c1fe47dc988a4c6a8f2ce43d5fafa3f4 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 2 Dec 2024 20:03:47 +0100 Subject: [PATCH 016/111] op-e2e: update readme (#13165) --- op-e2e/README.md | 122 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 97 insertions(+), 25 deletions(-) diff --git a/op-e2e/README.md b/op-e2e/README.md index a52eaab6325..ba385547461 100644 --- a/op-e2e/README.md +++ b/op-e2e/README.md @@ -1,33 +1,105 @@ -# op-e2e +# `op-e2e` -The end to end tests in this repo depend on genesis state that is -created with the `bedrock-devnet` package. 
To create this state, -run the following commands from the root of the repository: +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-e2e) -```bash -make cannon-prestate -make devnet-allocs -``` +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-e2e) + +Design docs: +- [test infra draft design-doc]: active discussion of end-to-end testing approach -This will leave artifacts in the `.devnet` directory that will be -read into `op-e2e` at runtime. The default deploy configuration -used for starting all `op-e2e` based tests can be found in -`packages/contracts-bedrock/deploy-config/devnetL1.json`. There -are some values that are safe to change in memory in `op-e2e` at -runtime, but others cannot be changed or else it will result in -broken tests. Any changes to `devnetL1.json` should result in -rebuilding the `.devnet` artifacts before the new values will -be present in the `op-e2e` tests. +[test infra draft design-doc](https://github.com/ethereum-optimism/design-docs/pull/165) -## Running tests -Consult the [Makefile](./Makefile) in this directory. Run, e.g.: +`op-e2e` is a collection of Go integration tests. +It is named `e2e` after end-to-end testing, +for those tests where we integration-test the full system, rather than only specific services. + + +## Quickstart ```bash -make test-http +make test-actions +make test-ws ``` -### Troubleshooting -If you encounter errors: -* ensure you have the latest version of foundry installed: `just update-foundry` -* try deleting the `packages/contracts-bedrock/forge-artifacts` directory -* try `forge clean && rm -rf lib && forge install` within the `packages/contracts-bedrock` directory +## Overview + +`op-e2e` can be categorized as following: +- `op-e2e/actions/`: imperative test style, more DSL-like, with a focus on the state-transition parts of services. 
+ Parallel processing is actively avoided, and a mock clock is used. + - `op-e2e/actions/*`: sub-packages categorize specific domains to test. + - `op-e2e/actions/interop`: notable sub-package, where multiple L2s are attached together, + for integration-testing across multiple L2 chains. + - `op-e2e/actions/proofs`: notable sub-package, where proof-related state-transition testing is implemented, + with experimental support to cover alternative proof implementations. +- `op-e2e/system`: integration tests with a L1 miner and a L2 with sequencer, verifier, batcher and proposer. + These tests do run each service almost fully, including parallel background jobs and real system clock. + These tests focus less on the onchain state-transition aspects, and more on the offchain integration aspects. + - `op-e2e/faultproofs`: system tests with fault-proofs stack attached + - `op-e2e/interop`: system tests with a distinct Interop "SuperSystem", to run multiple L2 chains. +- `op-e2e/opgeth`: integration tests between test-mocks and op-geth execution-engine. + - also includes upgrade-tests to ensure testing of op-stack Go components around a network upgrade. + +### `action`-tests + +Action tests are set up in a compositional way: +each service is instantiated as actor, and tests can choose to run just the relevant set of actors. +E.g. a test about data-availability can instantiate the batcher, but omit the proposer. + +One action, across all services, runs at a time. +No live background processing or system clock affects the actors: +this enables individual actions to be deterministic and reproducible. + +With this synchronous processing, action-test can reliably navigate towards +these otherwise hard-to-reach edge-cases, and ensure the state-transition of service, +and the interactions between this state, are covered. + +Action-tests do not cover background processes or peripherals. +E.g. P2P, CLI usage, and dynamic block building are not covered. 
+ +### `system`-tests + +System tests are more complete than `action` tests, but also require a live system. +This trade-off enables coverage of most of each Go service, +at the cost of making navigation to cover the known edge-cases less reliable and reproducible. +This test-type is thus used primarily for testing of the offchain service aspects. + +By running a more full system, test-runners also run into resource-limits more quickly. +This may result in lag or even stalled services. +Improvements, as described in the [test infra draft design-doc], +are in active development, to make test execution more reliable. + +### `op-e2e/opgeth` + +Integration-testing with op-geth, to cover engine behavior, without setting up a full test environment. +These tests are limited in scope, and may be changed at a later stage, to support alternative EL implementations. + +## Product + +### Optimization target + +Historically `op-e2e` has been optimized for test-coverage of the Go OP-Stack. +This is changing with the advance of alternative OP-Stack client implementations. + +New test framework improvements should optimize for multi-client testing. + +### Vision + +Generally, design-discussion and feedback from active test users converges on: +- a need to share test-resources, to host more tests while reducing overhead. +- a need for a DSL, to better express common test constructs. +- less involved test pre-requisites: the environment should be light and simple, welcoming new contributors. + E.g. no undocumented one-off makefile prerequisites. + +## Design principles + +- Interfaces first. We should not hardcode test-utilities against any specific client implementation, + this makes a test less parameterizable and less cross-client portable. +- Abstract setup to make it the default to reduce resource usage. + E.g. RPC transports can run in-process, and avoid websocket or HTTP costs, + and ideally the test-writer does not have to think about the difference. 
+- Avoid one-off test chain-configurations. Tests with more realistic parameters are more comparable to production, + and easier consolidated onto shared testing resources. +- Write helpers and DSL utilities, avoid re-implementing common testing steps. + The better the test environment, the more inviting it is for someone new to help improve test coverage. +- Use the right test-type. Do not spawn a full system for something of very limited scope, + e.g. when it fits better in a unit-test. From 4b755207f07c5ae6e0f917df8618dcbf5bde5c35 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 2 Dec 2024 20:04:08 +0100 Subject: [PATCH 017/111] op-node: update readme (#13164) --- op-node/README.md | 297 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 222 insertions(+), 75 deletions(-) diff --git a/op-node/README.md b/op-node/README.md index cfc3793052f..b9b28fa05f1 100644 --- a/op-node/README.md +++ b/op-node/README.md @@ -1,105 +1,252 @@ - - -**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* +# `op-node` -- [op-node](#op-node) - - [Compiling](#compiling) - - [Testing](#testing) - - [Running](#running) - - [L2 Genesis Generation](#l2-genesis-generation) - - [L1 Devnet Genesis Generation](#l1-devnet-genesis-generation) +Issues: +[monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-node) - +Pull requests: +[monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-node) -# op-node +User docs: +- [How to run a node](https://docs.optimism.io/builders/node-operators/rollup-node) -This is the reference implementation of the [rollup-node spec](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md). -It can be thought of like the consensus layer client of an OP Stack chain where it must run with an OP Stack execution layer client -like [op-geth](https://github.com/ethereum-optimism/op-geth). 
+Specs: +- [rollup-node spec] -## Compiling +The op-node implements the [rollup-node spec]. +It functions as a Consensus Layer client of an OP Stack chain. +This builds, relays and verifies the canonical chain of blocks. +The blocks are processed by an execution layer client, like [op-geth]. -Compile a binary: +[rollup-node spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.m +[op-geth]: https://github.com/ethereum-optimism/op-geth -```shell +## Quickstart + +```bash make op-node + +# Network selection: +# - Join any of the pre-configured networks with the `--network` flag. +# - Alternatively, join a custom network with the `--rollup.config` flag. +# +# Essential Connections: +# - L1 ethereum RPC, to fetch blocks, receipts, finality +# - L1 beacon API, to fetch blobs +# - L2 engine API, to apply new blocks to +# - P2P TCP port, to expose publicly, to retrieve and relay the latest L2 blocks +# - P2P UDP port, to expose publicly, to discover other nodes to peer with +# - RPC port, to serve RPC of the op-node +# +# Other: +# - Sync mode: how to interact with the execution-engine, +# such that it enters the preferred form of syncing: +# - consensus-layer (block by block sync) +# - execution-layer (e.g. snap-sync) +# +# Tip: every CLI flag has an env-var equivalent (run `op-node --help` for more information) +./bin/op-node \ + --network=op-sepolia \ + --l1=ws://localhost:8546 \ + --l1.beacon=http://localhost:4000 \ + --l2=ws://localhost:9001 \ + --p2p.listen.tcp=9222 + --p2p.listen.udp=9222 + --rpc.port=7000 \ + --syncmode=execution-layer + +# If running inside docker, ake sure to mount the below persistent data as (host) volume, +# it may be lost on restart otherwise: +# - P2P private key: auto-generated when missing, used to maintain a stable peer identity. +# - Peerstore DB: remember peer records to connect with, used to not wait for peer discovery. 
+# - Discovery DB: maintain DHT data, to avoid repeating some discovery work after restarting. + --p2p.priv.path=opnode_p2p_priv.txt \ + --p2p.peerstore.path=opnode_peerstore_db \ + --p2p.discovery.path=opnode_discovery_db \ + --p2p.priv.path=opnode_p2p_priv.txt ``` -## Testing +## Usage -Run op-node unit tests: +### Build from source -```shell -make test +```bash +# from op-node dir: +make op-node +./bin/op-node --help ``` -## Running +### Run from source -Configuration options can be reviewed with: - -```shell -./bin/op-node --help +```bash +# from op-node dir: +go run ./cmd --help ``` -[eth-json-rpc-spec]: https://ethereum.github.io/execution-apis/api-documentation +### Build docker image -To start syncing the rollup: +See `op-node` docker-bake target. -Connect to one L1 Execution Client that supports the [Ethereum JSON-RPC spec][eth-json-rpc-spec], -an L1 Consensus Client that supports the [Beacon Node API](https://ethereum.github.io/beacon-APIs) and -an OP Stack based Execution Client that supports the [Ethereum JSON-RPC spec][eth-json-rpc-spec]: +## Implementation overview -- L1: use any L1 client, RPC, websocket, or IPC (connection config may differ) -- L2: use any OP Stack Execution Client like [`op-geth`](https://github.com/ethereum-optimism/op-geth) +### Interactions -Note that websockets or IPC is preferred for event notifications to improve sync, http RPC works with adaptive polling. + + -```shell -./bin/op-node \ - --l1=ws://localhost:8546 \ - --l1.beacon=http://localhost:4000 \ - --l2=ws://localhost:9001 \ - --rollup.config=./path-to-network-config/rollup.json \ - --rpc.addr=127.0.0.1 \ - --rpc.port=7000 -``` +## Product -## L2 Genesis Generation +The op-node **builds**, **relays** and **verifies** the canonical chain of blocks. -The `op-node` can generate geth compatible `genesis.json` files. These files -can be used with `geth init` to initialize the `StateDB` with accounts, storage, -code and balances. 
The L2 state must be initialized with predeploy contracts -that exist in the state and act as system level contracts. The `op-node` can -generate a genesis file with these predeploys configured correctly given -an L1 RPC URL, a deploy config, L2 genesis allocs and a L1 deployments artifact. +The op-node does not store critical data: +the op-node can recover from any existing L2 chain pre-state +that is sufficiently synced such that available input data can complete the sync. -The deploy config contains all of the config required to deploy the -system. Examples can be found in `packages/contracts-bedrock/deploy-config`. Each -deploy config file is a JSON file. The L2 allocs can be generated using a forge script -in the `contracts-bedrock` package and the L1 deployments are a JSON file that is the -output of doing a L1 contracts deployment. +The op-node **builds** blocks: +either from scratch as a sequencer, or from block-inputs (made available through L1) as verifier. -Example usage: +The block **relay** is a happy-path: the P2P sync is optional, and does not affect the ability to verify. +However, the block relay is still important for UX, as it lowers the latency to the latest state. -```bash -$ ./bin/op-node genesis l2 \ - --l1-rpc $ETH_RPC_URL \ - --deploy-config \ - --l2-allocs \ - --l1-deployments \ - --outfile.l2 \ - --outfile.rollup -``` +The blocks are **verified**: only valid L2 blocks that can be reproduced from L1 data are accepted. -## L1 Devnet Genesis Generation +### Optimization target -It is also possible to generate a devnet L1 `genesis.json` file. The L1 allocs can -be generated with the foundry L1 contracts deployment script if the extra parameter -`--sig 'runWithStateDump()` is added to the deployment command. + + +**Safely and reliably sync the canonical chain** + +The op-node implements the three core product features as following: + +- Block **building**: extend the chain at a throughput rate and latency that is safe to relay and verify. 
+- Block **relaying**: while keeping throughput high and latency low, prevent single points of failure. +- Block **verification**: efficiently sync, but always fully verify, follow the canonical chain. + +Trade-offs are made here: verification safety is at odds ideal throughput, latency, efficiency. +Or in other words: safety vs. liveness. Chain parameters determine this. +The implementation offers this trade-off, siding with safety by default, +and design-choices should aim to improve the trade-off. + +### Vision + +The op-node is changing in two ways: +- [Reliability](#reliability): improve the reliability with improved processing, testing and syncing. +- [Interoperability](#interoperability): cross-chain messaging support. + +#### Reliability + +- Parallel derivation processes: [Issue 10864](https://github.com/ethereum-optimism/optimism/issues/10864) +- Event tests: [Issue 13163](https://github.com/ethereum-optimism/optimism/issues/13163) +- Improving P2P sync: [Issue 11779](https://github.com/ethereum-optimism/optimism/issues/11779) + +#### Interoperability + +The OP Stack is make chains natively interoperable: +messages between chains form safety dependencies, and verified asynchronously. +Asynchronous verification entails that the op-node reorgs away a block +if and when the block is determined to be invalid. + +The [op-supervisor] specializes in this dependency verification work. + +The op-node encapsulates all the single-chain concerns: +it prepares the local safety data-points (DA confirmation and block contents) for the op-supervisor. + +The op-supervisor then verifies the cross-chain safety, and promotes the block safety level accordingly, +which the op-node then follows. + +See [Interop specs] and [Interop design-docs] for more information about interoperability. + +[op-supervisor]: ../op-supervisor/README.md + +### User stories + + + +As *a user* I want *reliability* so that I *don't miss blocks or fall out of sync*. 
+As *a RaaS dev* I want *easy configuration and monitoring* so that I *can run more chains*. +As *a customizoor* I want *clear extensible APIs* so that I *can avoid forking and be a contributor*. +As *a protocol dev* I want *integration with tests* so that I *assert protocol conformance* +As *a proof dev* I want *reusable state-transition code* so that I *don't reimplement the same thing*. + +## Design principles + + +- Encapsulate the state-transition: + - Use interfaces to abstract file-IO / concurrency / etc. away from state-transition logic. + - Ensure code-sharing with action-tests and op-program. +- No critical database: + - Persisting data is ok, but it should be recoverable from external data without too much work. + - The best chain "sync" is no sync. +- Keep the tech-stack compatible with ethereum L1: + - L1 offers well-adopted and battle tested libraries and standards, e.g. LibP2P, DiscV5, JSON-RPC. + - L1 supports a tech-stack in different languages, ensuring client-diversity, important to L2 as well. + - Downstream devs of OP-Stack should be able to pull in *one* instance of a library, that serves both OP-Stack and L1. + +## Failure modes + +This is a brief overview of what might fail, and how the op-node responds. + +### L1 downtime + +When the L1 data-source is temporarily unavailable the op-node `safe`/`finalized` progression halts. +Blocks may continue to sync through the happy-path if P2P connectivity is undisrupted. + +### No batch confirmation + +As per the [rollup-node spec] the sequencing-window ensures that after a bounded period of L1 blocks +the verifier will infer blocks, to ensure liveness of blocks with deposited transactions. +The op-node will continue to process the happy-path in the mean time, +which may have to be reorged out if it does not match the blocks that is inferred after sequencing window expiry. 
+ +### L1 reorg + +L1 reorgs are detected passively during traversal: upon traversal to block `N+1`, +if the next canonical block has a parent-hash that does not match the +current block `N` we know the remote L1 chain view has diverged. + +When this happens, the op-node assumes the local view is wrong, and resets itself to follow that of the remote node, +dropping any non-canonical blocks in the process. + +### No L1 finality + +When L1 does not finalize for an extended period of time, +the op-node is also unable to finalize the L2 chain for the same time. + +Note that the `safe` block in the execution-layer is bootstrapped from the `finalized` block: +some verification work may repeat after a restart. + +Blocks will continue to be derived from L1 batch-submissions, and optimistic processing will also continue to function. + +### P2P failure + +On P2P failure, e.g. issues with peering or failed propagation of block-data, the `unsafe` part of the chain may stall. +The `unsafe` part of the chain will no longer progress optimistically ahead of the `safe` part. + +The `safe` blocks will continue to be derived from L1 however, providing a higher-latency access to the latest chain. + +The op-node may pick back up the latest `unsafe` blocks after recovering its P2P connectivity, +and buffering `unsafe` blocks until the `safe` blocks progress meets the first known buffered `unsafe` block. + +### Restarts and resyncing + +After a restart, or detection of missing chain data, +the op-node dynamically determines what L1 data is required to continue, based on the syncing state of execution-engine. +If the sync-state is far behind, the op-node may need archived blob data to sync from the original L1 inputs. + +A faster alternative may be to bootstrap through the execution-layer sync mode, +where the execution-engine may perform an optimized long-range sync, such as snap-sync. + +## Testing + + + +- Unit tests: encapsulated functionality, fuzz tests, etc. 
in the op-node Go packages. +- `op-e2e` action tests: in-progress Go testing, focused on the onchain aspects, + e.g. state-transition edge-cases. This applies primarily to the derivation pipeline. +- `op-e2e` system tests: in-process Go testing, focused on the offchain aspects of the op-node, + e.g. background work, P2P integration, general service functionality. +- Local devnet tests: full end to end testing, but set up on minimal resources. +- Kurtosis tests: new automated devnet-like testing. Work in progress. +- Long-running devnet: roll-out for experimental features, to ensure sufficient stability for testnet users. +- Long-running testnet: battle-testing in public environment. +- Shadow-forks: design phase, testing experiments against shadow copies of real networks. -```bash -$ ./bin/op-node genesis l1 \ - --deploy-config $CONTRACTS_BEDROCK/deploy-config \ - --l1-deployments \ - --l1-allocs -``` From 0c13f11c676b8a947e7f6762412a7859acf72457 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 2 Dec 2024 20:04:24 +0100 Subject: [PATCH 018/111] op-service: add readme (#13162) --- op-service/README.md | 75 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 op-service/README.md diff --git a/op-service/README.md b/op-service/README.md new file mode 100644 index 00000000000..9f8bf03c54c --- /dev/null +++ b/op-service/README.md @@ -0,0 +1,75 @@ +# `op-service` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-service) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-service) + +`op-service` is a collection of Go utilities to build OP-Stack services with. + +```text +├── cliapp - Flag and lifecycle handling for a Urfave v2 CLI app. 
+├── client - RPC and HTTP client utils +├── clock - Clock interface, system clock, tickers, mock/test time utils +├── crypto - Cryptography utils, complements geth crypto package +├── ctxinterrupt - Blocking/Interrupt handling +├── dial - Dialing util functions for RPC clients +├── endpoint - Abstracts away type of RPC endpoint +├── enum - Utils to create enums +├── errutil - Utils to work with customized errors +├── eth - Common Ethereum data types and OP-Stack extension types +├── flags - Utils and flag types for CLI usage +├── httputil - Utils to create enhanced HTTP Server +├── ioutil - File utils, including atomic files and compression +├── jsonutil - JSON encoding/decoding utils +├── locks - Lock utils, like read-write wrapped types +├── log - Logging CLI and middleware utils +├── metrics - Metrics types, metering abstractions, server utils +├── oppprof - P-Prof CLI types and server setup +├── predeploys - OP-Stack predeploy definitions +├── queue - Generic queue implementation +├── retry - Function retry utils +├── rpc - RPC server utils +├── safego - Utils to make Go memory more safe +├── serialize - Binary serialization abstractions +├── signer - CLI flags and bindings to work with a remote signer +├── solabi - Utils to encode/decode Solidity ABI formatted data +├── sources - RPC client bindings +├── tasks - Err-group with panic handling +├── testlog - Test logger and log-capture utils for testing +├── testutils - Simplified Ethereum types, mock RPC bindings, utils for testing. +├── tls - CLI flags and utils to work with TLS connections +├── txmgr - Transaction manager: automated nonce, fee and confirmation handling. +└── *.go - Miscellaneous utils (soon to be deprecated / moved) +``` + +## Usage + +From `op-service` dir: +```bash +# Run Go tests +make test +# Run Go fuzz tests +make fuzz +``` + +## Product + +### Optimization target + +Provide solid reusable building blocks for all OP-Stack Go services. 
+ +### Vision + +- Remove unused utilities: `op-service` itself needs to stay maintainable. +- Make all Go services consistent: `op-service` modules can be used to simplify and improve more Go services. + +## Design principles + +- Reduce boilerplate in Go services: provide service building utils ranging from CLI to testing. +- Protect devs from sharp edges in the Go std-lib: think of providing missing composition, + proper resource-closing, well set up network-binding, safe concurrency utils. + +## Testing + +Each op-service package has its own unit-testing. +More advanced utils, such as the transaction manager, are covered in `op-e2e` as well. From e76c04d11b60e14b3888a9fb786ff48f39d6775b Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 2 Dec 2024 20:04:44 +0100 Subject: [PATCH 019/111] op-wheel: add readme (#13161) --- op-wheel/README.md | 92 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 op-wheel/README.md diff --git a/op-wheel/README.md b/op-wheel/README.md new file mode 100644 index 00000000000..ce5dfa298f1 --- /dev/null +++ b/op-wheel/README.md @@ -0,0 +1,92 @@ +# `op-wheel` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-wheel) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-wheel) + +`op-wheel` is a CLI tool to direct the engine one way or the other with DB cheats and Engine API routines. + +It was named the "wheel" because of two reasons: +- Figuratively, it allows to steer the stack, an interface for a *driver* (like the op-node sub-component) to control the execution *engine* (e.g. op-geth). +- Idiomatically, like the Unix wheel-bit and its slang origins: empower a user to execute restricted commands, or more generally just someone with great power or influence. 
+ +## Quickstart + +### Cheat utils + +Cheating commands to modify a Geth database without corresponding in-protocol change. + +The `cheat` sub-command has sub-commands for interacting with the DB, making patches, and dumping debug data. + +Note that the validity of state-changes, as applied through patches, +does not get checked until the block is re-processed. +This can be used ot trick the node into things like hypothetical +test-states or shadow-forks without diverging the block-hashes. + +To run: +```bash +go run ./op-wheel/cmd cheat --help +``` + +### Engine utils + +Engine API commands to build/reorg/rewind/finalize/copy blocks. + +Each sub-command dials the engine API endpoint (with provided JWT secret) and then runs the action. + +To run: +```bash +go run ./op-wheel/cmd engine --help +``` + +## Usage + +### Build from source + +```bash +# from op-wheel dir: +make op-wheel +./bin/op-wheel --help +``` + +### Run from source + +```bash +# from op-wheel dir: +go run ./cmd --help +``` + +### Build docker image + +See `op-wheel` docker-bake target. + +## Product + +`op-wheel` is a tool for expert-users to perform advanced data recoveries, tests and overrides. +This tool optimizes for reusability of these expert actions, to make them less error-prone. + +This is not part of a standard release / process, as this tool is not used commonly, +and the end-user is expected to be familiar with building from source. + +Actions that are common enough to be used at least once by the average end-user should +be part of the op-node or other standard op-stack release. + +## Design principles + +Design for an expert-user: this tool aims to provide full control over critical op-stack data +such as the engine-API and database itself, without hiding important information. + +However, even as expert-user, wrong assumptions can be made. +Defaults should aim to reduce errors, and leave the stack in a safe state to recover from. 
+ +## Failure modes + +This tool is not used in the happy-path, but can be critical during expert-recovery of advanced failure modes. +E.g. database recovery after Geth database corruption, or manual forkchoice overrides. +Most importantly, each CLI command used for recovery aims to be verbose, +and avoids leaving an inconsistent state after failed or interrupted recovery. + +## Testing + +This is a test-utility more than a production tool, and thus does currently not have test-coverage of its own. +However, when it is used as tool during (dev/test) chain or node issues, usage does inform fixes/improvements. From 35c1ffdaf562e00ac50d6e568d31d5a08258d705 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 2 Dec 2024 20:05:06 +0100 Subject: [PATCH 020/111] op-proposer: add readme (#13160) --- op-proposer/README.md | 153 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 op-proposer/README.md diff --git a/op-proposer/README.md b/op-proposer/README.md new file mode 100644 index 00000000000..56f086061e1 --- /dev/null +++ b/op-proposer/README.md @@ -0,0 +1,153 @@ +# `op-proposer` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-proposer) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-proposer) + +User docs: +- [Proposer Configuration docs] + +[Proposer Configuration docs]: https://docs.optimism.io/builders/chain-operators/configuration/proposer + +Specs: +- [`proposals.md`](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/proposals.md) +- [`withdrawals`](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/withdrawals.md) +- [`fault-proof/stage-one/bridge-integration.md`](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/stage-one/bridge-integration.md) + +The `op-proposer` is a light-weight service to automate output-root proposal 
transactions on a regular interval.
contract +participant challenger as op-challenger +participant claim as Fault Dispute Game
contract +participant dgf as Dispute Game Factory
contract on L1 +participant proposer as op-proposer +participant opnode as op-node +participant el as Execution Engine
(e.g. op-geth) + +proposer ->>opnode: query output-root +opnode ->>el: query block and withdrawals-root +el -->> opnode: return block and withdrawals-root +opnode -->> proposer: return output root +proposer ->> dgf: send claim +proposer ->> proposer: repeat with next claim +dgf ->> claim: create game contract +challenger ->> claim: resolve (or counter) claim +portal -->> claim: proveWithdrawalTransaction checks game state +``` + +The `op-proposer` itself is a light-weight loop to maintain this relay: +schedule when to propose, inspect what to propose, transact on L1 to proposer, and repeat. + +## Product + +### Optimization target + +The `op-proposer` code optimizes for simplicity. + +Proposals are few and far-between, commonly only at a 1 hour interval. +Proposal execution speed affects tests more than it does production, and thus not a primary optimization target. + +Most costs are made in the proposal contract execution, +not the operation of the op-proposer, and thus not the primary optimization concern. + +Proposals are critical to safety however, and simplicity is thus important to this service. + +### Vision + +The pre-fault-proof proposal functionality is effectively unused code, and may be removed in the near future. +Solutions for alternative proving systems are a work in progress. + +With the proposed withdrawals-root feature (see [Isthmus upgrade feature]), +the op-node will soon no longer have to query the storage separately +from the block-header that it constructs an output-root for. +This lowers the requirements to run a proposer, +since no archive-node is required anymore to determine the withdrawals-root. + +[Isthmus upgrade feature]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/isthmus/exec-engine.md#l2tol1messagepasser-storage-root-in-header + +Testing of this service may be further improved by decoupling the scheduling and processing. 
+Better encapsulated processing would lend itself better to [op-e2e](../op-e2e) action-tests. + + +## Design principles + + + +- Reuse the transaction-management: this is the most complicated part of the op-proposer, but is common with other services. +- Keep the proposal flow simple: given that we only expect one transaction per hour, + but the transaction is a critical claim, we have a strong preference for safety over liveness. + +## Failure modes + + + +While disabled by default, the op-proposer is capable of submitting proposals too eagerly. +A proposal for unfinalized L2 state that does not hold true later may result in an invalid claim on L1, +and thus in dispute-game penalties. + +Assuming finality, the op-proposer is only really subject to liveness failures: +- to L1 RPC failure (mitigated with redundancy in L1 RPC) +- local temporary failure, e.g. offline execution engine (mitigated with alerts) + or odd tx-inclusion situations (mitigated with fresh state upon restart). + +## Testing + +The `op-proposer` integration is covered in system `op-e2e` tests. From 623609aef393cf11281dac4907e4b393ef28de0d Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 2 Dec 2024 14:53:33 -0500 Subject: [PATCH 021/111] feat: make Cannon libraries verison agnostic (#13175) Updates the Cannon libraries to be version agnostic instead of being pinned to 0.8.15. This is OK because we do not deploy libraries outside of contracts that have pinned contract versions. 
--- .../contracts-bedrock/src/cannon/libraries/CannonErrors.sol | 2 +- packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol | 2 +- packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol | 2 +- .../src/cannon/libraries/MIPS64Instructions.sol | 2 +- .../contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol | 2 +- packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol | 2 +- .../contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol | 2 +- .../contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol | 2 +- packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol | 2 +- packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol | 2 +- .../contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol b/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol index 3649852cec6..dd0e78a3a33 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/CannonErrors.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; /// @notice Thrown when a passed part offset is out of bounds. 
error PartOffsetOOB(); diff --git a/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol b/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol index 2e7c50ed862..26d0a17edae 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/CannonTypes.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; using LPPMetadataLib for LPPMetaData global; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol index 34a8d39f5d4..a1d689e731d 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Arch.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; library MIPS64Arch { uint64 internal constant WORD_SIZE = 64; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol index 7494135ace1..6191cdfbe0a 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol index f6e0633466e..2f77fcd599c 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Memory.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { InvalidMemoryProof } from 
"src/cannon/libraries/CannonErrors.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol index 09c347841b9..c7102dea0fd 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { InvalidExitedValue } from "src/cannon/libraries/CannonErrors.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol index 68cca83c4bf..e8556979bc7 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { MIPS64Memory } from "src/cannon/libraries/MIPS64Memory.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol index f2af43da0fe..70ed5e20c3b 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSInstructions.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol index b78cc04a895..1d3942f7050 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSMemory.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // 
Libraries import { InvalidMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol index 22fb608bcf5..b2982b5b16a 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { InvalidExitedValue } from "src/cannon/libraries/CannonErrors.sol"; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index 57206ea3701..8ba85bc7e37 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.0; // Libraries import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; From a2b1a7e111eb92891ccb94fab5806eb3735d07ef Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 2 Dec 2024 14:56:27 -0500 Subject: [PATCH 022/111] feat: common core for contracts Go check scripts (#13134) Introduces a new common base framework for writing contracts check scripts in Go. Many of the check scripts basically do the exact same logic of somehow parsing either the interfaces or the artifact files. Goal of this small project is to make the process of writing new checks easier and more reliable. We demonstrate this framework in action by porting the test-names script to use this new framework. 
--- .circleci/config.yml | 1 + go.mod | 1 + go.sum | 2 + op-chain-ops/solc/types.go | 156 +++++++++- .../scripts/checks/common/util.go | 124 ++++++++ .../scripts/checks/common/util_test.go | 180 +++++++++++ .../scripts/checks/test-names/main.go | 202 ++++++------- .../scripts/checks/test-names/main_test.go | 280 ++++++++++++++++++ 8 files changed, 830 insertions(+), 116 deletions(-) create mode 100644 packages/contracts-bedrock/scripts/checks/common/util.go create mode 100644 packages/contracts-bedrock/scripts/checks/common/util_test.go create mode 100644 packages/contracts-bedrock/scripts/checks/test-names/main_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index a50ad6c2269..1f7b8a95e61 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1382,6 +1382,7 @@ workflows: op-e2e/interop op-e2e/actions op-e2e/faultproofs + packages/contracts-bedrock/scripts/checks requires: - contracts-bedrock-build - cannon-prestate diff --git a/go.mod b/go.mod index 7e6f8737d80..94f571c65b6 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ toolchain go1.22.7 require ( github.com/BurntSushi/toml v1.4.0 github.com/andybalholm/brotli v1.1.0 + github.com/bmatcuk/doublestar/v4 v4.7.1 github.com/btcsuite/btcd v0.24.2 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/cockroachdb/pebble v1.1.2 diff --git a/go.sum b/go.sum index d18aff8414a..49bc0c62eb1 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/boltdb/bolt 
v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= diff --git a/op-chain-ops/solc/types.go b/op-chain-ops/solc/types.go index de6edb90d23..56ea47a9ad5 100644 --- a/op-chain-ops/solc/types.go +++ b/op-chain-ops/solc/types.go @@ -1,7 +1,6 @@ package solc import ( - "encoding/json" "fmt" "github.com/ethereum/go-ethereum/accounts/abi" @@ -129,5 +128,158 @@ type Ast struct { Id uint `json:"id"` License string `json:"license"` NodeType string `json:"nodeType"` - Nodes json.RawMessage `json:"nodes"` + Nodes []AstNode `json:"nodes"` + Src string `json:"src"` +} + +type AstNode struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + Nodes []AstNode `json:"nodes,omitempty"` + Abstract bool `json:"abstract,omitempty"` + BaseContracts []AstBaseContract `json:"baseContracts,omitempty"` + CanonicalName string `json:"canonicalName,omitempty"` + ContractDependencies []int `json:"contractDependencies,omitempty"` + ContractKind string `json:"contractKind,omitempty"` + Documentation interface{} `json:"documentation,omitempty"` + FullyImplemented bool `json:"fullyImplemented,omitempty"` + LinearizedBaseContracts []int `json:"linearizedBaseContracts,omitempty"` + Name string `json:"name,omitempty"` + NameLocation string `json:"nameLocation,omitempty"` + Scope int `json:"scope,omitempty"` + UsedErrors []int `json:"usedErrors,omitempty"` + UsedEvents []int `json:"usedEvents,omitempty"` + + // Function specific + Body *AstBlock `json:"body,omitempty"` + Parameters *AstParameterList `json:"parameters,omitempty"` + ReturnParameters *AstParameterList `json:"returnParameters,omitempty"` + StateMutability string `json:"stateMutability,omitempty"` + Virtual bool `json:"virtual,omitempty"` + Visibility string `json:"visibility,omitempty"` + + // Variable 
specific + Constant bool `json:"constant,omitempty"` + Mutability string `json:"mutability,omitempty"` + StateVariable bool `json:"stateVariable,omitempty"` + StorageLocation string `json:"storageLocation,omitempty"` + TypeDescriptions *AstTypeDescriptions `json:"typeDescriptions,omitempty"` + TypeName *AstTypeName `json:"typeName,omitempty"` + + // Expression specific + Expression *Expression `json:"expression,omitempty"` + IsConstant bool `json:"isConstant,omitempty"` + IsLValue bool `json:"isLValue,omitempty"` + IsPure bool `json:"isPure,omitempty"` + LValueRequested bool `json:"lValueRequested,omitempty"` + + // Literal specific + HexValue string `json:"hexValue,omitempty"` + Kind string `json:"kind,omitempty"` + Value interface{} `json:"value,omitempty"` + + // Other fields + Arguments []Expression `json:"arguments,omitempty"` + Condition *Expression `json:"condition,omitempty"` + TrueBody *AstBlock `json:"trueBody,omitempty"` + FalseBody *AstBlock `json:"falseBody,omitempty"` + Operator string `json:"operator,omitempty"` +} + +type AstBaseContract struct { + BaseName *AstTypeName `json:"baseName"` + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` +} + +type AstDocumentation struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + Text string `json:"text"` +} + +type AstBlock struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + Statements []AstNode `json:"statements"` +} + +type AstParameterList struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Parameters []AstNode `json:"parameters"` + Src string `json:"src"` +} + +type AstTypeDescriptions struct { + TypeIdentifier string `json:"typeIdentifier"` + TypeString string `json:"typeString"` +} + +type AstTypeName struct { + Id int `json:"id"` + Name string `json:"name"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + StateMutability string 
`json:"stateMutability,omitempty"` + TypeDescriptions *AstTypeDescriptions `json:"typeDescriptions,omitempty"` +} + +type Expression struct { + Id int `json:"id"` + NodeType string `json:"nodeType"` + Src string `json:"src"` + TypeDescriptions *AstTypeDescriptions `json:"typeDescriptions,omitempty"` + Name string `json:"name,omitempty"` + OverloadedDeclarations []int `json:"overloadedDeclarations,omitempty"` + ReferencedDeclaration int `json:"referencedDeclaration,omitempty"` + ArgumentTypes []AstTypeDescriptions `json:"argumentTypes,omitempty"` +} + +type ForgeArtifact struct { + Abi abi.ABI `json:"abi"` + Bytecode CompilerOutputBytecode `json:"bytecode"` + DeployedBytecode CompilerOutputBytecode `json:"deployedBytecode"` + MethodIdentifiers map[string]string `json:"methodIdentifiers"` + RawMetadata string `json:"rawMetadata"` + Metadata ForgeCompilerMetadata `json:"metadata"` + StorageLayout *StorageLayout `json:"storageLayout,omitempty"` + Ast Ast `json:"ast"` + Id int `json:"id"` +} + +type ForgeCompilerMetadata struct { + Compiler ForgeCompilerInfo `json:"compiler"` + Language string `json:"language"` + Output ForgeMetadataOutput `json:"output"` + Settings CompilerSettings `json:"settings"` + Sources map[string]ForgeSourceInfo `json:"sources"` + Version int `json:"version"` +} + +type ForgeCompilerInfo struct { + Version string `json:"version"` +} + +type ForgeMetadataOutput struct { + Abi abi.ABI `json:"abi"` + DevDoc ForgeDocObject `json:"devdoc"` + UserDoc ForgeDocObject `json:"userdoc"` +} + +type ForgeSourceInfo struct { + Keccak256 string `json:"keccak256"` + License string `json:"license"` + Urls []string `json:"urls"` +} + +type ForgeDocObject struct { + Kind string `json:"kind"` + Methods map[string]interface{} `json:"methods"` + Notice string `json:"notice,omitempty"` + Version int `json:"version"` } diff --git a/packages/contracts-bedrock/scripts/checks/common/util.go b/packages/contracts-bedrock/scripts/checks/common/util.go new file mode 100644 
index 00000000000..131dff3b898 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/common/util.go @@ -0,0 +1,124 @@ +package common + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + + "github.com/bmatcuk/doublestar/v4" + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "golang.org/x/sync/errgroup" +) + +type ErrorReporter struct { + hasErr atomic.Bool + outMtx sync.Mutex +} + +func NewErrorReporter() *ErrorReporter { + return &ErrorReporter{} +} + +func (e *ErrorReporter) Fail(msg string, args ...any) { + e.outMtx.Lock() + // Useful for suppressing error reporting in tests + if os.Getenv("SUPPRESS_ERROR_REPORTER") == "" { + _, _ = fmt.Fprintf(os.Stderr, "❌ "+msg+"\n", args...) + } + e.outMtx.Unlock() + e.hasErr.Store(true) +} + +func (e *ErrorReporter) HasError() bool { + return e.hasErr.Load() +} + +type FileProcessor func(path string) []error + +func ProcessFiles(files map[string]string, processor FileProcessor) error { + g := errgroup.Group{} + g.SetLimit(runtime.NumCPU()) + + reporter := NewErrorReporter() + for name, path := range files { + name, path := name, path // Capture loop variables + g.Go(func() error { + if errs := processor(path); len(errs) > 0 { + for _, err := range errs { + reporter.Fail("%s: %v", name, err) + } + } + return nil + }) + } + + err := g.Wait() + if err != nil { + return fmt.Errorf("processing failed: %w", err) + } + if reporter.HasError() { + return fmt.Errorf("processing failed") + } + return nil +} + +func ProcessFilesGlob(includes, excludes []string, processor FileProcessor) error { + files, err := FindFiles(includes, excludes) + if err != nil { + return err + } + return ProcessFiles(files, processor) +} + +func FindFiles(includes, excludes []string) (map[string]string, error) { + included := make(map[string]string) + excluded := make(map[string]struct{}) + + // Get all included files + for _, pattern := range includes { + matches, err := 
doublestar.Glob(os.DirFS("."), pattern) + if err != nil { + return nil, fmt.Errorf("glob pattern error: %w", err) + } + for _, match := range matches { + name := filepath.Base(match) + included[name] = match + } + } + + // Get all excluded files + for _, pattern := range excludes { + matches, err := doublestar.Glob(os.DirFS("."), pattern) + if err != nil { + return nil, fmt.Errorf("glob pattern error: %w", err) + } + for _, match := range matches { + excluded[filepath.Base(match)] = struct{}{} + } + } + + // Remove excluded files from result + for name := range excluded { + delete(included, name) + } + + return included, nil +} + +func ReadForgeArtifact(path string) (*solc.ForgeArtifact, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read artifact: %w", err) + } + + var artifact solc.ForgeArtifact + if err := json.Unmarshal(data, &artifact); err != nil { + return nil, fmt.Errorf("failed to parse artifact: %w", err) + } + + return &artifact, nil +} diff --git a/packages/contracts-bedrock/scripts/checks/common/util_test.go b/packages/contracts-bedrock/scripts/checks/common/util_test.go new file mode 100644 index 00000000000..4defc1c7045 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/common/util_test.go @@ -0,0 +1,180 @@ +package common + +import ( + "os" + "path/filepath" + "testing" +) + +func TestErrorReporter(t *testing.T) { + os.Setenv("SUPPRESS_ERROR_REPORTER", "1") + defer os.Unsetenv("SUPPRESS_ERROR_REPORTER") + + reporter := NewErrorReporter() + + if reporter.HasError() { + t.Error("new reporter should not have errors") + } + + reporter.Fail("test error") + + if !reporter.HasError() { + t.Error("reporter should have error after Fail") + } +} + +func TestProcessFiles(t *testing.T) { + os.Setenv("SUPPRESS_ERROR_REPORTER", "1") + defer os.Unsetenv("SUPPRESS_ERROR_REPORTER") + + files := map[string]string{ + "file1": "path1", + "file2": "path2", + } + + // Test successful processing + err := 
ProcessFiles(files, func(path string) []error { + return nil + }) + if err != nil { + t.Errorf("expected no error, got %v", err) + } + + // Test error handling + err = ProcessFiles(files, func(path string) []error { + var errors []error + errors = append(errors, os.ErrNotExist) + return errors + }) + if err == nil { + t.Error("expected error, got nil") + } +} + +func TestProcessFilesGlob(t *testing.T) { + os.Setenv("SUPPRESS_ERROR_REPORTER", "1") + defer os.Unsetenv("SUPPRESS_ERROR_REPORTER") + + // Create test directory structure + tmpDir := t.TempDir() + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + // Create test files + files := map[string]string{ + "test1.txt": "content1", + "test2.txt": "content2", + "skip.txt": "content3", + } + + for name, content := range files { + if err := os.WriteFile(name, []byte(content), 0644); err != nil { + t.Fatal(err) + } + } + + // Test processing with includes and excludes + includes := []string{"*.txt"} + excludes := []string{"skip.txt"} + + processedFiles := make(map[string]bool) + err := ProcessFilesGlob(includes, excludes, func(path string) []error { + processedFiles[filepath.Base(path)] = true + return nil + }) + + if err != nil { + t.Errorf("ProcessFiles failed: %v", err) + } + + // Verify results + if len(processedFiles) != 2 { + t.Errorf("expected 2 processed files, got %d", len(processedFiles)) + } + if !processedFiles["test1.txt"] { + t.Error("expected to process test1.txt") + } + if !processedFiles["test2.txt"] { + t.Error("expected to process test2.txt") + } + if processedFiles["skip.txt"] { + t.Error("skip.txt should have been excluded") + } +} + +func TestFindFiles(t *testing.T) { + // Create test directory structure + tmpDir := t.TempDir() + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + // Create test files + files := map[string]string{ + "test1.txt": "content1", + "test2.txt": "content2", + "skip.txt": "content3", + } + + for name, content := range files { + if err := 
os.WriteFile(name, []byte(content), 0644); err != nil { + t.Fatal(err) + } + } + + // Test finding files + includes := []string{"*.txt"} + excludes := []string{"skip.txt"} + + found, err := FindFiles(includes, excludes) + if err != nil { + t.Fatalf("FindFiles failed: %v", err) + } + + // Verify results + if len(found) != 2 { + t.Errorf("expected 2 files, got %d", len(found)) + } + if _, exists := found["test1.txt"]; !exists { + t.Error("expected to find test1.txt") + } + if _, exists := found["test2.txt"]; !exists { + t.Error("expected to find test2.txt") + } + if _, exists := found["skip.txt"]; exists { + t.Error("skip.txt should have been excluded") + } +} + +func TestReadForgeArtifact(t *testing.T) { + // Create a temporary test artifact + tmpDir := t.TempDir() + artifactContent := `{ + "abi": [], + "bytecode": { + "object": "0x123" + }, + "deployedBytecode": { + "object": "0x456" + } + }` + tmpFile := filepath.Join(tmpDir, "Test.json") + if err := os.WriteFile(tmpFile, []byte(artifactContent), 0644); err != nil { + t.Fatal(err) + } + + // Test processing + artifact, err := ReadForgeArtifact(tmpFile) + if err != nil { + t.Fatalf("ReadForgeArtifact failed: %v", err) + } + + // Verify results + if artifact.Bytecode.Object != "0x123" { + t.Errorf("expected bytecode '0x123', got %q", artifact.Bytecode.Object) + } + if artifact.DeployedBytecode.Object != "0x456" { + t.Errorf("expected deployed bytecode '0x456', got %q", artifact.DeployedBytecode.Object) + } +} diff --git a/packages/contracts-bedrock/scripts/checks/test-names/main.go b/packages/contracts-bedrock/scripts/checks/test-names/main.go index 86550f211cb..84ead0d6fa0 100644 --- a/packages/contracts-bedrock/scripts/checks/test-names/main.go +++ b/packages/contracts-bedrock/scripts/checks/test-names/main.go @@ -1,27 +1,87 @@ package main import ( - "encoding/json" "fmt" "os" - "os/exec" - "path/filepath" "strconv" "strings" "unicode" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + 
"github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" ) -type Check func(parts []string) bool +func main() { + if err := common.ProcessFilesGlob( + []string{"forge-artifacts/**/*.json"}, + []string{}, + processFile, + ); err != nil { + fmt.Printf("error: %v\n", err) + os.Exit(1) + } +} + +func processFile(path string) []error { + artifact, err := common.ReadForgeArtifact(path) + if err != nil { + return []error{err} + } + + var errors []error + names := extractTestNames(artifact) + for _, name := range names { + if err = checkTestName(name); err != nil { + errors = append(errors, err) + } + } + + return errors +} + +func extractTestNames(artifact *solc.ForgeArtifact) []string { + isTest := false + for _, entry := range artifact.Abi.Methods { + if entry.Name == "IS_TEST" { + isTest = true + break + } + } + if !isTest { + return nil + } + + names := []string{} + for _, entry := range artifact.Abi.Methods { + if !strings.HasPrefix(entry.Name, "test") { + continue + } + names = append(names, entry.Name) + } + + return names +} + +type CheckFunc func(parts []string) bool type CheckInfo struct { - check Check error string + check CheckFunc } -var excludes = map[string]bool{} - -var checks = []CheckInfo{ - { +var checks = map[string]CheckInfo{ + "doubleUnderscores": { + error: "test names cannot have double underscores", + check: func(parts []string) bool { + for _, part := range parts { + if len(strings.TrimSpace(part)) == 0 { + return false + } + } + return true + }, + }, + "camelCase": { error: "test name parts should be in camelCase", check: func(parts []string) bool { for _, part := range parts { @@ -32,21 +92,24 @@ var checks = []CheckInfo{ return true }, }, - { + "partsCount": { error: "test names should have either 3 or 4 parts, each separated by underscores", check: func(parts []string) bool { return len(parts) == 3 || len(parts) == 4 }, }, - { - error: "test names should begin with \"test\", \"testFuzz\", or \"testDiff\"", + 
"prefix": { + error: "test names should begin with 'test', 'testFuzz', or 'testDiff'", check: func(parts []string) bool { - return parts[0] == "test" || parts[0] == "testFuzz" || parts[0] == "testDiff" + return len(parts) > 0 && (parts[0] == "test" || parts[0] == "testFuzz" || parts[0] == "testDiff") }, }, - { - error: "test names should end with either \"succeeds\", \"reverts\", \"fails\", \"works\" or \"benchmark[_num]\"", + "suffix": { + error: "test names should end with either 'succeeds', 'reverts', 'fails', 'works', or 'benchmark[_num]'", check: func(parts []string) bool { + if len(parts) == 0 { + return false + } last := parts[len(parts)-1] if last == "succeeds" || last == "reverts" || last == "fails" || last == "works" { return true @@ -58,113 +121,24 @@ var checks = []CheckInfo{ return last == "benchmark" }, }, - { + "failureParts": { error: "failure tests should have 4 parts, third part should indicate the reason for failure", check: func(parts []string) bool { + if len(parts) == 0 { + return false + } last := parts[len(parts)-1] return len(parts) == 4 || (last != "reverts" && last != "fails") }, }, } -func main() { - cmd := exec.Command("forge", "config", "--json") - output, err := cmd.Output() - if err != nil { - fmt.Printf("Error executing forge config: %v\n", err) - os.Exit(1) - } - - var config map[string]interface{} - err = json.Unmarshal(output, &config) - if err != nil { - fmt.Printf("Error parsing forge config: %v\n", err) - os.Exit(1) - } - - outDir, ok := config["out"].(string) - if !ok { - outDir = "out" - } - - fmt.Println("Success:") - var errors []string - - err = filepath.Walk(outDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - if excludes[strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))] { - return nil - } - - data, err := os.ReadFile(path) - if err != nil { - return err - } - - var artifact map[string]interface{} - err = 
json.Unmarshal(data, &artifact) - if err != nil { - return nil // Skip files that are not valid JSON - } - - abi, ok := artifact["abi"].([]interface{}) - if !ok { - return nil +func checkTestName(name string) error { + parts := strings.Split(name, "_") + for _, check := range checks { + if !check.check(parts) { + return fmt.Errorf("%s: %s", name, check.error) } - - isTest := false - for _, element := range abi { - if elem, ok := element.(map[string]interface{}); ok { - if elem["name"] == "IS_TEST" { - isTest = true - break - } - } - } - - if isTest { - success := true - for _, element := range abi { - if elem, ok := element.(map[string]interface{}); ok { - if elem["type"] == "function" { - name, ok := elem["name"].(string) - if !ok || !strings.HasPrefix(name, "test") { - continue - } - - parts := strings.Split(name, "_") - for _, check := range checks { - if !check.check(parts) { - errors = append(errors, fmt.Sprintf("%s#%s: %s", path, name, check.error)) - success = false - } - } - } - } - } - - if success { - fmt.Printf(" - %s\n", filepath.Base(path[:len(path)-len(filepath.Ext(path))])) - } - } - - return nil - }) - - if err != nil { - fmt.Printf("Error walking the path %q: %v\n", outDir, err) - os.Exit(1) - } - - if len(errors) > 0 { - fmt.Println(strings.Join(errors, "\n")) - os.Exit(1) } + return nil } diff --git a/packages/contracts-bedrock/scripts/checks/test-names/main_test.go b/packages/contracts-bedrock/scripts/checks/test-names/main_test.go new file mode 100644 index 00000000000..d4c554f12f1 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/test-names/main_test.go @@ -0,0 +1,280 @@ +package main + +import ( + "reflect" + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum/go-ethereum/accounts/abi" +) + +func TestCamelCaseCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid single part", []string{"test"}, true}, + {"valid multiple parts", 
[]string{"test", "something", "succeeds"}, true}, + {"invalid uppercase", []string{"Test"}, false}, + {"invalid middle uppercase", []string{"test", "Something", "succeeds"}, false}, + {"empty parts", []string{}, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["camelCase"].check(tt.parts); got != tt.expected { + t.Errorf("checkCamelCase error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestPartsCountCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"three parts", []string{"test", "something", "succeeds"}, true}, + {"four parts", []string{"test", "something", "reason", "fails"}, true}, + {"too few parts", []string{"test", "fails"}, false}, + {"too many parts", []string{"test", "a", "b", "c", "fails"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["partsCount"].check(tt.parts); got != tt.expected { + t.Errorf("checkPartsCount error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestPrefixCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid test", []string{"test", "something", "succeeds"}, true}, + {"valid testFuzz", []string{"testFuzz", "something", "succeeds"}, true}, + {"valid testDiff", []string{"testDiff", "something", "succeeds"}, true}, + {"invalid prefix", []string{"testing", "something", "succeeds"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["prefix"].check(tt.parts); got != tt.expected { + t.Errorf("checkPrefix error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestSuffixCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid succeeds", []string{"test", "something", "succeeds"}, true}, + 
{"valid reverts", []string{"test", "something", "reverts"}, true}, + {"valid fails", []string{"test", "something", "fails"}, true}, + {"valid works", []string{"test", "something", "works"}, true}, + {"valid benchmark", []string{"test", "something", "benchmark"}, true}, + {"valid benchmark_num", []string{"test", "something", "benchmark", "123"}, true}, + {"invalid suffix", []string{"test", "something", "invalid"}, false}, + {"invalid benchmark_text", []string{"test", "something", "benchmark", "abc"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["suffix"].check(tt.parts); got != tt.expected { + t.Errorf("checkSuffix error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestFailurePartsCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid failure with reason", []string{"test", "something", "reason", "fails"}, true}, + {"valid failure with reason", []string{"test", "something", "reason", "reverts"}, true}, + {"invalid failure without reason", []string{"test", "something", "fails"}, false}, + {"invalid failure without reason", []string{"test", "something", "reverts"}, false}, + {"valid non-failure with three parts", []string{"test", "something", "succeeds"}, true}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["failureParts"].check(tt.parts); got != tt.expected { + t.Errorf("checkFailureParts error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestCheckTestName(t *testing.T) { + tests := []struct { + name string + testName string + shouldSucceed bool + }{ + // Valid test names - Basic patterns + {"valid basic test succeeds", "test_something_succeeds", true}, + {"valid basic test fails with reason", "test_something_reason_fails", true}, + {"valid basic test reverts with reason", 
"test_something_reason_reverts", true}, + {"valid basic test works", "test_something_works", true}, + + // Valid test names - Fuzz variants + {"valid fuzz test succeeds", "testFuzz_something_succeeds", true}, + {"valid fuzz test fails with reason", "testFuzz_something_reason_fails", true}, + {"valid fuzz test reverts with reason", "testFuzz_something_reason_reverts", true}, + {"valid fuzz test works", "testFuzz_something_works", true}, + + // Valid test names - Diff variants + {"valid diff test succeeds", "testDiff_something_succeeds", true}, + {"valid diff test fails with reason", "testDiff_something_reason_fails", true}, + {"valid diff test reverts with reason", "testDiff_something_reason_reverts", true}, + {"valid diff test works", "testDiff_something_works", true}, + + // Valid test names - Benchmark variants + {"valid benchmark test", "test_something_benchmark", true}, + {"valid benchmark with number", "test_something_benchmark_123", true}, + {"valid benchmark with large number", "test_something_benchmark_999999", true}, + {"valid benchmark with zero", "test_something_benchmark_0", true}, + + // Valid test names - Complex middle parts + {"valid complex middle part", "test_complexOperation_succeeds", true}, + {"valid multiple word middle", "test_veryComplexOperation_succeeds", true}, + {"valid numbers in middle", "test_operation123_succeeds", true}, + {"valid special case", "test_specialCase_reason_fails", true}, + + // Invalid test names - Prefix issues + {"invalid empty string", "", false}, + {"invalid prefix Test", "Test_something_succeeds", false}, + {"invalid prefix testing", "testing_something_succeeds", false}, + {"invalid prefix testfuzz", "testfuzz_something_succeeds", false}, + {"invalid prefix testdiff", "testdiff_something_succeeds", false}, + {"invalid prefix TEST", "TEST_something_succeeds", false}, + + // Invalid test names - Suffix issues + {"invalid suffix succeed", "test_something_succeed", false}, + {"invalid suffix revert", 
"test_something_revert", false}, + {"invalid suffix fail", "test_something_fail", false}, + {"invalid suffix work", "test_something_work", false}, + {"invalid suffix benchmarks", "test_something_benchmarks", false}, + {"invalid benchmark suffix text", "test_something_benchmark_abc", false}, + {"invalid benchmark suffix special", "test_something_benchmark_123abc", false}, + + // Invalid test names - Case issues + {"invalid uppercase middle", "test_Something_succeeds", false}, + {"invalid multiple uppercase", "test_SomethingHere_succeeds", false}, + {"invalid all caps middle", "test_SOMETHING_succeeds", false}, + {"invalid mixed case suffix", "test_something_Succeeds", false}, + + // Invalid test names - Structure issues + {"invalid single part", "test", false}, + {"invalid two parts", "test_succeeds", false}, + {"invalid five parts", "test_this_that_those_succeeds", false}, + {"invalid six parts", "test_this_that_those_these_succeeds", false}, + {"invalid failure without reason", "test_something_fails", false}, + {"invalid revert without reason", "test_something_reverts", false}, + + // Invalid test names - Special cases + {"invalid empty parts", "test__succeeds", false}, + {"invalid multiple underscores", "test___succeeds", false}, + {"invalid trailing underscore", "test_something_succeeds_", false}, + {"invalid leading underscore", "_test_something_succeeds", false}, + {"invalid benchmark no number", "test_something_benchmark_", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := checkTestName(tt.testName) + if (err != nil) == tt.shouldSucceed { + t.Errorf("checkTestName(%q) error = %v, shouldSucceed %v", tt.testName, err, tt.shouldSucceed) + } + }) + } +} + +func TestExtractTestNames(t *testing.T) { + tests := []struct { + name string + artifact *solc.ForgeArtifact + want []string + }{ + { + name: "valid test contract", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: 
"IS_TEST"}, + "test_something_succeeds": {Name: "test_something_succeeds"}, + "test_other_fails": {Name: "test_other_fails"}, + "not_a_test": {Name: "not_a_test"}, + "testFuzz_something_works": {Name: "testFuzz_something_works"}, + }, + }, + }, + want: []string{ + "test_something_succeeds", + "test_other_fails", + "testFuzz_something_works", + }, + }, + { + name: "non-test contract", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{ + "test_something_succeeds": {Name: "test_something_succeeds"}, + "not_a_test": {Name: "not_a_test"}, + }, + }, + }, + want: nil, + }, + { + name: "empty contract", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{}, + }, + }, + want: nil, + }, + { + name: "test contract with no test methods", + artifact: &solc.ForgeArtifact{ + Abi: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: "IS_TEST"}, + "not_a_test": {Name: "not_a_test"}, + "another_method": {Name: "another_method"}, + }, + }, + }, + want: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := extractTestNames(tt.artifact) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("extractTestNames() = %v, want %v", got, tt.want) + } + }) + } +} From 3f43f039a9e68b777045d7e2446947acbd9b0592 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 2 Dec 2024 15:24:09 -0500 Subject: [PATCH 023/111] feat: introduce mise (#13156) Introduces mise for polyglot tool dependency management. 
--- CONTRIBUTING.md | 73 ++++----- justfile | 88 ---------- mise.toml | 52 ++++++ ops/docker/ci-builder/Dockerfile | 152 ++++++------------ ops/docker/ci-builder/Dockerfile.dockerignore | 3 +- ops/scripts/check-foundry.sh | 42 ----- ops/scripts/geth-version-checker.sh | 28 ---- ops/scripts/install-foundry.sh | 63 -------- ops/scripts/install-kontrol.sh | 17 -- packages/contracts-bedrock/README.md | 2 +- packages/contracts-bedrock/justfile | 14 +- .../contracts-bedrock/meta/CONTRIBUTING.md | 106 ------------ .../contracts-bedrock/test/kontrol/README.md | 7 +- 13 files changed, 135 insertions(+), 512 deletions(-) create mode 100644 mise.toml delete mode 100755 ops/scripts/check-foundry.sh delete mode 100755 ops/scripts/geth-version-checker.sh delete mode 100755 ops/scripts/install-foundry.sh delete mode 100755 ops/scripts/install-kontrol.sh delete mode 100644 packages/contracts-bedrock/meta/CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c4e5686db99..2fcb5480c1a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,63 +20,54 @@ Interactions within this repository are subject to a [Code of Conduct](https://g ## Development Quick Start -### Software Dependencies - -| Dependency | Version | Version Check Command | -| ------------------------------------------------------------- | -------- | ------------------------ | -| [git](https://git-scm.com/) | `^2` | `git --version` | -| [go](https://go.dev/) | `^1.21` | `go version` | -| [node](https://nodejs.org/en/) | `^20` | `node --version` | -| [nvm](https://github.com/nvm-sh/nvm) | `^0.39` | `nvm --version` | -| [just](https://github.com/casey/just) | `^1.34.0`| `just --version` | -| [foundry](https://github.com/foundry-rs/foundry#installation) | `^0.2.0` | `forge --version` | -| [make](https://linux.die.net/man/1/make) | `^3` | `make --version` | -| [jq](https://github.com/jqlang/jq) | `^1.6` | `jq --version` | -| [direnv](https://direnv.net) | `^2` | `direnv --version` | -| 
[docker](https://docs.docker.com/get-docker/) | `^24` | `docker --version` | -| [docker compose](https://docs.docker.com/compose/install/) | `^2.23` | `docker compose version` | +### Setting Up -### Notes on Specific Dependencies +Clone the repository and open it: -#### `node` +```bash +git clone git@github.com:ethereum-optimism/optimism.git +cd optimism +``` -Make sure to use the version of `node` specified within [`.nvmrc`](./.nvmrc). -You can use [`nvm`](https://github.com/nvm-sh/nvm) to manage multiple versions of Node.js on your machine and automatically switch to the correct version when you enter this repository. +### Software Dependencies -#### `foundry` +You will need to install a number of software dependencies to effectively contribute to the +Optimism Monorepo. We use [`mise`](https://mise.jdx.dev/) as a dependency manager for these tools. +Once properly installed, `mise` will provide the correct versions for each tool. `mise` does not +replace any other installations of these binaries and will only serve these binaries when you are +working inside of the `optimism` directory. -`foundry` is updated frequently and occasionally contains breaking changes. -This repository pins a specific version of `foundry` inside of [`versions.json`](./versions.json). -Use the command `just update-foundry` at the root of the monorepo to make sure that your version of `foundry` is the same as the one currently being used in CI. +#### Install `mise` -#### `direnv` +Install `mise` by following the instructions provided on the +[Getting Started page](https://mise.jdx.dev/getting-started.html#_1-install-mise-cli). -[`direnv`](https://direnv.net) is a tool used to load environment variables from [`.envrc`](./.envrc) into your shell so you don't have to manually export variables every time you want to use them. -`direnv` only has access to files that you explicitly allow it to see. 
-After [installing `direnv`](https://direnv.net/docs/installation.html), you will need to **make sure that [`direnv` is hooked into your shell](https://direnv.net/docs/hook.html)**. -Make sure you've followed [the guide on the `direnv` website](https://direnv.net/docs/hook.html), then **close your terminal and reopen it** so that the changes take effect (or `source` your config file if you know how to do that). +#### Trust the `mise.toml` file -#### `docker compose` +`mise` requires that you explicitly trust the `mise.toml` file which lists the dependencies that +this repository uses. After you've installed `mise` you'll be able to trust the file via: -[Docker Desktop](https://docs.docker.com/get-docker/) should come with `docker compose` installed by default. -You'll have to install the `compose` plugin if you're not using Docker Desktop or you're on linux. +```bash +mise trust mise.toml +``` -### Setting Up +#### Install dependencies -Clone the repository and open it: +Use `mise` to install the correct versions for all of the required tools: ```bash -git clone git@github.com:ethereum-optimism/optimism.git -cd optimism +mise install ``` -### Building the Monorepo +#### Installing updates -Make sure that you've installed all of the required [Software Dependencies](#software-dependencies) before you continue. -You will need [foundry](https://github.com/foundry-rs/foundry) to build the smart contracts found within this repository. -Refer to the note on [foundry as a dependency](#foundry) for instructions. +`mise` will notify you if any dependencies are outdated. Simply run `mise install` again to install +the latest versions of the dependencies if you receive these notifications. + +### Building the Monorepo -Install dependencies and build all packages within the monorepo by running: +You must install all of the required [Software Dependencies](#software-dependencies) to build the +Optimism Monorepo. 
Once you've done so, run the following command to build: ```bash make build @@ -88,7 +79,7 @@ Use the above command to rebuild the monorepo. ### Running tests -Before running tests: **follow the above instructions to get everything built.** +Before running tests: **follow the above instructions to get everything built**. #### Running unit tests (solidity) diff --git a/justfile b/justfile index acf6d8bc602..35900f2a50b 100644 --- a/justfile +++ b/justfile @@ -13,91 +13,3 @@ semgrep-test: # Runs shellcheck. shellcheck: find . -type f -name '*.sh' -not -path '*/node_modules/*' -not -path './packages/contracts-bedrock/lib/*' -not -path './packages/contracts-bedrock/kout*/*' -exec sh -c 'echo "Checking $1"; shellcheck "$1"' _ {} \; - -######################################################## -# DEPENDENCY MANAGEMENT # -######################################################## - -# Generic task for checking if a tool version is up to date. -check-tool-version tool: - #!/usr/bin/env bash - EXPECTED=$(jq -r .{{tool}} < versions.json) - ACTUAL=$(just print-{{tool}}) - if [ "$ACTUAL" = "$EXPECTED" ]; then - echo "✓ {{tool}} versions match" - else - echo "✗ {{tool}} version mismatch (expected $EXPECTED, got $ACTUAL), run 'just install-{{tool}}' to upgrade" - exit 1 - fi - -# Installs foundry -install-foundry: - bash ./ops/scripts/install-foundry.sh - -# Prints current foundry version. -print-foundry: - forge --version - -# Checks if installed foundry version is correct. -check-foundry: - bash ./ops/scripts/check-foundry.sh - -# Installs correct kontrol version. -install-kontrol: - bash ./ops/scripts/install-kontrol.sh - -# Prints current kontrol version. -print-kontrol: - kontrol version - -# Checks if installed kontrol version is correct. -check-kontrol: - just check-tool-version kontrol - -# Installs correct abigen version. -install-abigen: - go install github.com/ethereum/go-ethereum/cmd/abigen@$(jq -r .abigen < versions.json) - -# Prints current abigen version. 
-print-abigen: - abigen --version | sed -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' -e 's/ /./g' -e 's/^/v/' - -# Checks if installed abigen version is correct. -check-abigen: - just check-tool-version abigen - -# Installs correct slither version. -install-slither: - pip3 install slither-analyzer==$(jq -r .slither < versions.json) - -# Prints current slither version. -print-slither: - slither --version - -# Checks if installed slither version is correct. -check-slither: - just check-tool-version slither - -# Installs correct semgrep version. -install-semgrep: - pip3 install semgrep=="$(jq -r .semgrep < versions.json)" - -# Prints current semgrep version. -print-semgrep: - semgrep --version | head -n 1 - -# Checks if installed semgrep version is correct. -check-semgrep: - just check-tool-version semgrep - -# Installs correct go version. -install-go: - echo "error: go must be installed manually" && exit 1 - -# Prints current go version. -print-go: - go version | sed -E 's/.*go([0-9]+\.[0-9]+\.[0-9]+).*/\1/' - -# Checks if installed go version is correct. 
-check-go: - just check-tool-version go diff --git a/mise.toml b/mise.toml new file mode 100644 index 00000000000..a2cf15470a5 --- /dev/null +++ b/mise.toml @@ -0,0 +1,52 @@ +[tools] + +# Core dependencies +go = "1.22.7" +rust = "1.83.0" +python = "3.12.0" +uv = "0.5.5" +jq = "1.7.1" +shellcheck = "0.10.0" +direnv = "2.35.0" + +# Cargo dependencies +"cargo:just" = "1.37.0" +"cargo:svm-rs" = "0.5.8" + +# Go dependencies +"go:github.com/ethereum/go-ethereum/cmd/abigen" = "1.10.25" +"go:github.com/ethereum/go-ethereum/cmd/geth" = "1.14.7" +"go:github.com/protolambda/eth2-testnet-genesis" = "0.10.0" +"go:gotest.tools/gotestsum" = "1.12.0" +"go:github.com/vektra/mockery/v2" = "2.46.0" +"go:github.com/golangci/golangci-lint/cmd/golangci-lint" = "1.61.0" +"go:github.com/mikefarah/yq/v4" = "4.44.3" + +# Python dependencies +"pipx:slither-analyzer" = "0.10.2" +"pipx:semgrep" = "1.90.0" + +# Foundry dependencies +# Foundry is a special case because it supplies multiple binaries at the same +# GitHub release, so we need to use the aliasing trick to get mise to not error +forge = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" +cast = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" +anvil = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" + +# Fake dependencies +# Put things here if you need to track versions of tools or projects that can't +# actually be managed by mise (yet). Make sure that anything you put in here is +# also found inside of disabled_tools or mise will try to install it. 
+asterisc = "1.1.1" +kontrol = "1.0.53" +binary_signer = "1.0.4" + +[alias] +forge = "ubi:foundry-rs/foundry[exe=forge]" +cast = "ubi:foundry-rs/foundry[exe=cast]" +anvil = "ubi:foundry-rs/foundry[exe=anvil]" + +[settings] +experimental = true +pipx.uvx = true +disable_tools = ["asterisc", "kontrol", "binary_signer"] diff --git a/ops/docker/ci-builder/Dockerfile b/ops/docker/ci-builder/Dockerfile index a1eb71b4795..cda9a0e71a9 100644 --- a/ops/docker/ci-builder/Dockerfile +++ b/ops/docker/ci-builder/Dockerfile @@ -1,120 +1,64 @@ -# Copy docker buildx in order to generate the absolute prestate -# in the CI pipeline for reproducible fault proof builds -FROM --platform=linux/amd64 docker as buildx +############################################################################### +# BUILDX # +############################################################################### + +FROM --platform=linux/amd64 docker AS buildx COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx RUN docker buildx version -FROM --platform=linux/amd64 debian:bullseye-slim as rust-build - -SHELL ["/bin/bash", "-c"] - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && \ - apt-get install -y build-essential git clang lld curl jq - -RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > rustup.sh && \ - chmod +x ./rustup.sh && \ - sh rustup.sh -y - -# Install nightly toolchain -RUN source $HOME/.profile && rustup update nightly - -RUN source $HOME/.profile && cargo install just -RUN source $HOME/.profile && cargo install svm-rs -# Only diff from upstream docker image is this clone instead -# of COPY. We select a specific commit to use. 
-COPY ./versions.json ./versions.json -COPY ./ops/scripts/install-foundry.sh ./install-foundry.sh +############################################################################### +# CI BUILDER (BASE) # +############################################################################### -RUN curl -L https://foundry.paradigm.xyz | bash -RUN source $HOME/.profile && ./install-foundry.sh +FROM --platform=linux/amd64 debian:bullseye-slim AS base-builder -RUN strip /root/.foundry/bin/forge && \ - strip /root/.foundry/bin/cast && \ - strip /root/.foundry/bin/anvil && \ - strip /root/.cargo/bin/svm && \ - strip /root/.cargo/bin/just - -FROM --platform=linux/amd64 debian:bullseye-slim as go-build - -RUN apt-get update && apt-get install -y curl ca-certificates jq binutils - -ENV GO_VERSION=1.22.7 - -# Fetch go manually, rather than using a Go base image, so we can copy the installation into the final stage -RUN curl -sL https://go.dev/dl/go$GO_VERSION.linux-amd64.tar.gz -o go$GO_VERSION.linux-amd64.tar.gz && \ - tar -C /usr/local/ -xzvf go$GO_VERSION.linux-amd64.tar.gz - -ENV GOPATH=/go -ENV PATH=/usr/local/go/bin:$GOPATH/bin:$PATH - -# Install the specific version of abigen and geth from version control -COPY ./versions.json ./versions.json -RUN go install github.com/ethereum/go-ethereum/cmd/abigen@$(jq -r .abigen < versions.json) -RUN go install github.com/ethereum/go-ethereum/cmd/geth@$(jq -r .geth < versions.json) - -RUN go install gotest.tools/gotestsum@v1.12.0 -RUN go install github.com/vektra/mockery/v2@v2.46.0 -RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 -RUN go install github.com/mikefarah/yq/v4@v4.44.3 +# Use bash as the shell +SHELL ["/bin/bash", "-c"] +ENV SHELL=/bin/bash +ENV BASH=/bin/bash -# Strip binaries to reduce size -RUN strip /go/bin/gotestsum && \ - strip /go/bin/mockery && \ - strip /go/bin/golangci-lint && \ - strip /go/bin/abigen && \ - strip /go/bin/geth && \ - strip /go/bin/yq +# Copy mise configuration +COPY ./mise.toml 
./mise.toml -FROM --platform=linux/amd64 debian:bullseye-slim as base-builder +# Set up mise environment +ENV PATH="/root/.local/share/mise/shims:$PATH" +ENV PATH="/root/.local/bin:${PATH}" -ENV GOPATH=/go -ENV PATH=/usr/local/go/bin:$GOPATH/bin:$PATH -ENV PATH=/root/.cargo/bin:$PATH +# Install dependencies +# We do this in one mega RUN command to avoid blowing up the size of the image ENV DEBIAN_FRONTEND=noninteractive - -# copy the go installation, but not the module cache (cache will get stale, and would add a lot of weight) -COPY --from=go-build /usr/local/go /usr/local/go - -# copy tools -COPY --from=go-build /go/bin/gotestsum /go/bin/gotestsum -COPY --from=go-build /go/bin/mockery /go/bin/mockery -COPY --from=go-build /go/bin/golangci-lint /go/bin/golangci-lint -COPY --from=go-build /go/bin/abigen /usr/local/bin/abigen -COPY --from=go-build /go/bin/geth /usr/local/bin/geth -COPY --from=go-build /go/bin/yq /go/bin/yq - -# copy tools -COPY --from=rust-build /root/.foundry/bin/forge /usr/local/bin/forge -COPY --from=rust-build /root/.foundry/bin/cast /usr/local/bin/cast -COPY --from=rust-build /root/.foundry/bin/anvil /usr/local/bin/anvil -COPY --from=rust-build /root/.cargo/bin/svm /usr/local/bin/svm -COPY --from=rust-build /root/.cargo/bin/just /usr/local/bin/just - -COPY ./versions.json ./versions.json - RUN /bin/sh -c set -eux; \ apt-get update; \ - apt-get install -y --no-install-recommends bash curl openssh-client git build-essential ca-certificates jq gnupg binutils-mips-linux-gnu python3 python3-pip python3-setuptools; \ + apt-get install -y --no-install-recommends bash curl openssh-client git build-essential ca-certificates gnupg binutils-mips-linux-gnu clang libffi-dev; \ mkdir -p /etc/apt/keyrings; \ curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg; \ chmod a+r /etc/apt/keyrings/docker.gpg; \ echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] 
https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; \ apt-get update; \ apt-get install -y docker-ce-cli; \ - ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt; \ - pip install capstone pyelftools; \ - pip install semgrep==$(jq -r .semgrep < versions.json); \ + curl https://mise.run | sh; \ + mise trust ./mise.toml; \ + mise install; \ curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | bash; \ + pip install capstone pyelftools; \ + go env -w GOMODCACHE=/go/pkg/mod; \ + go env -w GOCACHE=/root/.cache/go-build; \ + ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt; \ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + go clean -cache -modcache -testcache; \ rm -rf /var/lib/apt/lists/*; \ - rm -rf /root/.cache/pip; + rm -rf /root/.cache/pip; \ + rm -rf /root/.cache/uv; \ + rm -rf /root/.rustup; -RUN svm install 0.8.25 && \ - svm install 0.8.15 && \ - svm install 0.8.19 +# Install Solidity versions +RUN echo "installing Solidity versions" && \ + svm install 0.8.25 && \ + svm install 0.8.19 && \ + svm install 0.8.15 +# Install Codecov uploader RUN echo "downloading and verifying Codecov uploader" && \ curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import && \ curl -Os "https://uploader.codecov.io/latest/linux/codecov" && \ @@ -129,24 +73,18 @@ RUN echo "downloading and verifying Codecov uploader" && \ # Copy docker buildx COPY --from=buildx /usr/libexec/docker/cli-plugins/docker-buildx /usr/libexec/docker/cli-plugins/docker-buildx -# within docker use bash -SHELL ["/bin/bash", "-c"] +# Set up entrypoint +ENTRYPOINT ["/bin/bash", "-c"] -# set env to use bash -ENV SHELL=/bin/bash -ENV BASH=/bin/bash -ENTRYPOINT ["/bin/bash", "-c"] +############################################################################### +# CI BUILDER (RUST) # 
+############################################################################### FROM base-builder as rust-builder # Install clang & lld RUN apt-get update && apt-get install -y clang lld -# Copy the rust installation, alongside the installed toolchains -COPY --from=rust-build /root/.cargo /root/.cargo -COPY --from=rust-build /root/.rustup /root/.rustup - -# copy the rust installation, alongside the installed toolchains -COPY --from=rust-build /root/.cargo/bin /root/.cargo/bin -COPY --from=rust-build /root/.rustup /root/.rustup +# Install nightly toolchain +RUN rustup update nightly diff --git a/ops/docker/ci-builder/Dockerfile.dockerignore b/ops/docker/ci-builder/Dockerfile.dockerignore index 229d6f1165c..4f44e253194 100644 --- a/ops/docker/ci-builder/Dockerfile.dockerignore +++ b/ops/docker/ci-builder/Dockerfile.dockerignore @@ -1,4 +1,3 @@ * !/.nvmrc -!/versions.json -!/ops/scripts/install-foundry.sh +!/mise.toml diff --git a/ops/scripts/check-foundry.sh b/ops/scripts/check-foundry.sh deleted file mode 100755 index 530046bd85e..00000000000 --- a/ops/scripts/check-foundry.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -VERSIONS_FILE="versions.json" - -if ! command -v jq &> /dev/null -then - # shellcheck disable=SC2006 - echo "Please install jq" >&2 - exit 1 -fi - -if ! command -v forge &> /dev/null -then - # shellcheck disable=SC2006 - echo "Is Foundry not installed? Consider installing via just install-foundry" >&2 - exit 1 -fi - -# Check VERSIONS_FILE has expected foundry property -if ! 
jq -e '.foundry' "$VERSIONS_FILE" &> /dev/null; then - echo "'foundry' is missing from $VERSIONS_FILE" >&2 - exit 1 -fi - -# Extract the expected foundry version from versions.json -EXPECTED_VERSION=$(jq -r '.foundry' "$VERSIONS_FILE" | cut -c 1-7) -if [ -z "$EXPECTED_VERSION" ]; then - echo "Unable to extract Foundry version from $VERSIONS_FILE" >&2 - exit 1 -fi - -# Extract the installed forge version -INSTALLED_VERSION=$(forge --version | grep -o '[a-f0-9]\{7\}' | head -n 1) - -# Compare the installed timestamp with the expected timestamp -if [ "$INSTALLED_VERSION" = "$EXPECTED_VERSION" ]; then - echo "Foundry version matches the expected version." -else - echo "Mismatch between installed Foundry version ($INSTALLED_VERSION) and expected version ($EXPECTED_VERSION)." - echo "Your version of Foundry may either not be up to date, or it could be a later version." - echo "Running 'just update-foundry' from the repository root will install the expected version." -fi diff --git a/ops/scripts/geth-version-checker.sh b/ops/scripts/geth-version-checker.sh deleted file mode 100755 index 98d94e66413..00000000000 --- a/ops/scripts/geth-version-checker.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -MONOREPO_DIR=$(cd "$SCRIPTS_DIR/../../" && pwd) - -# Extract the version from the geth command output -GETH_VERSION="v$(geth version | grep '^Version:' | awk '{print $2}')" - -# Read the version from the versions file -EXPECTED_GETH_VERSION=$(jq -r .geth < "$MONOREPO_DIR"/versions.json) - -# Check if EXPECTED_GETH_VERSION contains a '-'. If not, append '-stable'. -if [[ $EXPECTED_GETH_VERSION != *-* ]]; then - EXPECTED_GETH_VERSION="${EXPECTED_GETH_VERSION}-stable" -fi - -# Compare the versions -if [[ "$GETH_VERSION" == "$EXPECTED_GETH_VERSION" ]]; then - echo "Geth version $GETH_VERSION is correct!" - exit 0 -else - echo "Geth version does not match!" 
- echo "Local geth version: $GETH_VERSION" - echo "Expected geth version: $EXPECTED_GETH_VERSION" - exit 1 -fi - - diff --git a/ops/scripts/install-foundry.sh b/ops/scripts/install-foundry.sh deleted file mode 100755 index f8ed7924bf8..00000000000 --- a/ops/scripts/install-foundry.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -set -e - -# Check if foundryup exists, if not, install it -if ! command -v foundryup &> /dev/null; then - echo "foundryup not found, installing..." - curl -L https://foundry.paradigm.xyz | bash -fi - -SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -MONOREPO_DIR=$(cd "$SCRIPTS_DIR/../../" && pwd) - -# Grab the foundry commit hash. -SHA=$(jq -r .foundry < "$MONOREPO_DIR"/versions.json) - -# Check if there is a nightly tag corresponding to the commit hash -TAG="nightly-$SHA" - -# If the foundry repository exists and a branch is checked out, we need to abort -# any changes inside ~/.foundry/foundry-rs/foundry. This is because foundryup will -# attempt to pull the latest changes from the remote repository, which will fail -# if there are any uncommitted changes. -if [ -d ~/.foundry/foundry-rs/foundry ]; then - echo "Foundry repository exists! Aborting any changes..." - cd ~/.foundry/foundry-rs/foundry - git reset --hard - git clean -fd - cd - -fi - -# Create a temporary directory -TMP_DIR=$(mktemp -d) -echo "Created tempdir @ $TMP_DIR" - -# Clone the foundry repo temporarily. We do this to avoid the need for a personal access -# token to interact with the GitHub REST API, and clean it up after we're done. -git clone https://github.com/foundry-rs/foundry.git "$TMP_DIR" && cd "$TMP_DIR" - -# If the nightly tag exists, we can download the pre-built binaries rather than building -# from source. Otherwise, clone the repository, check out the commit SHA, and build `forge`, -# `cast`, `anvil`, and `chisel` from source. -if git rev-parse "$TAG" >/dev/null 2>&1; then - echo "Nightly tag exists! Downloading prebuilt binaries..." 
- foundryup -v "$TAG" -else - echo "Nightly tag doesn't exist! Building from source..." - git checkout "$SHA" - - # Use native `cargo` build to avoid any rustc environment variables `foundryup` sets. We explicitly - # ignore chisel, as it is not a part of `ci-builder`. - cargo build --bin forge --release - cargo build --bin cast --release - cargo build --bin anvil --release - mkdir -p ~/.foundry/bin - mv target/release/forge ~/.foundry/bin - mv target/release/cast ~/.foundry/bin - mv target/release/anvil ~/.foundry/bin -fi - -# Remove the temporary foundry repo; Used just for checking the nightly tag's existence. -rm -rf "$TMP_DIR" -echo "Removed tempdir @ $TMP_DIR" diff --git a/ops/scripts/install-kontrol.sh b/ops/scripts/install-kontrol.sh deleted file mode 100755 index 5d4044d26b2..00000000000 --- a/ops/scripts/install-kontrol.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -e - -# Check if kup exists, if not, install it -if ! command -v kup &> /dev/null; then - echo "kup not found, installing..." - yes | bash <(curl -L https://kframework.org/install) -fi - -SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -MONOREPO_DIR=$(cd "$SCRIPTS_DIR/../../" && pwd) - -# Grab the correct kontrol version. -VERSION=$(jq -r .kontrol < "$MONOREPO_DIR"/versions.json) - -kup install kontrol --version v"$VERSION" diff --git a/packages/contracts-bedrock/README.md b/packages/contracts-bedrock/README.md index a9f1dbeeaa5..24212f38ff3 100644 --- a/packages/contracts-bedrock/README.md +++ b/packages/contracts-bedrock/README.md @@ -66,7 +66,7 @@ See the [Optimism Developer Docs](https://docs.optimism.io/chain/addresses) for ### Contributing Guide Contributions to the OP Stack are always welcome. -Please refer to the [CONTRIBUTING.md](./meta/CONTRIBUTING.md) for more information about how to contribute to the OP Stack smart contracts. +Please refer to the [CONTRIBUTING.md](../../CONTRIBUTING.md) for more information about how to contribute to the OP Stack smart contracts. 
### Style Guide diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index b82717c4a26..4ea97d4c61d 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -15,24 +15,12 @@ dep-status: # BUILD # ######################################################## -# Checks that the correct version of Foundry is installed. -check-foundry: - cd ../../ && ./ops/scripts/check-foundry.sh - -# Checks that semgrep is installed. -check-semgrep: - cd ../../ && just check-semgrep - -# Checks that the correct versions of Foundry and semgrep are installed. -check-dependencies: - just check-foundry && just check-semgrep - # Core forge build command forge-build: forge build # Builds the contracts. -build: check-dependencies lint-fix-no-fail forge-build interfaces-check-no-build +build: lint-fix-no-fail forge-build interfaces-check-no-build # Builds the go-ffi tool for contract tests. build-go-ffi-default: diff --git a/packages/contracts-bedrock/meta/CONTRIBUTING.md b/packages/contracts-bedrock/meta/CONTRIBUTING.md deleted file mode 100644 index 462aa6660c4..00000000000 --- a/packages/contracts-bedrock/meta/CONTRIBUTING.md +++ /dev/null @@ -1,106 +0,0 @@ -# Contributing to CONTRIBUTING.md - -First off, thanks for taking the time to contribute! - -We welcome and appreciate all kinds of contributions. We ask that before contributing you please review the procedures for each type of contribution available in the [Table of Contents](#table-of-contents). This will streamline the process for both maintainers and contributors. To find ways to contribute, view the [I Want To Contribute](#i-want-to-contribute) section below. Larger contributions should [open an issue](https://github.com/ethereum-optimism/optimism/issues/new) before implementation to ensure changes don't go to waste. - -We're excited to work with you and your contributions to scaling Ethereum! 
- -## Table of Contents - -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) -- [Your First Code Contribution](#your-first-code-contribution) -- [Improving The Documentation](#improving-the-documentation) -- [Deploying on Devnet](#deploying-on-devnet) -- [Tools](#tools) - -## I Have a Question - -> **Note** -> Before making an issue, please read the documentation and search the issues to see if your question has already been answered. - -If you have any questions about the smart contracts, please feel free to ask them in the Optimism discord developer channels or create a new detailed issue. - -## I Want To Contribute - -### Reporting Bugs - -**Any and all bug reports on production smart contract code should be submitted privately to the Optimism team so that we can mitigate the issue before it is exploited. Please see our security policy document [here](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md).** - -### Suggesting Enhancements - -#### Before Submitting an Enhancement - -- Read the documentation and the smart contracts themselves to see if the feature already exists. -- Perform a search in the issues to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/ethereum-optimism/optimism/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step** description of the suggested enhancement in as many details as possible. -- Describe the **current** behavior and why the **intended** behavior you expected to see differs. At this point you can also tell which alternatives do not work for you. 
-- Explain why this enhancement would be useful in Optimism's smart contracts. You may also want to point out the other projects that solved it better and which could serve as inspiration. - -### Your First Code Contribution - -The best place to begin contributing is by looking through the issues with the `good first issue` label. These are issues that are relatively easy to implement and are a great way to get familiar with the codebase. - -Optimism's smart contracts are written in Solidity and we use [foundry](https://github.com/foundry-rs/foundry) as our development framework. To get started, you'll need to install several dependencies: - -1. [just](https://github.com/casey/just) - Make sure to `just install` -1. [foundry](https://getfoundry.sh) - Foundry is built with [rust](https://www.rust-lang.org/tools/install), and this project uses a pinned version of foundry. Install the rust toolchain with `rustup`. - Make sure to install the version of foundry used by `ci-builder`, defined in the `versions.json` file in the root of this repo under the `foundry` key. Once you have `foundryup` installed, there is a helper to do this: `just install-foundry` -1. [golang](https://golang.org/doc/install) -1. [python](https://www.python.org/downloads/) - -Our [Style Guide](STYLE_GUIDE.md) contains information about the project structure, syntax preferences, naming conventions, and more. Please take a look at it before submitting a PR, and let us know if you spot inconsistencies! - -Once you've read the style guide and are ready to work on your PR, there are a plethora of useful `just` scripts to know about that will help you with development. -You can run `just -l` to list them all, some of the key ones are: - -1. `just build` Builds the smart contracts. -1. `just test` Runs the full `forge` test suite. -1. `just gas-snapshot` Generates the gas snapshot for the smart contracts. -1. `just semver-lock` Generates the semver lockfile. -1. 
`just snapshots` Generates the storage and ABI snapshots. -1. `just clean` Removes all build artifacts for `forge` and `go` compilations. -1. `just validate-spacers` Validates the positions of the storage slot spacers. -1. `just validate-deploy-configs` Validates the deployment configurations in `deploy-config` -1. `just lint` Runs the linter on the smart contracts and scripts. -1. `just pre-pr` Runs most checks, generators, and linters prior to a PR. For most PRs, this is sufficient to pass CI if everything is in order. -1. `just pre-pr-full` Runs all checks, generators, and linters prior to a PR. - -### Improving The Documentation - -Documentation improvements are more than welcome! If you see a typo or feel that a code comment describes something poorly or incorrectly, please submit a PR with a fix. - -### Deploying on Devnet - -To deploy the smart contracts on a local devnet, run `make devnet-up` in the monorepo root. For more information on the local devnet, see [dev-node](https://docs.optimism.io/chain/testing/dev-node). - -### Tools - -#### Validate Spacing - -In order to make sure that we don't accidentally overwrite storage slots, contract storage layouts are checked to make sure spacing is correct. - -This uses the `snapshots/storageLayout` directory to check contract spacing. Run `just validate-spacers` to check the spacing of all contracts. - -#### Gas Snapshots - -We use forge's `gas-snapshot` subcommand to produce a gas snapshot for tests in `Benchmark.t.sol`. CI will check that the gas snapshot has been updated properly when it runs, so make sure to run `just gas-snapshot`! - -#### Semver Locking - -Many of our smart contracts are semantically versioned. To make sure that changes are not made to a contract without deliberately bumping its version, we commit to the source code and the creation bytecode of its dependencies in a lockfile. Consult the [Style Guide](./STYLE_GUIDE.md#Versioning) for more information about how our contracts are versioned. 
- -#### Storage Snapshots - -Due to the many proxied contracts in Optimism's protocol, we automate tracking the diff to storage layouts of the contracts in the project. This is to ensure that we don't break a proxy by upgrading its implementation to a contract with a different storage layout. To generate the storage lockfile, run `just snapshots`. diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index 7e4dc5a2809..1103feed1a0 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -48,10 +48,9 @@ The directory is structured as follows ### Installation -1. `cd` to the root of this repo. -2. Install Foundry by running `just install-foundry`. This installs `foundryup`, the foundry toolchain installer, then installs the required foundry version. -3. Install Kontrol by running `just install-kontrol`. This installs `kup`, the package manager for RV tools, then installs the required kontrol version. -4. Install Docker. +1. Make sure that the dependencies for the Optimism Monorepo are installed with `mise`. +1. Install [`kup`](https://github.com/runtimeverification/k/releases/tag/v7.1.180). +1. 
Use `kup` to [install `kontrol`](https://github.com/runtimeverification/kontrol?tab=readme-ov-file#fast-installation) ## Usage From 176c71ab313c146defb9fc783b0ab8ac08352320 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Tue, 3 Dec 2024 07:09:52 +0800 Subject: [PATCH 024/111] remove dup log (#13149) --- op-service/txmgr/txmgr.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index 353951af633..e6593f99375 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -606,7 +606,7 @@ func (m *SimpleTxManager) sendTx(ctx context.Context, tx *types.Transaction) (*t func (m *SimpleTxManager) publishTx(ctx context.Context, tx *types.Transaction, sendState *SendState) (*types.Transaction, bool) { l := m.txLogger(tx, true) - l.Info("Publishing transaction", "tx", tx.Hash()) + l.Info("Publishing transaction") for { if sendState.bumpFees { From cbfb97ede9d8ab4da2e3c4ef65ac8e366ad5f422 Mon Sep 17 00:00:00 2001 From: Ashutosh Varma Date: Tue, 3 Dec 2024 04:51:18 +0530 Subject: [PATCH 025/111] fix: delayed weth addr in superchain inspect (#13159) --- op-deployer/pkg/deployer/inspect/superchain_registry.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-deployer/pkg/deployer/inspect/superchain_registry.go b/op-deployer/pkg/deployer/inspect/superchain_registry.go index e5ee56568b5..c8d57907d9f 100644 --- a/op-deployer/pkg/deployer/inspect/superchain_registry.go +++ b/op-deployer/pkg/deployer/inspect/superchain_registry.go @@ -165,7 +165,7 @@ func createAddressList(l1Contracts *L1Contracts, appliedIntent *state.Intent, ch // Fault proof contracts AnchorStateRegistryProxy: superchain.Address(l1Contracts.OpChainDeployment.AnchorStateRegistryProxyAddress), - DelayedWETHProxy: superchain.Address(l1Contracts.OpChainDeployment.L1CrossDomainMessengerProxyAddress), + DelayedWETHProxy: 
superchain.Address(l1Contracts.OpChainDeployment.DelayedWETHPermissionedGameProxyAddress), DisputeGameFactoryProxy: superchain.Address(l1Contracts.OpChainDeployment.DisputeGameFactoryProxyAddress), FaultDisputeGame: superchain.Address(l1Contracts.OpChainDeployment.FaultDisputeGameAddress), MIPS: superchain.Address(l1Contracts.ImplementationsDeployment.MipsSingletonAddress), From 72ec8d3ff15f6863cf055b6c7fd2bc82006d5969 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 2 Dec 2024 18:17:40 -0700 Subject: [PATCH 026/111] ctb: Fix flake in TestExtractTestNames (#13177) Golang map iteration order is random, so we need to sort first. --- .../contracts-bedrock/scripts/checks/test-names/main_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/contracts-bedrock/scripts/checks/test-names/main_test.go b/packages/contracts-bedrock/scripts/checks/test-names/main_test.go index d4c554f12f1..5a9a0fd1846 100644 --- a/packages/contracts-bedrock/scripts/checks/test-names/main_test.go +++ b/packages/contracts-bedrock/scripts/checks/test-names/main_test.go @@ -2,6 +2,7 @@ package main import ( "reflect" + "slices" "testing" "github.com/ethereum-optimism/optimism/op-chain-ops/solc" @@ -272,6 +273,8 @@ func TestExtractTestNames(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := extractTestNames(tt.artifact) + slices.Sort(got) + slices.Sort(tt.want) if !reflect.DeepEqual(got, tt.want) { t.Errorf("extractTestNames() = %v, want %v", got, tt.want) } From 4de5c8c21a8df77456f640b3a886c52fddb71697 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Tue, 3 Dec 2024 01:57:34 -0600 Subject: [PATCH 027/111] op-signer, op-node: Integrate op-node with op-signer for block payload signing (#12325) * Initial implementation of integrating op-node with op-signer for remote signer configuration for block payload signing * op-service: remove the requirement for signer.address to be set when using op-service * op-service: add blockpayload_args to send 
to rpc opsigner_signBlockPayload * Implement mock rpc in gossip_test and apply review * Clean up tests --- op-node/flags/p2p_flags.go | 5 +- op-node/p2p/cli/load_signer.go | 16 ++- op-node/p2p/gossip_test.go | 154 ++++++++++++++++++++++--- op-node/p2p/signer.go | 62 ++++++---- op-node/p2p/signer_test.go | 15 +-- op-node/service.go | 2 +- op-service/signer/blockpayload_args.go | 62 ++++++++++ op-service/signer/cli.go | 23 ++-- op-service/signer/cli_test.go | 2 +- op-service/signer/client.go | 16 +++ op-service/tls/cli.go | 31 ++--- op-service/tls/cli_test.go | 2 +- op-service/txmgr/cli.go | 2 +- 13 files changed, 309 insertions(+), 83 deletions(-) create mode 100644 op-service/signer/blockpayload_args.go diff --git a/op-node/flags/p2p_flags.go b/op-node/flags/p2p_flags.go index 269b973c52d..6a38fbb817f 100644 --- a/op-node/flags/p2p_flags.go +++ b/op-node/flags/p2p_flags.go @@ -7,6 +7,7 @@ import ( "github.com/urfave/cli/v2" "github.com/ethereum-optimism/optimism/op-node/p2p" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" ) func p2pEnv(envprefix, v string) []string { @@ -87,7 +88,7 @@ func deprecatedP2PFlags(envPrefix string) []cli.Flag { // None of these flags are strictly required. // Some are hidden if they are too technical, or not recommended. func P2PFlags(envPrefix string) []cli.Flag { - return []cli.Flag{ + return append([]cli.Flag{ &cli.BoolFlag{ Name: DisableP2PName, Usage: "Completely disable the P2P stack", @@ -410,5 +411,5 @@ func P2PFlags(envPrefix string) []cli.Flag { Required: false, EnvVars: p2pEnv(envPrefix, "PING"), }, - } + }, opsigner.CLIFlags(envPrefix, P2PCategory)...) 
} diff --git a/op-node/p2p/cli/load_signer.go b/op-node/p2p/cli/load_signer.go index 7416fa76397..3c0c532edb2 100644 --- a/op-node/p2p/cli/load_signer.go +++ b/op-node/p2p/cli/load_signer.go @@ -5,18 +5,18 @@ import ( "strings" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/urfave/cli/v2" "github.com/ethereum-optimism/optimism/op-node/flags" "github.com/ethereum-optimism/optimism/op-node/p2p" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" ) -// TODO: implement remote signer setup (config to authenticated endpoint) -// and remote signer itself (e.g. a open http client to make signing requests) - // LoadSignerSetup loads a configuration for a Signer to be set up later -func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) { +func LoadSignerSetup(ctx *cli.Context, logger log.Logger) (p2p.SignerSetup, error) { key := ctx.String(flags.SequencerP2PKeyName) + signerCfg := opsigner.ReadCLIConfig(ctx) if key != "" { // Mnemonics are bad because they leak *all* keys when they leak. // Unencrypted keys from file are bad because they are easy to leak (and we are not checking file permissions). 
@@ -26,9 +26,13 @@ func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) { } return &p2p.PreparedSigner{Signer: p2p.NewLocalSigner(priv)}, nil + } else if signerCfg.Enabled() { + remoteSigner, err := p2p.NewRemoteSigner(logger, signerCfg) + if err != nil { + return nil, err + } + return &p2p.PreparedSigner{Signer: remoteSigner}, nil } - // TODO: create remote signer - return nil, nil } diff --git a/op-node/p2p/gossip_test.go b/op-node/p2p/gossip_test.go index 0833f270e40..9f047f4b5ba 100644 --- a/op-node/p2p/gossip_test.go +++ b/op-node/p2p/gossip_test.go @@ -3,31 +3,33 @@ package p2p import ( "bytes" "context" + "crypto/ecdsa" "fmt" "io" "math/big" "testing" "time" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/golang/snappy" - // "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" + "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-service/testlog" ) func TestGuardGossipValidator(t *testing.T) { @@ -62,30 +64,122 @@ func TestVerifyBlockSignature(t *testing.T) { L2ChainID: big.NewInt(100), } peerId := peer.ID("foo") - secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() + secrets, err := 
crypto.GenerateKey() + require.NoError(t, err) + msg := []byte("any msg") + + t.Run("Valid", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} + sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) + require.NoError(t, err) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) + require.Equal(t, pubsub.ValidationAccept, result) + }) + + t.Run("WrongSigner", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} + sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) + require.NoError(t, err) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) + require.Equal(t, pubsub.ValidationReject, result) + }) + + t.Run("InvalidSignature", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + sig := make([]byte, 65) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg) + require.Equal(t, pubsub.ValidationReject, result) + }) + + t.Run("NoSequencer", func(t *testing.T) { + runCfg := &testutils.MockRuntimeConfig{} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} + sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) + require.NoError(t, err) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) + require.Equal(t, pubsub.ValidationIgnore, result) + }) +} + +type mockRemoteSigner struct { + priv *ecdsa.PrivateKey +} + +func (t *mockRemoteSigner) SignBlockPayload(args opsigner.BlockPayloadArgs) (hexutil.Bytes, error) { + signingHash, err := args.ToSigningHash() + if err != nil { + return nil, err + } + signature, err := crypto.Sign(signingHash[:], t.priv) + if err != nil { + 
return nil, err + } + return signature, nil +} + +func TestVerifyBlockSignatureWithRemoteSigner(t *testing.T) { + secrets, err := crypto.GenerateKey() require.NoError(t, err) + + remoteSigner := &mockRemoteSigner{secrets} + server := oprpc.NewServer( + "127.0.0.1", + 0, + "test", + oprpc.WithAPIs([]rpc.API{ + { + Namespace: "opsigner", + Service: remoteSigner, + }, + }), + ) + + require.NoError(t, server.Start()) + defer func() { + _ = server.Stop() + }() + + logger := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + L2ChainID: big.NewInt(100), + } + + peerId := peer.ID("foo") msg := []byte("any msg") + signerCfg := opsigner.NewCLIConfig() + signerCfg.Endpoint = fmt.Sprintf("http://%s", server.Endpoint()) + signerCfg.TLSConfig.TLSKey = "" + signerCfg.TLSConfig.TLSCert = "" + signerCfg.TLSConfig.TLSCaCert = "" + signerCfg.TLSConfig.Enabled = false + t.Run("Valid", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + remoteSigner, err := NewRemoteSigner(logger, signerCfg) + require.NoError(t, err) + signer := &PreparedSigner{Signer: remoteSigner} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) - result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) require.Equal(t, pubsub.ValidationAccept, result) }) t.Run("WrongSigner", func(t *testing.T) { runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + remoteSigner, err := NewRemoteSigner(logger, signerCfg) + require.NoError(t, err) + signer := &PreparedSigner{Signer: remoteSigner} sig, err := 
signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) - result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) require.Equal(t, pubsub.ValidationReject, result) }) t.Run("InvalidSignature", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} sig := make([]byte, 65) result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg) require.Equal(t, pubsub.ValidationReject, result) @@ -93,12 +187,36 @@ func TestVerifyBlockSignature(t *testing.T) { t.Run("NoSequencer", func(t *testing.T) { runCfg := &testutils.MockRuntimeConfig{} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + remoteSigner, err := NewRemoteSigner(logger, signerCfg) + require.NoError(t, err) + signer := &PreparedSigner{Signer: remoteSigner} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) - result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) + result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:], msg) require.Equal(t, pubsub.ValidationIgnore, result) }) + + t.Run("RemoteSignerNoTLS", func(t *testing.T) { + signerCfg := opsigner.NewCLIConfig() + signerCfg.Endpoint = fmt.Sprintf("http://%s", server.Endpoint()) + signerCfg.TLSConfig.TLSKey = "invalid" + signerCfg.TLSConfig.TLSCert = "invalid" + signerCfg.TLSConfig.TLSCaCert = "invalid" + signerCfg.TLSConfig.Enabled = true + + _, err := NewRemoteSigner(logger, signerCfg) + require.Error(t, err) + }) + + t.Run("RemoteSignerInvalidEndpoint", func(t *testing.T) { + signerCfg := opsigner.NewCLIConfig() + signerCfg.Endpoint = "Invalid" + signerCfg.TLSConfig.TLSKey = "" + signerCfg.TLSConfig.TLSCert = "" + 
signerCfg.TLSConfig.TLSCaCert = "" + _, err := NewRemoteSigner(logger, signerCfg) + require.Error(t, err) + }) } type MarshalSSZ interface { @@ -146,10 +264,10 @@ func TestBlockValidator(t *testing.T) { cfg := &rollup.Config{ L2ChainID: big.NewInt(100), } - secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() + secrets, err := crypto.GenerateKey() require.NoError(t, err) - runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.PublicKey)} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets)} // Params Set 2: Call the validation function peerID := peer.ID("foo") diff --git a/op-node/p2p/signer.go b/op-node/p2p/signer.go index cd5e9f94a01..20a52d0a262 100644 --- a/op-node/p2p/signer.go +++ b/op-node/p2p/signer.go @@ -9,8 +9,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-node/rollup" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" ) var SigningDomainBlocksV1 = [32]byte{} @@ -20,40 +22,27 @@ type Signer interface { io.Closer } -func SigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) { - var msgInput [32 + 32 + 32]byte - // domain: first 32 bytes - copy(msgInput[:32], domain[:]) - // chain_id: second 32 bytes - if chainID.BitLen() > 256 { - return common.Hash{}, errors.New("chain_id is too large") - } - chainID.FillBytes(msgInput[32:64]) - // payload_hash: third 32 bytes, hash of encoded payload - copy(msgInput[64:], crypto.Keccak256(payloadBytes)) - - return crypto.Keccak256Hash(msgInput[:]), nil -} - func BlockSigningHash(cfg *rollup.Config, payloadBytes []byte) (common.Hash, error) { - return SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes) + 
return opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes, nil).ToSigningHash() } // LocalSigner is suitable for testing type LocalSigner struct { - priv *ecdsa.PrivateKey - hasher func(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) + priv *ecdsa.PrivateKey } func NewLocalSigner(priv *ecdsa.PrivateKey) *LocalSigner { - return &LocalSigner{priv: priv, hasher: SigningHash} + return &LocalSigner{priv: priv} } func (s *LocalSigner) Sign(ctx context.Context, domain [32]byte, chainID *big.Int, encodedMsg []byte) (sig *[65]byte, err error) { if s.priv == nil { return nil, errors.New("signer is closed") } - signingHash, err := s.hasher(domain, chainID, encodedMsg) + + blockPayloadArgs := opsigner.NewBlockPayloadArgs(domain, chainID, encodedMsg, nil) + signingHash, err := blockPayloadArgs.ToSigningHash() + if err != nil { return nil, err } @@ -69,6 +58,39 @@ func (s *LocalSigner) Close() error { return nil } +type RemoteSigner struct { + client *opsigner.SignerClient + sender *common.Address +} + +func NewRemoteSigner(logger log.Logger, config opsigner.CLIConfig) (*RemoteSigner, error) { + signerClient, err := opsigner.NewSignerClientFromConfig(logger, config) + if err != nil { + return nil, err + } + senderAddress := common.HexToAddress(config.Address) + return &RemoteSigner{signerClient, &senderAddress}, nil +} + +func (s *RemoteSigner) Sign(ctx context.Context, domain [32]byte, chainID *big.Int, encodedMsg []byte) (sig *[65]byte, err error) { + if s.client == nil { + return nil, errors.New("signer is closed") + } + + blockPayloadArgs := opsigner.NewBlockPayloadArgs(domain, chainID, encodedMsg, s.sender) + signature, err := s.client.SignBlockPayload(ctx, blockPayloadArgs) + + if err != nil { + return nil, err + } + return &signature, nil +} + +func (s *RemoteSigner) Close() error { + s.client = nil + return nil +} + type PreparedSigner struct { Signer } diff --git a/op-node/p2p/signer_test.go 
b/op-node/p2p/signer_test.go index abcfe4825b3..53f78504d18 100644 --- a/op-node/p2p/signer_test.go +++ b/op-node/p2p/signer_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-node/rollup" + opsigner "github.com/ethereum-optimism/optimism/op-service/signer" "github.com/stretchr/testify/require" ) @@ -14,10 +15,10 @@ func TestSigningHash_DifferentDomain(t *testing.T) { } payloadBytes := []byte("arbitraryData") - hash, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes) + hash, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating first signing hash") - hash2, err := SigningHash([32]byte{3}, cfg.L2ChainID, payloadBytes) + hash2, err := opsigner.NewBlockPayloadArgs([32]byte{3}, cfg.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating second signing hash") require.NotEqual(t, hash, hash2, "signing hash should be different when domain is different") @@ -32,10 +33,10 @@ func TestSigningHash_DifferentChainID(t *testing.T) { } payloadBytes := []byte("arbitraryData") - hash, err := SigningHash(SigningDomainBlocksV1, cfg1.L2ChainID, payloadBytes) + hash, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg1.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating first signing hash") - hash2, err := SigningHash(SigningDomainBlocksV1, cfg2.L2ChainID, payloadBytes) + hash2, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg2.L2ChainID, payloadBytes, nil).ToSigningHash() require.NoError(t, err, "creating second signing hash") require.NotEqual(t, hash, hash2, "signing hash should be different when chain ID is different") @@ -46,10 +47,10 @@ func TestSigningHash_DifferentMessage(t *testing.T) { L2ChainID: big.NewInt(100), } - hash, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg1")) + hash, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, 
cfg.L2ChainID, []byte("msg1"), nil).ToSigningHash() require.NoError(t, err, "creating first signing hash") - hash2, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg2")) + hash2, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, []byte("msg2"), nil).ToSigningHash() require.NoError(t, err, "creating second signing hash") require.NotEqual(t, hash, hash2, "signing hash should be different when message is different") @@ -62,6 +63,6 @@ func TestSigningHash_LimitChainID(t *testing.T) { cfg := &rollup.Config{ L2ChainID: chainID, } - _, err := SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, []byte("arbitraryData")) + _, err := opsigner.NewBlockPayloadArgs(SigningDomainBlocksV1, cfg.L2ChainID, []byte("arbitraryData"), nil).ToSigningHash() require.ErrorContains(t, err, "chain_id is too large") } diff --git a/op-node/service.go b/op-node/service.go index 4d12c7f5446..55c1c7173b7 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -49,7 +49,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { driverConfig := NewDriverConfig(ctx) - p2pSignerSetup, err := p2pcli.LoadSignerSetup(ctx) + p2pSignerSetup, err := p2pcli.LoadSignerSetup(ctx, log) if err != nil { return nil, fmt.Errorf("failed to load p2p signer: %w", err) } diff --git a/op-service/signer/blockpayload_args.go b/op-service/signer/blockpayload_args.go new file mode 100644 index 00000000000..8239bc0967d --- /dev/null +++ b/op-service/signer/blockpayload_args.go @@ -0,0 +1,62 @@ +package signer + +import ( + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// BlockPayloadArgs represents the arguments to sign a new block payload from the sequencer. 
+type BlockPayloadArgs struct { + Domain [32]byte `json:"domain"` + ChainID *big.Int `json:"chainId"` + PayloadHash []byte `json:"payloadHash"` + PayloadBytes []byte + SenderAddress *common.Address `json:"senderAddress"` +} + +// NewBlockPayloadArgs creates a BlockPayloadArgs struct +func NewBlockPayloadArgs(domain [32]byte, chainId *big.Int, payloadBytes []byte, senderAddress *common.Address) *BlockPayloadArgs { + payloadHash := crypto.Keccak256(payloadBytes) + args := &BlockPayloadArgs{ + Domain: domain, + ChainID: chainId, + PayloadHash: payloadHash, + PayloadBytes: payloadBytes, + SenderAddress: senderAddress, + } + return args +} + +func (args *BlockPayloadArgs) Check() error { + if args.ChainID == nil { + return errors.New("chainId not specified") + } + if len(args.PayloadHash) == 0 { + return errors.New("payloadHash not specified") + } + return nil +} + +// ToSigningHash creates a signingHash from the block payload args. +// Uses the hashing scheme from https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node-p2p.md#block-signatures +func (args *BlockPayloadArgs) ToSigningHash() (common.Hash, error) { + if err := args.Check(); err != nil { + return common.Hash{}, err + } + var msgInput [32 + 32 + 32]byte + // domain: first 32 bytes + copy(msgInput[:32], args.Domain[:]) + // chain_id: second 32 bytes + if args.ChainID.BitLen() > 256 { + return common.Hash{}, errors.New("chain_id is too large") + } + args.ChainID.FillBytes(msgInput[32:64]) + + // payload_hash: third 32 bytes, hash of encoded payload + copy(msgInput[64:], args.PayloadHash[:]) + + return crypto.Keccak256Hash(msgInput[:]), nil +} diff --git a/op-service/signer/cli.go b/op-service/signer/cli.go index 0c1df648286..534fd09c97b 100644 --- a/op-service/signer/cli.go +++ b/op-service/signer/cli.go @@ -17,18 +17,20 @@ const ( HeadersFlagName = "signer.header" ) -func CLIFlags(envPrefix string) []cli.Flag { +func CLIFlags(envPrefix string, category string) []cli.Flag { envPrefix += 
"_SIGNER" flags := []cli.Flag{ &cli.StringFlag{ - Name: EndpointFlagName, - Usage: "Signer endpoint the client will connect to", - EnvVars: opservice.PrefixEnvVar(envPrefix, "ENDPOINT"), + Name: EndpointFlagName, + Usage: "Signer endpoint the client will connect to", + EnvVars: opservice.PrefixEnvVar(envPrefix, "ENDPOINT"), + Category: category, }, &cli.StringFlag{ - Name: AddressFlagName, - Usage: "Address the signer is signing transactions for", - EnvVars: opservice.PrefixEnvVar(envPrefix, "ADDRESS"), + Name: AddressFlagName, + Usage: "Address the signer is signing requests for", + EnvVars: opservice.PrefixEnvVar(envPrefix, "ADDRESS"), + Category: category, }, &cli.StringSliceFlag{ Name: HeadersFlagName, @@ -36,7 +38,7 @@ func CLIFlags(envPrefix string) []cli.Flag { EnvVars: opservice.PrefixEnvVar(envPrefix, "HEADER"), }, } - flags = append(flags, optls.CLIFlagsWithFlagPrefix(envPrefix, "signer")...) + flags = append(flags, optls.CLIFlagsWithFlagPrefix(envPrefix, "signer", category)...) 
return flags } @@ -65,10 +67,7 @@ func (c CLIConfig) Check() error { } func (c CLIConfig) Enabled() bool { - if c.Endpoint != "" && c.Address != "" { - return true - } - return false + return c.Endpoint != "" && c.Address != "" } func ReadCLIConfig(ctx *cli.Context) CLIConfig { diff --git a/op-service/signer/cli_test.go b/op-service/signer/cli_test.go index 056ed481560..3453258c6ca 100644 --- a/op-service/signer/cli_test.go +++ b/op-service/signer/cli_test.go @@ -93,7 +93,7 @@ func TestInvalidConfig(t *testing.T) { func configForArgs(args ...string) CLIConfig { app := cli.NewApp() - app.Flags = CLIFlags("TEST_") + app.Flags = CLIFlags("TEST_", "") app.Name = "test" var config CLIConfig app.Action = func(ctx *cli.Context) error { diff --git a/op-service/signer/client.go b/op-service/signer/client.go index cdb9094dfe6..acd753ecef7 100644 --- a/op-service/signer/client.go +++ b/op-service/signer/client.go @@ -113,3 +113,19 @@ func (s *SignerClient) SignTransaction(ctx context.Context, chainId *big.Int, fr return &signed, nil } + +func (s *SignerClient) SignBlockPayload(ctx context.Context, args *BlockPayloadArgs) ([65]byte, error) { + var result hexutil.Bytes + + if err := s.client.CallContext(ctx, &result, "opsigner_signBlockPayload", args); err != nil { + return [65]byte{}, fmt.Errorf("opsigner_signBlockPayload failed: %w", err) + } + + if len(result) != 65 { + return [65]byte{}, fmt.Errorf("invalid signature: %s", result.String()) + } + + signature := [65]byte(result) + + return signature, nil +} diff --git a/op-service/tls/cli.go b/op-service/tls/cli.go index e2e086e922c..f85de807a83 100644 --- a/op-service/tls/cli.go +++ b/op-service/tls/cli.go @@ -21,7 +21,7 @@ const ( // CLIFlags returns flags with env var envPrefix // This should be used for server TLS configs, or when client and server tls configs are the same func CLIFlags(envPrefix string) []cli.Flag { - return CLIFlagsWithFlagPrefix(envPrefix, "") + return CLIFlagsWithFlagPrefix(envPrefix, "", "") } var ( 
@@ -33,7 +33,7 @@ var ( // CLIFlagsWithFlagPrefix returns flags with env var and cli flag prefixes // Should be used for client TLS configs when different from server on the same process -func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string) []cli.Flag { +func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string, category string) []cli.Flag { prefixFunc := func(flagName string) string { return strings.Trim(fmt.Sprintf("%s.%s", flagPrefix, flagName), ".") } @@ -48,22 +48,25 @@ func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string) []cli.Flag { EnvVars: prefixEnvVars("TLS_ENABLED"), }, &cli.StringFlag{ - Name: prefixFunc(TLSCaCertFlagName), - Usage: "tls ca cert path", - Value: defaultTLSCaCert, - EnvVars: prefixEnvVars("TLS_CA"), + Name: prefixFunc(TLSCaCertFlagName), + Usage: "tls ca cert path", + Value: defaultTLSCaCert, + EnvVars: prefixEnvVars("TLS_CA"), + Category: category, }, &cli.StringFlag{ - Name: prefixFunc(TLSCertFlagName), - Usage: "tls cert path", - Value: defaultTLSCert, - EnvVars: prefixEnvVars("TLS_CERT"), + Name: prefixFunc(TLSCertFlagName), + Usage: "tls cert path", + Value: defaultTLSCert, + EnvVars: prefixEnvVars("TLS_CERT"), + Category: category, }, &cli.StringFlag{ - Name: prefixFunc(TLSKeyFlagName), - Usage: "tls key", - Value: defaultTLSKey, - EnvVars: prefixEnvVars("TLS_KEY"), + Name: prefixFunc(TLSKeyFlagName), + Usage: "tls key", + Value: defaultTLSKey, + EnvVars: prefixEnvVars("TLS_KEY"), + Category: category, }, } } diff --git a/op-service/tls/cli_test.go b/op-service/tls/cli_test.go index bd4ea4bf17c..ce5e41d96c2 100644 --- a/op-service/tls/cli_test.go +++ b/op-service/tls/cli_test.go @@ -53,7 +53,7 @@ func TestInvalidConfig(t *testing.T) { func configForArgs(args ...string) CLIConfig { app := cli.NewApp() - app.Flags = CLIFlagsWithFlagPrefix("TEST_", "test") + app.Flags = CLIFlagsWithFlagPrefix("TEST_", "test", "") app.Name = "test" var config CLIConfig app.Action = func(ctx *cli.Context) error { diff --git 
a/op-service/txmgr/cli.go b/op-service/txmgr/cli.go index 2390933d79c..fc1aa2cceb0 100644 --- a/op-service/txmgr/cli.go +++ b/op-service/txmgr/cli.go @@ -191,7 +191,7 @@ func CLIFlagsWithDefaults(envPrefix string, defaults DefaultFlagValues) []cli.Fl Value: defaults.ReceiptQueryInterval, EnvVars: prefixEnvVars("TXMGR_RECEIPT_QUERY_INTERVAL"), }, - }, opsigner.CLIFlags(envPrefix)...) + }, opsigner.CLIFlags(envPrefix, "")...) } type CLIConfig struct { From 010c8a93fac81ddced220bc3f8155959403eeac8 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Tue, 3 Dec 2024 10:03:52 -0500 Subject: [PATCH 028/111] feat: remove unused _includeDump arg (#13099) * feat: remove unused onlyTestnetOrDevnet modifier * feat: remove unused _includeDump arg --- .../contracts-bedrock/scripts/deploy/Deploy.s.sol | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 56e6b6feba3..c013430262c 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -167,14 +167,7 @@ contract Deploy is Deployer { /// @notice Deploy a new OP Chain using an existing SuperchainConfig and ProtocolVersions /// @param _superchainConfigProxy Address of the existing SuperchainConfig proxy /// @param _protocolVersionsProxy Address of the existing ProtocolVersions proxy - /// @param _includeDump Whether to include a state dump after deployment - function runWithSuperchain( - address payable _superchainConfigProxy, - address payable _protocolVersionsProxy, - bool _includeDump - ) - public - { + function runWithSuperchain(address payable _superchainConfigProxy, address payable _protocolVersionsProxy) public { require(_superchainConfigProxy != address(0), "Deploy: must specify address for superchain config proxy"); require(_protocolVersionsProxy != address(0), "Deploy: must specify address for protocol versions 
proxy"); @@ -191,10 +184,6 @@ contract Deploy is Deployer { save("ProtocolVersionsProxy", _protocolVersionsProxy); _run(false); - - if (_includeDump) { - vm.dumpState(Config.stateDumpPath("")); - } } function runWithStateDump() public { From e1e5d063e42e375b9abc5ecdf3171a153533db9e Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Tue, 3 Dec 2024 17:32:03 +0100 Subject: [PATCH 029/111] Sc/remove todo justfile (#13179) * remove done todo from just file * remove done todo from just file --- packages/contracts-bedrock/justfile | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 4ea97d4c61d..387a1240b1b 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -204,7 +204,6 @@ semgrep: semgrep-test: cd ../../ && semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ -# TODO: Also run lint-forge-tests-check but we need to fix the test names first. # Runs all checks. check: @just gas-snapshot-check-no-build \ From 3435fcc11e1febd7a0b4ed156830f3d1bf0ac59e Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Tue, 3 Dec 2024 17:32:37 +0100 Subject: [PATCH 030/111] enables and asserts that forge coverage compiles and runs successfully in ci (#13171) --- packages/contracts-bedrock/justfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 387a1240b1b..6a34cc6998d 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -58,11 +58,11 @@ test-kontrol-no-build: # Runs contract coverage. coverage: build-go-ffi - forge coverage || (bash -c "forge coverage 2>&1 | grep -q 'Stack too deep' && echo -e '\\033[1;33mWARNING\\033[0m: Coverage failed with stack too deep, so overriding and exiting successfully' && exit 0 || exit 1") + forge coverage # Runs contract coverage with lcov. 
coverage-lcov: build-go-ffi - forge coverage --report lcov || (bash -c "forge coverage --report lcov 2>&1 | grep -q 'Stack too deep' && echo -e '\\033[1;33mWARNING\\033[0m: Coverage failed with stack too deep, so overriding and exiting successfully' && exit 0 || exit 1") + forge coverage --report lcov ######################################################## From f2eaaa838f52efb2f841797a4014924f7227161b Mon Sep 17 00:00:00 2001 From: Maurelian Date: Tue, 3 Dec 2024 12:43:11 -0500 Subject: [PATCH 031/111] feat: Remove thin wrapper _run() function overload (#13101) --- .../scripts/deploy/Deploy.s.sol | 17 +++++++---------- .../scripts/deploy/DeployOwnership.s.sol | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index c013430262c..6eec754b248 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -161,7 +161,7 @@ contract Deploy is Deployer { /// @notice Deploy all of the L1 contracts necessary for a full Superchain with a single Op Chain. function run() public { console.log("Deploying a fresh OP Stack including SuperchainConfig"); - _run(); + _run({ _needsSuperchain: true }); } /// @notice Deploy a new OP Chain using an existing SuperchainConfig and ProtocolVersions @@ -183,27 +183,24 @@ contract Deploy is Deployer { save("ProtocolVersions", pvProxy.implementation()); save("ProtocolVersionsProxy", _protocolVersionsProxy); - _run(false); + _run({ _needsSuperchain: false }); } + /// @notice Used for L1 alloc generation. function runWithStateDump() public { vm.chainId(cfg.l1ChainID()); - _run(); + _run({ _needsSuperchain: true }); vm.dumpState(Config.stateDumpPath("")); } /// @notice Deploy all L1 contracts and write the state diff to a file. + /// Used to generate kontrol tests. 
function runWithStateDiff() public stateDiff { - _run(); - } - - /// @notice Compatibility function for tests that override _run(). - function _run() internal virtual { - _run(true); + _run({ _needsSuperchain: true }); } /// @notice Internal function containing the deploy logic. - function _run(bool _needsSuperchain) internal { + function _run(bool _needsSuperchain) internal virtual { console.log("start of L1 Deploy!"); // Set up the Superchain if needed. diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol index e126c39f18c..d6c0b32cd57 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol @@ -59,7 +59,7 @@ struct GuardianConfig { /// be used as an example to guide the setup and configuration of the Safe contracts. contract DeployOwnership is Deploy { /// @notice Internal function containing the deploy logic. - function _run() internal override { + function _run(bool) internal override { console.log("start of Ownership Deployment"); // The SuperchainConfig is needed as a constructor argument to the Deputy Guardian Module deploySuperchainConfig(); From e43ade101b33ca1e31530463d012de155cec1b7f Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Tue, 3 Dec 2024 13:12:49 -0500 Subject: [PATCH 032/111] maint: remove versions.json (#13184) Removes the legacy versions.json file and replaces it with references to mise.toml. 
--- .circleci/config.yml | 4 ++-- Makefile | 2 +- interop-devnet/create-chains.sh | 2 +- mise.toml | 2 ++ ops/docker/Dockerfile.packages | 4 ++-- ops/docker/op-stack-go/Dockerfile | 8 ++++++-- ops/docker/op-stack-go/Dockerfile.dockerignore | 2 +- packages/contracts-bedrock/test/kontrol/README.md | 2 +- .../test/kontrol/scripts/common.sh | 6 +++--- versions.json | 15 --------------- 10 files changed, 19 insertions(+), 28 deletions(-) delete mode 100644 versions.json diff --git a/.circleci/config.yml b/.circleci/config.yml index 1f7b8a95e61..8cdd813d72a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -257,7 +257,7 @@ jobs: working_directory: packages/contracts-bedrock command: | # Clone asterisc @ the pinned version to fetch remote `RISCV.sol` - ASTERISC_REV="$(cat ../../versions.json | jq -r .asterisc)" + ASTERISC_REV="v$(yq '.tools.asterisc' ../../mise.toml)" REMOTE_ASTERISC_PATH="./src/vendor/asterisc/RISCV_Remote.sol" git clone https://github.com/ethereum-optimism/asterisc \ -b $ASTERISC_REV && \ @@ -526,7 +526,7 @@ jobs: - run: name: Sign command: | - VER=$(jq -r .binary_signer < versions.json) + VER=$(yq '.tools.binary_signer' mise.toml) wget -O - "https://github.com/ethereum-optimism/binary_signer/archive/refs/tags/v${VER}.tar.gz" | tar xz cd "binary_signer-${VER}/signer" diff --git a/Makefile b/Makefile index 4c901d2158a..bcbe3e71f9b 100644 --- a/Makefile +++ b/Makefile @@ -248,5 +248,5 @@ update-op-geth: ## Updates the Geth version used in the project .PHONY: update-op-geth install-eth2-testnet-genesis: - go install -v github.com/protolambda/eth2-testnet-genesis@$(shell jq -r .eth2_testnet_genesis < versions.json) + go install -v github.com/protolambda/eth2-testnet-genesis@v$(shell yq '.tools."go:github.com/protolambda/eth2-testnet-genesis"' mise.toml) .PHONY: install-eth2-testnet-genesis diff --git a/interop-devnet/create-chains.sh b/interop-devnet/create-chains.sh index 50668209915..05e44e67ff2 100755 --- a/interop-devnet/create-chains.sh 
+++ b/interop-devnet/create-chains.sh @@ -3,7 +3,7 @@ set -eu # Run this with workdir set as root of the repo -if [ -f "../versions.json" ]; then +if [ -f "../mise.toml" ]; then echo "Running create-chains script." else echo "Cannot run create-chains script, must be in interop-devnet dir, but currently in:" diff --git a/mise.toml b/mise.toml index a2cf15470a5..c1b55809da0 100644 --- a/mise.toml +++ b/mise.toml @@ -6,8 +6,10 @@ rust = "1.83.0" python = "3.12.0" uv = "0.5.5" jq = "1.7.1" +yq = "4.44.5" shellcheck = "0.10.0" direnv = "2.35.0" +just = "1.37.0" # Cargo dependencies "cargo:just" = "1.37.0" diff --git a/ops/docker/Dockerfile.packages b/ops/docker/Dockerfile.packages index fd2939cb45f..1e5786412cf 100644 --- a/ops/docker/Dockerfile.packages +++ b/ops/docker/Dockerfile.packages @@ -24,7 +24,7 @@ COPY --from=foundry /usr/local/bin/cast /usr/local/bin/cast WORKDIR /opt/optimism -COPY ./versions.json ./versions.json +COPY ./mise.toml ./mise.toml COPY ./packages ./packages COPY .git/ ./.git COPY .gitmodules ./.gitmodules @@ -66,7 +66,7 @@ COPY --from=1password/op:2 /usr/local/bin/op /usr/local/bin/op RUN mkdir -p /opt/optimism/packages/contracts-bedrock COPY --from=base /opt/optimism/packages/contracts-bedrock /opt/optimism/packages/contracts-bedrock -COPY --from=base /opt/optimism/versions.json /opt/optimism/versions.json +COPY --from=base /opt/optimism/mise.toml /opt/optimism/mise.toml WORKDIR /opt/optimism/packages/contracts-bedrock diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index da0fc156f02..ef1da6ce116 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -14,9 +14,13 @@ FROM --platform=$BUILDPLATFORM golang:1.22.7-alpine3.20 AS builder RUN apk add --no-cache curl tar gzip make gcc musl-dev linux-headers git jq bash +# install yq +RUN wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/local/bin/yq && \ + chmod +x /usr/local/bin/yq + # install 
versioned toolchain -COPY ./versions.json . -RUN curl -L https://github.com/casey/just/releases/download/$(jq -r .just < versions.json)/just-$(jq -r .just < versions.json)-x86_64-unknown-linux-musl.tar.gz | \ +COPY ./mise.toml . +RUN curl -L https://github.com/casey/just/releases/download/$(yq '.tools.just' mise.toml)/just-$(yq '.tools.just' mise.toml)-x86_64-unknown-linux-musl.tar.gz | \ tar xz -C /usr/local/bin just # We copy the go.mod/sum first, so the `go mod download` does not have to re-run if dependencies do not change. diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore index edf3bbc1c51..847bbbb0eb7 100644 --- a/ops/docker/op-stack-go/Dockerfile.dockerignore +++ b/ops/docker/op-stack-go/Dockerfile.dockerignore @@ -21,4 +21,4 @@ !/go.mod !/go.sum !/just -!/versions.json +!/mise.toml diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index 1103feed1a0..9621160976e 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -90,7 +90,7 @@ Use the [`run-kontrol.sh`](./scripts/run-kontrol.sh) script to runs the proofs i The `run-kontrol.sh` script supports three modes of proof execution: - `container`: Runs the proofs using the same Docker image used in CI. This is the default execution mode—if no arguments are provided, the proofs will be executed in this mode. -- `local`: Runs the proofs with your local Kontrol install, and enforces that the Kontrol version matches the one used in CI, which is specified in [`versions.json`](../../../../versions.json). +- `local`: Runs the proofs with your local Kontrol install, and enforces that the Kontrol version matches the one used in CI, which is specified in [`mise.toml`](../../../../mise.toml). - `dev`: Run the proofs with your local Kontrol install, without enforcing any version in particular. 
The intended use case is proof development and related matters. It also supports two methods for specifying which tests to execute: diff --git a/packages/contracts-bedrock/test/kontrol/scripts/common.sh b/packages/contracts-bedrock/test/kontrol/scripts/common.sh index 3b99c3e5062..c301b28a9bd 100644 --- a/packages/contracts-bedrock/test/kontrol/scripts/common.sh +++ b/packages/contracts-bedrock/test/kontrol/scripts/common.sh @@ -11,7 +11,7 @@ usage_run_kontrol() { echo "" 1>&2 echo "Execution modes:" echo " container Run in docker container. Reproduce CI execution. (Default)" 1>&2 - echo " local Run locally, enforces registered versions.json version for better reproducibility. (Recommended)" 1>&2 + echo " local Run locally, enforces registered mise.toml version for better reproducibility. (Recommended)" 1>&2 echo " dev Run locally, does NOT enforce registered version. (Useful for developing with new versions and features)" 1>&2 echo "" 1>&2 echo "Tests executed:" @@ -28,7 +28,7 @@ usage_make_summary() { echo "" 1>&2 echo "Execution modes:" echo " container Run in docker container. Reproduce CI execution. (Default)" 1>&2 - echo " local Run locally, enforces registered versions.json version for better reproducibility. (Recommended)" 1>&2 + echo " local Run locally, enforces registered mise.toml version for better reproducibility. (Recommended)" 1>&2 echo " dev Run locally, does NOT enforce registered version. 
(Useful for developing with new versions and features)" 1>&2 exit 0 } @@ -43,7 +43,7 @@ export CONTAINER_NAME=kontrol-tests if [ "$KONTROL_FP_DEPLOYMENT" = true ]; then export CONTAINER_NAME=kontrol-fp-tests fi -KONTROLRC=$(jq -r .kontrol < "$WORKSPACE_DIR/../../versions.json") +KONTROLRC=$(yq '.tools.kontrol' "$WORKSPACE_DIR/../../mise.toml") export KONTROL_RELEASE=$KONTROLRC export LOCAL=false export SCRIPT_TESTS=false diff --git a/versions.json b/versions.json deleted file mode 100644 index e734259955f..00000000000 --- a/versions.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "go": "1.22.6", - "abigen": "v1.10.25", - "foundry": "143abd6a768eeb52a5785240b763d72a56987b4a", - "geth": "v1.14.7", - "geth_release": "1.14.7-aa55f5ea", - "eth2_testnet_genesis": "v0.10.0", - "nvm": "v20.9.0", - "slither": "0.10.2", - "kontrol": "1.0.53", - "just": "1.34.0", - "binary_signer": "1.0.4", - "semgrep": "1.90.0", - "asterisc": "v1.1.2" -} From 622fb352b935a5dfc083af96701a97bd1ebe9cde Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 3 Dec 2024 11:14:24 -0700 Subject: [PATCH 033/111] op-batcher: Wait for queue to drain before shutdown (#13172) Tests are flaking because the batcher's txmgr still polls for receipts after shutdown. Those polls emit logs, which then cause the tests to panic. To fix, I updated the batcher shutdown code to wait for all pending transactions in the tx queue to complete. 
--- op-batcher/batcher/driver.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index d52a31bba60..729626cd946 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -482,6 +482,9 @@ func (l *BatchSubmitter) mainLoop(ctx context.Context, receiptsCh chan txmgr.TxR l.publishStateToL1(queue, receiptsCh, daGroup, l.Config.PollInterval) case <-ctx.Done(): + if err := queue.Wait(); err != nil { + l.Log.Error("error waiting for transactions to complete", "err", err) + } l.Log.Warn("main loop returning") return } From a46cc6163b0eacb1e86097259bb6b50419975e4d Mon Sep 17 00:00:00 2001 From: Sam Stokes <35908605+bitwiseguy@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:15:11 -0500 Subject: [PATCH 034/111] op-deployer: fix nil dereference of SuperchainRoles (#13178) --- op-deployer/pkg/deployer/state/intent.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/op-deployer/pkg/deployer/state/intent.go b/op-deployer/pkg/deployer/state/intent.go index 067ea5bead6..5b1e4222b3e 100644 --- a/op-deployer/pkg/deployer/state/intent.go +++ b/op-deployer/pkg/deployer/state/intent.go @@ -98,6 +98,9 @@ func (c *Intent) validateCustomConfig() error { return ErrL2ContractsLocatorUndefined } + if c.SuperchainRoles == nil { + return errors.New("SuperchainRoles is set to nil") + } if err := c.SuperchainRoles.CheckNoZeroAddresses(); err != nil { return err } @@ -149,7 +152,7 @@ func (c *Intent) validateStandardValues() error { if err != nil { return fmt.Errorf("error getting standard superchain roles: %w", err) } - if *c.SuperchainRoles != *standardSuperchainRoles { + if c.SuperchainRoles == nil || *c.SuperchainRoles != *standardSuperchainRoles { return fmt.Errorf("SuperchainRoles does not match standard value") } From 508ccbe7918f904a354927477d0cfc20f6558154 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 3 Dec 2024 11:35:03 -0700 Subject: [PATCH 035/111] op-deployer/ctb: 
Add DeployOPCM script (#13187) * op-deployer/ctb: Add DeployOPCM script Adds a dedicated script to deploy OPCM for use with a future op-deployer bootstrap command. We'll use this for the Holocene deployment. * Update packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol Co-authored-by: blaine * Update packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol Co-authored-by: blaine * remove unused import * forgot an import --------- Co-authored-by: blaine --- .../scripts/deploy/DeployOPCM.s.sol | 280 ++++++++++++++++++ .../test/opcm/DeployOPCM.t.sol | 273 +++++++++++++++++ 2 files changed, 553 insertions(+) create mode 100644 packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol create mode 100644 packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol new file mode 100644 index 00000000000..507dd049544 --- /dev/null +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Script } from "forge-std/Script.sol"; + +import { LibString } from "@solady/utils/LibString.sol"; + +import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; + +contract DeployOPCMInput is BaseDeployIO { + ISuperchainConfig internal _superchainConfig; + IProtocolVersions internal _protocolVersions; + string internal _l1ContractsRelease; + + address internal _addressManagerBlueprint; + address internal _proxyBlueprint; + address internal _proxyAdminBlueprint; + address internal _l1ChugSplashProxyBlueprint; + address internal _resolvedDelegateProxyBlueprint; + address internal 
_anchorStateRegistryBlueprint; + address internal _permissionedDisputeGame1Blueprint; + address internal _permissionedDisputeGame2Blueprint; + + address internal _l1ERC721BridgeImpl; + address internal _optimismPortalImpl; + address internal _systemConfigImpl; + address internal _optimismMintableERC20FactoryImpl; + address internal _l1CrossDomainMessengerImpl; + address internal _l1StandardBridgeImpl; + address internal _disputeGameFactoryImpl; + address internal _delayedWETHImpl; + address internal _mipsImpl; + + // Setter for address type + function set(bytes4 _sel, address _addr) public { + require(_addr != address(0), "DeployOPCMInput: cannot set zero address"); + + if (_sel == this.superchainConfig.selector) _superchainConfig = ISuperchainConfig(_addr); + else if (_sel == this.protocolVersions.selector) _protocolVersions = IProtocolVersions(_addr); + else if (_sel == this.addressManagerBlueprint.selector) _addressManagerBlueprint = _addr; + else if (_sel == this.proxyBlueprint.selector) _proxyBlueprint = _addr; + else if (_sel == this.proxyAdminBlueprint.selector) _proxyAdminBlueprint = _addr; + else if (_sel == this.l1ChugSplashProxyBlueprint.selector) _l1ChugSplashProxyBlueprint = _addr; + else if (_sel == this.resolvedDelegateProxyBlueprint.selector) _resolvedDelegateProxyBlueprint = _addr; + else if (_sel == this.anchorStateRegistryBlueprint.selector) _anchorStateRegistryBlueprint = _addr; + else if (_sel == this.permissionedDisputeGame1Blueprint.selector) _permissionedDisputeGame1Blueprint = _addr; + else if (_sel == this.permissionedDisputeGame2Blueprint.selector) _permissionedDisputeGame2Blueprint = _addr; + else if (_sel == this.l1ERC721BridgeImpl.selector) _l1ERC721BridgeImpl = _addr; + else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = _addr; + else if (_sel == this.systemConfigImpl.selector) _systemConfigImpl = _addr; + else if (_sel == this.optimismMintableERC20FactoryImpl.selector) _optimismMintableERC20FactoryImpl = _addr; + 
else if (_sel == this.l1CrossDomainMessengerImpl.selector) _l1CrossDomainMessengerImpl = _addr; + else if (_sel == this.l1StandardBridgeImpl.selector) _l1StandardBridgeImpl = _addr; + else if (_sel == this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = _addr; + else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = _addr; + else if (_sel == this.mipsImpl.selector) _mipsImpl = _addr; + else revert("DeployOPCMInput: unknown selector"); + } + + // Setter for string type + function set(bytes4 _sel, string memory _value) public { + require(!LibString.eq(_value, ""), "DeployOPCMInput: cannot set empty string"); + if (_sel == this.l1ContractsRelease.selector) _l1ContractsRelease = _value; + else revert("DeployOPCMInput: unknown selector"); + } + + // Getters + function superchainConfig() public view returns (ISuperchainConfig) { + require(address(_superchainConfig) != address(0), "DeployOPCMInput: not set"); + return _superchainConfig; + } + + function protocolVersions() public view returns (IProtocolVersions) { + require(address(_protocolVersions) != address(0), "DeployOPCMInput: not set"); + return _protocolVersions; + } + + function l1ContractsRelease() public view returns (string memory) { + require(!LibString.eq(_l1ContractsRelease, ""), "DeployOPCMInput: not set"); + return _l1ContractsRelease; + } + + function addressManagerBlueprint() public view returns (address) { + require(_addressManagerBlueprint != address(0), "DeployOPCMInput: not set"); + return _addressManagerBlueprint; + } + + function proxyBlueprint() public view returns (address) { + require(_proxyBlueprint != address(0), "DeployOPCMInput: not set"); + return _proxyBlueprint; + } + + function proxyAdminBlueprint() public view returns (address) { + require(_proxyAdminBlueprint != address(0), "DeployOPCMInput: not set"); + return _proxyAdminBlueprint; + } + + function l1ChugSplashProxyBlueprint() public view returns (address) { + require(_l1ChugSplashProxyBlueprint != address(0), 
"DeployOPCMInput: not set"); + return _l1ChugSplashProxyBlueprint; + } + + function resolvedDelegateProxyBlueprint() public view returns (address) { + require(_resolvedDelegateProxyBlueprint != address(0), "DeployOPCMInput: not set"); + return _resolvedDelegateProxyBlueprint; + } + + function anchorStateRegistryBlueprint() public view returns (address) { + require(_anchorStateRegistryBlueprint != address(0), "DeployOPCMInput: not set"); + return _anchorStateRegistryBlueprint; + } + + function permissionedDisputeGame1Blueprint() public view returns (address) { + require(_permissionedDisputeGame1Blueprint != address(0), "DeployOPCMInput: not set"); + return _permissionedDisputeGame1Blueprint; + } + + function permissionedDisputeGame2Blueprint() public view returns (address) { + require(_permissionedDisputeGame2Blueprint != address(0), "DeployOPCMInput: not set"); + return _permissionedDisputeGame2Blueprint; + } + + function l1ERC721BridgeImpl() public view returns (address) { + require(_l1ERC721BridgeImpl != address(0), "DeployOPCMInput: not set"); + return _l1ERC721BridgeImpl; + } + + function optimismPortalImpl() public view returns (address) { + require(_optimismPortalImpl != address(0), "DeployOPCMInput: not set"); + return _optimismPortalImpl; + } + + function systemConfigImpl() public view returns (address) { + require(_systemConfigImpl != address(0), "DeployOPCMInput: not set"); + return _systemConfigImpl; + } + + function optimismMintableERC20FactoryImpl() public view returns (address) { + require(_optimismMintableERC20FactoryImpl != address(0), "DeployOPCMInput: not set"); + return _optimismMintableERC20FactoryImpl; + } + + function l1CrossDomainMessengerImpl() public view returns (address) { + require(_l1CrossDomainMessengerImpl != address(0), "DeployOPCMInput: not set"); + return _l1CrossDomainMessengerImpl; + } + + function l1StandardBridgeImpl() public view returns (address) { + require(_l1StandardBridgeImpl != address(0), "DeployOPCMInput: not set"); + 
return _l1StandardBridgeImpl; + } + + function disputeGameFactoryImpl() public view returns (address) { + require(_disputeGameFactoryImpl != address(0), "DeployOPCMInput: not set"); + return _disputeGameFactoryImpl; + } + + function delayedWETHImpl() public view returns (address) { + require(_delayedWETHImpl != address(0), "DeployOPCMInput: not set"); + return _delayedWETHImpl; + } + + function mipsImpl() public view returns (address) { + require(_mipsImpl != address(0), "DeployOPCMInput: not set"); + return _mipsImpl; + } +} + +contract DeployOPCMOutput is BaseDeployIO { + OPContractsManager internal _opcm; + + // Setter for address type + function set(bytes4 _sel, address _addr) public { + require(_addr != address(0), "DeployOPCMOutput: cannot set zero address"); + if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); + else revert("DeployOPCMOutput: unknown selector"); + } + + // Getter + function opcm() public view returns (OPContractsManager) { + require(address(_opcm) != address(0), "DeployOPCMOutput: not set"); + return _opcm; + } +} + +contract DeployOPCM is Script { + function run(DeployOPCMInput _doi, DeployOPCMOutput _doo) public { + OPContractsManager.Blueprints memory blueprints = OPContractsManager.Blueprints({ + addressManager: _doi.addressManagerBlueprint(), + proxy: _doi.proxyBlueprint(), + proxyAdmin: _doi.proxyAdminBlueprint(), + l1ChugSplashProxy: _doi.l1ChugSplashProxyBlueprint(), + resolvedDelegateProxy: _doi.resolvedDelegateProxyBlueprint(), + anchorStateRegistry: _doi.anchorStateRegistryBlueprint(), + permissionedDisputeGame1: _doi.permissionedDisputeGame1Blueprint(), + permissionedDisputeGame2: _doi.permissionedDisputeGame2Blueprint() + }); + OPContractsManager.Implementations memory implementations = OPContractsManager.Implementations({ + l1ERC721BridgeImpl: address(_doi.l1ERC721BridgeImpl()), + optimismPortalImpl: address(_doi.optimismPortalImpl()), + systemConfigImpl: address(_doi.systemConfigImpl()), + 
optimismMintableERC20FactoryImpl: address(_doi.optimismMintableERC20FactoryImpl()), + l1CrossDomainMessengerImpl: address(_doi.l1CrossDomainMessengerImpl()), + l1StandardBridgeImpl: address(_doi.l1StandardBridgeImpl()), + disputeGameFactoryImpl: address(_doi.disputeGameFactoryImpl()), + delayedWETHImpl: address(_doi.delayedWETHImpl()), + mipsImpl: address(_doi.mipsImpl()) + }); + + OPContractsManager opcm_ = deployOPCM( + _doi.superchainConfig(), _doi.protocolVersions(), blueprints, implementations, _doi.l1ContractsRelease() + ); + _doo.set(_doo.opcm.selector, address(opcm_)); + + assertValidOpcm(_doi, _doo); + } + + function deployOPCM( + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions, + OPContractsManager.Blueprints memory _blueprints, + OPContractsManager.Implementations memory _implementations, + string memory _l1ContractsRelease + ) + public + returns (OPContractsManager opcm_) + { + vm.broadcast(msg.sender); + opcm_ = new OPContractsManager( + _superchainConfig, _protocolVersions, _l1ContractsRelease, _blueprints, _implementations + ); + vm.label(address(opcm_), "OPContractsManager"); + } + + function assertValidOpcm(DeployOPCMInput _doi, DeployOPCMOutput _doo) public view { + OPContractsManager impl = OPContractsManager(address(_doo.opcm())); + require(address(impl.superchainConfig()) == address(_doi.superchainConfig()), "OPCMI-10"); + require(address(impl.protocolVersions()) == address(_doi.protocolVersions()), "OPCMI-20"); + require(LibString.eq(impl.l1ContractsRelease(), _doi.l1ContractsRelease())); + + OPContractsManager.Blueprints memory blueprints = impl.blueprints(); + require(blueprints.addressManager == _doi.addressManagerBlueprint(), "OPCMI-40"); + require(blueprints.proxy == _doi.proxyBlueprint(), "OPCMI-50"); + require(blueprints.proxyAdmin == _doi.proxyAdminBlueprint(), "OPCMI-60"); + require(blueprints.l1ChugSplashProxy == _doi.l1ChugSplashProxyBlueprint(), "OPCMI-70"); + require(blueprints.resolvedDelegateProxy == 
_doi.resolvedDelegateProxyBlueprint(), "OPCMI-80"); + require(blueprints.anchorStateRegistry == _doi.anchorStateRegistryBlueprint(), "OPCMI-90"); + require(blueprints.permissionedDisputeGame1 == _doi.permissionedDisputeGame1Blueprint(), "OPCMI-100"); + require(blueprints.permissionedDisputeGame2 == _doi.permissionedDisputeGame2Blueprint(), "OPCMI-110"); + + OPContractsManager.Implementations memory implementations = impl.implementations(); + require(implementations.l1ERC721BridgeImpl == _doi.l1ERC721BridgeImpl(), "OPCMI-120"); + require(implementations.optimismPortalImpl == _doi.optimismPortalImpl(), "OPCMI-130"); + require(implementations.systemConfigImpl == _doi.systemConfigImpl(), "OPCMI-140"); + require( + implementations.optimismMintableERC20FactoryImpl == _doi.optimismMintableERC20FactoryImpl(), "OPCMI-150" + ); + require(implementations.l1CrossDomainMessengerImpl == _doi.l1CrossDomainMessengerImpl(), "OPCMI-160"); + require(implementations.l1StandardBridgeImpl == _doi.l1StandardBridgeImpl(), "OPCMI-170"); + require(implementations.disputeGameFactoryImpl == _doi.disputeGameFactoryImpl(), "OPCMI-180"); + require(implementations.delayedWETHImpl == _doi.delayedWETHImpl(), "OPCMI-190"); + require(implementations.mipsImpl == _doi.mipsImpl(), "OPCMI-200"); + } + + function etchIOContracts() public returns (DeployOPCMInput doi_, DeployOPCMOutput doo_) { + (doi_, doo_) = getIOContracts(); + vm.etch(address(doi_), type(DeployOPCMInput).runtimeCode); + vm.etch(address(doo_), type(DeployOPCMOutput).runtimeCode); + } + + function getIOContracts() public view returns (DeployOPCMInput doi_, DeployOPCMOutput doo_) { + doi_ = DeployOPCMInput(DeployUtils.toIOAddress(msg.sender, "optimism.DeployOPCMInput")); + doo_ = DeployOPCMOutput(DeployUtils.toIOAddress(msg.sender, "optimism.DeployOPCMOutput")); + } +} diff --git a/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol new file mode 100644 index 
00000000000..81bf44eb0bc --- /dev/null +++ b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Test } from "forge-std/Test.sol"; +import { DeployOPCM, DeployOPCMInput, DeployOPCMOutput } from "scripts/deploy/DeployOPCM.s.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; + +contract DeployOPCMInput_Test is Test { + DeployOPCMInput dii; + string release = "1.0.0"; + + function setUp() public { + dii = new DeployOPCMInput(); + } + + function test_getters_whenNotSet_reverts() public { + vm.expectRevert("DeployOPCMInput: not set"); + dii.superchainConfig(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.protocolVersions(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1ContractsRelease(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.addressManagerBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.proxyBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.proxyAdminBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1ChugSplashProxyBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.resolvedDelegateProxyBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.anchorStateRegistryBlueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.permissionedDisputeGame1Blueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.permissionedDisputeGame2Blueprint(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1ERC721BridgeImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.optimismPortalImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.systemConfigImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.optimismMintableERC20FactoryImpl(); + + 
vm.expectRevert("DeployOPCMInput: not set"); + dii.l1CrossDomainMessengerImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.l1StandardBridgeImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.disputeGameFactoryImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.delayedWETHImpl(); + + vm.expectRevert("DeployOPCMInput: not set"); + dii.mipsImpl(); + } + + // Below setter tests are split into two parts to avoid stack too deep errors + + function test_set_part1_succeeds() public { + ISuperchainConfig superchainConfig = ISuperchainConfig(makeAddr("superchainConfig")); + IProtocolVersions protocolVersions = IProtocolVersions(makeAddr("protocolVersions")); + address addressManagerBlueprint = makeAddr("addressManagerBlueprint"); + address proxyBlueprint = makeAddr("proxyBlueprint"); + address proxyAdminBlueprint = makeAddr("proxyAdminBlueprint"); + address l1ChugSplashProxyBlueprint = makeAddr("l1ChugSplashProxyBlueprint"); + address resolvedDelegateProxyBlueprint = makeAddr("resolvedDelegateProxyBlueprint"); + address anchorStateRegistryBlueprint = makeAddr("anchorStateRegistryBlueprint"); + address permissionedDisputeGame1Blueprint = makeAddr("permissionedDisputeGame1Blueprint"); + address permissionedDisputeGame2Blueprint = makeAddr("permissionedDisputeGame2Blueprint"); + + dii.set(dii.superchainConfig.selector, address(superchainConfig)); + dii.set(dii.protocolVersions.selector, address(protocolVersions)); + dii.set(dii.l1ContractsRelease.selector, release); + dii.set(dii.addressManagerBlueprint.selector, addressManagerBlueprint); + dii.set(dii.proxyBlueprint.selector, proxyBlueprint); + dii.set(dii.proxyAdminBlueprint.selector, proxyAdminBlueprint); + dii.set(dii.l1ChugSplashProxyBlueprint.selector, l1ChugSplashProxyBlueprint); + dii.set(dii.resolvedDelegateProxyBlueprint.selector, resolvedDelegateProxyBlueprint); + dii.set(dii.anchorStateRegistryBlueprint.selector, anchorStateRegistryBlueprint); + 
dii.set(dii.permissionedDisputeGame1Blueprint.selector, permissionedDisputeGame1Blueprint); + dii.set(dii.permissionedDisputeGame2Blueprint.selector, permissionedDisputeGame2Blueprint); + + assertEq(address(dii.superchainConfig()), address(superchainConfig), "50"); + assertEq(address(dii.protocolVersions()), address(protocolVersions), "100"); + assertEq(dii.l1ContractsRelease(), release, "150"); + assertEq(dii.addressManagerBlueprint(), addressManagerBlueprint, "200"); + assertEq(dii.proxyBlueprint(), proxyBlueprint, "250"); + assertEq(dii.proxyAdminBlueprint(), proxyAdminBlueprint, "300"); + assertEq(dii.l1ChugSplashProxyBlueprint(), l1ChugSplashProxyBlueprint, "350"); + assertEq(dii.resolvedDelegateProxyBlueprint(), resolvedDelegateProxyBlueprint, "400"); + assertEq(dii.anchorStateRegistryBlueprint(), anchorStateRegistryBlueprint, "450"); + assertEq(dii.permissionedDisputeGame1Blueprint(), permissionedDisputeGame1Blueprint, "500"); + assertEq(dii.permissionedDisputeGame2Blueprint(), permissionedDisputeGame2Blueprint, "550"); + } + + function test_set_part2_succeeds() public { + address l1ERC721BridgeImpl = makeAddr("l1ERC721BridgeImpl"); + address optimismPortalImpl = makeAddr("optimismPortalImpl"); + address systemConfigImpl = makeAddr("systemConfigImpl"); + address optimismMintableERC20FactoryImpl = makeAddr("optimismMintableERC20FactoryImpl"); + address l1CrossDomainMessengerImpl = makeAddr("l1CrossDomainMessengerImpl"); + address l1StandardBridgeImpl = makeAddr("l1StandardBridgeImpl"); + address disputeGameFactoryImpl = makeAddr("disputeGameFactoryImpl"); + address delayedWETHImpl = makeAddr("delayedWETHImpl"); + address mipsImpl = makeAddr("mipsImpl"); + + dii.set(dii.l1ERC721BridgeImpl.selector, l1ERC721BridgeImpl); + dii.set(dii.optimismPortalImpl.selector, optimismPortalImpl); + dii.set(dii.systemConfigImpl.selector, systemConfigImpl); + dii.set(dii.optimismMintableERC20FactoryImpl.selector, optimismMintableERC20FactoryImpl); + 
dii.set(dii.l1CrossDomainMessengerImpl.selector, l1CrossDomainMessengerImpl); + dii.set(dii.l1StandardBridgeImpl.selector, l1StandardBridgeImpl); + dii.set(dii.disputeGameFactoryImpl.selector, disputeGameFactoryImpl); + dii.set(dii.delayedWETHImpl.selector, delayedWETHImpl); + dii.set(dii.mipsImpl.selector, mipsImpl); + + assertEq(dii.l1ERC721BridgeImpl(), l1ERC721BridgeImpl, "600"); + assertEq(dii.optimismPortalImpl(), optimismPortalImpl, "650"); + assertEq(dii.systemConfigImpl(), systemConfigImpl, "700"); + assertEq(dii.optimismMintableERC20FactoryImpl(), optimismMintableERC20FactoryImpl, "750"); + assertEq(dii.l1CrossDomainMessengerImpl(), l1CrossDomainMessengerImpl, "800"); + assertEq(dii.l1StandardBridgeImpl(), l1StandardBridgeImpl, "850"); + assertEq(dii.disputeGameFactoryImpl(), disputeGameFactoryImpl, "900"); + assertEq(dii.delayedWETHImpl(), delayedWETHImpl, "950"); + assertEq(dii.mipsImpl(), mipsImpl, "1000"); + } + + function test_set_withZeroAddress_reverts() public { + vm.expectRevert("DeployOPCMInput: cannot set zero address"); + dii.set(dii.superchainConfig.selector, address(0)); + } + + function test_set_withEmptyString_reverts() public { + vm.expectRevert("DeployOPCMInput: cannot set empty string"); + dii.set(dii.l1ContractsRelease.selector, ""); + } + + function test_set_withInvalidSelector_reverts() public { + vm.expectRevert("DeployOPCMInput: unknown selector"); + dii.set(bytes4(0xdeadbeef), address(1)); + } + + function test_set_withInvalidStringSelector_reverts() public { + vm.expectRevert("DeployOPCMInput: unknown selector"); + dii.set(bytes4(0xdeadbeef), "test"); + } +} + +contract DeployOPCMOutput_Test is Test { + DeployOPCMOutput doo; + + function setUp() public { + doo = new DeployOPCMOutput(); + } + + function test_getters_whenNotSet_reverts() public { + vm.expectRevert("DeployOPCMOutput: not set"); + doo.opcm(); + } + + function test_set_succeeds() public { + OPContractsManager opcm = OPContractsManager(makeAddr("opcm")); + 
vm.etch(address(opcm), hex"01"); + + doo.set(doo.opcm.selector, address(opcm)); + + assertEq(address(doo.opcm()), address(opcm), "50"); + } + + function test_set_withZeroAddress_reverts() public { + vm.expectRevert("DeployOPCMOutput: cannot set zero address"); + doo.set(doo.opcm.selector, address(0)); + } + + function test_set_withInvalidSelector_reverts() public { + vm.expectRevert("DeployOPCMOutput: unknown selector"); + doo.set(bytes4(0xdeadbeef), makeAddr("test")); + } +} + +contract DeployOPCMTest is Test { + DeployOPCM deployOPCM; + DeployOPCMInput doi; + DeployOPCMOutput doo; + + ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfigProxy")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersionsProxy")); + + function setUp() public virtual { + deployOPCM = new DeployOPCM(); + (doi, doo) = deployOPCM.etchIOContracts(); + } + + function test_run_succeeds() public { + doi.set(doi.superchainConfig.selector, address(superchainConfigProxy)); + doi.set(doi.protocolVersions.selector, address(protocolVersionsProxy)); + doi.set(doi.l1ContractsRelease.selector, "1.0.0"); + + // Set and etch blueprints + doi.set(doi.addressManagerBlueprint.selector, makeAddr("addressManagerBlueprint")); + doi.set(doi.proxyBlueprint.selector, makeAddr("proxyBlueprint")); + doi.set(doi.proxyAdminBlueprint.selector, makeAddr("proxyAdminBlueprint")); + doi.set(doi.l1ChugSplashProxyBlueprint.selector, makeAddr("l1ChugSplashProxyBlueprint")); + doi.set(doi.resolvedDelegateProxyBlueprint.selector, makeAddr("resolvedDelegateProxyBlueprint")); + doi.set(doi.anchorStateRegistryBlueprint.selector, makeAddr("anchorStateRegistryBlueprint")); + doi.set(doi.permissionedDisputeGame1Blueprint.selector, makeAddr("permissionedDisputeGame1Blueprint")); + doi.set(doi.permissionedDisputeGame2Blueprint.selector, makeAddr("permissionedDisputeGame2Blueprint")); + + // Set and etch implementations + doi.set(doi.l1ERC721BridgeImpl.selector, 
makeAddr("l1ERC721BridgeImpl")); + doi.set(doi.optimismPortalImpl.selector, makeAddr("optimismPortalImpl")); + doi.set(doi.systemConfigImpl.selector, makeAddr("systemConfigImpl")); + doi.set(doi.optimismMintableERC20FactoryImpl.selector, makeAddr("optimismMintableERC20FactoryImpl")); + doi.set(doi.l1CrossDomainMessengerImpl.selector, makeAddr("l1CrossDomainMessengerImpl")); + doi.set(doi.l1StandardBridgeImpl.selector, makeAddr("l1StandardBridgeImpl")); + doi.set(doi.disputeGameFactoryImpl.selector, makeAddr("disputeGameFactoryImpl")); + doi.set(doi.delayedWETHImpl.selector, makeAddr("delayedWETHImpl")); + doi.set(doi.mipsImpl.selector, makeAddr("mipsImpl")); + + // Etch all addresses with dummy bytecode + vm.etch(address(doi.superchainConfig()), hex"01"); + vm.etch(address(doi.protocolVersions()), hex"01"); + + vm.etch(doi.addressManagerBlueprint(), hex"01"); + vm.etch(doi.proxyBlueprint(), hex"01"); + vm.etch(doi.proxyAdminBlueprint(), hex"01"); + vm.etch(doi.l1ChugSplashProxyBlueprint(), hex"01"); + vm.etch(doi.resolvedDelegateProxyBlueprint(), hex"01"); + vm.etch(doi.anchorStateRegistryBlueprint(), hex"01"); + vm.etch(doi.permissionedDisputeGame1Blueprint(), hex"01"); + vm.etch(doi.permissionedDisputeGame2Blueprint(), hex"01"); + + vm.etch(doi.l1ERC721BridgeImpl(), hex"01"); + vm.etch(doi.optimismPortalImpl(), hex"01"); + vm.etch(doi.systemConfigImpl(), hex"01"); + vm.etch(doi.optimismMintableERC20FactoryImpl(), hex"01"); + vm.etch(doi.l1CrossDomainMessengerImpl(), hex"01"); + vm.etch(doi.l1StandardBridgeImpl(), hex"01"); + vm.etch(doi.disputeGameFactoryImpl(), hex"01"); + vm.etch(doi.delayedWETHImpl(), hex"01"); + vm.etch(doi.mipsImpl(), hex"01"); + + deployOPCM.run(doi, doo); + + assertNotEq(address(doo.opcm()), address(0)); + + // sanity check to ensure that the OPCM is validated + deployOPCM.assertValidOpcm(doi, doo); + } +} From e101cd8955069f184e07f9de522b9b8c25e8625f Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 3 Dec 2024 12:11:33 -0700 
Subject: [PATCH 036/111] op-e2e: Recover gracefully from log-after-exit panics (#13190) * op-e2e: Recover gracefully from log-after-exit panics There are a lot of places where we log after tests exit. This PR recovers from those panics so tests can continue. * add helper --- op-service/testlog/testlog.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/op-service/testlog/testlog.go b/op-service/testlog/testlog.go index fe7ede207cb..965fbe9d47b 100644 --- a/op-service/testlog/testlog.go +++ b/op-service/testlog/testlog.go @@ -159,11 +159,22 @@ func (l *logger) flush() { scanner := bufio.NewScanner(l.buf) for scanner.Scan() { - l.t.Logf("%*s%s", padding, "", scanner.Text()) + l.internalFlush("%*s%s", padding, "", scanner.Text()) } l.buf.Reset() } +func (l *logger) internalFlush(format string, args ...any) { + defer func() { + if r := recover(); r != nil { + log.Warn("testlog: panic during flush", "recover", r) + } + }() + + l.t.Helper() + l.t.Logf(format, args...) +} + // The Go testing lib uses the runtime package to get info about the calling site, and then decorates the line. // We can't disable this decoration, but we can adjust the contents to align by padding after the info. // To pad the right amount, we estimate how long the info is. 
From 67dd69338fa417061f9ae531e369d0261f87e669 Mon Sep 17 00:00:00 2001 From: clabby Date: Tue, 3 Dec 2024 14:22:52 -0500 Subject: [PATCH 037/111] chore(opc): Bump `asterisc` version (#13189) * chore(opc): Bump `asterisc` version * semver lock --- mise.toml | 2 +- packages/contracts-bedrock/snapshots/semver-lock.json | 4 ++-- packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mise.toml b/mise.toml index c1b55809da0..29bf5636e12 100644 --- a/mise.toml +++ b/mise.toml @@ -39,7 +39,7 @@ anvil = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" # Put things here if you need to track versions of tools or projects that can't # actually be managed by mise (yet). Make sure that anything you put in here is # also found inside of disabled_tools or mise will try to install it. -asterisc = "1.1.1" +asterisc = "1.2.0" kontrol = "1.0.53" binary_signer = "1.0.4" diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 7c7cadb614b..1904131f2b0 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -212,8 +212,8 @@ "sourceCodeHash": "0x5dc6b0b4ae4ab29085c52f74a4498d8a3d04928b844491749cd7186623e8b967" }, "src/vendor/asterisc/RISCV.sol": { - "initCodeHash": "0x6b4323061187f2c8efe8de43bf1ecdc0798e2d95ad69470ed4151dadc094fedf", - "sourceCodeHash": "0x26cae049cf171efcc84c946a400704c30ebec5dba4f1548d1f1529f68f56c1ec" + "initCodeHash": "0x7329cca924e189eeaa2d883234f6cb5fd787c8bf3339d8298e721778c2947ce5", + "sourceCodeHash": "0xf85f3dac3e0fcc7f52fb62f8e66b74f2bc6798b6c9fb43e1a610c5edc04be412" }, "src/vendor/eas/EAS.sol": { "initCodeHash": "0xf96d1ebc530ed95e2dffebcfa2b4a1f18103235e6352d97838b77b7a2c14567b", diff --git a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol index 09fa93a0fcd..10a29679a25 
100644 --- a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol +++ b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity 0.8.25; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; @@ -14,8 +14,8 @@ contract RISCV is IBigStepper { IPreimageOracle public oracle; /// @notice The version of the contract. - /// @custom:semver 1.1.0-rc.2 - string public constant version = "1.1.0-rc.2"; + /// @custom:semver 1.2.0-rc.1 + string public constant version = "1.2.0-rc.1"; /// @param _oracle The preimage oracle contract. constructor(IPreimageOracle _oracle) { From d0c4e370618c7ed4a3ab7ce6f699f0df62ddd8b9 Mon Sep 17 00:00:00 2001 From: Yann Hodique Date: Tue, 3 Dec 2024 21:13:43 +0100 Subject: [PATCH 038/111] op components migration to just (#13185) * build(op-supervisor): migrate build to just * build(op-service): migrate build to just * build(op-conductor): migrate build to just * build(op-wheel): migrate build to just * build(op-challenger): cleanup visualize target The referenced visualize.sh script doesn't exist. 
It was moved around and finally deleted in #12286 * build(op-challenger): migrate build to just * build(op-chain-ops): migrate build to just * build(op-dispute-mon): migrate build to just * build(op-alt-da): migrate build to just --- op-alt-da/Makefile | 23 ++--------------- op-alt-da/justfile | 20 +++++++++++++++ op-chain-ops/Makefile | 55 ++--------------------------------------- op-chain-ops/justfile | 41 ++++++++++++++++++++++++++++++ op-challenger/Makefile | 36 ++------------------------- op-challenger/justfile | 25 +++++++++++++++++++ op-conductor/Makefile | 27 ++------------------ op-conductor/justfile | 23 +++++++++++++++++ op-dispute-mon/Makefile | 22 ++--------------- op-dispute-mon/justfile | 21 ++++++++++++++++ op-service/Makefile | 28 ++------------------- op-service/justfile | 23 +++++++++++++++++ op-supervisor/Makefile | 24 ++---------------- op-supervisor/justfile | 21 ++++++++++++++++ op-wheel/Makefile | 15 ++--------- op-wheel/justfile | 13 ++++++++++ 16 files changed, 203 insertions(+), 214 deletions(-) create mode 100644 op-alt-da/justfile create mode 100644 op-chain-ops/justfile create mode 100644 op-challenger/justfile create mode 100644 op-conductor/justfile create mode 100644 op-dispute-mon/justfile create mode 100644 op-service/justfile create mode 100644 op-supervisor/justfile create mode 100644 op-wheel/justfile diff --git a/op-alt-da/Makefile b/op-alt-da/Makefile index c98ea24c209..eb6fb9b7804 100644 --- a/op-alt-da/Makefile +++ b/op-alt-da/Makefile @@ -1,22 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := da-server clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -da-server: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/da-server ./cmd/daserver - 
-clean: - rm bin/da-server - -test: - go test -v ./... - -.PHONY: \ - op-batcher \ - clean \ - test +include ../just/deprecated.mk diff --git a/op-alt-da/justfile b/op-alt-da/justfile new file mode 100644 index 00000000000..f1dbced35b6 --- /dev/null +++ b/op-alt-da/justfile @@ -0,0 +1,20 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/da-server" + +# Build the da-server binary +da-server: (go_build BINARY "./cmd/daserver" "-ldflags" _LDFLAGSSTRING) + +# Remove build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile index ad595b65e36..4dac7c1b6bb 100644 --- a/op-chain-ops/Makefile +++ b/op-chain-ops/Makefile @@ -1,54 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') +DEPRECATED_TARGETS := ecotone-scalar receipt-reference-builder test op-deployer fuzz sync-standard-version -# Find the github tag that points to this commit. 
If none are found, set the version string to "untagged" -# Prioritizes release tag, if one exists, over tags suffixed with "-rc" -VERSION ?= $(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-deployer/' | sed 's/op-deployer\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) - -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -ecotone-scalar: - go build -o ./bin/ecotone-scalar ./cmd/ecotone-scalar/main.go - -receipt-reference-builder: - go build -o ./bin/receipt-reference-builder ./cmd/receipt-reference-builder/*.go - -test: - go test ./... 
- -op-deployer: - GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-deployer ../op-deployer/cmd/op-deployer/main.go - -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzEncodeDecodeWithdrawal ./crossdomain" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzEncodeDecodeLegacyWithdrawal ./crossdomain" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzAliasing ./crossdomain" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzVersionedNonce ./crossdomain" \ - | parallel -j 8 {} - - -sync-standard-version: - curl -Lo ./deployer/opcm/standard-versions-mainnet.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-mainnet.toml - curl -Lo ./deployer/opcm/standard-versions-sepolia.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-sepolia.toml - -.PHONY: test fuzz op-deployer sync-standard-version +include ../just/deprecated.mk diff --git a/op-chain-ops/justfile b/op-chain-ops/justfile new file mode 100644 index 00000000000..a9c2bcad62d --- /dev/null +++ b/op-chain-ops/justfile @@ -0,0 +1,41 @@ +import '../just/go.just' + +# Build ldflags string +_VERSION_META_STR := if VERSION_META != "" { "+" + VERSION_META } else { "" } +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version.Meta=" + _VERSION_META_STR + " " + \ + "") + "'" + +# Build ecotone-scalar binary +ecotone-scalar: (go_build "./bin/ecotone-scalar" "./cmd/ecotone-scalar" "-ldflags" _LDFLAGSSTRING) + +# Build receipt-reference-builder binary +receipt-reference-builder: (go_build 
"./bin/receipt-reference-builder" "./cmd/receipt-reference-builder" "-ldflags" _LDFLAGSSTRING) + +# Run tests +test: (go_test "./...") + +# Build op-deployer binary +op-deployer: + just ../op-deployer/build + mkdir -p ./bin && ln -f ../op-deployer/bin/op-deployer ./bin/op-deployer + +# Run fuzzing tests +[private] +fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./crossdomain") + +fuzz: + printf "%s\n" \ + "FuzzEncodeDecodeWithdrawal" \ + "FuzzEncodeDecodeLegacyWithdrawal" \ + "FuzzAliasing" \ + "FuzzVersionedNonce" \ + | parallel -j {{PARALLEL_JOBS}} {{just_executable()}} fuzz_task {} + +# Sync standard versions +sync-standard-version: + curl -Lo ./deployer/opcm/standard-versions-mainnet.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-mainnet.toml + curl -Lo ./deployer/opcm/standard-versions-sepolia.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-sepolia.toml diff --git a/op-challenger/Makefile b/op-challenger/Makefile index 1a7422a4c87..35a202b99c4 100644 --- a/op-challenger/Makefile +++ b/op-challenger/Makefile @@ -1,35 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-challenger clean test fuzz visualize -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-challenger/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-challenger/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -op-challenger: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 
go build -v $(LDFLAGS) -o ./bin/op-challenger ./cmd - -fuzz: - go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzKeccak ./game/keccak/matrix - -clean: - rm bin/op-challenger - -test: - go test -v ./... - -visualize: - ./scripts/visualize.sh - -.PHONY: \ - op-challenger \ - clean \ - test \ - visualize +include ../just/deprecated.mk diff --git a/op-challenger/justfile b/op-challenger/justfile new file mode 100644 index 00000000000..ccb2b5fa7e9 --- /dev/null +++ b/op-challenger/justfile @@ -0,0 +1,25 @@ +import '../just/go.just' + +# Build ldflags string +_VERSION_META_STR := if VERSION_META != "" { "+" + VERSION_META } else { "" } +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-challenger/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-challenger/version.Meta=" + _VERSION_META_STR + " " + \ + "") + "'" + +BINARY := "./bin/op-challenger" + +# Build op-challenger binary +op-challenger: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Run fuzzing tests +fuzz: (go_fuzz "FuzzKeccak" "10s" "./game/keccak/matrix") + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-conductor/Makefile b/op-conductor/Makefile index 6360df3da0c..c76af2d563c 100644 --- a/op-conductor/Makefile +++ b/op-conductor/Makefile @@ -1,26 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-conductor clean test generate-mocks -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-conductor: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-conductor ./cmd - -clean: - rm bin/op-conductor - -test: - go 
test -v ./... - -generate-mocks: - go generate ./... - -.PHONY: \ - op-conductor \ - clean \ - test \ - generate-mocks +include ../just/deprecated.mk diff --git a/op-conductor/justfile b/op-conductor/justfile new file mode 100644 index 00000000000..7ee6ef39bcf --- /dev/null +++ b/op-conductor/justfile @@ -0,0 +1,23 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-conductor" + +# Build op-conductor binary +op-conductor: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") + +# Generate mocks +generate-mocks: (go_generate "./...") \ No newline at end of file diff --git a/op-dispute-mon/Makefile b/op-dispute-mon/Makefile index d94a0fa95a9..6bb994650bf 100644 --- a/op-dispute-mon/Makefile +++ b/op-dispute-mon/Makefile @@ -1,21 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-dispute-mon clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-dispute-mon: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-dispute-mon ./cmd -.PHONY: op-dispute-mon - -clean: - rm bin/op-dispute-mon -.PHONY: clean - -test: - go test -v ./... 
-.PHONY: test +include ../just/deprecated.mk diff --git a/op-dispute-mon/justfile b/op-dispute-mon/justfile new file mode 100644 index 00000000000..3788cbd2b14 --- /dev/null +++ b/op-dispute-mon/justfile @@ -0,0 +1,21 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-dispute-mon/version.Meta=" + VERSION_META + " " + \ + "") + "'" + +BINARY := "./bin/op-dispute-mon" + +# Build op-dispute-mon binary +op-dispute-mon: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-service/Makefile b/op-service/Makefile index f57c0b6b53a..eeb03302985 100644 --- a/op-service/Makefile +++ b/op-service/Makefile @@ -1,27 +1,3 @@ -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif +DEPRECATED_TARGETS := test generate-mocks fuzz -test: - go test -v ./... - -generate-mocks: - go generate ./... 
- -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadUnmarshal ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV1 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV2 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV3 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzOBP01 ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzEncodeDecodeBlob ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDetectNonBijectivity ./eth" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzEncodeScalar ./eth" \ - | parallel -j 8 {} - -.PHONY: \ - test \ - generate-mocks \ - fuzz +include ../just/deprecated.mk diff --git a/op-service/justfile b/op-service/justfile new file mode 100644 index 00000000000..bec1214f308 --- /dev/null +++ b/op-service/justfile @@ -0,0 +1,23 @@ +import '../just/go.just' + +# Run tests +test: (go_test "./...") + +# Generate mocks +generate-mocks: (go_generate "./...") + +[private] +service_fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./eth") + +# Run fuzzing tests +fuzz: + printf "%s\n" \ + "FuzzExecutionPayloadUnmarshal" \ + "FuzzExecutionPayloadMarshalUnmarshalV1" \ + "FuzzExecutionPayloadMarshalUnmarshalV2" \ + "FuzzExecutionPayloadMarshalUnmarshalV3" \ + "FuzzOBP01" \ + "FuzzEncodeDecodeBlob" \ + "FuzzDetectNonBijectivity" \ + "FuzzEncodeScalar" \ + | parallel -j {{PARALLEL_JOBS}} {{just_executable()}} service_fuzz_task {} diff --git a/op-supervisor/Makefile b/op-supervisor/Makefile index de4f2d9d261..144f7abd606 100644 --- a/op-supervisor/Makefile +++ b/op-supervisor/Makefile @@ -1,23 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 
+DEPRECATED_TARGETS := op-supervisor clean test -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGSSTRING +=-X main.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-supervisor: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-supervisor ./cmd - -clean: - rm bin/op-supervisor - -test: - go test -v ./... - -.PHONY: \ - op-supervisor \ - clean \ - test +include ../just/deprecated.mk diff --git a/op-supervisor/justfile b/op-supervisor/justfile new file mode 100644 index 00000000000..7063a3ffb05 --- /dev/null +++ b/op-supervisor/justfile @@ -0,0 +1,21 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "-X main.Meta=" + VERSION_META + " " + \ + "") + "'" + +BINARY := "./bin/op-supervisor" + +# Build op-supervisor binary +op-supervisor: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") diff --git a/op-wheel/Makefile b/op-wheel/Makefile index 06d11401202..78363075e99 100644 --- a/op-wheel/Makefile +++ b/op-wheel/Makefile @@ -1,14 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := op-wheel -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -op-wheel: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-wheel ./cmd - -.PHONY: \ - op-wheel +include ../just/deprecated.mk diff --git a/op-wheel/justfile b/op-wheel/justfile new file mode 100644 index 00000000000..40696592ed3 --- /dev/null +++ 
b/op-wheel/justfile @@ -0,0 +1,13 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-wheel" + +# Build op-wheel binary +op-wheel: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) \ No newline at end of file From 6be52fc221b57279d03487eefe3534ccd7d42fb5 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 4 Dec 2024 08:42:19 +1000 Subject: [PATCH 039/111] op-program: Add v1.4.0-rc.2 to list of op-program releases. (#13193) --- op-program/prestates/releases.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/op-program/prestates/releases.json b/op-program/prestates/releases.json index 63c54a0ba58..75106c4b3c9 100644 --- a/op-program/prestates/releases.json +++ b/op-program/prestates/releases.json @@ -1,5 +1,8 @@ [ { + "version": "1.4.0-rc.2", + "hash": "0x0364e4e72922e7d649338f558f8a14b50ca31922a1484e73ea03987fb1516095" + }, { "version": "1.4.0-rc.1", "hash": "0x03925193e3e89f87835bbdf3a813f60b2aa818a36bbe71cd5d8fd7e79f5e8afe" }, From 5d52959b8343ad6f6dc4e9bc6f023a2aeb974a11 Mon Sep 17 00:00:00 2001 From: clabby Date: Tue, 3 Dec 2024 17:45:24 -0500 Subject: [PATCH 040/111] chore(ops): Bump `kona` in `proofs-tools` (#13196) --- docker-bake.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index 150b7db0b08..57f5dbbfff6 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -207,7 +207,7 @@ target "proofs-tools" { context = "." 
args = { CHALLENGER_VERSION="b46bffed42db3442d7484f089278d59f51503049" - KONA_VERSION="kona-client-v0.1.0-beta.3" + KONA_VERSION="kona-client-v0.1.0-beta.4" } target="proofs-tools" platforms = split(",", PLATFORMS) From 4ce84f1bea7493c061ec7450ea74258a185cbce0 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Tue, 3 Dec 2024 18:50:24 -0500 Subject: [PATCH 041/111] maint: bump forge version (#13197) Bumps the version of forge/cast/anvil being used in the monorepo. --- mise.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mise.toml b/mise.toml index 29bf5636e12..5fb473de889 100644 --- a/mise.toml +++ b/mise.toml @@ -31,9 +31,9 @@ just = "1.37.0" # Foundry dependencies # Foundry is a special case because it supplies multiple binaries at the same # GitHub release, so we need to use the aliasing trick to get mise to not error -forge = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" -cast = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" -anvil = "nightly-143abd6a768eeb52a5785240b763d72a56987b4a" +forge = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +cast = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +anvil = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" # Fake dependencies # Put things here if you need to track versions of tools or projects that can't From c36de049fc576239448417f153d16e3b0c0f76b6 Mon Sep 17 00:00:00 2001 From: refcell Date: Tue, 3 Dec 2024 19:07:13 -0500 Subject: [PATCH 042/111] chore(ops): Support kona + asterisc in the op-challenger (#13198) * chore(ops): add support for the op-challenger to run kona + asterisc * fix: update docker bake * fixes --- docker-bake.hcl | 7 ++++++- ops/docker/op-stack-go/Dockerfile | 22 +++++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index 57f5dbbfff6..f0b57e56519 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -6,6 +6,10 @@ variable "REPOSITORY" { default = "oplabs-tools-artifacts/images" } 
+variable "KONA_VERSION" { + default = "kona-client-v0.1.0-beta.4" +} + variable "GIT_COMMIT" { default = "dev" } @@ -119,6 +123,7 @@ target "op-challenger" { GIT_COMMIT = "${GIT_COMMIT}" GIT_DATE = "${GIT_DATE}" OP_CHALLENGER_VERSION = "${OP_CHALLENGER_VERSION}" + KONA_VERSION="${KONA_VERSION}" } target = "op-challenger-target" platforms = split(",", PLATFORMS) @@ -207,7 +212,7 @@ target "proofs-tools" { context = "." args = { CHALLENGER_VERSION="b46bffed42db3442d7484f089278d59f51503049" - KONA_VERSION="kona-client-v0.1.0-beta.4" + KONA_VERSION="${KONA_VERSION}" } target="proofs-tools" platforms = split(",", PLATFORMS) diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index ef1da6ce116..9384aef5e24 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -9,6 +9,13 @@ # It will default to the target platform. ARG TARGET_BASE_IMAGE=alpine:3.20 +# The ubuntu target base image is used for the op-challenger build with kona and asterisc. +ARG UBUNTU_TARGET_BASE_IMAGE=ubuntu:22.04 + +# The version of kona to use. +# The only build that uses this is `op-challenger-target`. +ARG KONA_VERSION=none + # We may be cross-building for another platform. Specify which platform we need as builder. FROM --platform=$BUILDPLATFORM golang:1.22.7-alpine3.20 AS builder @@ -136,14 +143,23 @@ FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-node-target COPY --from=op-node-builder /app/op-node/bin/op-node /usr/local/bin/ CMD ["op-node"] -FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-challenger-target +# Make the kona docker image published by upstream available as a source to copy kona and asterisc from. 
+FROM --platform=$BUILDPLATFORM ghcr.io/anton-rs/kona/kona-fpp-asterisc:$KONA_VERSION AS kona + +# Also produce an op-challenger loaded with kona and asterisc using ubuntu +FROM --platform=$TARGETPLATFORM $UBUNTU_TARGET_BASE_IMAGE AS op-challenger-target +RUN apt-get update && apt-get install -y --no-install-recommends musl openssl ca-certificates COPY --from=op-challenger-builder /app/op-challenger/bin/op-challenger /usr/local/bin/ -# Make the bundled op-program the default cannon server +# Copy in op-program and cannon COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/ ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program -# Make the bundled cannon the default cannon executable COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/ ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon +# Copy in kona and asterisc +COPY --from=kona /kona-host /usr/local/bin/ +ENV OP_CHALLENGER_ASTERISC_KONA_SERVER=/usr/local/bin/kona-host +COPY --from=kona /asterisc /usr/local/bin/ +ENV OP_CHALLENGER_ASTERISC_BIN=/usr/local/bin/asterisc CMD ["op-challenger"] FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-dispute-mon-target From 56670a730dea0cc8ff38503729788121dd7fe8c5 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Tue, 3 Dec 2024 21:00:40 -0500 Subject: [PATCH 043/111] fix: workaround for foundry snapshot nuking bug (#13209) Workaround for a bug? feature? in foundry that causes it to nuke whatever snapshots directory you have configured. Since this now defaults to "snapshots" it was nuking our snapshots folder. 
--- packages/contracts-bedrock/foundry.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 5812b129f91..6bad8be9d4c 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -8,6 +8,7 @@ src = 'src' out = 'forge-artifacts' script = 'scripts' build_info_path = 'artifacts/build-info' +snapshots = 'notarealpath' # workaround for foundry#9477 optimizer = true optimizer_runs = 999999 @@ -15,7 +16,7 @@ optimizer_runs = 999999 extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] bytecode_hash = 'none' ast = true -evm_version = "cancun" +evm_version = 'cancun' remappings = [ '@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts', From 7457c568919082d032b7d4b4c564bb50bec7aa48 Mon Sep 17 00:00:00 2001 From: clabby Date: Tue, 3 Dec 2024 21:14:29 -0500 Subject: [PATCH 044/111] feat(op-challenger): Add `TraceTypeAsteriscKona` to default `--trace-type` option (#13208) --- op-challenger/cmd/main_test.go | 22 +++++++++++++++++++--- op-challenger/flags/flags.go | 2 +- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/op-challenger/cmd/main_test.go b/op-challenger/cmd/main_test.go index daa4e44f26b..52c9c6c4d9f 100644 --- a/op-challenger/cmd/main_test.go +++ b/op-challenger/cmd/main_test.go @@ -90,9 +90,9 @@ func TestL1Beacon(t *testing.T) { func TestTraceType(t *testing.T) { t.Run("Default", func(t *testing.T) { - expectedDefault := types.TraceTypeCannon - cfg := configForArgs(t, addRequiredArgsExcept(expectedDefault, "--trace-type")) - require.Equal(t, []types.TraceType{expectedDefault}, cfg.TraceTypes) + expectedDefault := []types.TraceType{types.TraceTypeCannon, types.TraceTypeAsteriscKona} + cfg := configForArgs(t, addRequiredArgsForMultipleTracesExcept(expectedDefault, "--trace-type")) + require.Equal(t, expectedDefault, cfg.TraceTypes) }) for _, traceType := range 
types.TraceTypes { @@ -995,6 +995,12 @@ func addRequiredArgsExcept(traceType types.TraceType, name string, optionalArgs return append(toArgList(req), optionalArgs...) } +func addRequiredArgsForMultipleTracesExcept(traceType []types.TraceType, name string, optionalArgs ...string) []string { + req := requiredArgsMultiple(traceType) + delete(req, name) + return append(toArgList(req), optionalArgs...) +} + func addRequiredArgsExceptArr(traceType types.TraceType, names []string, optionalArgs ...string) []string { req := requiredArgs(traceType) for _, name := range names { @@ -1003,6 +1009,16 @@ func addRequiredArgsExceptArr(traceType types.TraceType, names []string, optiona return append(toArgList(req), optionalArgs...) } +func requiredArgsMultiple(traceType []types.TraceType) map[string]string { + args := make(map[string]string) + for _, t := range traceType { + for name, value := range requiredArgs(t) { + args[name] = value + } + } + return args +} + func requiredArgs(traceType types.TraceType) map[string]string { args := map[string]string{ "--l1-eth-rpc": l1EthRpc, diff --git a/op-challenger/flags/flags.go b/op-challenger/flags/flags.go index 041ee3601d2..19d6bc79042 100644 --- a/op-challenger/flags/flags.go +++ b/op-challenger/flags/flags.go @@ -65,7 +65,7 @@ var ( Name: "trace-type", Usage: "The trace types to support. Valid options: " + openum.EnumString(types.TraceTypes), EnvVars: prefixEnvVars("TRACE_TYPE"), - Value: cli.NewStringSlice(types.TraceTypeCannon.String()), + Value: cli.NewStringSlice(types.TraceTypeCannon.String(), types.TraceTypeAsteriscKona.String()), } DatadirFlag = &cli.StringFlag{ Name: "datadir", From b002feab877bddfb40bceb9734499b3d8d0c7b25 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Tue, 3 Dec 2024 23:02:37 -0500 Subject: [PATCH 045/111] maint: remove holocene upgrade folder (#13214) Removes the Holocene upgrade folder. We only needed it as part of a specific release and don't need it in develop. 
--- .../scripts/upgrades/holocene/.env.example | 89 ----- .../scripts/upgrades/holocene/.gitignore | 2 - .../upgrades/holocene/DeployUpgrade.s.sol | 341 ------------------ .../scripts/upgrades/holocene/README.md | 56 --- .../scripts/upgrades/holocene/justfile | 29 -- .../upgrades/holocene/scripts/common.sh | 131 ------- .../upgrades/holocene/scripts/deploy.sh | 46 --- .../scripts/upgrades/holocene/scripts/main.sh | 152 -------- .../holocene/scripts/proofs-bundle.sh | 54 --- .../holocene/scripts/sc-ops-proofs.sh | 47 --- .../holocene/scripts/sc-ops-sys-cfg.sh | 38 -- .../holocene/scripts/sys-cfg-bundle.sh | 35 -- .../templates/fdg_bundle_extension.json | 29 -- .../proof_upgrade_bundle_template.json | 38 -- .../templates/proofs-sc-ops-task/.env.example | 6 - .../templates/proofs-sc-ops-task/README.md | 46 --- .../proofs-sc-ops-task/VALIDATION.md | 24 -- .../sys-cfg-sc-ops-task/.env.example | 6 - .../templates/sys-cfg-sc-ops-task/README.md | 42 --- .../sys-cfg-sc-ops-task/VALIDATION.md | 19 - .../sys_cfg_upgrade_bundle_template.json | 38 -- .../upgrades/holocene/upgrade.dockerfile | 65 ---- 22 files changed, 1333 deletions(-) delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/.env.example delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/.gitignore delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/README.md delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/justfile delete mode 100755 packages/contracts-bedrock/scripts/upgrades/holocene/scripts/common.sh delete mode 100755 packages/contracts-bedrock/scripts/upgrades/holocene/scripts/deploy.sh delete mode 100755 packages/contracts-bedrock/scripts/upgrades/holocene/scripts/main.sh delete mode 100755 packages/contracts-bedrock/scripts/upgrades/holocene/scripts/proofs-bundle.sh delete mode 100755 
packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-proofs.sh delete mode 100755 packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-sys-cfg.sh delete mode 100755 packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sys-cfg-bundle.sh delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/fdg_bundle_extension.json delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/proof_upgrade_bundle_template.json delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/.env.example delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/README.md delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/VALIDATION.md delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/.env.example delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/README.md delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/VALIDATION.md delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys_cfg_upgrade_bundle_template.json delete mode 100644 packages/contracts-bedrock/scripts/upgrades/holocene/upgrade.dockerfile diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/.env.example b/packages/contracts-bedrock/scripts/upgrades/holocene/.env.example deleted file mode 100644 index 2d7d84e67aa..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/.env.example +++ /dev/null @@ -1,89 +0,0 @@ -############################################## -# ↓ Required ↓ # -############################################## - -# NOTE: The deploy config must be provided as a first argument to `just run`! - -# The network to deploy the contracts to. 
-# Must be one of 'mainnet', 'sepolia' -NETWORK= - -# Etherscan API key used to verify contract bytecode -ETHERSCAN_API_KEY= - -# RPC URL for the L1 network that matches $NETWORK -ETH_RPC_URL= - -# Private key used to deploy the new contracts for this upgrade -PRIVATE_KEY= - -# Address of deployed `PreimageOracle` contract. -PREIMAGE_ORACLE_ADDR= - -# Address of deployed `AnchorStateRegistry` proxy contract. -ANCHOR_STATE_REGISTRY_PROXY_ADDR= - -# Address of the `SuperchainConfig` proxy contract. -SUPERCHAIN_CONFIG_PROXY_ADDR= - -# Address of deployed `ProxyAdmin` contract. -PROXY_ADMIN_ADDR= - -# Address of deployed `SystemConfig` proxy contract. -SYSTEM_CONFIG_PROXY_ADDR= - -# Address of deployed `DisputeGameFactory` proxy contract. -DISPUTE_GAME_FACTORY_PROXY_ADDR= - -# Whether or not to deploy and include any fault proof contracts in the upgrade. -# -# If 'true', the `PermissionedDisputeGame` contract will be deployed and included in the upgrade. -# If 'false', the `PermissionedDisputeGame` contract will not be deployed or included in the upgrade. -# -# Must be one of 'true', 'false' -# Cannot be 'false' if `USE_PERMISSIONLESS_FAULT_PROOFS` is 'true' -USE_FAULT_PROOFS=true - -# Whether or not to deploy and include the `FaultDisputeGame` contract in the upgrade. -# -# If 'true', the `FaultDisputeGame` contract will be deployed and included in the upgrade. -# If 'false', the `FaultDisputeGame` contract will not be deployed or included in the upgrade. -# -# Must be one of 'true', 'false' -# Cannot be 'true' if `USE_FAULT_PROOFS` is 'false' -USE_PERMISSIONLESS_FAULT_PROOFS=true - -################################################### -# ↓ Optional ↓ # -# Do not set if you don't know what you're doing. # -################################################### - -# Address of the deployed `SystemConfig` implementation for Holocene. -# -# This implementation is reused across L2 deployments based on the L1 @ `ETH_RPC_URL`. 
-# If you are not the first to deploy Holocene on this L1, this field should be set to -# the existing deployment address. -# -# If this field is not set, the `superchain-registry` will be consulted for the implementation address. -# If this field is set to the zero address, a new `SystemConfig` implementation will be deployed. -SYSTEM_CONFIG_IMPL_ADDR= - -# Address of the deployed `MIPS` implementation for Holocene. -# -# This implementation is reused across L2 deployments based on the L1 @ `ETH_RPC_URL`. -# If you are not the first to deploy Holocene on this L1, this field should be set to -# the existing deployment address. -# -# If this field is not set, the `superchain-registry` will be consulted for the implementation address. -# If this field is set to the zero address, a new `MIPS` implementation will be deployed. -MIPS_IMPL_ADDR= - -# Address of deployed `DelayedWETH` implementation contract. -# -# This implementation is reused across L2 deployments based on the L1 @ `ETH_RPC_URL`. -# If you are not the first to deploy permissionless fault proofs on L1, this field should be -# set to the existing deployment address. -# -# If this field is not set, the `superchain-registry` will be consulted for the implementation address. -# If this field is set to the zero address, a new `DelayedWETH` implementation will be deployed. 
-DELAYED_WETH_IMPL_ADDR= diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/.gitignore b/packages/contracts-bedrock/scripts/upgrades/holocene/.gitignore deleted file mode 100644 index 442ed87d930..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Environment -.env diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol b/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol deleted file mode 100644 index 680c4bea127..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/DeployUpgrade.s.sol +++ /dev/null @@ -1,341 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.15; - -// Forge -import { console2 as console } from "forge-std/console2.sol"; - -// Scripts -import { Deployer } from "scripts/deploy/Deployer.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; - -// Utils -import { Claim, GameTypes, Duration } from "src/dispute/lib/Types.sol"; - -// Interfaces -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { - IFaultDisputeGame, - IBigStepper, - IAnchorStateRegistry, - IDelayedWETH -} from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { IMIPS, IPreimageOracle } from "src/cannon/interfaces/IMIPS.sol"; - -/// @title DeployUpgrade -/// @notice A deployment script for smart contract upgrades surrounding the Holocene hardfork. -contract DeployUpgrade is Deployer { - /// @dev The entrypoint to the deployment script. 
- function deploy( - address _proxyAdmin, - address _superchainConfig, - address _systemConfigImpl, - address _mipsImpl, - address _delayedWETH, - address _preimageOracle, - address _anchorStateRegistry, - bool _useFaultProofs, - bool _usePermissionlessFaultProofs - ) - public - { - // Shim the existing contracts that this upgrade is dependent on. - shim({ - _proxyAdmin: _proxyAdmin, - _superchainConfig: _superchainConfig, - _systemConfigImpl: _systemConfigImpl, - _mipsImpl: _mipsImpl, - _delayedWETH: _delayedWETH, - _preimageOracle: _preimageOracle, - _anchorStateRegistry: _anchorStateRegistry - }); - - // Deploy conditional implementations. - if (_systemConfigImpl == address(0)) deploySystemConfigImplementation(); - - if (_useFaultProofs) { - if (_mipsImpl == address(0)) deployMIPSImplementation(); - if (_delayedWETH == address(0)) deployDelayedWETH(); - - // Deploy: - // 1. New `DelayedWETH` proxy contracts for the `FaultDisputeGame` and `PermissionedDisputeGame`. - // 2. New `FaultDisputeGame` and `PermissionedDisputeGame` implementation contracts. - deployDelayedWETHProxy("PDG"); - deployPermissionedDisputeGameImplementation(); - if (_usePermissionlessFaultProofs) { - deployDelayedWETHProxy("FDG"); - deployFaultDisputeGameImplementation(); - } - - // Run deployment checks. - checkMIPS(); - checkPermissionedDisputeGame(); - checkDelayedWETH("PDG"); - if (_usePermissionlessFaultProofs) { - checkFaultDisputeGame(); - checkDelayedWETH("FDG"); - } - } - - // Print the deployment summary. - printSummary(); - } - - /// @dev Shims the existing contracts that this upgrade is dependent on. 
- function shim( - address _proxyAdmin, - address _superchainConfig, - address _systemConfigImpl, - address _mipsImpl, - address _delayedWETH, - address _preimageOracle, - address _anchorStateRegistry - ) - public - { - prankDeployment("ProxyAdmin", _proxyAdmin); - prankDeployment("SuperchainConfig", _superchainConfig); - if (_systemConfigImpl != address(0)) prankDeployment("SystemConfig", _systemConfigImpl); - if (_mipsImpl != address(0)) prankDeployment("MIPS", _mipsImpl); - if (_delayedWETH != address(0)) prankDeployment("DelayedWETH", _delayedWETH); - prankDeployment("PreimageOracle", _preimageOracle); - prankDeployment("AnchorStateRegistry", _anchorStateRegistry); - } - - /// @dev Deploys the Holocene `SystemConfig` implementation contract. - function deploySystemConfigImplementation() public { - vm.broadcast(msg.sender); - address systemConfig = DeployUtils.create1( - "SystemConfig", DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) - ); - save("SystemConfig", systemConfig); - } - - /// @dev Deploys the new `MIPS` implementation contract. - function deployMIPSImplementation() public { - vm.broadcast(msg.sender); - address mips = DeployUtils.create1({ - _name: "MIPS", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IMIPS.__constructor__, (IPreimageOracle(mustGetAddress("PreimageOracle")))) - ) - }); - save("MIPS", mips); - } - - /// @dev Checks if the `MIPS` contract is correctly configured. - function checkMIPS() public view { - IMIPS mips = IMIPS(mustGetAddress("MIPS")); - require( - address(mips.oracle()) == mustGetAddress("PreimageOracle"), "DeployHoloceneUpgrade: invalid MIPS oracle" - ); - } - - /// @dev Deploys the Holocene `FaultDisputeGame` implementation contract. 
- function deployFaultDisputeGameImplementation() public { - bytes memory constructorInput = abi.encodeCall( - IFaultDisputeGame.__constructor__, - ( - IFaultDisputeGame.GameConstructorParams({ - gameType: GameTypes.CANNON, - absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), - maxGameDepth: cfg.faultGameMaxDepth(), - splitDepth: cfg.faultGameSplitDepth(), - clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), - vm: IBigStepper(mustGetAddress("MIPS")), - weth: IDelayedWETH(payable(mustGetAddress("DelayedWETHProxyFDG"))), - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistry")), - l2ChainId: cfg.l2ChainID() - }) - ) - ); - - vm.broadcast(msg.sender); - address fdg = DeployUtils.create1("FaultDisputeGame", DeployUtils.encodeConstructor(constructorInput)); - save("FaultDisputeGame", fdg); - } - - /// @dev Checks if the `FaultDisputeGame` contract is correctly configured. 
- function checkFaultDisputeGame() public view { - IFaultDisputeGame fdg = IFaultDisputeGame(mustGetAddress("FaultDisputeGame")); - require( - fdg.gameType().raw() == GameTypes.CANNON.raw(), "DeployHoloceneUpgrade: invalid FaultDisputeGame gameType" - ); - require( - fdg.absolutePrestate().raw() == bytes32(cfg.faultGameAbsolutePrestate()), - "DeployHoloceneUpgrade: invalid FaultDisputeGame absolutePrestate" - ); - require( - fdg.maxGameDepth() == cfg.faultGameMaxDepth(), "DeployHoloceneUpgrade: invalid FaultDisputeGame maxDepth" - ); - require( - fdg.splitDepth() == cfg.faultGameSplitDepth(), "DeployHoloceneUpgrade: invalid FaultDisputeGame splitDepth" - ); - require( - fdg.clockExtension().raw() == cfg.faultGameClockExtension(), - "DeployHoloceneUpgrade: invalid FaultDisputeGame clockExtension" - ); - require( - fdg.maxClockDuration().raw() == cfg.faultGameMaxClockDuration(), - "DeployHoloceneUpgrade: invalid FaultDisputeGame maxClockDuration" - ); - require(address(fdg.vm()) == mustGetAddress("MIPS"), "DeployHoloceneUpgrade: invalid FaultDisputeGame MIPS"); - require( - address(fdg.weth()) == mustGetAddress("DelayedWETHProxyFDG"), - "DeployHoloceneUpgrade: invalid FaultDisputeGame DelayedWETH" - ); - require( - address(fdg.anchorStateRegistry()) == mustGetAddress("AnchorStateRegistry"), - "DeployHoloceneUpgrade: invalid FaultDisputeGame AnchorStateRegistry" - ); - require(fdg.l2ChainId() == cfg.l2ChainID(), "DeployHoloceneUpgrade: invalid FaultDisputeGame l2ChainID"); - } - - /// @dev Deploys the Holocene `PermissionedDisputeGame` implementation contract. 
- function deployPermissionedDisputeGameImplementation() public { - bytes memory constructorInput = abi.encodeCall( - IPermissionedDisputeGame.__constructor__, - ( - IFaultDisputeGame.GameConstructorParams({ - gameType: GameTypes.PERMISSIONED_CANNON, - absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), - maxGameDepth: cfg.faultGameMaxDepth(), - splitDepth: cfg.faultGameSplitDepth(), - clockExtension: Duration.wrap(uint64(cfg.faultGameClockExtension())), - maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())), - vm: IBigStepper(mustGetAddress("MIPS")), - weth: IDelayedWETH(payable(mustGetAddress("DelayedWETHProxyPDG"))), - anchorStateRegistry: IAnchorStateRegistry(mustGetAddress("AnchorStateRegistry")), - l2ChainId: cfg.l2ChainID() - }), - cfg.l2OutputOracleProposer(), - cfg.l2OutputOracleChallenger() - ) - ); - - vm.broadcast(msg.sender); - address fdg = DeployUtils.create1("PermissionedDisputeGame", DeployUtils.encodeConstructor(constructorInput)); - save("PermissionedDisputeGame", fdg); - } - - /// @dev Checks if the `PermissionedDisputeGame` contract is correctly configured. 
- function checkPermissionedDisputeGame() public view { - IPermissionedDisputeGame pdg = IPermissionedDisputeGame(mustGetAddress("PermissionedDisputeGame")); - require( - pdg.gameType().raw() == GameTypes.PERMISSIONED_CANNON.raw(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame gameType" - ); - require( - pdg.absolutePrestate().raw() == bytes32(cfg.faultGameAbsolutePrestate()), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame absolutePrestate" - ); - require( - pdg.maxGameDepth() == cfg.faultGameMaxDepth(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame maxDepth" - ); - require( - pdg.splitDepth() == cfg.faultGameSplitDepth(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame splitDepth" - ); - require( - pdg.clockExtension().raw() == cfg.faultGameClockExtension(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame clockExtension" - ); - require( - pdg.maxClockDuration().raw() == cfg.faultGameMaxClockDuration(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame maxClockDuration" - ); - require( - address(pdg.vm()) == mustGetAddress("MIPS"), "DeployHoloceneUpgrade: invalid PermissionedDisputeGame MIPS" - ); - require( - address(pdg.weth()) == mustGetAddress("DelayedWETHProxyPDG"), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame DelayedWETH" - ); - require( - address(pdg.anchorStateRegistry()) == mustGetAddress("AnchorStateRegistry"), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame AnchorStateRegistry" - ); - require(pdg.l2ChainId() == cfg.l2ChainID(), "DeployHoloceneUpgrade: invalid PermissionedDisputeGame l2ChainID"); - require( - pdg.proposer() == cfg.l2OutputOracleProposer(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame proposer" - ); - require( - pdg.challenger() == cfg.l2OutputOracleChallenger(), - "DeployHoloceneUpgrade: invalid PermissionedDisputeGame challenger" - ); - } - - /// @dev Deploys a new implementation of the `DelayedWETH` contract. 
- function deployDelayedWETH() public { - uint256 delay = cfg.faultGameWithdrawalDelay(); - - vm.broadcast(msg.sender); - address impl = DeployUtils.create1({ - _name: "DelayedWETH", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IDelayedWETH.__constructor__, (delay))) - }); - - // Save the new implementation address. - save("DelayedWETH", impl); - } - - /// @dev Deploys a new proxy contract with a new `DelayedWETH` implementation. - function deployDelayedWETHProxy(string memory _variant) public { - address delayedWethOwner = cfg.finalSystemOwner(); - address proxyAdmin = mustGetAddress("ProxyAdmin"); - address impl = mustGetAddress("DelayedWETH"); - ISuperchainConfig superchainConfig = ISuperchainConfig(mustGetAddress("SuperchainConfig")); - string memory finalName = string.concat("DelayedWETHProxy", _variant); - - // Deploy the implementation and proxy contracts. - vm.broadcast(msg.sender); - IProxy proxy = IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) - }) - ); - - // Upgrade the proxy to the implementation and initialize it. - vm.broadcast(msg.sender); - proxy.upgradeToAndCall(impl, abi.encodeCall(IDelayedWETH.initialize, (delayedWethOwner, superchainConfig))); - - // Transfer the admin role of the proxy to the ProxyAdmin, now that we've upgraded - // and initialized the proxy. - vm.broadcast(msg.sender); - proxy.changeAdmin(proxyAdmin); - - // Save the proxy address. - save(finalName, address(proxy)); - } - - /// @dev Checks if the `DelayedWETH` contract is correctly configured. 
- function checkDelayedWETH(string memory _variant) internal { - string memory finalName = string.concat("DelayedWETHProxy", _variant); - IDelayedWETH delayedWeth = IDelayedWETH(mustGetAddress(finalName)); - require( - delayedWeth.delay() == cfg.faultGameWithdrawalDelay(), "DeployHoloceneUpgrade: invalid DelayedWETH delay" - ); - require( - delayedWeth.config() == ISuperchainConfig(mustGetAddress("SuperchainConfig")), - "DeployHoloceneUpgrade: invalid DelayedWETH config" - ); - - vm.prank(mustGetAddress("ProxyAdmin")); - address admin = IProxy(payable(address(delayedWeth))).admin(); - require(admin == mustGetAddress("ProxyAdmin"), "DeployHoloceneUpgrade: invalid DelayedWETH admin"); - } - - /// @dev Prints a summary of the deployment. - function printSummary() internal view { - console.log("1. SystemConfig: %s", mustGetAddress("SystemConfig")); - console.log("2. MIPS: %s", getAddress("MIPS")); - console.log("3. FaultDisputeGame: %s", getAddress("FaultDisputeGame")); - console.log("4. PermissionedDisputeGame: %s", getAddress("PermissionedDisputeGame")); - } -} diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/README.md b/packages/contracts-bedrock/scripts/upgrades/holocene/README.md deleted file mode 100644 index a671362bff8..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# Holocene Upgrade - -This directory contains a repeatable task for: -* upgrading an `op-contracts/v1.6.0` deployment to `op-contracts/v1.8.0`. -* upgrading an `op-contracts/v1.3.0` deployment to `op-contracts/v1.8.0`, while retaining the `L2OutputOracle`. - -## Dependencies - -- [`docker`](https://docs.docker.com/engine/install/) -- [`just`](https://github.com/casey/just) -- [`foundry`](https://getfoundry.sh/) - -## Usage - -This script has several different modes of operation. Namely: -1. 
Deploy and upgrade `op-contracts/1.6.0` -> `op-contracts/v1.8.0` - - Always upgrade the `SystemConfig` - - FP options: - - With permissionless fault proofs enabled (incl. `FaultDisputeGame`) - - With permissioned fault proofs enabled (excl. `FaultDisputeGame`) -1. Deploy and upgrade `op-contracts/v1.3.0` -> `op-contracts/v1.8.0`, with the `L2OutputOracle` still active. - - Only upgrade the `SystemConfig` - -```sh -# 1. Clone the monorepo and navigate to this directory. -git clone git@github.com:ethereum-optimism/monorepo.git && \ - cd monorepo/packages/contracts-bedrock/scripts/upgrades/holocene - -# 2. Set up the `.env` file -# -# Read the documentation carefully, and when in doubt, reach out to the OP Labs team. -cp .env.example .env && vim .env - -# 3. Run the upgrade task. -# -# This task will: -# - Deploy the new smart contract implementations. -# - Optionally, generate a safe upgrade bundle. -# - Optionally, generate a `superchain-ops` upgrade task. -# -# The first argument must be the absolute path to your deploy-config.json. -# You can optionally specify an output folder path different from the default `output/` as a -# second argument to `just run`, also as an absolute path. -just run $(realpath path/to/deploy-config.json) -``` - -Note that in order to build the Docker image, you have to allow Docker to use at least 16GB of -memory, or the Solidity compilations may fail. Docker's default is only 8GB. - -The `deploy-config.json` that you use for your chain must set the latest `faultGameAbsolutePrestate` -value, not the one at deployment. There's currently one available that includes the Sepolia -Superchain Holocene activations for Base, OP, Mode and Zora: -`0x03925193e3e89f87835bbdf3a813f60b2aa818a36bbe71cd5d8fd7e79f5e8afe` - -If you want to make local modifications to the scripts in `scripts/`, you need to build the Docker -image again with `just build-image` before running `just run`. 
diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/justfile b/packages/contracts-bedrock/scripts/upgrades/holocene/justfile deleted file mode 100644 index ac08df68132..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/justfile +++ /dev/null @@ -1,29 +0,0 @@ -# Default recipe to list help menu. -default: - @just --list - -# Run the deployment / upgrade generation image. If the image is not present locally, -# it will be built. -run deploy-config-path output-folder-path="$(pwd)/output/" *args='': - #!/bin/bash - if [ ! "$(docker images -q op-holocene-upgrade:local 2> /dev/null)" ]; then - just build-image - fi - - mkdir -p {{output-folder-path}} - - # Run the deployment. - docker run -it \ - --rm \ - -v {{output-folder-path}}:/output \ - -v {{deploy-config-path}}:/app/packages/contracts-bedrock/deploy-config/deploy-config.json \ - --env-file=.env \ - op-holocene-upgrade:local {{args}} - -# Build the image locally. -build-image: - docker build \ - -t op-holocene-upgrade:local \ - -f upgrade.dockerfile \ - --build-arg REV=op-contracts/v1.8.0-rc.2 \ - . diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/common.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/common.sh deleted file mode 100755 index 0f4e80b29c4..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/common.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Associative array to store cached TOML content for different URLs -# Used by fetch_standard_address and fetch_superchain_config_address -declare -A CACHED_TOML_CONTENT - -# error_handler -# -# Basic error handler -error_handler() { - echo "Error occurred in ${BASH_SOURCE[1]} at line: ${BASH_LINENO[0]}" - echo "Error message: $BASH_COMMAND" - exit 1 -} - -# Register the error handler -trap error_handler ERR - -# reqenv -# -# Checks if a specified environment variable is set. 
-# -# Arguments: -# $1 - The name of the environment variable to check -# -# Exits with status 1 if: -# - The specified environment variable is not set -reqenv() { - if [ -z "$1" ]; then - echo "Error: $1 is not set" - exit 1 - fi -} - -# prompt -# -# Prompts the user for a yes/no response. -# -# Arguments: -# $1 - The prompt message -# -# Exits with status 1 if: -# - The user does not respond with 'y' -# - The process is interrupted -prompt() { - read -p "$1 [Y/n] " -n 1 -r - echo - if [[ ! $REPLY =~ ^[Yy]$ ]]; then - [[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1 - exit 1 - fi -} - -# fetch_standard_address -# -# Fetches the implementation address for a given contract from a TOML file. -# The TOML file is downloaded from a URL specified in ADDRESSES_TOML_URL -# environment variable. Results are cached to avoid repeated downloads. -# -# Arguments: -# $1 - Network name -# $2 - The release version -# $3 - The name of the contract to look up -# -# Returns: -# The implementation address of the specified contract -# -# Exits with status 1 if: -# - Failed to fetch the TOML file -# - The release version is not found in the TOML file -# - The implementation address for the specified contract is not found -fetch_standard_address() { - local network_name="$1" - local release_version="$2" - local contract_name="$3" - - # Determine the correct toml url - local toml_url="https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions" - if [ "$network_name" = "mainnet" ]; then - toml_url="$toml_url-mainnet.toml" - elif [ "$network_name" = "sepolia" ]; then - toml_url="$toml_url-sepolia.toml" - else - echo "Error: NETWORK must be set to 'mainnet' or 'sepolia'" - exit 1 - fi - - # Fetch the TOML file content from the URL if not already cached for this URL - if [ -z "${CACHED_TOML_CONTENT[$toml_url]:-}" ]; then - CACHED_TOML_CONTENT[$toml_url]=$(curl -s "$toml_url") - # shellcheck disable=SC2181 - if [ $? 
-ne 0 ]; then - echo "Error: Failed to fetch TOML file from $toml_url" - exit 1 - fi - fi - - # Use the cached content for the current URL - local toml_content="${CACHED_TOML_CONTENT[$toml_url]}" - - # Find the section for v1.6.0 release - # shellcheck disable=SC2155 - local section_content=$(echo "$toml_content" | awk -v version="$release_version" ' - $0 ~ "^\\[releases.\"op-contracts/v" version "\"\\]" { - flag=1; - next - } - flag && /^\[/ { - exit - } - flag { - print - } - ') - if [ -z "$section_content" ]; then - echo "Error: v$release_version release section not found in addresses TOML" - exit 1 - fi - - # Extract the implementation address for the specified contract - local regex="(address|implementation_address) = \"(0x[a-fA-F0-9]{40})\"" - # shellcheck disable=SC2155 - local data=$(echo "$section_content" | grep "${contract_name}") - if [[ $data =~ $regex ]]; then - echo "${BASH_REMATCH[2]}" - else - echo "Error: Implementation address for $contract_name not found in v$release_version release" - exit 1 - fi -} diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/deploy.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/deploy.sh deleted file mode 100755 index d1e15aa63e7..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/deploy.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the script directory -SCRIPT_DIR=$(dirname "$0") - -# Load common.sh -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/common.sh" - -# Check required environment variables -reqenv "ETH_RPC_URL" -reqenv "PRIVATE_KEY" -reqenv "ETHERSCAN_API_KEY" -reqenv "DEPLOY_CONFIG_PATH" -reqenv "IMPL_SALT" - -# Check required address environment variables -reqenv "PROXY_ADMIN_ADDR" -reqenv "SUPERCHAIN_CONFIG_PROXY_ADDR" -reqenv "PREIMAGE_ORACLE_ADDR" -reqenv "ANCHOR_STATE_REGISTRY_PROXY_ADDR" -reqenv "DELAYED_WETH_IMPL_ADDR" -reqenv "SYSTEM_CONFIG_IMPL_ADDR" -reqenv "MIPS_IMPL_ADDR" -reqenv 
"USE_FAULT_PROOFS" -reqenv "USE_PERMISSIONLESS_FAULT_PROOFS" - -# Run the upgrade script -forge script DeployUpgrade.s.sol \ - --rpc-url "$ETH_RPC_URL" \ - --private-key "$PRIVATE_KEY" \ - --etherscan-api-key "$ETHERSCAN_API_KEY" \ - --sig "deploy(address,address,address,address,address,address,address,bool,bool)" \ - "$PROXY_ADMIN_ADDR" \ - "$SUPERCHAIN_CONFIG_PROXY_ADDR" \ - "$SYSTEM_CONFIG_IMPL_ADDR" \ - "$MIPS_IMPL_ADDR" \ - "$DELAYED_WETH_IMPL_ADDR" \ - "$PREIMAGE_ORACLE_ADDR" \ - "$ANCHOR_STATE_REGISTRY_PROXY_ADDR" \ - "$USE_FAULT_PROOFS" \ - "$USE_PERMISSIONLESS_FAULT_PROOFS" \ - --broadcast \ - --verify \ - --slow diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/main.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/main.sh deleted file mode 100755 index 7ae6a55ed72..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/main.sh +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the script directory -SCRIPT_DIR=$(dirname "$0") - -# Load common.sh -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/common.sh" - -echo " -⠄⢀⠀⠀⡐⠠⠀⢂⣠⡤⣤⣖⣤⣤⣄⢢⣤⣤⣤⡀⠄⠰⠀⠆⠀⠀⠠⠀⠆⠠⢀⠢⠐⠆⡀ -⠔⢀⠀⠄⡀⠔⢠⣾⣿⣿⣷⣿⣿⣿⣿⣷⣟⣛⣻⣿⣤⡠⢀⠆⢄⠂⠠⢠⠀⠄⠀⢠⠰⢄⠄ -⣈⠀⠐⡀⠀⠈⣾⣿⣿⣿⣿⡿⡿⣿⣿⣿⣿⡿⢿⣿⣿⣿⣦⠀⠂⠀⢈⠀⠁⠂⡀⢀⠠⠈⡀ -⠆⠈⠀⠄⣃⣼⣿⣿⣿⠿⡿⣿⣿⣷⣾⣷⣾⡾⣿⣿⣿⢿⡟⡇⠠⠁⠨⠐⠀⠃⠱⠊⠀⠀⠄ -⠐⣠⣶⣿⣿⣿⣿⣿⣿⣿⣾⣮⣛⢿⣿⡿⠘⣇⢯⣹⣶⣷⣿⣿⡄⠆⢐⠀⢄⠢⡒⠐⠠⢀⠂ -⢴⣿⣿⣿⣿⣿⣿⡿⠿⢿⣿⣿⣟⢿⣶⣾⣿⣧⢻⣿⢿⣿⣿⣿⠂⠈⢀⠈⡀⡁⢀⠉⢀⠀⠀ -⡜⣿⣿⣿⣿⣿⣿⣦⡻⠀⢨⣽⣿⣿⣿⣿⣿⣿⣦⡛⣾⣭⣃⣀⣦⠀⠨⠐⠀⠄⠃⠈⠀⠀⠁ -⣳⡘⣿⣿⣿⣿⣿⣿⣿⣿⣿⢛⣭⣿⣿⣿⣿⣿⣿⣿⢢⣿⣿⣿⣿⡇⢀⠲⠂⠄⢀⡐⠠⠐⠂ -⢣⠵⢹⣿⣿⣿⣿⣿⣿⣿⣿⣧⣛⣻⣿⣭⣿⣿⠻⢥⣿⣿⣿⣿⣿⣿⡄⠀⠁⡀⢀⢈⠀⢀⠁ -⡼⣹⡘⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⣟⣻⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⠇⠀⠀⠁⠉⠐⠀⠂⠈ -⡷⣡⠧⢹⣿⣿⣿⣿⣿⣿⣗⣶⣾⣿⣿⣿⣿⣿⠮⢿⣿⣿⡿⠿⣿⣿⢀⣀⠂⡁⠐⢌⠐⡠⠁ -⢷⠡⠏⢧⢌⢻⣿⣿⣿⣟⣿⣿⣻⣿⡿⠿⣛⣵⢟⣭⡭⠥⠮⠕⣒⣒⣚⣮⣰⢤⣤⣄⣀⡠⠄ -⠁⠠⠀⠄⣏⢦⡙⣿⣿⣽⣟⣿⡟⣬⣾⣿⣿⣿⣾⣆⠭⠽⠶⢶⢶⣖⣶⣹⣖⡿⣿⣿⣿⣿⡆ -⠀⡁⠂⡟⡜⣦⢫⣌⠻⣷⣿⢏⣾⣿⣿⣿⢿⣿⢿⣿⣿⣿⣿⣿⠿⣛⣛⣛⠛⣭⣿⣿⢻⣽⡇ -⣏⠖⣮⢱⣋⢖⣣⢎⡳⢤⡅⣾⣿⢯⣷⡏⡖⣰⣝⣚⣙⣫⢍⡶⠷⣽⢭⣛⡇⠀⢰⣶⣾⣿⠀ -⡮⡝⢦⣓⢎⡳⡜⠎⠡⠁⢸⣿⣿⣿⣿⣁⢠⣿⡿⣿⡟⣎⣯⣽⣋⡷⣾⣹⡃⠀⢸⣿⣿⢿⠀ -⣳⠁⠀⡝⢮⣱⢹⠀⠂⠈⣿⣿⣿⣿⡻⣈⣸⣿⣙⢾⢿⣹⡶⣿⣼⣗⣻⡞⣡⠁⣼⣿⣿⣿⡀ -⡔⢦⢥⡛⣜⠦⣏⡄⡈⣸⢿⣿⡿⣽⢃⠇⣿⣧⡝⣟⡳⢾⣹⣟⡻⣾⣹⢣⠞⣄⣯⣿⣷⣿⡆ - -*~ [ Grug Deployer mk2 ] ~*- - ~*- [ Holocene ] -*~ -" - -# Set variables from environment or error. 
-export RELEASE="1.8.0" -export NETWORK="${NETWORK:?NETWORK must be set}" -export ETHERSCAN_API_KEY=${ETHERSCAN_API_KEY:?ETHERSCAN_API_KEY must be set} -export ETH_RPC_URL=${ETH_RPC_URL:?ETH_RPC_URL must be set} -export PRIVATE_KEY=${PRIVATE_KEY:?PRIVATE_KEY must be set} -export OUTPUT_FOLDER_PATH="/output" -export SYSTEM_CONFIG_IMPL_ADDR=${SYSTEM_CONFIG_IMPL_ADDR:-$(fetch_standard_address "$NETWORK" "$RELEASE" "system_config")} -export MIPS_IMPL_ADDR=${MIPS_IMPL_ADDR:-$(fetch_standard_address "$NETWORK" "$RELEASE" "mips")} -export PREIMAGE_ORACLE_ADDR=${PREIMAGE_ORACLE_ADDR:?PREIMAGE_ORACLE_ADDR must be set} -export ANCHOR_STATE_REGISTRY_PROXY_ADDR=${ANCHOR_STATE_REGISTRY_PROXY_ADDR:?ANCHOR_STATE_REGISTRY_PROXY_ADDR must be set} -export DELAYED_WETH_IMPL_ADDR=${DELAYED_WETH_IMPL_ADDR:-$(fetch_standard_address "$NETWORK" "$RELEASE" "delayed_weth")} -export PROXY_ADMIN_ADDR=${PROXY_ADMIN_ADDR:?PROXY_ADMIN_ADDR must be set} -export SUPERCHAIN_CONFIG_PROXY_ADDR=${SUPERCHAIN_CONFIG_PROXY_ADDR:?SUPERCHAIN_CONFIG_ADDR must be set} -export SYSTEM_CONFIG_PROXY_ADDR=${SYSTEM_CONFIG_PROXY_ADDR:?SYSTEM_CONFIG_PROXY_ADDR must be set} -export DISPUTE_GAME_FACTORY_PROXY_ADDR=${DISPUTE_GAME_FACTORY_PROXY_ADDR:?DISPUTE_GAME_FACTORY_PROXY_ADDR must be set} -export USE_FAULT_PROOFS=${USE_FAULT_PROOFS:?USE_FAULT_PROOFS must be set} -export USE_PERMISSIONLESS_FAULT_PROOFS=${USE_PERMISSIONLESS_FAULT_PROOFS:?USE_PERMISSIONLESS_FAULT_PROOFS must be set} - -# Sanity check FP configuration. 
-if [[ $USE_PERMISSIONLESS_FAULT_PROOFS == true && $USE_FAULT_PROOFS == false ]]; then - echo "Error: USE_PERMISSIONLESS_FAULT_PROOFS cannot be true if USE_FAULT_PROOFS is false" - exit 1 -fi - -# Make the output folder, if it doesn't exist -mkdir -p "$OUTPUT_FOLDER_PATH" - -# Find the contracts-bedrock directory -CONTRACTS_BEDROCK_DIR=$(pwd) -while [[ "$CONTRACTS_BEDROCK_DIR" != "/" && "${CONTRACTS_BEDROCK_DIR##*/}" != "contracts-bedrock" ]]; do - CONTRACTS_BEDROCK_DIR=$(dirname "$CONTRACTS_BEDROCK_DIR") -done - -# Error out if we couldn't find it for some reason -if [[ "$CONTRACTS_BEDROCK_DIR" == "/" ]]; then - echo "Error: 'contracts-bedrock' directory not found" - exit 1 -fi - -# The deploy config is mounted via Docker to this file -export DEPLOY_CONFIG_PATH="$CONTRACTS_BEDROCK_DIR/deploy-config/deploy-config.json" - -# Run deploy.sh if deployments.json does not exist -DEPLOY_LOG_PATH="$OUTPUT_FOLDER_PATH/deploy.log" -DEPLOYMENTS_JSON_PATH="$OUTPUT_FOLDER_PATH/deployments.json" -if [[ ! -f "$DEPLOYMENTS_JSON_PATH" ]]; then - if ! "$SCRIPT_DIR/deploy.sh" | tee "$DEPLOY_LOG_PATH"; then - echo "Error: deploy.sh failed" - exit 1 - fi -else - prompt "Skipping deployment as $DEPLOYMENTS_JSON_PATH already exists. Continue?" -fi - -# Extract the addresses from the deployment logs -# shellcheck disable=2155 -export SYSTEM_CONFIG_IMPL=$(grep "1. SystemConfig:" "$DEPLOY_LOG_PATH" | awk '{print $3}') -# shellcheck disable=2155 -export MIPS_IMPL=$(grep "2. MIPS:" "$DEPLOY_LOG_PATH" | awk '{print $3}') -# shellcheck disable=2155 -export FDG_IMPL=$(grep "3. FaultDisputeGame:" "$DEPLOY_LOG_PATH" | awk '{print $3}') -# shellcheck disable=2155 -export PDG_IMPL=$(grep "4. 
PermissionedDisputeGame:" "$DEPLOY_LOG_PATH" | awk '{print $3}') - -# Ensure that the addresses were extracted properly -reqenv "SYSTEM_CONFIG_IMPL" -reqenv "MIPS_IMPL" -reqenv "FDG_IMPL" -reqenv "PDG_IMPL" - -# Generate deployments.json with extracted addresses -cat <"$DEPLOYMENTS_JSON_PATH" -{ - "SystemConfig": "$SYSTEM_CONFIG_IMPL", - "MIPS": "$MIPS_IMPL", - "FaultDisputeGame": "$FDG_IMPL", - "PermissionedDisputeGame": "$PDG_IMPL" -} -EOF - -echo "✨ Deployed contracts and saved addresses to \"$DEPLOYMENTS_JSON_PATH\"" - -# Print a message when the script exits -trap 'echo "✨ Done. Artifacts are available in \"$OUTPUT_FOLDER_PATH\""' EXIT - -prompt "Generate safe upgrade bundle for SystemConfig?" - -# Generate the system config upgrade bundle -if ! "$SCRIPT_DIR/sys-cfg-bundle.sh"; then - echo "Error: sys-cfg-bundle.sh failed" - exit 1 -fi - -prompt "Generate superchain-ops upgrade task for SystemConfig upgrade bundle?" - -# Generate the superchain-ops upgrade task -if ! "$SCRIPT_DIR/sc-ops-sys-cfg.sh"; then - echo "Error: sc-ops-sys-cfg.sh failed" - exit 1 -fi - -if [[ $USE_FAULT_PROOFS == true ]]; then - prompt "Generate safe upgrade bundle for proofs contracts?" - - # Generate the proofs contracts' upgrade bundle - if ! "$SCRIPT_DIR/proofs-bundle.sh"; then - echo "Error: proofs-bundle.sh failed" - exit 1 - fi - - prompt "Generate superchain-ops upgrade task for proofs contracts upgrade bundle?" - - # Generate the superchain-ops upgrade task - if ! 
"$SCRIPT_DIR/sc-ops-proofs.sh"; then - echo "Error: sc-ops-proofs.sh failed" - exit 1 - fi -fi diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/proofs-bundle.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/proofs-bundle.sh deleted file mode 100755 index bc0b0c7ff31..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/proofs-bundle.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the script directory -SCRIPT_DIR=$(dirname "$0") - -# Load common.sh -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/common.sh" - -# Check the env -reqenv "ETH_RPC_URL" -reqenv "OUTPUT_FOLDER_PATH" -reqenv "MIPS_IMPL" -reqenv "FDG_IMPL" -reqenv "PDG_IMPL" -reqenv "DISPUTE_GAME_FACTORY_PROXY_ADDR" -reqenv "USE_PERMISSIONLESS_FAULT_PROOFS" - -# Local environment -BUNDLE_PATH="$OUTPUT_FOLDER_PATH/proofs_bundle.json" -L1_CHAIN_ID=$(cast chain-id) - -# Copy the bundle template -cp ./templates/proof_upgrade_bundle_template.json "$BUNDLE_PATH" - -# Tx 1: Upgrade PermissionedDisputeGame implementation -TX_1_PAYLOAD=$(cast calldata "setImplementation(uint32,address)" 1 "$PDG_IMPL") - -# Tx 2: Upgrade FaultDisputeGame implementation -TX_2_PAYLOAD=$(cast calldata "setImplementation(uint32,address)" 0 "$FDG_IMPL") - -# Replace variables -sed -i "s/\$L1_CHAIN_ID/$L1_CHAIN_ID/g" "$BUNDLE_PATH" -sed -i "s/\$PDG_IMPL/$PDG_IMPL/g" "$BUNDLE_PATH" -sed -i "s/\$TX_1_PAYLOAD/$TX_1_PAYLOAD/g" "$BUNDLE_PATH" -sed -i "s/\$TX_2_PAYLOAD/$TX_2_PAYLOAD/g" "$BUNDLE_PATH" - -# Conditionally, if the FDG is being deployed, append the bundle extension -if [ "$USE_PERMISSIONLESS_FAULT_PROOFS" == true ]; then - echo "✨ USE_PERMISSIONLESS_FAULT_PROOFS=true | Adding FDG deployment to upgrade bundle." 
- jq --argjson fdg_extension "$(cat ./templates/fdg_bundle_extension.json)" \ - '.transactions += [$fdg_extension]' \ - "$BUNDLE_PATH" >"$BUNDLE_PATH.tmp" - mv "$BUNDLE_PATH.tmp" "$BUNDLE_PATH" - - # Replace variables - sed -i "s/\$FDG_IMPL/$FDG_IMPL/g" "$BUNDLE_PATH" - sed -i "s/\$TX_2_PAYLOAD/$TX_2_PAYLOAD/g" "$BUNDLE_PATH" -fi - -sed -i "s/\$DISPUTE_GAME_FACTORY_PROXY_ADDR/$DISPUTE_GAME_FACTORY_PROXY_ADDR/g" "$BUNDLE_PATH" - -echo "✨ Generated proof contracts upgrade bundle at \"$BUNDLE_PATH\"" diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-proofs.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-proofs.sh deleted file mode 100755 index ec386727d3b..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-proofs.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the script directory -SCRIPT_DIR=$(dirname "$0") - -# Load common.sh -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/common.sh" - -# Check required environment variables -reqenv "OUTPUT_FOLDER_PATH" -reqenv "MIPS_IMPL" -reqenv "FDG_IMPL" -reqenv "PDG_IMPL" -reqenv "DISPUTE_GAME_FACTORY_PROXY_ADDR" - -# Create directory for the task -TASK_DIR="$OUTPUT_FOLDER_PATH/proofs-sc-ops-task" -mkdir -p "$TASK_DIR" - -# Copy the bundle and task template -cp "$OUTPUT_FOLDER_PATH/proofs_bundle.json" "$TASK_DIR/input.json" -cp -R "$SCRIPT_DIR/../templates/proofs-sc-ops-task/." 
"$TASK_DIR/" - -# Generate the task overview -msup render -i "$TASK_DIR/input.json" -o "$TASK_DIR/OVERVIEW.md" - -# Generate the README -sed -i "s/\$MIPS_IMPL/$MIPS_IMPL/g" "$TASK_DIR/README.md" -sed -i "s/\$FDG_IMPL/$FDG_IMPL/g" "$TASK_DIR/README.md" -sed -i "s/\$PDG_IMPL/$PDG_IMPL/g" "$TASK_DIR/README.md" - -# Generate the validation doc -OLD_FDG=$(cast call "$DISPUTE_GAME_FACTORY_PROXY_ADDR" "gameImpls(uint32)" 0) -OLD_PDG=$(cast call "$DISPUTE_GAME_FACTORY_PROXY_ADDR" "gameImpls(uint32)" 1) - -PADDED_OLD_FDG=$(cast 2u "$OLD_FDG") -PADDED_OLD_PDG=$(cast 2u "$OLD_PDG") -PADDED_FDG_IMPL=$(cast 2u "$FDG_IMPL") -PADDED_PDG_IMPL=$(cast 2u "$PDG_IMPL") - -sed -i "s/\$DISPUTE_GAME_FACTORY_PROXY_ADDR/$DISPUTE_GAME_FACTORY_PROXY_ADDR/g" "$TASK_DIR/VALIDATION.md" -sed -i "s/\$OLD_FDG/$PADDED_OLD_FDG/g" "$TASK_DIR/VALIDATION.md" -sed -i "s/\$FDG_IMPL/$PADDED_FDG_IMPL/g" "$TASK_DIR/VALIDATION.md" -sed -i "s/\$PDG_IMPL/$PADDED_PDG_IMPL/g" "$TASK_DIR/VALIDATION.md" -sed -i "s/\$OLD_PDG/$PADDED_OLD_PDG/g" "$TASK_DIR/VALIDATION.md" diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-sys-cfg.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-sys-cfg.sh deleted file mode 100755 index a87de445d3d..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sc-ops-sys-cfg.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the script directory -SCRIPT_DIR=$(dirname "$0") - -# Load common.sh -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/common.sh" - -# Check required environment variables -reqenv "OUTPUT_FOLDER_PATH" -reqenv "SYSTEM_CONFIG_IMPL" -reqenv "SYSTEM_CONFIG_PROXY_ADDR" - -# Create directory for the task -TASK_DIR="$OUTPUT_FOLDER_PATH/sys-cfg-sc-ops-task" -mkdir -p "$TASK_DIR" - -# Copy the bundle and task template -cp "$OUTPUT_FOLDER_PATH/sys_cfg_bundle.json" "$TASK_DIR/input.json" -cp -R "$SCRIPT_DIR/../templates/sys-cfg-sc-ops-task/." 
"$TASK_DIR/" - -# Generate the task overview -msup render -i "$TASK_DIR/input.json" -o "$TASK_DIR/OVERVIEW.md" - -# Generate the README -sed -i "s/\$SYSTEM_CONFIG_IMPL/$SYSTEM_CONFIG_IMPL/g" "$TASK_DIR/README.md" - -# Generate the validation doc -OLD_SYS_CFG=$(cast impl "$SYSTEM_CONFIG_PROXY_ADDR") - -PADDED_OLD_SYS_CFG=$(cast 2u "$OLD_SYS_CFG") -PADDED_SYS_CFG=$(cast 2u "$SYSTEM_CONFIG_IMPL") - -sed -i "s/\$SYSTEM_CONFIG_PROXY_ADDR/$SYSTEM_CONFIG_PROXY_ADDR/g" "$TASK_DIR/VALIDATION.md" -sed -i "s/\$OLD_SYS_CFG/$PADDED_OLD_SYS_CFG/g" "$TASK_DIR/VALIDATION.md" -sed -i "s/\$SYSTEM_CONFIG_IMPL/$PADDED_SYS_CFG/g" "$TASK_DIR/VALIDATION.md" diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sys-cfg-bundle.sh b/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sys-cfg-bundle.sh deleted file mode 100755 index bde4917e6b8..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/scripts/sys-cfg-bundle.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the script directory -SCRIPT_DIR=$(dirname "$0") - -# Load common.sh -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/common.sh" - -# Check the env -reqenv "ETH_RPC_URL" -reqenv "OUTPUT_FOLDER_PATH" -reqenv "PROXY_ADMIN_ADDR" -reqenv "SYSTEM_CONFIG_PROXY_ADDR" -reqenv "SYSTEM_CONFIG_IMPL" - -# Local environment -BUNDLE_PATH="$OUTPUT_FOLDER_PATH/sys_cfg_bundle.json" -L1_CHAIN_ID=$(cast chain-id) - -# Copy the bundle template -cp ./templates/sys_cfg_upgrade_bundle_template.json "$BUNDLE_PATH" - -# Tx 1: Upgrade SystemConfigProxy implementation -TX_1_PAYLOAD=$(cast calldata "upgrade(address,address)" "$SYSTEM_CONFIG_PROXY_ADDR" "$SYSTEM_CONFIG_IMPL") - -# Replace variables -sed -i "s/\$L1_CHAIN_ID/$L1_CHAIN_ID/g" "$BUNDLE_PATH" -sed -i "s/\$PROXY_ADMIN_ADDR/$PROXY_ADMIN_ADDR/g" "$BUNDLE_PATH" -sed -i "s/\$SYSTEM_CONFIG_PROXY_ADDR/$SYSTEM_CONFIG_PROXY_ADDR/g" "$BUNDLE_PATH" -sed -i "s/\$SYSTEM_CONFIG_IMPL/$SYSTEM_CONFIG_IMPL/g" "$BUNDLE_PATH" -sed -i 
"s/\$TX_1_PAYLOAD/$TX_1_PAYLOAD/g" "$BUNDLE_PATH" - -echo "✨ Generated SystemConfig upgrade bundle at \"$BUNDLE_PATH\"" diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/fdg_bundle_extension.json b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/fdg_bundle_extension.json deleted file mode 100644 index fa6680496bd..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/fdg_bundle_extension.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "metadata": { - "name": "Upgrade `CANNON` game type in `DisputeGameFactory`", - "description": "Upgrades the `CANNON` game type to the new Holocene deployment, with an updated version of `op-program` as the absolute prestate hash." - }, - "to": "$DISPUTE_GAME_FACTORY_PROXY_ADDR", - "value": "0x0", - "data": "$TX_2_PAYLOAD", - "contractMethod": { - "type": "function", - "name": "setImplementation", - "inputs": [ - { - "name": "_gameType", - "type": "uint32" - }, - { - "name": "_impl", - "type": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - "contractInputsValues": { - "_gameType": "0", - "_impl": "$FDG_IMPL" - } -} diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proof_upgrade_bundle_template.json b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proof_upgrade_bundle_template.json deleted file mode 100644 index e74360f02a9..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proof_upgrade_bundle_template.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "chainId": $L1_CHAIN_ID, - "metadata": { - "name": "Holocene Hardfork - Proof Contract Upgrades", - "description": "Upgrades the `MIPS.sol`, `FaultDisputeGame.sol`, and `PermissionedDisputeGame.sol` contracts for Holocene." 
- }, - "transactions": [ - { - "metadata": { - "name": "Upgrade `PERMISSIONED_CANNON` game type in `DisputeGameFactory`", - "description": "Upgrades the `PERMISSIONED_CANNON` game type to the new Holocene deployment, with an updated version of `op-program` as the absolute prestate hash." - }, - "to": "$DISPUTE_GAME_FACTORY_PROXY_ADDR", - "value": "0x0", - "data": "$TX_1_PAYLOAD", - "contractMethod": { - "type": "function", - "name": "setImplementation", - "inputs": [ - { - "name": "_gameType", - "type": "uint32" - }, - { - "name": "_impl", - "type": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - "contractInputsValues": { - "_gameType": "1", - "_impl": "$PDG_IMPL" - } - } - ] -} diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/.env.example b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/.env.example deleted file mode 100644 index ba2beff7f42..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/.env.example +++ /dev/null @@ -1,6 +0,0 @@ -ETH_RPC_URL= -SUPERCHAIN_CONFIG_ADDR= -COUNCIL_SAFE= -FOUNDATION_SAFE= -OWNER_SAFE= -SAFE_NONCE= diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/README.md b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/README.md deleted file mode 100644 index e2077747dba..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Holocene Hardfork Upgrade - -Status: DRAFT, NOT READY TO SIGN - -## Objective - -Upgrades the Fault Proof contracts for the Holocene hardfork. - -The proposal was: - -- [ ] Posted on the governance forum. -- [ ] Approved by Token House voting. -- [ ] Not vetoed by the Citizens' house. -- [ ] Executed on OP Mainnet. 
- -The governance proposal should be treated as the source of truth and used to verify the correctness of the onchain operations. - -Governance post of the upgrade can be found at . - -This upgrades the Fault Proof contracts in the -[op-contracts/v1.8.0](https://github.com/ethereum-optimism/optimism/tree/op-contracts/v1.8.0-rc.1) release. - -## Pre-deployments - -- `MIPS` - `$MIPS_IMPL` -- `FaultDisputeGame` - `$FDG_IMPL` -- `PermissionedDisputeGame` - `$PDG_IMPL` - -## Simulation - -Please see the "Simulating and Verifying the Transaction" instructions in [NESTED.md](../../../NESTED.md). -When simulating, ensure the logs say `Using script /your/path/to/superchain-ops/tasks//NestedSignFromJson.s.sol`. -This ensures all safety checks are run. If the default `NestedSignFromJson.s.sol` script is shown (without the full path), something is wrong and the safety checks will not run. - -## State Validation - -Please see the instructions for [validation](./VALIDATION.md). - -## Execution - -This upgrade -* Changes dispute game implementation of the `CANNON` and `PERMISSIONED_CANNON` game types to contain a `op-program` release for the Holocene hardfork, which contains - the Holocene fork implementation as well as a `ChainConfig` and `RollupConfig` for the L2 chain being upgraded. -* Upgrades `MIPS.sol` to support the `F_GETFD` syscall, required by the golang 1.22+ runtime. - -See the [overview](./OVERVIEW.md) and `input.json` bundle for more details. diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/VALIDATION.md b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/VALIDATION.md deleted file mode 100644 index 50ba3ca106b..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/proofs-sc-ops-task/VALIDATION.md +++ /dev/null @@ -1,24 +0,0 @@ -# Validation - -This document can be used to validate the state diff resulting from the execution of the upgrade -transaction. 
- -For each contract listed in the state diff, please verify that no contracts or state changes shown in the Tenderly diff are missing from this document. Additionally, please verify that for each contract: - -- The following state changes (and none others) are made to that contract. This validates that no unexpected state changes occur. -- All addresses (in section headers and storage values) match the provided name, using the Etherscan and Superchain Registry links provided. This validates the bytecode deployed at the addresses contains the correct logic. -- All key values match the semantic meaning provided, which can be validated using the storage layout links provided. - -## State Changes - -### `$DISPUTE_GAME_FACTORY_PROXY_ADDR` (`DisputeGameFactoryProxy`) - -- **Key**: `0xffdfc1249c027f9191656349feb0761381bb32c9f557e01f419fd08754bf5a1b`
- **Before**: `$OLD_FDG`
- **After**: `$FDG_IMPL`
- **Meaning**: Updates the CANNON game type implementation. Verify that the new implementation is set using `cast call $DISPUTE_GAME_FACTORY_PROXY_ADDR "gameImpls(uint32)(address)" 0`. Where `0` is the [`CANNON` game type](https://github.com/ethereum-optimism/optimism/blob/op-contracts/v1.4.0/packages/contracts-bedrock/src/dispute/lib/Types.sol#L28). - -- **Key**: `0x4d5a9bd2e41301728d41c8e705190becb4e74abe869f75bdb405b63716a35f9e`
- **Before**: `$OLD_PDG`
- **After**: `$PDG_IMPL`
- **Meaning**: Updates the PERMISSIONED_CANNON game type implementation. Verify that the new implementation is set using `cast call $DISPUTE_GAME_FACTORY_PROXY_ADDR "gameImpls(uint32)(address)" 1`. Where `1` is the [`PERMISSIONED_CANNON` game type](https://github.com/ethereum-optimism/optimism/blob/op-contracts/v1.4.0/packages/contracts-bedrock/src/dispute/lib/Types.sol#L31). diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/.env.example b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/.env.example deleted file mode 100644 index ba2beff7f42..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/.env.example +++ /dev/null @@ -1,6 +0,0 @@ -ETH_RPC_URL= -SUPERCHAIN_CONFIG_ADDR= -COUNCIL_SAFE= -FOUNDATION_SAFE= -OWNER_SAFE= -SAFE_NONCE= diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/README.md b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/README.md deleted file mode 100644 index 740219586b8..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Holocene Hardfork Upgrade - `SystemConfig` - -Status: DRAFT, NOT READY TO SIGN - -## Objective - -Upgrades the `SystemConfig` for the Holocene hardfork. - -The proposal was: - -- [ ] Posted on the governance forum. -- [ ] Approved by Token House voting. -- [ ] Not vetoed by the Citizens' house. -- [ ] Executed on OP Mainnet. - -The governance proposal should be treated as the source of truth and used to verify the correctness of the onchain operations. - -Governance post of the upgrade can be found at . - -This upgrades the `SystemConfig` in the -[op-contracts/v1.8.0](https://github.com/ethereum-optimism/optimism/tree/op-contracts/v1.8.0-rc.1) release. 
- -## Pre-deployments - -- `SystemConfig` - `$SYSTEM_CONFIG_IMPL` - -## Simulation - -Please see the "Simulating and Verifying the Transaction" instructions in [NESTED.md](../../../NESTED.md). -When simulating, ensure the logs say `Using script /your/path/to/superchain-ops/tasks//NestedSignFromJson.s.sol`. -This ensures all safety checks are run. If the default `NestedSignFromJson.s.sol` script is shown (without the full path), something is wrong and the safety checks will not run. - -## State Validation - -Please see the instructions for [validation](./VALIDATION.md). - -## Execution - -This upgrade -* Changes the implementation of the `SystemConfig` to hold EIP-1559 parameters for the - -See the [overview](./OVERVIEW.md) and `input.json` bundle for more details. diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/VALIDATION.md b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/VALIDATION.md deleted file mode 100644 index 3a5e7b67595..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys-cfg-sc-ops-task/VALIDATION.md +++ /dev/null @@ -1,19 +0,0 @@ -# Validation - -This document can be used to validate the state diff resulting from the execution of the upgrade -transaction. - -For each contract listed in the state diff, please verify that no contracts or state changes shown in the Tenderly diff are missing from this document. Additionally, please verify that for each contract: - -- The following state changes (and none others) are made to that contract. This validates that no unexpected state changes occur. -- All addresses (in section headers and storage values) match the provided name, using the Etherscan and Superchain Registry links provided. This validates the bytecode deployed at the addresses contains the correct logic. -- All key values match the semantic meaning provided, which can be validated using the storage layout links provided. 
- -## State Changes - -### `$SYSTEM_CONFIG_PROXY_ADDR` (`SystemConfigProxy`) - -- **Key**: `0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc` - **Before**: `$OLD_SYS_CFG` - **After**: `$SYSTEM_CONFIG_IMPL` - **Meaning**: Updates the `SystemConfig` proxy implementation. diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys_cfg_upgrade_bundle_template.json b/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys_cfg_upgrade_bundle_template.json deleted file mode 100644 index 62746a51982..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/templates/sys_cfg_upgrade_bundle_template.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "chainId": $L1_CHAIN_ID, - "metadata": { - "name": "Holocene Hardfork - SystemConfig Upgrade", - "description": "Upgrades the `SystemConfig.sol` contract for Holocene." - }, - "transactions": [ - { - "metadata": { - "name": "Upgrade `SystemConfig` proxy", - "description": "Upgrades the `SystemConfig` proxy to the new implementation, featuring configurable EIP-1559 parameters." 
- }, - "to": "$PROXY_ADMIN_ADDR", - "value": "0x0", - "data": "$TX_1_PAYLOAD", - "contractMethod": { - "type": "function", - "name": "upgrade", - "inputs": [ - { - "name": "_proxy", - "type": "address" - }, - { - "name": "_implementation", - "type": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - "contractInputsValues": { - "_proxy": "$SYSTEM_CONFIG_PROXY_ADDR", - "_implementation": "$SYSTEM_CONFIG_IMPL" - } - } - ] -} diff --git a/packages/contracts-bedrock/scripts/upgrades/holocene/upgrade.dockerfile b/packages/contracts-bedrock/scripts/upgrades/holocene/upgrade.dockerfile deleted file mode 100644 index 33166fe6e5a..00000000000 --- a/packages/contracts-bedrock/scripts/upgrades/holocene/upgrade.dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -# Use a base image with necessary tools -FROM ubuntu:20.04 - -ARG REV - -# Install required packages -RUN apt-get update && apt-get install -y \ - git \ - bash \ - curl \ - build-essential \ - jq \ - && rm -rf /var/lib/apt/lists/* - -# Install Rust -RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y -ENV PATH="/root/.cargo/bin:${PATH}" - -# Install just -RUN curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin - -# Install msup -RUN git clone https://github.com/clabby/msup.git && \ - cd msup && \ - cargo install --path . - -# Install foundryup -RUN curl -L https://foundry.paradigm.xyz | bash -ENV PATH="/root/.foundry/bin:${PATH}" - -# Set the working directory -WORKDIR /app - -# Clone the repository -RUN git clone https://github.com/ethereum-optimism/optimism.git . 
- -# Check out the target branch -RUN git checkout $REV - -# Set the working directory to the root of the monorepo -WORKDIR /app - -# Install correct foundry version -RUN just update-foundry - -# Set the working directory to the root of the contracts package -WORKDIR /app/packages/contracts-bedrock - -# Install dependencies -RUN just install - -# Build the contracts package -RUN forge build - -# Deliberately run the upgrade script with invalid args to trigger a build -RUN forge script ./scripts/upgrades/holocene/DeployUpgrade.s.sol || true - -# Set the working directory to where upgrade.sh is located -WORKDIR /app/packages/contracts-bedrock/scripts/upgrades/holocene - -# allows to use modified local scripts -COPY scripts/*.sh ./scripts/ - -# Set the entrypoint to the main.sh script -ENTRYPOINT ["./scripts/main.sh"] From 2d86cab7f9525ad3ddb2199f0e18b9cde85042f1 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Wed, 4 Dec 2024 01:46:09 -0500 Subject: [PATCH 046/111] feat: timeout fuzz tests after 10 minutes (#13207) * feat: timeout fuzz tests after 10 minutes Uses the new fuzz test timeout feature that we merged into foundry to limit individual test runs to 10 minutes. Simplifies how we need to deal with heavy fuzz testing. * fix: improve bounds for OptimismPortal tests Improves the fuzz bounds for the OptimismPortal tests so that they can complete 20000 runs without throwing. 
--- .circleci/config.yml | 2 +- packages/contracts-bedrock/foundry.toml | 2 ++ .../test/L1/OptimismPortal.t.sol | 24 +++++++++---------- .../test/L1/OptimismPortal2.t.sol | 23 +++++++++--------- .../test/cannon/PreimageOracle.t.sol | 3 --- .../test/safe/LivenessGuard.t.sol | 1 - .../test/setup/DeployVariations.t.sol | 2 -- 7 files changed, 27 insertions(+), 30 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8cdd813d72a..665bfc5e6c9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 parameters: ci_builder_image: type: string - default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.54.0 + default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.55.0 ci_builder_rust_image: type: string default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder-rust:latest diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 6bad8be9d4c..782369b0485 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -83,10 +83,12 @@ depth = 32 [profile.ciheavy.fuzz] runs = 20000 +timeout = 600 [profile.ciheavy.invariant] runs = 128 depth = 512 +timeout = 600 ################################################################ # PROFILE: LITE # diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index 8172fb14b79..a0a49227e71 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -201,7 +201,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds when msg.sender == tx.origin and non-custom gas is used. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_senderIsOrigin_succeeds( address _to, uint256 _mint, @@ -227,7 +226,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds when msg.sender != tx.origin and non-custom gas is used. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_senderNotOrigin_succeeds( address _to, uint256 _mint, @@ -310,7 +308,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for an EOA. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_eoa_succeeds( address _to, uint64 _gasLimit, @@ -355,7 +352,6 @@ contract OptimismPortal_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for a contract. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_contract_succeeds( address _to, uint64 _gasLimit, @@ -1214,7 +1210,6 @@ contract OptimismPortal_FinalizeWithdrawal_Test is CommonTest { } /// @dev Tests that `finalizeWithdrawalTransaction` succeeds. - /// forge-config: ciheavy.fuzz.runs = 8192 function testDiff_finalizeWithdrawalTransaction_succeeds( address _sender, address _target, @@ -1337,7 +1332,6 @@ contract OptimismPortalResourceFuzz_Test is CommonTest { uint256 constant MAX_GAS_LIMIT = 30_000_000; /// @dev Test that various values of the resource metering config will not break deposits. 
- /// forge-config: ciheavy.fuzz.runs = 10000 function testFuzz_systemConfigDeposit_succeeds( uint32 _maxResourceLimit, uint8 _elasticityMultiplier, @@ -1356,8 +1350,13 @@ contract OptimismPortalResourceFuzz_Test is CommonTest { uint64 gasLimit = systemConfig.gasLimit(); // Bound resource config + _systemTxMaxGas = uint32(bound(_systemTxMaxGas, 0, gasLimit - 21000)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8)); + _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, gasLimit - _systemTxMaxGas)); + _maximumBaseFee = uint128(bound(_maximumBaseFee, 1, type(uint128).max)); + _minimumBaseFee = uint32(bound(_minimumBaseFee, 0, _maximumBaseFee - 1)); _gasLimit = uint64(bound(_gasLimit, 21000, _maxResourceLimit)); + _gasLimit = uint64(bound(_gasLimit, 0, gasLimit)); _prevBaseFee = uint128(bound(_prevBaseFee, 0, 3 gwei)); _prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit)); _blockDiff = uint8(bound(_blockDiff, 0, 3)); @@ -1365,11 +1364,16 @@ contract OptimismPortalResourceFuzz_Test is CommonTest { _elasticityMultiplier = uint8(bound(_elasticityMultiplier, 1, type(uint8).max)); // Prevent values that would cause reverts - vm.assume(gasLimit >= _gasLimit); - vm.assume(_minimumBaseFee < _maximumBaseFee); vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit); vm.assume(((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) == _maxResourceLimit); + // Although we typically want to limit the usage of vm.assume, we've constructed the above + // bounds to satisfy the assumptions listed in this specific section. These assumptions + // serve only to act as an additional sanity check on top of the bounds and should not + // result in an unnecessary number of test rejections. + vm.assume(gasLimit >= _gasLimit); + vm.assume(_minimumBaseFee < _maximumBaseFee); + // Base fee can increase quickly and mean that we can't buy the amount of gas we want. 
// Here we add a VM assumption to bound the potential increase. // Compute the maximum possible increase in base fee. @@ -1472,7 +1476,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender == tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderIsOrigin_succeeds( address _to, uint256 _mint, @@ -1498,7 +1501,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender != tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderNotOrigin_succeeds( address _to, uint256 _mint, @@ -1697,7 +1699,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderIsOrigin_succeeds( address _to, uint256 _value, @@ -1721,7 +1722,6 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderNotOrigin_succeeds( address _to, uint256 _value, diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index 083644755d3..44c1c8f9fd1 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -211,7 +211,6 @@ contract OptimismPortal2_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for an EOA. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_eoa_succeeds( address _to, uint64 _gasLimit, @@ -256,7 +255,6 @@ contract OptimismPortal2_Test is CommonTest { } /// @dev Tests that `depositTransaction` succeeds for a contract. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_contract_succeeds( address _to, uint64 _gasLimit, @@ -1326,7 +1324,6 @@ contract OptimismPortal2_FinalizeWithdrawal_Test is CommonTest { } /// @dev Tests that `finalizeWithdrawalTransaction` succeeds. - /// forge-config: ciheavy.fuzz.runs = 8192 function testDiff_finalizeWithdrawalTransaction_succeeds( address _sender, address _target, @@ -1610,7 +1607,6 @@ contract OptimismPortal2_ResourceFuzz_Test is CommonTest { } /// @dev Test that various values of the resource metering config will not break deposits. - /// forge-config: ciheavy.fuzz.runs = 10000 function testFuzz_systemConfigDeposit_succeeds( uint32 _maxResourceLimit, uint8 _elasticityMultiplier, @@ -1629,8 +1625,13 @@ contract OptimismPortal2_ResourceFuzz_Test is CommonTest { uint64 gasLimit = systemConfig.gasLimit(); // Bound resource config + _systemTxMaxGas = uint32(bound(_systemTxMaxGas, 0, gasLimit - 21000)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8)); + _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, gasLimit - _systemTxMaxGas)); + _maximumBaseFee = uint128(bound(_maximumBaseFee, 1, type(uint128).max)); + _minimumBaseFee = uint32(bound(_minimumBaseFee, 0, _maximumBaseFee - 1)); _gasLimit = uint64(bound(_gasLimit, 21000, _maxResourceLimit)); + _gasLimit = uint64(bound(_gasLimit, 0, gasLimit)); _prevBaseFee = uint128(bound(_prevBaseFee, 0, 3 gwei)); _prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit)); _blockDiff = uint8(bound(_blockDiff, 0, 3)); @@ -1638,12 +1639,16 @@ contract OptimismPortal2_ResourceFuzz_Test is CommonTest { _elasticityMultiplier = uint8(bound(_elasticityMultiplier, 1, 
type(uint8).max)); // Prevent values that would cause reverts - vm.assume(gasLimit >= _gasLimit); - vm.assume(_minimumBaseFee < _maximumBaseFee); - vm.assume(_baseFeeMaxChangeDenominator > 1); vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit); vm.assume(((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) == _maxResourceLimit); + // Although we typically want to limit the usage of vm.assume, we've constructed the above + // bounds to satisfy the assumptions listed in this specific section. These assumptions + // serve only to act as an additional sanity check on top of the bounds and should not + // result in an unnecessary number of test rejections. + vm.assume(gasLimit >= _gasLimit); + vm.assume(_minimumBaseFee < _maximumBaseFee); + // Base fee can increase quickly and mean that we can't buy the amount of gas we want. // Here we add a VM assumption to bound the potential increase. // Compute the maximum possible increase in base fee. @@ -1746,7 +1751,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender == tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderIsOrigin_succeeds( address _to, uint256 _mint, @@ -1772,7 +1776,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositERC20Transaction` succeeds when msg.sender != tx.origin. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositERC20Transaction_senderNotOrigin_succeeds( address _to, uint256 _mint, @@ -1980,7 +1983,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. 
- /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderIsOrigin_succeeds( address _to, uint256 _value, @@ -2004,7 +2006,6 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal } /// @dev Tests that `depositTransaction` succeeds when a custom gas token is used but the msg.value is zero. - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_depositTransaction_customGasTokenWithNoValueAndSenderNotOrigin_succeeds( address _to, uint256 _value, diff --git a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol index 1c4d41728fc..6293f652e52 100644 --- a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol +++ b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol @@ -889,7 +889,6 @@ contract PreimageOracle_LargePreimageProposals_Test is Test { /// @notice Tests that squeezing a large preimage proposal after the challenge period has passed always succeeds and /// persists the correct data. - /// forge-config: ciheavy.fuzz.runs = 512 function testFuzz_squeezeLPP_succeeds(uint256 _numBlocks, uint32 _partOffset) public { _numBlocks = bound(_numBlocks, 1, 2 ** 8); _partOffset = uint32(bound(_partOffset, 0, _numBlocks * LibKeccak.BLOCK_SIZE_BYTES + 8 - 1)); @@ -1087,7 +1086,6 @@ contract PreimageOracle_LargePreimageProposals_Test is Test { /// @notice Tests that challenging the first divergence in a large preimage proposal at an arbitrary location /// in the leaf values always succeeds. 
- /// forge-config: ciheavy.fuzz.runs = 512 function testFuzz_challenge_arbitraryLocation_succeeds(uint256 _lastCorrectLeafIdx, uint256 _numBlocks) public { _numBlocks = bound(_numBlocks, 1, 2 ** 8); _lastCorrectLeafIdx = bound(_lastCorrectLeafIdx, 0, _numBlocks - 1); @@ -1140,7 +1138,6 @@ contract PreimageOracle_LargePreimageProposals_Test is Test { } /// @notice Tests that challenging the a divergence in a large preimage proposal at the first leaf always succeeds. - /// forge-config: ciheavy.fuzz.runs = 1024 function testFuzz_challengeFirst_succeeds(uint256 _numBlocks) public { _numBlocks = bound(_numBlocks, 1, 2 ** 8); diff --git a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol index a7b67b415b2..90a0ecd9f1f 100644 --- a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol @@ -228,7 +228,6 @@ contract LivenessGuard_FuzzOwnerManagement_Test is StdCheats, StdUtils, Liveness mapping(address => uint256) privateKeys; /// @dev Tests that the guard correctly manages the lastLive mapping when owners are added, removed, or swapped - /// forge-config: ciheavy.fuzz.runs = 8192 function testFuzz_ownerManagement_works( uint256 initialOwners, uint256 threshold, diff --git a/packages/contracts-bedrock/test/setup/DeployVariations.t.sol b/packages/contracts-bedrock/test/setup/DeployVariations.t.sol index 31f687f0fdb..94628067e11 100644 --- a/packages/contracts-bedrock/test/setup/DeployVariations.t.sol +++ b/packages/contracts-bedrock/test/setup/DeployVariations.t.sol @@ -22,7 +22,6 @@ contract DeployVariations_Test is CommonTest { } } - /// forge-config: ciheavy.fuzz.runs = 512 /// @dev It should be possible to enable Fault Proofs with any mix of CGT and Alt-DA. 
function testFuzz_enableFaultProofs_succeeds(bool _enableCGT, bool _enableAltDa) public virtual { enableAddOns(_enableCGT, _enableAltDa); @@ -30,7 +29,6 @@ contract DeployVariations_Test is CommonTest { super.setUp(); } - /// forge-config: ciheavy.fuzz.runs = 512 /// @dev It should be possible to enable Fault Proofs and Interop with any mix of CGT and Alt-DA. function test_enableInteropAndFaultProofs_succeeds(bool _enableCGT, bool _enableAltDa) public virtual { enableAddOns(_enableCGT, _enableAltDa); From cd8bab41543bbfae553bf882569484b07f20b993 Mon Sep 17 00:00:00 2001 From: George Knee Date: Wed, 4 Dec 2024 08:58:39 +0000 Subject: [PATCH 047/111] e2e/actions: enhance holocene activation action test to cover execution layer (#13188) * enhance holoene activation action test to cover execution layer * apply suggestions from code review * undo changes to submodule --- .../proofs/holocene_activation_test.go | 37 +++++++++++++------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/op-e2e/actions/proofs/holocene_activation_test.go b/op-e2e/actions/proofs/holocene_activation_test.go index 55b8c1162de..1cdd3aba457 100644 --- a/op-e2e/actions/proofs/holocene_activation_test.go +++ b/op-e2e/actions/proofs/holocene_activation_test.go @@ -26,12 +26,14 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) { env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), helpers.NewBatcherCfg(), setHoloceneTime) - t.Log("HoloceneTime: ", env.Sequencer.RollupCfg.HoloceneTime) - - // Build the L2 chain - blocks := []uint{1, 2} - targetHeadNumber := 2 - for env.Engine.L2Chain().CurrentBlock().Number.Uint64() < uint64(targetHeadNumber) { + t.Logf("L2 Genesis Time: %d, HoloceneTime: %d ", env.Sequencer.RollupCfg.Genesis.L2Time, *env.Sequencer.RollupCfg.HoloceneTime) + + // Build the L2 chain until the Holocene activation time, + // which for the Execution Engine is an L2 block timestamp + // 
https://specs.optimism.io/protocol/holocene/exec-engine.html?highlight=holocene#timestamp-activation + for env.Engine.L2Chain().CurrentBlock().Time < *env.Sequencer.RollupCfg.HoloceneTime { + b := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash) + require.Equal(t, "", string(b.Extra()), "extra data should be empty before Holocene activation") env.Sequencer.ActL2StartBlock(t) // Send an L2 tx env.Alice.L2.ActResetTxOpts(t) @@ -39,15 +41,24 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) { env.Alice.L2.ActMakeTx(t) env.Engine.ActL2IncludeTx(env.Alice.Address())(t) env.Sequencer.ActL2EndBlock(t) + t.Log("Unsafe block with timestamp %d", b.Time) } + b := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash) + require.Len(t, b.Extra(), 9, "extra data should be 9 bytes after Holocene activation") // Build up a local list of frames - orderedFrames := make([][]byte, 0, 2) + orderedFrames := make([][]byte, 0, 1) + // Submit the first two blocks, this will be enough to trigger Holocene _derivation_ + // which is activated by the L1 inclusion block timestamp + // https://specs.optimism.io/protocol/holocene/derivation.html?highlight=holoce#activation + // block 1 will be 12 seconda after genesis, and 2 seconds before Holocene activation + // block 2 will be 24 seconds after genesis, and 10 seconds after Holocene activation + blocksToSubmit := []uint{1, 2} // Buffer the blocks in the batcher and populate orderedFrames list env.Batcher.ActCreateChannel(t, false) - for i, blockNum := range blocks { + for i, blockNum := range blocksToSubmit { env.Batcher.ActAddBlockByNumber(t, int64(blockNum), actionsHelpers.BlockLogger(t)) - if i == len(blocks)-1 { + if i == len(blocksToSubmit)-1 { env.Batcher.ActL2ChannelClose(t) } frame := env.Batcher.ReadNextOutputFrame(t) @@ -64,7 +75,7 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) { // Submit first frame env.Batcher.ActL2BatchSubmitRaw(t, orderedFrames[0]) - includeBatchTx() // block 
should have a timestamp of 12s after genesis + includeBatchTx() // L1 block should have a timestamp of 12s after genesis // Holocene should activate 14s after genesis, so that the previous l1 block // was before HoloceneTime and the next l1 block is after it @@ -78,8 +89,11 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) { env.Sequencer.ActL2PipelineFull(t) l2SafeHead := env.Sequencer.L2Safe() + t.Log(l2SafeHead.Time) require.EqualValues(t, uint64(0), l2SafeHead.Number) // channel should be dropped, so no safe head progression - t.Log("Safe head progressed as expected", "l2SafeHeadNumber", l2SafeHead.Number) + if uint64(0) == l2SafeHead.Number { + t.Log("Safe head progressed as expected", "l2SafeHeadNumber", l2SafeHead.Number) + } // Log assertions filters := []string{ @@ -92,7 +106,6 @@ func Test_ProgramAction_HoloceneActivation(gt *testing.T) { recs := env.Logs.FindLogs(testlog.NewMessageContainsFilter(filter), testlog.NewAttributesFilter("role", "sequencer")) require.Len(t, recs, 1, "searching for %d instances of '%s' in logs from role %s", 1, filter, "sequencer") } - env.RunFaultProofProgram(t, l2SafeHead.Number, testCfg.CheckResult, testCfg.InputParams...) } From c3e535c9a858b21176033380058dfcee87055526 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 4 Dec 2024 07:27:14 -0700 Subject: [PATCH 048/111] ops: Remove contracts-bedrock Docker image (#13218) This has since been replaced with op-deployer. 
--- .circleci/config.yml | 9 -------- docker-bake.hcl | 9 -------- ops/docker/Dockerfile.packages | 38 ---------------------------------- 3 files changed, 56 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 665bfc5e6c9..bfe83b91f04 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1688,15 +1688,6 @@ workflows: name: <>-cross-platform requires: - <>-docker-publish - - docker-build: - name: contracts-bedrock-docker-publish - docker_name: contracts-bedrock - docker_tags: <>,<> - resource_class: xlarge - publish: true - context: - - oplabs-gcr - - slack scheduled-preimage-reproducibility: when: diff --git a/docker-bake.hcl b/docker-bake.hcl index f0b57e56519..3f59c433640 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -246,15 +246,6 @@ target "ci-builder-rust" { tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder-rust:${tag}"] } -target "contracts-bedrock" { - dockerfile = "./ops/docker/Dockerfile.packages" - context = "." - target = "contracts-bedrock" - # See comment in Dockerfile.packages for why we only build for linux/amd64. - platforms = ["linux/amd64"] - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/contracts-bedrock:${tag}"] -} - target "op-deployer" { dockerfile = "ops/docker/op-stack-go/Dockerfile" context = "." 
diff --git a/ops/docker/Dockerfile.packages b/ops/docker/Dockerfile.packages index 1e5786412cf..d4c8041321b 100644 --- a/ops/docker/Dockerfile.packages +++ b/ops/docker/Dockerfile.packages @@ -33,41 +33,3 @@ RUN git submodule update --init --recursive \ && cd packages/contracts-bedrock \ && just forge-build \ && echo $(git rev-parse HEAD) > .gitcommit - -FROM --platform=linux/amd64 debian:bookworm-20240812-slim as contracts-bedrock - -RUN apt-get update && apt-get install -y \ - curl \ - jq \ - ca-certificates \ - git \ - make \ - bash \ - --no-install-recommends - -COPY /ops/docker/oplabs.crt /usr/local/share/ca-certificates/oplabs.crt - -RUN chmod 644 /usr/local/share/ca-certificates/oplabs.crt \ - && update-ca-certificates - -COPY --from=foundry /usr/local/bin/just /usr/local/bin/just -COPY --from=foundry /usr/local/bin/forge /usr/local/bin/forge -COPY --from=foundry /usr/local/bin/cast /usr/local/bin/cast -COPY --from=foundry /usr/local/bin/svm /usr/local/bin/svm - -RUN svm install 0.8.25 && \ - svm install 0.8.15 && \ - svm install 0.8.19 && \ - svm install 0.8.26 - -# Not to be confused with OP, this is a OnePassword CLI tool. -COPY --from=1password/op:2 /usr/local/bin/op /usr/local/bin/op - -RUN mkdir -p /opt/optimism/packages/contracts-bedrock - -COPY --from=base /opt/optimism/packages/contracts-bedrock /opt/optimism/packages/contracts-bedrock -COPY --from=base /opt/optimism/mise.toml /opt/optimism/mise.toml - -WORKDIR /opt/optimism/packages/contracts-bedrock - -CMD ["echo", "Override this command to use this image."] From ecdb788a18022d4b78d6288c1f6ec94c152d02f8 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Wed, 4 Dec 2024 13:07:04 -0500 Subject: [PATCH 049/111] maint: clean up some usage of vm.assume (#13216) Cleans up some usage of vm.assume. Will be a series of commits that do mostly the same thing. 
--- .../test/L1/ResourceMetering.t.sol | 7 +++-- .../test/L1/SystemConfig.t.sol | 23 ++++++++++----- .../test/L1/SystemConfigInterop.t.sol | 21 ++++++++++---- .../contracts-bedrock/test/L2/L1Block.t.sol | 29 ++++++++++++------- .../test/libraries/SafeCall.t.sol | 2 +- .../test/safe/DeputyGuardianModule.t.sol | 8 ++++- 6 files changed, 62 insertions(+), 28 deletions(-) diff --git a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol index 18f5ba82283..8b262b55f2a 100644 --- a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol @@ -201,20 +201,21 @@ contract ResourceMetering_Test is Test { function testFuzz_meter_largeBlockDiff_succeeds(uint64 _amount, uint256 _blockDiff) external { // This test fails if the following line is commented out. // At 12 seconds per block, this number is effectively unreachable. - vm.assume(_blockDiff < 433576281058164217753225238677900874458691); + _blockDiff = uint256(bound(_blockDiff, 0, 433576281058164217753225238677900874458690)); ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig(); uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier); uint64 elasticityMultiplier = uint64(rcfg.elasticityMultiplier); - vm.assume(_amount < target * elasticityMultiplier); + _amount = uint64(bound(_amount, 0, target * elasticityMultiplier)); + vm.roll(initialBlockNum + _blockDiff); meter.use(_amount); } function testFuzz_meter_useGas_succeeds(uint64 _amount) external { (, uint64 prevBoughtGas,) = meter.params(); - vm.assume(prevBoughtGas + _amount <= meter.resourceConfig().maxResourceLimit); + _amount = uint64(bound(_amount, 0, meter.resourceConfig().maxResourceLimit - prevBoughtGas)); meter.use(_amount); diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index fd5fd296f8f..ae025c10df9 100644 --- 
a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -385,12 +385,21 @@ contract SystemConfig_Init_CustomGasToken is SystemConfig_Init { // don't use multicall3's address vm.assume(_token != MULTICALL3_ADDRESS); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); + // Using vm.assume() would cause too many test rejections. + string memory name = _name; + if (bytes(_name).length > 32) { + name = _name[:32]; + } + + // Using vm.assume() would cause too many test rejections. + string memory symbol = _symbol; + if (bytes(_symbol).length > 32) { + symbol = _symbol[:32]; + } vm.mockCall(_token, abi.encodeCall(token.decimals, ()), abi.encode(18)); - vm.mockCall(_token, abi.encodeCall(token.name, ()), abi.encode(_name)); - vm.mockCall(_token, abi.encodeCall(token.symbol, ()), abi.encode(_symbol)); + vm.mockCall(_token, abi.encodeCall(token.name, ()), abi.encode(name)); + vm.mockCall(_token, abi.encodeCall(token.symbol, ()), abi.encode(symbol)); cleanStorageAndInit(_token); @@ -403,8 +412,8 @@ contract SystemConfig_Init_CustomGasToken is SystemConfig_Init { assertEq(systemConfig.gasPayingTokenSymbol(), "ETH"); } else { assertEq(addr, _token); - assertEq(systemConfig.gasPayingTokenName(), _name); - assertEq(systemConfig.gasPayingTokenSymbol(), _symbol); + assertEq(systemConfig.gasPayingTokenName(), name); + assertEq(systemConfig.gasPayingTokenSymbol(), symbol); } } @@ -555,7 +564,7 @@ contract SystemConfig_Setters_TestFail is SystemConfig_Init { /// @dev Tests that `setEIP1559Params` reverts if the elasticity is zero. 
function test_setEIP1559Params_zeroElasticity_reverts(uint32 _denominator) external { - vm.assume(_denominator >= 1); + _denominator = uint32(bound(_denominator, 1, type(uint32).max)); vm.prank(systemConfig.owner()); vm.expectRevert("SystemConfig: elasticity must be >= 1"); systemConfig.setEIP1559Params({ _denominator: _denominator, _elasticity: 0 }); diff --git a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol index 5132dc40e04..e66f2e4d1ec 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol @@ -51,12 +51,21 @@ contract SystemConfigInterop_Test is CommonTest { vm.assume(_token != address(0)); vm.assume(_token != Constants.ETHER); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); + // Using vm.assume() would cause too many test rejections. + string memory name = _name; + if (bytes(_name).length > 32) { + name = _name[:32]; + } + + // Using vm.assume() would cause too many test rejections. 
+ string memory symbol = _symbol; + if (bytes(_symbol).length > 32) { + symbol = _symbol[:32]; + } vm.mockCall(_token, abi.encodeCall(ERC20.decimals, ()), abi.encode(18)); - vm.mockCall(_token, abi.encodeCall(ERC20.name, ()), abi.encode(_name)); - vm.mockCall(_token, abi.encodeCall(ERC20.symbol, ()), abi.encode(_symbol)); + vm.mockCall(_token, abi.encodeCall(ERC20.name, ()), abi.encode(name)); + vm.mockCall(_token, abi.encodeCall(ERC20.symbol, ()), abi.encode(symbol)); vm.expectCall( address(optimismPortal), @@ -67,8 +76,8 @@ contract SystemConfigInterop_Test is CommonTest { StaticConfig.encodeSetGasPayingToken({ _token: _token, _decimals: 18, - _name: GasPayingToken.sanitize(_name), - _symbol: GasPayingToken.sanitize(_symbol) + _name: GasPayingToken.sanitize(name), + _symbol: GasPayingToken.sanitize(symbol) }) ) ) diff --git a/packages/contracts-bedrock/test/L2/L1Block.t.sol b/packages/contracts-bedrock/test/L2/L1Block.t.sol index 762553a2ff2..d3e3b7d02e4 100644 --- a/packages/contracts-bedrock/test/L2/L1Block.t.sol +++ b/packages/contracts-bedrock/test/L2/L1Block.t.sol @@ -169,31 +169,40 @@ contract L1BlockCustomGasToken_Test is L1BlockTest { function testFuzz_setGasPayingToken_succeeds( address _token, uint8 _decimals, - string memory _name, - string memory _symbol + string calldata _name, + string calldata _symbol ) external { vm.assume(_token != address(0)); vm.assume(_token != Constants.ETHER); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); - bytes32 name = bytes32(abi.encodePacked(_name)); - bytes32 symbol = bytes32(abi.encodePacked(_symbol)); + // Using vm.assume() would cause too many test rejections. + string memory name = _name; + if (bytes(_name).length > 32) { + name = _name[:32]; + } + bytes32 b32name = bytes32(abi.encodePacked(name)); + + // Using vm.assume() would cause too many test rejections. 
+ string memory symbol = _symbol; + if (bytes(_symbol).length > 32) { + symbol = _symbol[:32]; + } + bytes32 b32symbol = bytes32(abi.encodePacked(symbol)); vm.expectEmit(address(l1Block)); - emit GasPayingTokenSet({ token: _token, decimals: _decimals, name: name, symbol: symbol }); + emit GasPayingTokenSet({ token: _token, decimals: _decimals, name: b32name, symbol: b32symbol }); vm.prank(depositor); - l1Block.setGasPayingToken({ _token: _token, _decimals: _decimals, _name: name, _symbol: symbol }); + l1Block.setGasPayingToken({ _token: _token, _decimals: _decimals, _name: b32name, _symbol: b32symbol }); (address token, uint8 decimals) = l1Block.gasPayingToken(); assertEq(token, _token); assertEq(decimals, _decimals); - assertEq(_name, l1Block.gasPayingTokenName()); - assertEq(_symbol, l1Block.gasPayingTokenSymbol()); + assertEq(name, l1Block.gasPayingTokenName()); + assertEq(symbol, l1Block.gasPayingTokenSymbol()); assertTrue(l1Block.isCustomGasToken()); } diff --git a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol index ef1f8876efb..d05e952e44d 100644 --- a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol +++ b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol @@ -53,7 +53,7 @@ contract SafeCall_Test is Test { /// @dev Tests that the `send` function with value succeeds. 
function testFuzz_sendWithGas_succeeds(address _from, address _to, uint64 _gas, uint256 _value) external { - vm.assume(_gas != 0); + _gas = uint64(bound(_gas, 1, type(uint64).max)); sendTest({ _from: _from, _to: _to, _gas: _gas, _value: _value }); } diff --git a/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol b/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol index 8dd1ba970ab..84abdbc48a8 100644 --- a/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol +++ b/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol @@ -257,7 +257,13 @@ contract DeputyGuardianModule_setRespectedGameType_Test is DeputyGuardianModule_ contract DeputyGuardianModule_setRespectedGameType_TestFail is DeputyGuardianModule_TestInit { /// @dev Tests that `setRespectedGameType` when called by a non deputy guardian. function testFuzz_setRespectedGameType_notDeputyGuardian_reverts(GameType _gameType) external { - vm.assume(GameType.unwrap(optimismPortal2.respectedGameType()) != GameType.unwrap(_gameType)); + // Change the game type if it's the same to avoid test rejections. + if (GameType.unwrap(optimismPortal2.respectedGameType()) == GameType.unwrap(_gameType)) { + unchecked { + _gameType = GameType.wrap(GameType.unwrap(_gameType) + 1); + } + } + vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector)); deputyGuardianModule.setRespectedGameType(optimismPortal2, _gameType); assertNotEq(GameType.unwrap(optimismPortal2.respectedGameType()), GameType.unwrap(_gameType)); From ec05937c623150d402689fa060e5814190ff7db6 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Wed, 4 Dec 2024 13:17:08 -0500 Subject: [PATCH 050/111] feat: move interfaces to a new top-level folder (#13114) Moves all contract interfaces to a new top-level folder. Keeps things much cleaner and more sane for a future auto-generated interface system. 
--- .circleci/config.yml | 3 + .semgrep/rules/sol-rules.yaml | 18 +- packages/contracts-bedrock/foundry.toml | 3 +- .../L1}/IDataAvailabilityChallenge.sol | 0 .../L1}/IL1CrossDomainMessenger.sol | 8 +- .../L1}/IL1CrossDomainMessengerV160.sol | 6 +- .../L1}/IL1ERC721Bridge.sol | 6 +- .../L1}/IL1StandardBridge.sol | 8 +- .../L1}/IL1StandardBridgeV160.sol | 8 +- .../L1}/IL2OutputOracle.sol | 0 .../L1}/IOptimismPortal.sol | 6 +- .../L1}/IOptimismPortal2.sol | 8 +- .../L1}/IOptimismPortalInterop.sol | 10 +- .../L1}/IProtocolVersions.sol | 0 .../L1}/IResourceMetering.sol | 0 .../L1}/ISuperchainConfig.sol | 0 .../L1}/ISystemConfig.sol | 2 +- .../L1}/ISystemConfigInterop.sol | 4 +- .../L2}/IBaseFeeVault.sol | 0 .../L2}/ICrossL2Inbox.sol | 0 .../L2}/IDependencySet.sol | 0 .../interfaces => interfaces/L2}/IERC7802.sol | 0 .../L2}/IETHLiquidity.sol | 0 .../L2}/IFeeVault.sol | 0 .../L2}/IGasPriceOracle.sol | 0 .../interfaces => interfaces/L2}/IL1Block.sol | 0 .../L2}/IL1BlockInterop.sol | 0 .../L2}/IL1FeeVault.sol | 0 .../L2}/IL2CrossDomainMessenger.sol | 2 +- .../L2}/IL2ERC721Bridge.sol | 2 +- .../L2}/IL2StandardBridge.sol | 2 +- .../L2}/IL2StandardBridgeInterop.sol | 4 +- .../L2}/IL2ToL1MessagePasser.sol | 0 .../L2}/IL2ToL2CrossDomainMessenger.sol | 0 .../L2}/IMintableAndBurnableERC20.sol | 0 .../L2}/IOptimismERC20Factory.sol | 0 .../L2}/IOptimismSuperchainERC20.sol | 2 +- .../L2}/IOptimismSuperchainERC20Beacon.sol | 2 +- .../L2}/IOptimismSuperchainERC20Factory.sol | 4 +- .../L2}/ISequencerFeeVault.sol | 0 .../L2}/ISuperchainERC20.sol | 6 +- .../L2}/ISuperchainTokenBridge.sol | 2 +- .../L2}/ISuperchainWETH.sol | 6 +- .../cannon}/IMIPS.sol | 4 +- .../cannon}/IMIPS2.sol | 4 +- .../cannon}/IPreimageOracle.sol | 0 .../dispute}/IAnchorStateRegistry.sol | 6 +- .../dispute}/IBigStepper.sol | 2 +- .../dispute}/IDelayedWETH.sol | 2 +- .../dispute}/IDisputeGame.sol | 2 +- .../dispute}/IDisputeGameFactory.sol | 2 +- .../dispute}/IFaultDisputeGame.sol | 8 +- 
.../dispute}/IInitializable.sol | 0 .../dispute}/IPermissionedDisputeGame.sol | 10 +- .../governance}/IGovernanceToken.sol | 0 .../governance}/IMintManager.sol | 2 +- .../legacy}/IAddressManager.sol | 2 +- .../legacy}/IDeployerWhitelist.sol | 0 .../legacy}/IL1BlockNumber.sol | 2 +- .../legacy}/IL1ChugSplashProxy.sol | 0 .../legacy}/ILegacyMessagePasser.sol | 2 +- .../legacy}/IResolvedDelegateProxy.sol | 2 +- .../safe}/IDeputyGuardianModule.sol | 12 +- .../safe}/ILivenessGuard.sol | 2 +- .../safe}/ILivenessModule.sol | 2 +- .../universal}/ICrossDomainMessenger.sol | 0 .../universal}/IEIP712.sol | 0 .../universal}/IERC721Bridge.sol | 2 +- .../universal}/ILegacyMintableERC20.sol | 0 .../universal}/IOptimismMintableERC20.sol | 0 .../IOptimismMintableERC20Factory.sol | 0 .../universal}/IOptimismMintableERC721.sol | 0 .../IOptimismMintableERC721Factory.sol | 0 .../universal}/IOwnable.sol | 0 .../universal}/IProxy.sol | 0 .../universal}/IProxyAdmin.sol | 2 +- .../universal}/ISemver.sol | 0 .../universal}/IStandardBridge.sol | 2 +- .../universal}/IStaticERC1967Proxy.sol | 0 .../universal}/IWETH98.sol | 0 .../vendor}/IERC20Solady.sol | 0 .../vendor}/IGelatoTreasury.sol | 0 .../vendor/asterisc}/IRISCV.sol | 4 +- .../contracts-bedrock/scripts/L2Genesis.s.sol | 26 +-- .../scripts/checks/check-semver-diff.sh | 21 ++ .../scripts/deploy/ChainAssertions.sol | 32 +-- .../scripts/deploy/Deploy.s.sol | 34 +-- .../scripts/deploy/DeployAltDA.s.sol | 6 +- .../scripts/deploy/DeployAsterisc.s.sol | 4 +- .../scripts/deploy/DeployDelayedWETH.s.sol | 6 +- .../scripts/deploy/DeployDisputeGame.s.sol | 10 +- .../deploy/DeployImplementations.s.sol | 30 +-- .../scripts/deploy/DeployMIPS.s.sol | 4 +- .../scripts/deploy/DeployOPCM.s.sol | 4 +- .../scripts/deploy/DeployOPChain.s.sol | 34 +-- .../scripts/deploy/DeployOwnership.s.sol | 2 +- .../scripts/deploy/DeploySuperchain.s.sol | 8 +- .../deploy/ReadImplementationAddresses.s.sol | 8 +- .../scripts/libraries/Constants.sol | 2 +- 
.../scripts/libraries/DeployUtils.sol | 8 +- .../snapshots/semver-lock.json | 220 +++++++++--------- .../src/L1/DataAvailabilityChallenge.sol | 6 +- .../src/L1/L1CrossDomainMessenger.sol | 12 +- .../src/L1/L1ERC721Bridge.sol | 12 +- .../src/L1/L1StandardBridge.sol | 12 +- .../src/L1/L2OutputOracle.sol | 6 +- .../src/L1/OPContractsManager.sol | 46 ++-- .../src/L1/OPContractsManagerInterop.sol | 10 +- .../src/L1/OptimismPortal.sol | 16 +- .../src/L1/OptimismPortal2.sol | 18 +- .../src/L1/OptimismPortalInterop.sol | 6 +- .../src/L1/ProtocolVersions.sol | 6 +- .../src/L1/SuperchainConfig.sol | 6 +- .../contracts-bedrock/src/L1/SystemConfig.sol | 10 +- .../src/L1/SystemConfigInterop.sol | 10 +- .../contracts-bedrock/src/L2/BaseFeeVault.sol | 6 +- .../src/L2/CrossDomainOwnable2.sol | 2 +- .../src/L2/CrossDomainOwnable3.sol | 2 +- .../contracts-bedrock/src/L2/CrossL2Inbox.sol | 10 +- .../contracts-bedrock/src/L2/ETHLiquidity.sol | 8 +- .../contracts-bedrock/src/L2/FeeVault.sol | 2 +- .../src/L2/GasPriceOracle.sol | 8 +- packages/contracts-bedrock/src/L2/L1Block.sol | 6 +- .../src/L2/L1BlockInterop.sol | 4 +- .../contracts-bedrock/src/L2/L1FeeVault.sol | 6 +- .../src/L2/L2CrossDomainMessenger.sol | 10 +- .../src/L2/L2ERC721Bridge.sol | 12 +- .../src/L2/L2StandardBridge.sol | 10 +- .../src/L2/L2StandardBridgeInterop.sol | 8 +- .../src/L2/L2ToL1MessagePasser.sol | 6 +- .../src/L2/L2ToL2CrossDomainMessenger.sol | 10 +- .../src/L2/OptimismSuperchainERC20.sol | 6 +- .../src/L2/OptimismSuperchainERC20Beacon.sol | 6 +- .../src/L2/OptimismSuperchainERC20Factory.sol | 6 +- .../src/L2/SequencerFeeVault.sol | 6 +- .../src/L2/SuperchainERC20.sol | 8 +- .../src/L2/SuperchainTokenBridge.sol | 10 +- .../src/L2/SuperchainWETH.sol | 14 +- packages/contracts-bedrock/src/L2/WETH.sol | 8 +- .../contracts-bedrock/src/cannon/MIPS.sol | 8 +- .../contracts-bedrock/src/cannon/MIPS2.sol | 8 +- .../contracts-bedrock/src/cannon/MIPS64.sol | 8 +- .../src/cannon/PreimageOracle.sol | 6 +- 
.../src/cannon/libraries/MIPS64Syscalls.sol | 2 +- .../src/cannon/libraries/MIPSSyscalls.sol | 2 +- .../src/dispute/AnchorStateRegistry.sol | 14 +- .../src/dispute/DelayedWETH.sol | 8 +- .../src/dispute/DisputeGameFactory.sol | 8 +- .../src/dispute/FaultDisputeGame.sol | 12 +- .../src/governance/MintManager.sol | 2 +- .../src/legacy/DeployerWhitelist.sol | 6 +- .../src/legacy/L1BlockNumber.sol | 8 +- .../src/legacy/L1ChugSplashProxy.sol | 2 +- .../src/legacy/LegacyMessagePasser.sol | 6 +- .../src/libraries/Constants.sol | 2 +- .../drippie/dripchecks/CheckGelatoLow.sol | 2 +- .../src/safe/DeputyGuardianModule.sol | 16 +- .../src/safe/LivenessGuard.sol | 6 +- .../src/safe/LivenessModule.sol | 6 +- .../src/universal/ERC721Bridge.sol | 2 +- .../src/universal/OptimismMintableERC20.sol | 10 +- .../OptimismMintableERC20Factory.sol | 8 +- .../src/universal/OptimismMintableERC721.sol | 8 +- .../OptimismMintableERC721Factory.sol | 6 +- .../src/universal/ProxyAdmin.sol | 10 +- .../src/universal/StandardBridge.sol | 6 +- .../src/universal/StorageSetter.sol | 6 +- .../src/vendor/asterisc/RISCV.sol | 4 +- .../contracts-bedrock/src/vendor/eas/EAS.sol | 6 +- .../src/vendor/eas/SchemaRegistry.sol | 6 +- .../test/L1/DataAvailabilityChallenge.t.sol | 2 +- .../test/L1/L1CrossDomainMessenger.t.sol | 8 +- .../test/L1/L1ERC721Bridge.t.sol | 8 +- .../test/L1/L1StandardBridge.t.sol | 8 +- .../test/L1/L2OutputOracle.t.sol | 2 +- .../test/L1/OPContractsManager.t.sol | 4 +- .../test/L1/OptimismPortal.t.sol | 10 +- .../test/L1/OptimismPortal2.t.sol | 12 +- .../test/L1/OptimismPortalInterop.t.sol | 4 +- .../test/L1/ProtocolVersions.t.sol | 4 +- .../test/L1/ResourceMetering.t.sol | 2 +- .../test/L1/SuperchainConfig.t.sol | 4 +- .../test/L1/SystemConfig.t.sol | 6 +- .../test/L1/SystemConfigInterop.t.sol | 8 +- .../test/L2/CrossL2Inbox.t.sol | 2 +- .../test/L2/L1BlockInterop.t.sol | 2 +- .../test/L2/L2CrossDomainMessenger.t.sol | 4 +- .../test/L2/L2ERC721Bridge.t.sol | 4 +- 
.../test/L2/L2StandardBridge.t.sol | 8 +- .../test/L2/L2StandardBridgeInterop.t.sol | 8 +- .../test/L2/L2ToL2CrossDomainMessenger.t.sol | 2 +- .../test/L2/OptimismSuperchainERC20.t.sol | 4 +- .../L2/OptimismSuperchainERC20Factory.t.sol | 2 +- .../test/L2/Preinstalls.t.sol | 2 +- .../test/L2/SequencerFeeVault.t.sol | 2 +- .../test/L2/SuperchainERC20.t.sol | 4 +- .../test/L2/SuperchainTokenBridge.t.sol | 10 +- .../test/L2/SuperchainWETH.t.sol | 8 +- .../test/actors/FaultDisputeActors.sol | 2 +- .../contracts-bedrock/test/cannon/MIPS.t.sol | 4 +- .../contracts-bedrock/test/cannon/MIPS2.t.sol | 4 +- .../test/cannon/PreimageOracle.t.sol | 2 +- .../test/dispute/DisputeGameFactory.t.sol | 4 +- .../test/dispute/FaultDisputeGame.t.sol | 10 +- .../dispute/PermissionedDisputeGame.t.sol | 8 +- .../test/dispute/WETH98.t.sol | 2 +- .../test/governance/MintManager.t.sol | 4 +- .../invariants/CrossDomainMessenger.t.sol | 4 +- .../test/invariants/ETHLiquidity.t.sol | 2 +- .../test/invariants/FaultDisputeGame.t.sol | 2 +- .../test/invariants/L2OutputOracle.t.sol | 2 +- .../test/invariants/OptimismPortal.t.sol | 4 +- .../test/invariants/OptimismPortal2.t.sol | 6 +- .../test/invariants/ResourceMetering.t.sol | 2 +- .../test/invariants/SuperchainWETH.t.sol | 2 +- .../test/invariants/SystemConfig.t.sol | 4 +- .../contracts-bedrock/test/kontrol/README.md | 4 +- .../proofs/L1CrossDomainMessenger.k.sol | 4 +- .../test/kontrol/proofs/L1ERC721Bridge.k.sol | 6 +- .../kontrol/proofs/L1StandardBridge.k.sol | 6 +- .../test/kontrol/proofs/OptimismPortal.k.sol | 4 +- .../test/kontrol/proofs/OptimismPortal2.k.sol | 4 +- .../test/legacy/DeployerWhitelist.t.sol | 2 +- .../test/legacy/L1BlockNumber.t.sol | 4 +- .../test/legacy/L1ChugSplashProxy.t.sol | 2 +- .../test/legacy/LegacyMintableERC20.t.sol | 2 +- .../test/legacy/ResolvedDelegateProxy.t.sol | 4 +- .../test/mocks/AlphabetVM.sol | 2 +- .../test/opcm/DeployAltDA.t.sol | 6 +- .../test/opcm/DeployImplementations.t.sol | 28 +-- 
.../test/opcm/DeployOPCM.t.sol | 4 +- .../test/opcm/DeployOPChain.t.sol | 20 +- .../test/opcm/DeploySuperchain.t.sol | 2 +- .../test/safe/DeputyGuardianModule.t.sol | 12 +- .../contracts-bedrock/test/setup/Events.sol | 2 +- .../contracts-bedrock/test/setup/Setup.sol | 64 ++--- .../test/universal/BenchmarkTest.t.sol | 4 +- .../test/universal/CrossDomainMessenger.t.sol | 2 +- .../universal/OptimismMintableERC20.t.sol | 4 +- .../OptimismMintableERC20Factory.t.sol | 4 +- .../test/universal/Proxy.t.sol | 2 +- .../test/universal/ProxyAdmin.t.sol | 10 +- .../test/universal/Specs.t.sol | 22 +- .../test/vendor/Initializable.t.sol | 10 +- .../test/vendor/InitializableOZv5.t.sol | 2 +- 245 files changed, 845 insertions(+), 820 deletions(-) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IDataAvailabilityChallenge.sol (100%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IL1CrossDomainMessenger.sol (68%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IL1CrossDomainMessengerV160.sol (76%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IL1ERC721Bridge.sol (82%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IL1StandardBridge.sol (87%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IL1StandardBridgeV160.sol (88%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IL2OutputOracle.sol (100%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IOptimismPortal.sol (93%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IOptimismPortal2.sol (93%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IOptimismPortalInterop.sol (92%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IProtocolVersions.sol (100%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/IResourceMetering.sol (100%) rename 
packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/ISuperchainConfig.sol (100%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/ISystemConfig.sol (98%) rename packages/contracts-bedrock/{src/L1/interfaces => interfaces/L1}/ISystemConfigInterop.sol (96%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IBaseFeeVault.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/ICrossL2Inbox.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IDependencySet.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IERC7802.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IETHLiquidity.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IFeeVault.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IGasPriceOracle.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL1Block.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL1BlockInterop.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL1FeeVault.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL2CrossDomainMessenger.sol (83%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL2ERC721Bridge.sol (86%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL2StandardBridge.sol (93%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL2StandardBridgeInterop.sol (94%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL2ToL1MessagePasser.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IL2ToL2CrossDomainMessenger.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IMintableAndBurnableERC20.sol (100%) rename 
packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IOptimismERC20Factory.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IOptimismSuperchainERC20.sol (92%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IOptimismSuperchainERC20Beacon.sol (83%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/IOptimismSuperchainERC20Factory.sol (80%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/ISequencerFeeVault.sol (100%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/ISuperchainERC20.sol (70%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/ISuperchainTokenBridge.sol (93%) rename packages/contracts-bedrock/{src/L2/interfaces => interfaces/L2}/ISuperchainWETH.sol (81%) rename packages/contracts-bedrock/{src/cannon/interfaces => interfaces/cannon}/IMIPS.sol (84%) rename packages/contracts-bedrock/{src/cannon/interfaces => interfaces/cannon}/IMIPS2.sol (90%) rename packages/contracts-bedrock/{src/cannon/interfaces => interfaces/cannon}/IPreimageOracle.sol (100%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IAnchorStateRegistry.sol (81%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IBigStepper.sol (97%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IDelayedWETH.sol (96%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IDisputeGame.sol (92%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IDisputeGameFactory.sol (97%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IFaultDisputeGame.sol (94%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IInitializable.sol (100%) rename packages/contracts-bedrock/{src/dispute/interfaces => interfaces/dispute}/IPermissionedDisputeGame.sol (93%) rename 
packages/contracts-bedrock/{src/governance/interfaces => interfaces/governance}/IGovernanceToken.sol (100%) rename packages/contracts-bedrock/{src/governance/interfaces => interfaces/governance}/IMintManager.sol (91%) rename packages/contracts-bedrock/{src/legacy/interfaces => interfaces/legacy}/IAddressManager.sol (87%) rename packages/contracts-bedrock/{src/legacy/interfaces => interfaces/legacy}/IDeployerWhitelist.sol (100%) rename packages/contracts-bedrock/{src/legacy/interfaces => interfaces/legacy}/IL1BlockNumber.sol (84%) rename packages/contracts-bedrock/{src/legacy/interfaces => interfaces/legacy}/IL1ChugSplashProxy.sol (100%) rename packages/contracts-bedrock/{src/legacy/interfaces => interfaces/legacy}/ILegacyMessagePasser.sol (85%) rename packages/contracts-bedrock/{src/legacy/interfaces => interfaces/legacy}/IResolvedDelegateProxy.sol (82%) rename packages/contracts-bedrock/{src/safe/interfaces => interfaces/safe}/IDeputyGuardianModule.sol (73%) rename packages/contracts-bedrock/{src/safe/interfaces => interfaces/safe}/ILivenessGuard.sol (93%) rename packages/contracts-bedrock/{src/safe/interfaces => interfaces/safe}/ILivenessModule.sol (95%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/ICrossDomainMessenger.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IEIP712.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IERC721Bridge.sol (93%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/ILegacyMintableERC20.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IOptimismMintableERC20.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IOptimismMintableERC20Factory.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IOptimismMintableERC721.sol (100%) rename 
packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IOptimismMintableERC721Factory.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IOwnable.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IProxy.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IProxyAdmin.sol (95%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/ISemver.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IStandardBridge.sol (95%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IStaticERC1967Proxy.sol (100%) rename packages/contracts-bedrock/{src/universal/interfaces => interfaces/universal}/IWETH98.sol (100%) rename packages/contracts-bedrock/{src/vendor/interfaces => interfaces/vendor}/IERC20Solady.sol (100%) rename packages/contracts-bedrock/{src/vendor/interfaces => interfaces/vendor}/IGelatoTreasury.sol (100%) rename packages/contracts-bedrock/{src/vendor/asterisc/interfaces => interfaces/vendor/asterisc}/IRISCV.sol (73%) diff --git a/.circleci/config.yml b/.circleci/config.yml index bfe83b91f04..719cad05766 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -265,6 +265,9 @@ jobs: # Replace import paths sed -i -e 's/@optimism\///' $REMOTE_ASTERISC_PATH + # Replace legacy interface paths + sed -i -e 's/src\/cannon\/interfaces\//interfaces\/cannon\//g' $REMOTE_ASTERISC_PATH + sed -i -e 's/src\/dispute\/interfaces\//interfaces\/dispute\//g' $REMOTE_ASTERISC_PATH # Replace contract name sed -i -e 's/contract RISCV/contract RISCV_Remote/' $REMOTE_ASTERISC_PATH diff --git a/.semgrep/rules/sol-rules.yaml b/.semgrep/rules/sol-rules.yaml index 57dc88a3e51..177fabb6fa8 100644 --- a/.semgrep/rules/sol-rules.yaml +++ b/.semgrep/rules/sol-rules.yaml @@ -84,18 +84,18 @@ rules: pattern-regex: 
function\s+\w+\s*\(\s*([^)]*?\b\w+\s+(?!_)(?!memory\b)(?!calldata\b)(?!storage\b)(?!payable\b)\w+\s*(?=,|\))) paths: exclude: - - packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721.sol - - packages/contracts-bedrock/src/universal/interfaces/IWETH98.sol - - packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol + - packages/contracts-bedrock/interfaces/universal/IOptimismMintableERC721.sol + - packages/contracts-bedrock/interfaces/universal/IWETH98.sol + - packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol - op-chain-ops/script/testdata/scripts/ScriptExample.s.sol - packages/contracts-bedrock/test - packages/contracts-bedrock/scripts/libraries/Solarray.sol - packages/contracts-bedrock/scripts/interfaces/IGnosisSafe.sol - - packages/contracts-bedrock/src/universal/interfaces/IWETH.sol + - packages/contracts-bedrock/interfaces/universal/IWETH.sol - packages/contracts-bedrock/src/universal/WETH98.sol - - packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol + - packages/contracts-bedrock/interfaces/L2/ISuperchainWETH.sol - packages/contracts-bedrock/src/L2/SuperchainWETH.sol - - packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol + - packages/contracts-bedrock/interfaces/governance/IGovernanceToken.sol - packages/contracts-bedrock/src/governance/GovernanceToken.sol - id: sol-style-return-arg-fmt @@ -105,13 +105,13 @@ rules: pattern-regex: returns\s*(\w+\s*)?\(\s*([^)]*?\b\w+\s+(?!memory\b)(?!calldata\b)(?!storage\b)(?!payable\b)\w+(? 
mapping(address => WithdrawalRequest)) public withdrawals; diff --git a/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol b/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol index 4a512f0ca2a..583f432f3e0 100644 --- a/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol +++ b/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol @@ -10,8 +10,8 @@ import { GameType, Claim, GameId, Timestamp, Hash, LibGameId } from "src/dispute import { NoImplementation, IncorrectBondAmount, GameAlreadyExists } from "src/dispute/lib/Errors.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; /// @custom:proxied true /// @title DisputeGameFactory @@ -49,8 +49,8 @@ contract DisputeGameFactory is OwnableUpgradeable, ISemver { } /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.3 - string public constant version = "1.0.1-beta.3"; + /// @custom:semver 1.0.1-beta.4 + string public constant version = "1.0.1-beta.4"; /// @notice `gameImpls` is a mapping that maps `GameType`s to their respective /// `IDisputeGame` implementations. 
diff --git a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol index 6aad60e4283..02a2cee3ca8 100644 --- a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol @@ -55,10 +55,10 @@ import { } from "src/dispute/lib/Errors.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IBigStepper, IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; /// @title FaultDisputeGame /// @notice An implementation of the `IFaultDisputeGame` interface. @@ -161,8 +161,8 @@ contract FaultDisputeGame is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. 
- /// @custom:semver 1.3.1-beta.8 - string public constant version = "1.3.1-beta.8"; + /// @custom:semver 1.3.1-beta.9 + string public constant version = "1.3.1-beta.9"; /// @notice The starting timestamp of the game Timestamp public createdAt; diff --git a/packages/contracts-bedrock/src/governance/MintManager.sol b/packages/contracts-bedrock/src/governance/MintManager.sol index 43ef618ba05..0f58e391c51 100644 --- a/packages/contracts-bedrock/src/governance/MintManager.sol +++ b/packages/contracts-bedrock/src/governance/MintManager.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; // Interfaces -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; /// @title MintManager /// @notice Set as `owner` of the governance token and responsible for the token inflation diff --git a/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol b/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol index e247f65ffb9..05a7798aeca 100644 --- a/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol +++ b/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:legacy true /// @custom:proxied true @@ -42,8 +42,8 @@ contract DeployerWhitelist is ISemver { } /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.2 - string public constant version = "1.1.1-beta.2"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Adds or removes an address from the deployment whitelist. /// @param _deployer Address to update permissions for. 
diff --git a/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol b/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol index 19a595a3fad..6e0e33fa948 100644 --- a/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol +++ b/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol @@ -5,8 +5,8 @@ pragma solidity 0.8.15; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// @custom:legacy true /// @custom:proxied true @@ -18,8 +18,8 @@ import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; /// contract instead. contract L1BlockNumber is ISemver { /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.2 - string public constant version = "1.1.1-beta.2"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Returns the L1 block number. 
receive() external payable { diff --git a/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol b/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol index b6a5363e2fb..a2a62707c6e 100644 --- a/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol +++ b/packages/contracts-bedrock/src/legacy/L1ChugSplashProxy.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { IL1ChugSplashDeployer } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IL1ChugSplashDeployer } from "interfaces/legacy/IL1ChugSplashProxy.sol"; /// @custom:legacy true /// @title L1ChugSplashProxy diff --git a/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol b/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol index d215f29bdc7..bdc81ad2839 100644 --- a/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol +++ b/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:legacy true /// @custom:proxied true @@ -15,8 +15,8 @@ contract LegacyMessagePasser is ISemver { mapping(bytes32 => bool) public sentMessages; /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.2 - string public constant version = "1.1.1-beta.2"; + /// @custom:semver 1.1.1-beta.3 + string public constant version = "1.1.1-beta.3"; /// @notice Passes a message to L1. /// @param _message Message to pass to L1. 
diff --git a/packages/contracts-bedrock/src/libraries/Constants.sol b/packages/contracts-bedrock/src/libraries/Constants.sol index 6a99410b5bc..6dcf3611956 100644 --- a/packages/contracts-bedrock/src/libraries/Constants.sol +++ b/packages/contracts-bedrock/src/libraries/Constants.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.0; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; /// @title Constants /// @notice Constants is a library for storing constants. Simple! Don't put everything in here, just diff --git a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol index 7324e451c7d..a3c79b3fc36 100644 --- a/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol +++ b/packages/contracts-bedrock/src/periphery/drippie/dripchecks/CheckGelatoLow.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Interfaces -import { IGelatoTreasury } from "src/vendor/interfaces/IGelatoTreasury.sol"; +import { IGelatoTreasury } from "interfaces/vendor/IGelatoTreasury.sol"; import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title CheckGelatoLow diff --git a/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol b/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol index 673cf2a1efb..a742c452ef0 100644 --- a/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol +++ b/packages/contracts-bedrock/src/safe/DeputyGuardianModule.sol @@ -10,12 +10,12 @@ import { Unauthorized } from "src/libraries/PortalErrors.sol"; import { GameType, Timestamp } from "src/dispute/lib/Types.sol"; // Interfaces -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { ISuperchainConfig } from 
"src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title DeputyGuardianModule /// @notice This module is intended to be enabled on the Security Council Safe, which will own the Guardian role in the @@ -48,8 +48,8 @@ contract DeputyGuardianModule is ISemver { address internal immutable DEPUTY_GUARDIAN; /// @notice Semantic version. - /// @custom:semver 2.0.1-beta.4 - string public constant version = "2.0.1-beta.4"; + /// @custom:semver 2.0.1-beta.5 + string public constant version = "2.0.1-beta.5"; // Constructor to initialize the Safe and baseModule instances constructor(Safe _safe, ISuperchainConfig _superchainConfig, address _deputyGuardian) { diff --git a/packages/contracts-bedrock/src/safe/LivenessGuard.sol b/packages/contracts-bedrock/src/safe/LivenessGuard.sol index f5966a9e146..46c0072f7ba 100644 --- a/packages/contracts-bedrock/src/safe/LivenessGuard.sol +++ b/packages/contracts-bedrock/src/safe/LivenessGuard.sol @@ -11,7 +11,7 @@ import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableS import { SafeSigners } from "src/safe/SafeSigners.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title LivenessGuard /// @notice This Guard contract is used to track the liveness of Safe owners. 
@@ -30,8 +30,8 @@ contract LivenessGuard is ISemver, BaseGuard { event OwnerRecorded(address owner); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.3 - string public constant version = "1.0.1-beta.3"; + /// @custom:semver 1.0.1-beta.4 + string public constant version = "1.0.1-beta.4"; /// @notice The safe account for which this contract will be the guard. Safe internal immutable SAFE; diff --git a/packages/contracts-bedrock/src/safe/LivenessModule.sol b/packages/contracts-bedrock/src/safe/LivenessModule.sol index 50ab03d450c..a033507176c 100644 --- a/packages/contracts-bedrock/src/safe/LivenessModule.sol +++ b/packages/contracts-bedrock/src/safe/LivenessModule.sol @@ -10,7 +10,7 @@ import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; import { LivenessGuard } from "src/safe/LivenessGuard.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title LivenessModule /// @notice This module is intended to be used in conjunction with the LivenessGuard. In the event @@ -58,8 +58,8 @@ contract LivenessModule is ISemver { uint256 internal constant GUARD_STORAGE_SLOT = 0x4a204f620c8c5ccdca3fd54d003badd85ba500436a431f0cbda4f558c93c34c8; /// @notice Semantic version. 
- /// @custom:semver 1.2.1-beta.2 - string public constant version = "1.2.1-beta.2"; + /// @custom:semver 1.2.1-beta.3 + string public constant version = "1.2.1-beta.3"; // Constructor to initialize the Safe and baseModule instances constructor( diff --git a/packages/contracts-bedrock/src/universal/ERC721Bridge.sol b/packages/contracts-bedrock/src/universal/ERC721Bridge.sol index 3c6d7fe816f..c989da56c7b 100644 --- a/packages/contracts-bedrock/src/universal/ERC721Bridge.sol +++ b/packages/contracts-bedrock/src/universal/ERC721Bridge.sol @@ -8,7 +8,7 @@ import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable import { Address } from "@openzeppelin/contracts/utils/Address.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; /// @title ERC721Bridge /// @notice ERC721Bridge is a base contract for the L1 and L2 ERC721 bridges. 
diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol index 0e25cd33938..62aaf45b2fc 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol @@ -10,9 +10,9 @@ import { Preinstalls } from "src/libraries/Preinstalls.sol"; // Interfaces import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; -import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; /// @title OptimismMintableERC20 /// @notice OptimismMintableERC20 is a standard extension of the base ERC20 token contract designed @@ -47,8 +47,8 @@ contract OptimismMintableERC20 is ERC20Permit, ISemver { } /// @notice Semantic version. - /// @custom:semver 1.4.0-beta.3 - string public constant version = "1.4.0-beta.3"; + /// @custom:semver 1.4.0-beta.4 + string public constant version = "1.4.0-beta.4"; /// @notice Getter function for the permit2 address. It deterministically deployed /// so it will always be at the same address. 
It is also included as a preinstall, diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol index 4f4b01602cc..62d2de8e7f3 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol @@ -6,8 +6,8 @@ import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IOptimismERC20Factory } from "src/L2/interfaces/IOptimismERC20Factory.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; /// @custom:proxied true /// @custom:predeployed 0x4200000000000000000000000000000000000012 @@ -51,8 +51,8 @@ contract OptimismMintableERC20Factory is ISemver, Initializable, IOptimismERC20F /// the OptimismMintableERC20 token contract since this contract /// is responsible for deploying OptimismMintableERC20 contracts. /// @notice Semantic version. - /// @custom:semver 1.10.1-beta.5 - string public constant version = "1.10.1-beta.5"; + /// @custom:semver 1.10.1-beta.6 + string public constant version = "1.10.1-beta.6"; /// @notice Constructs the OptimismMintableERC20Factory contract. 
constructor() { diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol index 140c6ce5e6d..63d4eb29a14 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC721.sol @@ -9,8 +9,8 @@ import { ERC721 } from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IOptimismMintableERC721 } from "src/universal/interfaces/IOptimismMintableERC721.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismMintableERC721 } from "interfaces/universal/IOptimismMintableERC721.sol"; /// @title OptimismMintableERC721 /// @notice This contract is the remote representation for some token that lives on another network, @@ -46,8 +46,8 @@ contract OptimismMintableERC721 is ERC721Enumerable, ISemver { } /// @notice Semantic version. - /// @custom:semver 1.3.1-beta.4 - string public constant version = "1.3.1-beta.4"; + /// @custom:semver 1.3.1-beta.5 + string public constant version = "1.3.1-beta.5"; /// @param _bridge Address of the bridge on this network. /// @param _remoteChainId Chain ID where the remote token is deployed. 
diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol index da05e8f4f34..aa137378fcc 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC721Factory.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { OptimismMintableERC721 } from "src/universal/OptimismMintableERC721.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title OptimismMintableERC721Factory /// @notice Factory contract for creating OptimismMintableERC721 contracts. @@ -28,8 +28,8 @@ contract OptimismMintableERC721Factory is ISemver { event OptimismMintableERC721Created(address indexed localToken, address indexed remoteToken, address deployer); /// @notice Semantic version. - /// @custom:semver 1.4.1-beta.5 - string public constant version = "1.4.1-beta.5"; + /// @custom:semver 1.4.1-beta.6 + string public constant version = "1.4.1-beta.6"; /// @notice The semver MUST be bumped any time that there is a change in /// the OptimismMintableERC721 token contract since this contract diff --git a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol index dec119398c0..9e7cd908242 100644 --- a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol +++ b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol @@ -8,11 +8,11 @@ import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IStaticERC1967Proxy } 
from "src/universal/interfaces/IStaticERC1967Proxy.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IStaticL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IStaticERC1967Proxy } from "interfaces/universal/IStaticERC1967Proxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; /// @title ProxyAdmin /// @notice This is an auxiliary contract meant to be assigned as the admin of an ERC1967 Proxy, diff --git a/packages/contracts-bedrock/src/universal/StandardBridge.sol b/packages/contracts-bedrock/src/universal/StandardBridge.sol index dceb2fd4147..ff01560ad1a 100644 --- a/packages/contracts-bedrock/src/universal/StandardBridge.sol +++ b/packages/contracts-bedrock/src/universal/StandardBridge.sol @@ -13,9 +13,9 @@ import { Constants } from "src/libraries/Constants.sol"; // Interfaces import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; -import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; /// @custom:upgradeable /// @title StandardBridge diff --git a/packages/contracts-bedrock/src/universal/StorageSetter.sol b/packages/contracts-bedrock/src/universal/StorageSetter.sol index e6e4b5df545..9656ca21c5d 100644 --- a/packages/contracts-bedrock/src/universal/StorageSetter.sol +++ b/packages/contracts-bedrock/src/universal/StorageSetter.sol @@ -5,7 +5,7 
@@ pragma solidity 0.8.15; import { Storage } from "src/libraries/Storage.sol"; // Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; /// @title StorageSetter /// @notice A simple contract that allows setting arbitrary storage slots. @@ -19,8 +19,8 @@ contract StorageSetter is ISemver { } /// @notice Semantic version. - /// @custom:semver 1.2.1-beta.3 - string public constant version = "1.2.1-beta.3"; + /// @custom:semver 1.2.1-beta.4 + string public constant version = "1.2.1-beta.4"; /// @notice Stores a bytes32 `_value` at `_slot`. Any storage slots that /// are packed should be set through this interface. diff --git a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol index 10a29679a25..4a9304935f5 100644 --- a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol +++ b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; /// @title RISCV /// @notice The RISCV contract emulates a single RISCV hart cycle statelessly, using memory proofs to verify the diff --git a/packages/contracts-bedrock/src/vendor/eas/EAS.sol b/packages/contracts-bedrock/src/vendor/eas/EAS.sol index 1cebdc81934..dd8840789d5 100644 --- a/packages/contracts-bedrock/src/vendor/eas/EAS.sol +++ b/packages/contracts-bedrock/src/vendor/eas/EAS.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.19; import { Address } from "@openzeppelin/contracts/utils/Address.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; import { 
Predeploys } from "src/libraries/Predeploys.sol"; import { EIP1271Verifier } from "src/vendor/eas/eip1271/EIP1271Verifier.sol"; import { ISchemaResolver } from "src/vendor/eas/resolver/ISchemaResolver.sol"; @@ -80,8 +80,8 @@ contract EAS is IEAS, ISemver, EIP1271Verifier { uint256[MAX_GAP - 3] private __gap; /// @notice Semantic version. - /// @custom:semver 1.4.1-beta.1 - string public constant version = "1.4.1-beta.1"; + /// @custom:semver 1.4.1-beta.2 + string public constant version = "1.4.1-beta.2"; /// @dev Creates a new EAS instance. constructor() EIP1271Verifier("EAS", "1.3.0") { } diff --git a/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol b/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol index 1adca3d6c3e..98f87c35b53 100644 --- a/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol +++ b/packages/contracts-bedrock/src/vendor/eas/SchemaRegistry.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.19; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; import { ISchemaResolver } from "src/vendor/eas/resolver/ISchemaResolver.sol"; import { EMPTY_UID, MAX_GAP } from "src/vendor/eas/Common.sol"; import { ISchemaRegistry, SchemaRecord } from "src/vendor/eas/ISchemaRegistry.sol"; @@ -20,8 +20,8 @@ contract SchemaRegistry is ISchemaRegistry, ISemver { uint256[MAX_GAP - 1] private __gap; /// @notice Semantic version. 
- /// @custom:semver 1.3.1-beta.1 - string public constant version = "1.3.1-beta.1"; + /// @custom:semver 1.3.1-beta.2 + string public constant version = "1.3.1-beta.2"; /// @inheritdoc ISchemaRegistry function register(string calldata schema, ISchemaResolver resolver, bool revocable) external returns (bytes32) { diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index 921330060b3..ab0ca82c61f 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -6,7 +6,7 @@ import { ChallengeStatus, Challenge, CommitmentType -} from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; +} from "interfaces/L1/IDataAvailabilityChallenge.sol"; import { computeCommitmentKeccak256 } from "src/L1/DataAvailabilityChallenge.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; diff --git a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol index 22345d860e4..d7c5dc29b34 100644 --- a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol @@ -13,10 +13,10 @@ import { Hashing } from "src/libraries/Hashing.sol"; import { Encoding } from "src/libraries/Encoding.sol"; // Target contract dependencies -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { 
ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; contract L1CrossDomainMessenger_Test is CommonTest { /// @dev The receiver address diff --git a/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol b/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol index fd9d0d89b0f..88913a76ba3 100644 --- a/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol @@ -11,10 +11,10 @@ import { ERC721 } from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; /// @dev Test ERC721 contract. 
contract TestERC721 is ERC721 { diff --git a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol index 97ef01262ab..4cc1b8f3599 100644 --- a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol @@ -14,10 +14,10 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; contract L1StandardBridge_Getter_Test is CommonTest { /// @dev Test that the accessors return the correct initialized values. 
diff --git a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol index 5719d3fa558..e4d53ddd9e7 100644 --- a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol +++ b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol @@ -15,7 +15,7 @@ import { Proxy } from "src/universal/Proxy.sol"; // Target contract import { L2OutputOracle } from "src/L1/L2OutputOracle.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; contract L2OutputOracle_TestBase is CommonTest { function setUp() public override { diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index e828b415621..418eebd9e64 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -7,8 +7,8 @@ import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; // Exposes internal functions for testing. 
contract OPContractsManager_Harness is OPContractsManager { diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index a0a49227e71..b575fdacff4 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -22,11 +22,11 @@ import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; contract OptimismPortal_Test is CommonTest { address depositor; diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index 44c1c8f9fd1..830323936a6 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -23,12 +23,12 @@ import "src/dispute/lib/Types.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IFaultDisputeGame } from 
"src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; contract OptimismPortal2_Test is CommonTest { address depositor; diff --git a/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol index c39bf0ee0f7..0c08cab3e2d 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol @@ -10,8 +10,8 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IL1BlockInterop, ConfigType } from "src/L2/interfaces/IL1BlockInterop.sol"; -import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; +import { IL1BlockInterop, ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; contract OptimismPortalInterop_Test is CommonTest { /// @notice Marked virtual to be overridden in diff --git a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol index fc6ea447d23..28d9ef2b0b8 100644 --- a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol +++ b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol @@ -6,8 +6,8 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Interfaces -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { 
IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; contract ProtocolVersions_Init is CommonTest { event ConfigUpdate(uint256 indexed version, IProtocolVersions.UpdateType indexed updateType, bytes data); diff --git a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol index 8b262b55f2a..d49aa2337bb 100644 --- a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol @@ -11,7 +11,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol"; import { Constants } from "src/libraries/Constants.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; contract MeterUser is ResourceMetering { ResourceMetering.ResourceConfig public innerConfig; diff --git a/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol b/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol index 2772ec0c2a3..ed51e019aca 100644 --- a/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol @@ -4,10 +4,10 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; // Target contract dependencies -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; // Target contract -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index 
ae025c10df9..f7cea088bcf 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -13,9 +13,9 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { GasPayingToken } from "src/libraries/GasPayingToken.sol"; // Interfaces -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; contract SystemConfig_Init is CommonTest { event ConfigUpdate(uint256 indexed version, ISystemConfig.UpdateType indexed updateType, bytes data); diff --git a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol index e66f2e4d1ec..8fd6daeed84 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol @@ -13,10 +13,10 @@ import { StaticConfig } from "src/libraries/StaticConfig.sol"; import { GasPayingToken } from "src/libraries/GasPayingToken.sol"; // Interfaces -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; -import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; -import { ConfigType } from "src/L2/interfaces/IL1BlockInterop.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; contract SystemConfigInterop_Test is CommonTest { /// @notice 
Marked virtual to be overridden in diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 25b6e711c1e..100019034df 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -20,7 +20,7 @@ import { NotDepositor, InteropStartAlreadySet } from "src/L2/CrossL2Inbox.sol"; -import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; +import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; /// @title CrossL2InboxWithModifiableTransientStorage /// @dev CrossL2Inbox contract with methods to modify the transient storage. diff --git a/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol index 01d3e6108f9..a5e086c86d5 100644 --- a/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol @@ -10,7 +10,7 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import "src/libraries/L1BlockErrors.sol"; // Interfaces -import { IL1BlockInterop, ConfigType } from "src/L2/interfaces/IL1BlockInterop.sol"; +import { IL1BlockInterop, ConfigType } from "interfaces/L2/IL1BlockInterop.sol"; contract L1BlockInteropTest is CommonTest { event GasPayingTokenSet(address indexed token, uint8 indexed decimals, bytes32 name, bytes32 symbol); diff --git a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol index f5ef2d63559..33b8e0bfb88 100644 --- a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol @@ -14,8 +14,8 @@ import { Types } from "src/libraries/Types.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; // Interfaces -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; -import { 
IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; contract L2CrossDomainMessenger_Test is CommonTest { /// @dev Receiver address for testing diff --git a/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol b/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol index b614711d101..1fbb2cae0b8 100644 --- a/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ERC721Bridge.t.sol @@ -9,8 +9,8 @@ import { ERC721 } from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; import { OptimismMintableERC721 } from "src/universal/OptimismMintableERC721.sol"; // Interfaces -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; contract TestERC721 is ERC721 { constructor() ERC721("Test", "TST") { } diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol index fb68ce09318..f7b61083e24 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol @@ -16,10 +16,10 @@ import { Hashing } from "src/libraries/Hashing.sol"; import { Types } from "src/libraries/Types.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; -import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; -import { IL2StandardBridge } from "src/L2/interfaces/IL2StandardBridge.sol"; +import { ICrossDomainMessenger } from 
"interfaces/universal/ICrossDomainMessenger.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; +import { IL2StandardBridge } from "interfaces/L2/IL2StandardBridge.sol"; contract L2StandardBridge_Test is CommonTest { using stdStorage for StdStorage; diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol index 30212d7ad62..a57c38a644d 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol @@ -5,13 +5,13 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; // Interfaces -import { IMintableAndBurnableERC20 } from "src/L2/interfaces/IMintableAndBurnableERC20.sol"; -import { IL2StandardBridgeInterop } from "src/L2/interfaces/IL2StandardBridgeInterop.sol"; +import { IMintableAndBurnableERC20 } from "interfaces/L2/IMintableAndBurnableERC20.sol"; +import { IL2StandardBridgeInterop } from "interfaces/L2/IL2StandardBridgeInterop.sol"; import { IERC20Metadata } from "@openzeppelin/contracts/token/ERC20/extensions/IERC20Metadata.sol"; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; import { ILegacyMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; -import { IOptimismERC20Factory } from "src/L2/interfaces/IOptimismERC20Factory.sol"; +import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; contract L2StandardBridgeInterop_Test is CommonTest { /// @notice Emitted when a conversion is made. 
diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index fcfa375c1b5..3b431485369 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -27,7 +27,7 @@ import { } from "src/L2/L2ToL2CrossDomainMessenger.sol"; // Interfaces -import { ICrossL2Inbox, Identifier } from "src/L2/interfaces/ICrossL2Inbox.sol"; +import { ICrossL2Inbox, Identifier } from "interfaces/L2/ICrossL2Inbox.sol"; /// @title L2ToL2CrossDomainMessengerWithModifiableTransientStorage /// @dev L2ToL2CrossDomainMessenger contract with methods to modify the transient storage. diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol index d5b999a922e..80ee2e62022 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol @@ -11,14 +11,14 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol"; import { IERC165 } from "@openzeppelin/contracts-v5/utils/introspection/IERC165.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { IERC7802 } from "src/L2/interfaces/IERC7802.sol"; +import { IERC7802 } from "interfaces/L2/IERC7802.sol"; import { IBeacon } from "@openzeppelin/contracts-v5/proxy/beacon/IBeacon.sol"; import { BeaconProxy } from "@openzeppelin/contracts-v5/proxy/beacon/BeaconProxy.sol"; import { Unauthorized } from "src/libraries/errors/CommonErrors.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; // Target contract -import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol"; +import { IOptimismSuperchainERC20 } from 
"interfaces/L2/IOptimismSuperchainERC20.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; /// @title OptimismSuperchainERC20Test diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol index d8d7f86f26a..a2f7125fc21 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol @@ -8,7 +8,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { CREATE3, Bytes32AddressLib } from "@rari-capital/solmate/src/utils/CREATE3.sol"; // Target contract -import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol"; +import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; import { IERC20Metadata } from "@openzeppelin/contracts/interfaces/IERC20Metadata.sol"; /// @title OptimismSuperchainERC20FactoryTest diff --git a/packages/contracts-bedrock/test/L2/Preinstalls.t.sol b/packages/contracts-bedrock/test/L2/Preinstalls.t.sol index 5eec3f811de..d0ead497834 100644 --- a/packages/contracts-bedrock/test/L2/Preinstalls.t.sol +++ b/packages/contracts-bedrock/test/L2/Preinstalls.t.sol @@ -4,7 +4,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -import { IEIP712 } from "src/universal/interfaces/IEIP712.sol"; +import { IEIP712 } from "interfaces/universal/IEIP712.sol"; /// @title PreinstallsTest contract PreinstallsTest is CommonTest { diff --git a/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol b/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol index ca8c3806b38..dc27fbbad9c 100644 --- a/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol +++ b/packages/contracts-bedrock/test/L2/SequencerFeeVault.t.sol @@ -7,7 +7,7 @@ import 
{ Reverter } from "test/mocks/Callers.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts -import { ISequencerFeeVault } from "src/L2/interfaces/ISequencerFeeVault.sol"; +import { ISequencerFeeVault } from "interfaces/L2/ISequencerFeeVault.sol"; // Libraries import { Hashing } from "src/libraries/Hashing.sol"; diff --git a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol index 2afb4ba03d1..788792a4def 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol @@ -9,9 +9,9 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; // Target contract import { SuperchainERC20 } from "src/L2/SuperchainERC20.sol"; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { ISuperchainERC20 } from "src/L2/interfaces/ISuperchainERC20.sol"; +import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; import { MockSuperchainERC20Implementation } from "test/mocks/SuperchainERC20Implementation.sol"; /// @title SuperchainERC20Test diff --git a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol index 3c39e8b1792..2a63961ce41 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol @@ -6,14 +6,14 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; -import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; +import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; // Target contract -import { ISuperchainTokenBridge } from 
"src/L2/interfaces/ISuperchainTokenBridge.sol"; -import { ISuperchainERC20 } from "src/L2/interfaces/ISuperchainERC20.sol"; -import { IOptimismSuperchainERC20Factory } from "src/L2/interfaces/IOptimismSuperchainERC20Factory.sol"; +import { ISuperchainTokenBridge } from "interfaces/L2/ISuperchainTokenBridge.sol"; +import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; +import { IOptimismSuperchainERC20Factory } from "interfaces/L2/IOptimismSuperchainERC20Factory.sol"; import { IERC20 } from "@openzeppelin/contracts/interfaces/IERC20.sol"; -import { IERC7802 } from "src/L2/interfaces/IERC7802.sol"; +import { IERC7802 } from "interfaces/L2/IERC7802.sol"; /// @title SuperchainTokenBridgeTest /// @notice Contract for testing the SuperchainTokenBridge contract. diff --git a/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol b/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol index 621acaa3aef..bc59c76c116 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainWETH.t.sol @@ -10,11 +10,11 @@ import { NotCustomGasToken, Unauthorized, ZeroAddress } from "src/libraries/erro import { Preinstalls } from "src/libraries/Preinstalls.sol"; // Interfaces -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; -import { ISuperchainWETH } from "src/L2/interfaces/ISuperchainWETH.sol"; -import { IERC7802, IERC165 } from "src/L2/interfaces/IERC7802.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; +import { ISuperchainWETH } from "interfaces/L2/ISuperchainWETH.sol"; +import { IERC7802, IERC165 } from "interfaces/L2/IERC7802.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; +import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; /// @title SuperchainWETH_Test /// @notice Contract for testing the 
SuperchainWETH contract. diff --git a/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol b/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol index 20f18eddc3c..b2e7a1d64d1 100644 --- a/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol +++ b/packages/contracts-bedrock/test/actors/FaultDisputeActors.sol @@ -8,7 +8,7 @@ import { CommonBase } from "forge-std/Base.sol"; import "src/dispute/lib/Types.sol"; // Interfaces -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; /// @title GameSolver /// @notice The `GameSolver` contract is a contract that can produce an array of available diff --git a/packages/contracts-bedrock/test/cannon/MIPS.t.sol b/packages/contracts-bedrock/test/cannon/MIPS.t.sol index 62100f31cfb..aa983098121 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS.t.sol @@ -14,8 +14,8 @@ import { InvalidExitedValue, InvalidMemoryProof } from "src/cannon/libraries/Can import "src/dispute/lib/Types.sol"; // Interfaces -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; contract MIPS_Test is CommonTest { IMIPS internal mips; diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index d38ab89ff55..07c5883c17b 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -14,8 +14,8 @@ import { InvalidExitedValue, InvalidMemoryProof, InvalidSecondMemoryProof } from import "src/dispute/lib/Types.sol"; // Interfaces -import { IMIPS2 } from "src/cannon/interfaces/IMIPS2.sol"; -import { IPreimageOracle } from 
"src/cannon/interfaces/IPreimageOracle.sol"; +import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; contract ThreadStack { bytes32 internal constant EMPTY_THREAD_ROOT = hex"ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"; diff --git a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol index 6293f652e52..d6c7c520a5f 100644 --- a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol +++ b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol @@ -16,7 +16,7 @@ import "src/cannon/libraries/CannonErrors.sol"; import "src/cannon/libraries/CannonTypes.sol"; // Interfaces -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; contract PreimageOracle_Test is Test { IPreimageOracle oracle; diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index b75809f1d79..f40a641994b 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -13,8 +13,8 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; contract DisputeGameFactory_Init is CommonTest { FakeClone fakeClone; diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index df820bcb3cc..50fde1fad4c 100644 --- 
a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -24,11 +24,11 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; contract FaultDisputeGame_Init is DisputeGameFactory_Init { /// @dev The type of the game being tested. 
diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index 20c45bf44de..8c74bee750a 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -14,10 +14,10 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { /// @dev The type of the game being tested. 
diff --git a/packages/contracts-bedrock/test/dispute/WETH98.t.sol b/packages/contracts-bedrock/test/dispute/WETH98.t.sol index b26cd927f12..f207248be19 100644 --- a/packages/contracts-bedrock/test/dispute/WETH98.t.sol +++ b/packages/contracts-bedrock/test/dispute/WETH98.t.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; // Contracts -import { IWETH98 } from "src/universal/interfaces/IWETH98.sol"; +import { IWETH98 } from "interfaces/universal/IWETH98.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract WETH98_Test is Test { diff --git a/packages/contracts-bedrock/test/governance/MintManager.t.sol b/packages/contracts-bedrock/test/governance/MintManager.t.sol index c29e2560283..04255474d7a 100644 --- a/packages/contracts-bedrock/test/governance/MintManager.t.sol +++ b/packages/contracts-bedrock/test/governance/MintManager.t.sol @@ -5,8 +5,8 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; // Interfaces -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; -import { IMintManager } from "src/governance/interfaces/IMintManager.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; +import { IMintManager } from "interfaces/governance/IMintManager.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract MintManager_Initializer is CommonTest { diff --git a/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol index 3bf4ad3f7b3..06df8d1ea07 100644 --- a/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/invariants/CrossDomainMessenger.t.sol @@ -3,8 +3,8 @@ pragma solidity 0.8.15; import { StdUtils } from "forge-std/StdUtils.sol"; import { Vm } from "forge-std/Vm.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { 
IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Constants } from "src/libraries/Constants.sol"; diff --git a/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol b/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol index 8d1221a792c..4d674681073 100644 --- a/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol +++ b/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol @@ -10,7 +10,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; /// @title ETHLiquidity_User /// @notice Actor contract that interacts with the ETHLiquidity contract. 
Always pretends to be the diff --git a/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol index 94b46930ad4..705e21bd4cf 100644 --- a/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/invariants/FaultDisputeGame.t.sol @@ -11,7 +11,7 @@ import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; // Interfaces -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; contract FaultDisputeGame_Solvency_Invariant is FaultDisputeGame_Init { Claim internal constant ROOT_CLAIM = Claim.wrap(bytes32(uint256(10))); diff --git a/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol b/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol index d7d6914841c..fce298d2ceb 100644 --- a/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol +++ b/packages/contracts-bedrock/test/invariants/L2OutputOracle.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; import { Vm } from "forge-std/Vm.sol"; contract L2OutputOracle_Proposer { diff --git a/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol b/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol index ae754cd0e96..42bf52e1de8 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismPortal.t.sol @@ -4,9 +4,9 @@ pragma solidity 0.8.15; import { StdUtils } from "forge-std/Test.sol"; import { Vm } from "forge-std/Vm.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; import { 
ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { Constants } from "src/libraries/Constants.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; diff --git a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol index 5e0e866dcfb..0a870bc651f 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol @@ -17,9 +17,9 @@ import "src/dispute/lib/Types.sol"; import "src/libraries/PortalErrors.sol"; // Interfaces -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; contract OptimismPortal2_Depositor is StdUtils, ResourceMetering { Vm internal vm; diff --git a/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol b/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol index 4652f9b9e36..49793f2adf1 100644 --- a/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/invariants/ResourceMetering.t.sol @@ -8,7 +8,7 @@ import { StdInvariant } from "forge-std/StdInvariant.sol"; import { Arithmetic } from "src/libraries/Arithmetic.sol"; import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { Constants } 
from "src/libraries/Constants.sol"; import { InvariantTest } from "test/invariants/InvariantTest.sol"; diff --git a/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol b/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol index 24ffc0a5796..bb6ee569da1 100644 --- a/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol +++ b/packages/contracts-bedrock/test/invariants/SuperchainWETH.t.sol @@ -7,7 +7,7 @@ import { Vm } from "forge-std/Vm.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; // Interfaces -import { ISuperchainWETH } from "src/L2/interfaces/ISuperchainWETH.sol"; +import { ISuperchainWETH } from "interfaces/L2/ISuperchainWETH.sol"; /// @title SuperchainWETH_User /// @notice Actor contract that interacts with the SuperchainWETH contract. diff --git a/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol b/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol index 5b0b300abda..68add058f60 100644 --- a/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol @@ -2,8 +2,8 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { Constants } from "src/libraries/Constants.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index 9621160976e..cf809eb65b4 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -121,8 +121,8 @@ Write your proof in a `.k.sol` file in the [`proofs`](./proofs/) folder, which i To reference the correct addresses for writing the tests, 
first import the signatures as in this example: ```solidity -import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal as OptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; ``` Declare the correspondent variables and cast the correct signatures to the correct addresses: diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol index a04defe5d9e..80adc430211 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol @@ -3,8 +3,8 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { IL1CrossDomainMessenger as L1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IL1CrossDomainMessenger as L1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; contract L1CrossDomainMessengerKontrol is DeploymentSummaryFaultProofs, KontrolUtils { L1CrossDomainMessenger l1CrossDomainMessenger; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol index 5e45e3e3a9f..6a86fbd637b 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol @@ -3,9 +3,9 @@ pragma 
solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { IL1ERC721Bridge as L1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { IL1ERC721Bridge as L1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; contract L1ERC721BridgeKontrol is DeploymentSummaryFaultProofs, KontrolUtils { L1ERC721Bridge l1ERC721Bridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol index b5f8793426e..d25e57ae288 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol @@ -3,9 +3,9 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { IL1StandardBridge as L1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { IL1StandardBridge as L1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from 
"interfaces/universal/ICrossDomainMessenger.sol"; contract L1StandardBridgeKontrol is DeploymentSummaryFaultProofs, KontrolUtils { L1StandardBridge l1standardBridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol index f0cf6cac773..15bdc2b33da 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol @@ -4,8 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal as OptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortalKontrol is DeploymentSummary, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol index d561b8b8509..1d16f1f99cb 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol @@ -4,8 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal as 
OptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortal2Kontrol is DeploymentSummaryFaultProofs, KontrolUtils { diff --git a/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol b/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol index 6d0abaea8d2..2a0f4792053 100644 --- a/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol +++ b/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; // Target contract -import { IDeployerWhitelist } from "src/legacy/interfaces/IDeployerWhitelist.sol"; +import { IDeployerWhitelist } from "interfaces/legacy/IDeployerWhitelist.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeployerWhitelist_Test is Test { diff --git a/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol b/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol index 203605f3b1b..2c976027044 100644 --- a/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol @@ -9,8 +9,8 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Interfaces -import { IL1BlockNumber } from "src/legacy/interfaces/IL1BlockNumber.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; +import { IL1BlockNumber } from "interfaces/legacy/IL1BlockNumber.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; contract L1BlockNumberTest is Test { IL1Block lb; diff --git a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol index dcfab12cb60..28d9e58ed8a 100644 --- a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol +++ 
b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol @@ -6,7 +6,7 @@ import { Test } from "forge-std/Test.sol"; import { VmSafe } from "forge-std/Vm.sol"; // Target contract -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract Owner { diff --git a/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol b/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol index 061b6eee6c5..07f43cf5a61 100644 --- a/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol +++ b/packages/contracts-bedrock/test/legacy/LegacyMintableERC20.t.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { LegacyMintableERC20 } from "src/legacy/LegacyMintableERC20.sol"; -import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; contract LegacyMintableERC20_Test is CommonTest { LegacyMintableERC20 legacyMintableERC20; diff --git a/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol b/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol index 5b1f40d55bd..19fbdec8f7c 100644 --- a/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol @@ -5,10 +5,10 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; // Target contract dependencies -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; // Target contract -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; +import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; 
import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract ResolvedDelegateProxy_Test is Test { diff --git a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol index 6ecf74e2286..ae024e5b2a1 100644 --- a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol +++ b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol @@ -6,7 +6,7 @@ import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import "src/dispute/lib/Types.sol"; // Interfaces -import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; +import { IBigStepper, IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; /// @title AlphabetVM /// @dev A mock VM for the purpose of testing the dispute game infrastructure. Note that this only works diff --git a/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol b/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol index d7cdc69f37e..a23708e427a 100644 --- a/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol @@ -4,9 +4,9 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; import { DeployAltDAInput, DeployAltDAOutput, DeployAltDA } from "scripts/deploy/DeployAltDA.s.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeployAltDAInput_Test is Test { diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 
8d3feac2de0..584aa59a9c2 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -4,22 +4,22 @@ pragma solidity 0.8.15; import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IOptimismPortal2 } from 
"interfaces/L1/IOptimismPortal2.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { DeployImplementationsInput, diff --git a/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol index 81bf44eb0bc..959b9c7031f 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol @@ -4,8 +4,8 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; import { DeployOPCM, DeployOPCMInput, DeployOPCMOutput } from "scripts/deploy/DeployOPCM.s.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; contract DeployOPCMInput_Test is Test { DeployOPCMInput dii; diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 5280328168b..5ac899579d6 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -13,19 +13,19 @@ import { import { DeployOPChainInput, DeployOPChain, DeployOPChainOutput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; 
-import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { Claim, Duration, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; diff --git a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol index 924957cc180..93c3c0c9344 100644 --- 
a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol @@ -7,7 +7,7 @@ import { stdToml } from "forge-std/StdToml.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; import { DeploySuperchainInput, DeploySuperchain, DeploySuperchainOutput } from "scripts/deploy/DeploySuperchain.s.sol"; contract DeploySuperchainInput_Test is Test { diff --git a/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol b/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol index 84abdbc48a8..21fad7bb547 100644 --- a/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol +++ b/packages/contracts-bedrock/test/safe/DeputyGuardianModule.t.sol @@ -8,15 +8,15 @@ import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import "test/safe-tools/SafeTestTools.sol"; // Contracts -import { IDeputyGuardianModule } from "src/safe/interfaces/IDeputyGuardianModule.sol"; +import { IDeputyGuardianModule } from "interfaces/safe/IDeputyGuardianModule.sol"; // Libraries import "src/dispute/lib/Types.sol"; // Interfaces -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeputyGuardianModule_TestInit 
is CommonTest, SafeTestTools { @@ -294,8 +294,8 @@ contract DeputyGuardianModule_NoPortalCollisions_Test is DeputyGuardianModule_Te excludes[0] = "src/dispute/lib/*"; excludes[1] = "src/L1/OptimismPortal2.sol"; excludes[2] = "src/L1/OptimismPortalInterop.sol"; - excludes[3] = "src/L1/interfaces/IOptimismPortal2.sol"; - excludes[4] = "src/L1/interfaces/IOptimismPortalInterop.sol"; + excludes[3] = "interfaces/L1/IOptimismPortal2.sol"; + excludes[4] = "interfaces/L1/IOptimismPortalInterop.sol"; Abi[] memory abis = ForgeArtifacts.getContractFunctionAbis("src/{L1,dispute,universal}", excludes); for (uint256 i; i < abis.length; i++) { for (uint256 j; j < abis[i].entries.length; j++) { diff --git a/packages/contracts-bedrock/test/setup/Events.sol b/packages/contracts-bedrock/test/setup/Events.sol index 966b236c30c..7056f0cbdd6 100644 --- a/packages/contracts-bedrock/test/setup/Events.sol +++ b/packages/contracts-bedrock/test/setup/Events.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; import "src/dispute/lib/Types.sol"; import { Types } from "src/libraries/Types.sol"; diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index ef2b654b241..613dd067647 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -17,38 +17,38 @@ import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; // Interfaces -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; -import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; -import { 
ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; -import { IOptimismMintableERC721Factory } from "src/universal/interfaces/IOptimismMintableERC721Factory.sol"; -import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IL2CrossDomainMessenger } from "src/L2/interfaces/IL2CrossDomainMessenger.sol"; -import { IL2StandardBridgeInterop } from "src/L2/interfaces/IL2StandardBridgeInterop.sol"; -import { IL2ToL1MessagePasser } from "src/L2/interfaces/IL2ToL1MessagePasser.sol"; -import { IL2ERC721Bridge } from "src/L2/interfaces/IL2ERC721Bridge.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IOptimismSuperchainERC20Factory } from "src/L2/interfaces/IOptimismSuperchainERC20Factory.sol"; -import { IBaseFeeVault } from "src/L2/interfaces/IBaseFeeVault.sol"; -import { ISequencerFeeVault } from "src/L2/interfaces/ISequencerFeeVault.sol"; -import { IL1FeeVault } from "src/L2/interfaces/IL1FeeVault.sol"; -import { IGasPriceOracle } from "src/L2/interfaces/IGasPriceOracle.sol"; -import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; -import { ISuperchainWETH } from "src/L2/interfaces/ISuperchainWETH.sol"; -import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; -import { IWETH98 } from 
"src/universal/interfaces/IWETH98.sol"; -import { IGovernanceToken } from "src/governance/interfaces/IGovernanceToken.sol"; -import { ILegacyMessagePasser } from "src/legacy/interfaces/ILegacyMessagePasser.sol"; -import { ISuperchainTokenBridge } from "src/L2/interfaces/ISuperchainTokenBridge.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; +import { IL2OutputOracle } from "interfaces/L1/IL2OutputOracle.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; +import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; +import { IOptimismMintableERC721Factory } from "interfaces/universal/IOptimismMintableERC721Factory.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; +import { IL2StandardBridgeInterop } from "interfaces/L2/IL2StandardBridgeInterop.sol"; +import { IL2ToL1MessagePasser } from "interfaces/L2/IL2ToL1MessagePasser.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IOptimismSuperchainERC20Factory } from "interfaces/L2/IOptimismSuperchainERC20Factory.sol"; 
+import { IBaseFeeVault } from "interfaces/L2/IBaseFeeVault.sol"; +import { ISequencerFeeVault } from "interfaces/L2/ISequencerFeeVault.sol"; +import { IL1FeeVault } from "interfaces/L2/IL1FeeVault.sol"; +import { IGasPriceOracle } from "interfaces/L2/IGasPriceOracle.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; +import { ISuperchainWETH } from "interfaces/L2/ISuperchainWETH.sol"; +import { IETHLiquidity } from "interfaces/L2/IETHLiquidity.sol"; +import { IWETH98 } from "interfaces/universal/IWETH98.sol"; +import { IGovernanceToken } from "interfaces/governance/IGovernanceToken.sol"; +import { ILegacyMessagePasser } from "interfaces/legacy/ILegacyMessagePasser.sol"; +import { ISuperchainTokenBridge } from "interfaces/L2/ISuperchainTokenBridge.sol"; /// @title Setup /// @dev This contact is responsible for setting up the contracts in state. It currently diff --git a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol index fd0a9bfc784..c27b493344b 100644 --- a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol +++ b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol @@ -9,11 +9,11 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { Types } from "src/libraries/Types.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; -import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; +import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; import { Encoding } from "src/libraries/Encoding.sol"; // Interfaces -import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Free function for setting the prevBaseFee param in the OptimismPortal. 
diff --git a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol index 50398e4a892..12cc2a8c53b 100644 --- a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol @@ -10,7 +10,7 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Encoding } from "src/libraries/Encoding.sol"; -import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; // CrossDomainMessenger_Test is for testing functionality which is common to both the L1 and L2 // CrossDomainMessenger contracts. For simplicity, we use the L1 Messenger as the test contract. diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol index d56a97b19db..4925c801885 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol @@ -2,8 +2,8 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; -import { IOptimismMintableERC20 } from "src/universal/interfaces/IOptimismMintableERC20.sol"; -import { ILegacyMintableERC20 } from "src/universal/interfaces/ILegacyMintableERC20.sol"; +import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; +import { ILegacyMintableERC20 } from "interfaces/universal/ILegacyMintableERC20.sol"; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; contract OptimismMintableERC20_Test is CommonTest { diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol 
b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol index 74df9e729e8..867c11b3884 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol @@ -11,8 +11,8 @@ import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; // Interfaces -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; contract OptimismMintableTokenFactory_Test is CommonTest { event StandardL2TokenCreated(address indexed remoteToken, address indexed localToken); diff --git a/packages/contracts-bedrock/test/universal/Proxy.t.sol b/packages/contracts-bedrock/test/universal/Proxy.t.sol index 8c6aa7ae513..437fabfe671 100644 --- a/packages/contracts-bedrock/test/universal/Proxy.t.sol +++ b/packages/contracts-bedrock/test/universal/Proxy.t.sol @@ -3,7 +3,7 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; import { Bytes32AddressLib } from "@rari-capital/solmate/src/utils/Bytes32AddressLib.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract SimpleStorage { diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol index 04e416cbd3a..b1b6fa92a20 100644 --- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol @@ -6,11 +6,11 @@ import { Test } from 
"forge-std/Test.sol"; import { SimpleStorage } from "test/universal/Proxy.t.sol"; // Interfaces -import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; -import { IResolvedDelegateProxy } from "src/legacy/interfaces/IResolvedDelegateProxy.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; +import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/universal/Specs.t.sol b/packages/contracts-bedrock/test/universal/Specs.t.sol index 49e35e835f3..d8c48849875 100644 --- a/packages/contracts-bedrock/test/universal/Specs.t.sol +++ b/packages/contracts-bedrock/test/universal/Specs.t.sol @@ -12,13 +12,13 @@ import { ForgeArtifacts, Abi, AbiEntry } from "scripts/libraries/ForgeArtifacts. 
import { OPContractsManager } from "src/L1/OPContractsManager.sol"; // Interfaces -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; -import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; -import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; -import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISystemConfigInterop } from "interfaces/L1/ISystemConfigInterop.sol"; +import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; +import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; /// @title Specification_Test /// @dev Specifies common security properties of entrypoints to L1 contracts, including authorization and @@ -940,12 +940,12 @@ contract Specification_Test is CommonTest { /// @notice Ensures that there's an auth spec for every L1 contract function. 
function test_contractAuth_works() public { string[] memory pathExcludes = new string[](6); - pathExcludes[0] = "src/dispute/interfaces/*"; + pathExcludes[0] = "interfaces/dispute/*"; pathExcludes[1] = "src/dispute/lib/*"; pathExcludes[2] = "src/safe/SafeSigners.sol"; - pathExcludes[3] = "src/L1/interfaces/*"; - pathExcludes[4] = "src/governance/interfaces/*"; - pathExcludes[5] = "src/safe/interfaces/*"; + pathExcludes[3] = "interfaces/L1/*"; + pathExcludes[4] = "interfaces/governance/*"; + pathExcludes[5] = "interfaces/safe/*"; Abi[] memory abis = ForgeArtifacts.getContractFunctionAbis( "src/{L1,dispute,governance,safe,universal/ProxyAdmin.sol,universal/WETH98.sol}", pathExcludes ); diff --git a/packages/contracts-bedrock/test/vendor/Initializable.t.sol b/packages/contracts-bedrock/test/vendor/Initializable.t.sol index eb43ae18759..1f9fd946c51 100644 --- a/packages/contracts-bedrock/test/vendor/Initializable.t.sol +++ b/packages/contracts-bedrock/test/vendor/Initializable.t.sol @@ -14,11 +14,11 @@ import { Constants } from "src/libraries/Constants.sol"; import { GameType } from "src/dispute/lib/Types.sol"; // Interfaces -import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; -import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; /// @title Initializer_Test /// @dev Ensures that the `initialize()` function on contracts cannot 
be called more than diff --git a/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol b/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol index 4b663d697ba..51c2fce2667 100644 --- a/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol +++ b/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.25; import { Test } from "forge-std/Test.sol"; -import { IOptimismSuperchainERC20 } from "src/L2/interfaces/IOptimismSuperchainERC20.sol"; +import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; /// @title InitializerOZv5_Test From 21827a2f3564d3cfb7a36f6005bd39e168fa6e44 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Wed, 4 Dec 2024 14:32:53 -0500 Subject: [PATCH 051/111] maint: update unused imports check to use new pattern (#13227) Updates the unused imports contract check to use the new framework that we recently merged. 
--- .../scripts/checks/unused-imports/main.go | 118 ++++------------ .../checks/unused-imports/main_test.go | 131 ++++++++++++++++++ 2 files changed, 159 insertions(+), 90 deletions(-) create mode 100644 packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go diff --git a/packages/contracts-bedrock/scripts/checks/unused-imports/main.go b/packages/contracts-bedrock/scripts/checks/unused-imports/main.go index ae4acb528b4..df3ab4f4494 100644 --- a/packages/contracts-bedrock/scripts/checks/unused-imports/main.go +++ b/packages/contracts-bedrock/scripts/checks/unused-imports/main.go @@ -2,101 +2,51 @@ package main import ( "bufio" - "errors" "fmt" "os" - "path/filepath" "regexp" - "runtime" "strings" - "sync" - "sync/atomic" + + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" ) var importPattern = regexp.MustCompile(`import\s*{([^}]+)}`) var asPattern = regexp.MustCompile(`(\S+)\s+as\s+(\S+)`) func main() { - if err := run(); err != nil { - writeStderr("an error occurred: %v", err) + if err := common.ProcessFilesGlob( + []string{"src/**/*.sol", "scripts/**/*.sol", "test/**/*.sol"}, + []string{}, + processFile, + ); err != nil { + fmt.Printf("error: %v\n", err) os.Exit(1) } } -func writeStderr(msg string, args ...any) { - _, _ = fmt.Fprintf(os.Stderr, msg+"\n", args...) -} - -func run() error { - cwd, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get current working directory: %w", err) - } - - var hasErr atomic.Bool - var outMtx sync.Mutex - fail := func(msg string, args ...any) { - outMtx.Lock() - writeStderr("❌ "+msg, args...) 
- outMtx.Unlock() - hasErr.Store(true) - } - - dirs := []string{"src", "scripts", "test"} - sem := make(chan struct{}, runtime.NumCPU()) - - for _, dir := range dirs { - dirPath := filepath.Join(cwd, dir) - if _, err := os.Stat(dirPath); errors.Is(err, os.ErrNotExist) { - continue - } - - err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() && strings.HasSuffix(info.Name(), ".sol") { - sem <- struct{}{} - go func() { - defer func() { <-sem }() - processFile(path, fail) - }() - } - return nil - }) - - if err != nil { - return fmt.Errorf("failed to walk directory %s: %w", dir, err) - } - } - - for i := 0; i < cap(sem); i++ { - sem <- struct{}{} - } - - if hasErr.Load() { - return errors.New("unused imports check failed, see logs above") - } - - return nil -} - -func processFile(filePath string, fail func(string, ...any)) { +func processFile(filePath string) []error { content, err := os.ReadFile(filePath) if err != nil { - fail("%s: failed to read file: %v", filePath, err) - return + return []error{fmt.Errorf("%s: failed to read file: %w", filePath, err)} } imports := findImports(string(content)) - unusedImports := checkUnusedImports(imports, string(content)) + var unusedImports []string + for _, imp := range imports { + if !isImportUsed(imp, string(content)) { + unusedImports = append(unusedImports, imp) + } + } if len(unusedImports) > 0 { - fail("File: %s\nUnused imports:", filePath) + var errors []error for _, unused := range unusedImports { - fail(" - %s", unused) + errors = append(errors, fmt.Errorf("%s", unused)) } + return errors } + + return nil } func findImports(content string) []string { @@ -106,31 +56,19 @@ func findImports(content string) []string { if len(match) > 1 { importList := strings.Split(match[1], ",") for _, imp := range importList { - imports = append(imports, strings.TrimSpace(imp)) + imp = strings.TrimSpace(imp) + if asMatch := 
asPattern.FindStringSubmatch(imp); len(asMatch) > 2 { + // Use the renamed identifier (after 'as') + imports = append(imports, strings.TrimSpace(asMatch[2])) + } else { + imports = append(imports, imp) + } } } } return imports } -func checkUnusedImports(imports []string, content string) []string { - var unusedImports []string - for _, imp := range imports { - searchTerm := imp - displayName := imp - - if match := asPattern.FindStringSubmatch(imp); len(match) > 2 { - searchTerm = match[2] - displayName = fmt.Sprintf("%s as %s", match[1], match[2]) - } - - if !isImportUsed(searchTerm, content) { - unusedImports = append(unusedImports, displayName) - } - } - return unusedImports -} - func isImportUsed(imp, content string) bool { scanner := bufio.NewScanner(strings.NewReader(content)) for scanner.Scan() { diff --git a/packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go b/packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go new file mode 100644 index 00000000000..7d03867a8a4 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/unused-imports/main_test.go @@ -0,0 +1,131 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_findImports(t *testing.T) { + tests := []struct { + name string + content string + expected []string + }{ + { + name: "finds single named import", + content: ` + pragma solidity ^0.8.0; + import { Contract } from "./Contract.sol"; + contract Test {} + `, + expected: []string{"Contract"}, + }, + { + name: "finds multiple named imports", + content: ` + pragma solidity ^0.8.0; + import { Contract1, Contract2 } from "./Contracts.sol"; + contract Test {} + `, + expected: []string{"Contract1", "Contract2"}, + }, + { + name: "handles import with as keyword", + content: ` + pragma solidity ^0.8.0; + import { Contract as Renamed } from "./Contract.sol"; + contract Test {} + `, + expected: []string{"Renamed"}, + }, + { + name: "handles multiple imports with as keyword", + 
content: ` + pragma solidity ^0.8.0; + import { Contract1 as C1, Contract2 as C2 } from "./Contracts.sol"; + contract Test {} + `, + expected: []string{"C1", "C2"}, + }, + { + name: "ignores regular imports", + content: ` + pragma solidity ^0.8.0; + import "./Contract.sol"; + contract Test {} + `, + expected: nil, + }, + { + name: "empty content", + content: "", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := findImports(tt.content) + require.Equal(t, tt.expected, result) + }) + } +} + +func Test_isImportUsed(t *testing.T) { + tests := []struct { + name string + importedName string + content string + expected bool + }{ + { + name: "import used in contract", + importedName: "UsedContract", + content: ` + contract Test { + UsedContract used; + } + `, + expected: true, + }, + { + name: "import used in inheritance", + importedName: "BaseContract", + content: ` + contract Test is BaseContract { + } + `, + expected: true, + }, + { + name: "import used in function", + importedName: "Utility", + content: ` + contract Test { + function test() { + Utility.doSomething(); + } + } + `, + expected: true, + }, + { + name: "import not used", + importedName: "UnusedContract", + content: ` + contract Test { + OtherContract other; + } + `, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isImportUsed(tt.importedName, tt.content) + require.Equal(t, tt.expected, result) + }) + } +} From da85e698c4d6999c673e6e80233b08b217d62e2d Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Wed, 4 Dec 2024 20:35:11 +0100 Subject: [PATCH 052/111] run forge coverage periodically (#13222) * run forge coverage periodically * run forge coverage periodically * run forge coverage periodically * fixes * fixes * run it every 4 hours * run it every 4 hours --- .circleci/config.yml | 44 +++++++++++++++++-------- packages/contracts-bedrock/foundry.toml | 14 ++++++++ 2 files changed, 45 
insertions(+), 13 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 719cad05766..561ad1f3fb5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -47,6 +47,9 @@ parameters: stale_check_dispatch: type: boolean default: false + contracts_coverage_dispatch: + type: boolean + default: false orbs: go: circleci/go@1.8.0 @@ -290,10 +293,14 @@ jobs: machine: true resource_class: ethereum-optimism/latitude-1 parameters: - skip_pattern: - description: Glob pattern of tests to skip + build_args: + description: Forge build arguments type: string default: "" + profile: + description: Profile to use for building + type: string + default: ci steps: - checkout - install-contracts-dependencies @@ -306,9 +313,9 @@ jobs: working_directory: packages/contracts-bedrock - run: name: Build contracts - command: forge build --deny-warnings --skip <> + command: forge build <> environment: - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: <> working_directory: packages/contracts-bedrock - run: name: Generate allocs @@ -608,11 +615,11 @@ jobs: command: just coverage-lcov no_output_timeout: 18m environment: - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: cicoverage working_directory: packages/contracts-bedrock - codecov/upload: disable_search: true - files: ./packages/contracts-bedrock/coverage/lcov.info + files: ./packages/contracts-bedrock/lcov.info flags: contracts-bedrock-tests contracts-bedrock-tests: @@ -1301,8 +1308,12 @@ workflows: jobs: - go-mod-download - contracts-bedrock-build: + name: contracts-bedrock-build # Build with just core + script contracts. 
- skip_pattern: test + build_args: --deny-warnings --skip test + - contracts-bedrock-build: + name: contracts-bedrock-build-coverage + profile: cicoverage - check-kontrol-build: requires: - contracts-bedrock-build @@ -1323,10 +1334,6 @@ workflows: test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' test_timeout: 1h test_profile: ciheavy - - contracts-bedrock-coverage: - filters: - branches: - ignore: /.*/ - contracts-bedrock-checks: requires: - contracts-bedrock-build @@ -1599,7 +1606,7 @@ workflows: context: - slack - contracts-bedrock-build: - skip_pattern: test + build_args: --deny-warnings --skip test context: - slack - go-tests: @@ -1637,7 +1644,7 @@ workflows: - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] jobs: - contracts-bedrock-build: - skip_pattern: test + build_args: --deny-warnings --skip test - cannon-go-lint-and-test: name: cannon-go-lint-and-test-<>-bit requires: @@ -1648,6 +1655,17 @@ workflows: parameters: mips_word_size: [32, 64] + scheduled-forge-coverage: + when: + or: + - equal: [build_four_hours, <>] + - equal: [true, << pipeline.parameters.contracts_coverage_dispatch >>] + jobs: + - contracts-bedrock-build: + name: contracts-bedrock-build-coverage + profile: cicoverage + - contracts-bedrock-coverage + scheduled-docker-publish: when: or: diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 0e3d3fc3e9a..b77e7bca431 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -78,6 +78,20 @@ runs = 512 runs = 256 depth = 32 +################################################################ +# PROFILE: CICOVERAGE # +################################################################ + +[profile.cicoverage] +optimizer = false + +[profile.cicoverage.fuzz] +runs = 512 + +[profile.cicoverage.invariant] +runs = 256 +depth = 32 + 
################################################################ # PROFILE: CIHEAVY # ################################################################ From 531e3d9ffd47cb9810eb218633c5a2a9504c20aa Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 4 Dec 2024 12:56:50 -0700 Subject: [PATCH 053/111] op-deployer: Block deploying tags to chains without OPCM (#13231) * op-deployer: Block deploying tags to chains without OPCM * fix test --- .../deployer/integration_test/apply_test.go | 9 ++--- op-deployer/pkg/deployer/pipeline/init.go | 35 ++++++------------- 2 files changed, 12 insertions(+), 32 deletions(-) diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index 98a5c89b851..a6ae4dab3ad 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -152,12 +152,10 @@ func TestEndToEndApply(t *testing.T) { t.Run("chain with tagged artifacts", func(t *testing.T) { intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) - cg := ethClientCodeGetter(ctx, l1Client) - intent.L1ContractsLocator = artifacts.DefaultL1ContractsLocator intent.L2ContractsLocator = artifacts.DefaultL2ContractsLocator - require.NoError(t, deployer.ApplyPipeline( + require.ErrorIs(t, deployer.ApplyPipeline( ctx, deployer.ApplyPipelineOpts{ L1RPCUrl: rpcURL, @@ -167,10 +165,7 @@ func TestEndToEndApply(t *testing.T) { Logger: lgr, StateWriter: pipeline.NoopStateWriter(), }, - )) - - validateSuperchainDeployment(t, st, cg) - validateOPChainDeployment(t, cg, st, intent) + ), pipeline.ErrRefusingToDeployTaggedReleaseWithoutOPCM) }) } diff --git a/op-deployer/pkg/deployer/pipeline/init.go b/op-deployer/pkg/deployer/pipeline/init.go index f7be4eae03d..2b9f1cbd45f 100644 --- a/op-deployer/pkg/deployer/pipeline/init.go +++ b/op-deployer/pkg/deployer/pipeline/init.go @@ -1,16 +1,14 @@ package pipeline import ( - "bufio" "context" "crypto/rand" + 
"errors" "fmt" "os" "strings" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" - "github.com/mattn/go-isatty" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -18,6 +16,8 @@ import ( "github.com/ethereum/go-ethereum/common" ) +var ErrRefusingToDeployTaggedReleaseWithoutOPCM = errors.New("refusing to deploy tagged release without OPCM") + func IsSupportedStateVersion(version int) bool { return version == 1 } @@ -141,32 +141,17 @@ func displayWarning() error { ####################### WARNING! WARNING WARNING! ####################### You are deploying a tagged release to a chain with no pre-deployed OPCM. -The contracts you are deploying may not be audited, or match a governance -approved release. +Due to a quirk of our contract version system, this can lead to deploying +contracts containing unaudited or untested code. As a result, this +functionality is currently disabled. + +We will fix this in an upcoming release. -USE OF THIS DEPLOYMENT IS NOT RECOMMENDED FOR PRODUCTION. USE AT YOUR OWN -RISK. BUGS OR LOSS OF FUNDS MAY OCCUR. WE HOPE YOU KNOW WHAT YOU ARE -DOING. +This process will now exit. ####################### WARNING! WARNING WARNING! 
####################### `, "\n") _, _ = fmt.Fprint(os.Stderr, warning) - - if isatty.IsTerminal(os.Stdout.Fd()) { - _, _ = fmt.Fprintf(os.Stderr, "Please confirm that you have read and understood the warning above [y/n]: ") - - reader := bufio.NewReader(os.Stdin) - input, err := reader.ReadString('\n') - if err != nil { - return fmt.Errorf("failed to read input: %w", err) - } - - input = strings.ToLower(strings.TrimSpace(input)) - if input != "y" && input != "yes" { - return fmt.Errorf("aborted") - } - } - - return nil + return ErrRefusingToDeployTaggedReleaseWithoutOPCM } From 33889f547c6da12fc8a20ef3a9a51e1cc84be4ae Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 4 Dec 2024 13:07:26 -0700 Subject: [PATCH 054/111] ci: Run contract checks and tests on self-hosted infra (#13219) * ci: Run contract checks and tests on self-hosted infra * whoops * submodule update with multiple jobs * increase jobs * comment --- .circleci/config.yml | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 561ad1f3fb5..8c9efc2c481 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -123,7 +123,11 @@ commands: description: "Install the dependencies for the smart contracts" steps: - run: - command: just install + name: Install dependencies + command: | + # Manually craft the submodule update command in order to take advantage + # of the -j parameter, which speeds it up a lot. 
+ git submodule update --init --recursive --force -j 8 working_directory: packages/contracts-bedrock notify-failures-on-develop: @@ -623,14 +627,9 @@ jobs: flags: contracts-bedrock-tests contracts-bedrock-tests: - docker: - - image: <> - resource_class: xlarge + machine: true + resource_class: ethereum-optimism/latitude-1 parameters: - test_parallelism: - description: Number of test jobs to run in parallel - type: integer - default: 4 test_list: description: List of test files to run type: string @@ -654,7 +653,6 @@ jobs: description: Profile to use for testing type: string default: ci - parallelism: <> steps: - checkout - attach_workspace: { at: "." } @@ -670,14 +668,6 @@ jobs: working_directory: packages/contracts-bedrock - check-changed: patterns: contracts-bedrock,op-node - - restore_cache: - name: Restore Go modules cache - key: gomod-{{ checksum "go.sum" }} - - restore_cache: - name: Restore Go build cache - keys: - - golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} - - golang-build-cache-contracts-bedrock-tests- - run: name: Print dependencies command: just dep-status @@ -725,17 +715,14 @@ jobs: - notify-failures-on-develop contracts-bedrock-checks: - docker: - - image: <> - resource_class: xlarge + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - checkout - attach_workspace: { at: "." } - install-contracts-dependencies - check-changed: patterns: contracts-bedrock,op-node - - setup_remote_docker: - docker_layer_caching: true - run: name: print forge version command: forge --version @@ -1320,17 +1307,14 @@ workflows: - contracts-bedrock-tests: # Test everything except PreimageOracle.t.sol since it's slow. name: contracts-bedrock-tests - test_parallelism: 4 test_list: find test -name "*.t.sol" -not -name "PreimageOracle.t.sol" - contracts-bedrock-tests: # PreimageOracle test is slow, run it separately to unblock CI. 
name: contracts-bedrock-tests-preimage-oracle - test_parallelism: 1 test_list: find test -name "PreimageOracle.t.sol" - contracts-bedrock-tests: # Heavily fuzz any fuzz tests within added or modified test files. name: contracts-bedrock-tests-heavy-fuzz-modified - test_parallelism: 1 test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' test_timeout: 1h test_profile: ciheavy From d68380f0c9e136e1531cd31848e541c4821c2ec6 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Wed, 4 Dec 2024 21:25:25 +0100 Subject: [PATCH 055/111] fix (#13237) --- .circleci/config.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8c9efc2c481..a646df4c88e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1641,13 +1641,13 @@ workflows: scheduled-forge-coverage: when: - or: - - equal: [build_four_hours, <>] - - equal: [true, << pipeline.parameters.contracts_coverage_dispatch >>] + and: + - or: + - equal: ["develop", <>] + - equal: [true, <>] + - not: + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - - contracts-bedrock-build: - name: contracts-bedrock-build-coverage - profile: cicoverage - contracts-bedrock-coverage scheduled-docker-publish: From d45a046d8538170f200bdf61cd33d60c918d93eb Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 4 Dec 2024 13:28:06 -0700 Subject: [PATCH 056/111] op-e2e: Add timeouts to sequencer failover tests (#13224) * op-e2e: Add timeouts to sequencer failover tests These can sometimes never return, which leads to tests timing out in CI. This PR adds timeouts so we can get feedback faster. 
* avoid capturing parent t in subtest --- op-e2e/system/conductor/sequencer_failover_test.go | 10 +++++++--- op-e2e/system/gastoken/gastoken_test.go | 10 +++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/op-e2e/system/conductor/sequencer_failover_test.go b/op-e2e/system/conductor/sequencer_failover_test.go index 5722dc3b82e..9004b528fc5 100644 --- a/op-e2e/system/conductor/sequencer_failover_test.go +++ b/op-e2e/system/conductor/sequencer_failover_test.go @@ -4,6 +4,7 @@ import ( "context" "sort" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rpc" @@ -28,7 +29,8 @@ func TestSequencerFailover_SetupCluster(t *testing.T) { // [Category: conductor rpc] // In this test, we test all rpcs exposed by conductor. func TestSequencerFailover_ConductorRPC(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() sys, conductors, cleanup := setupSequencerFailoverTest(t) defer cleanup() @@ -176,7 +178,8 @@ func TestSequencerFailover_ActiveSequencerDown(t *testing.T) { sys, conductors, cleanup := setupSequencerFailoverTest(t) defer cleanup() - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() leaderId, leader := findLeader(t, conductors) err := sys.RollupNodes[leaderId].Stop(ctx) // Stop the current leader sequencer require.NoError(t, err) @@ -205,7 +208,8 @@ func TestSequencerFailover_DisasterRecovery_OverrideLeader(t *testing.T) { defer cleanup() // randomly stop 2 nodes in the cluster to simulate a disaster. 
- ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() err := conductors[Sequencer1Name].service.Stop(ctx) require.NoError(t, err) err = conductors[Sequencer2Name].service.Stop(ctx) diff --git a/op-e2e/system/gastoken/gastoken_test.go b/op-e2e/system/gastoken/gastoken_test.go index 7e03b19d393..b5f3f56e2f6 100644 --- a/op-e2e/system/gastoken/gastoken_test.go +++ b/op-e2e/system/gastoken/gastoken_test.go @@ -57,7 +57,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { uint8(18), } - setup := func() gasTokenTestOpts { + setup := func(t *testing.T) gasTokenTestOpts { cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) offset := hexutil.Uint64(0) cfg.DeployConfig.L2GenesisRegolithTimeOffset = &offset @@ -111,7 +111,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("deposit", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) checkDeposit(t, gto, false) setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address) checkDeposit(t, gto, true) @@ -119,7 +119,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("withdrawal", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address) checkDeposit(t, gto, true) checkWithdrawal(t, gto) @@ -127,7 +127,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("fee withdrawal", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address) checkDeposit(t, gto, true) checkFeeWithdrawal(t, gto, true) @@ -135,7 +135,7 @@ func testCustomGasToken(t *testing.T, allocType config.AllocType) { t.Run("token name and symbol", func(t *testing.T) { op_e2e.InitParallel(t) - gto := setup() + gto := setup(t) checkL1TokenNameAndSymbol(t, gto, gto.disabledExpectations) checkL2TokenNameAndSymbol(t, 
gto, gto.disabledExpectations) checkWETHTokenNameAndSymbol(t, gto, gto.disabledExpectations) From d164b6d06d259655a36bb4560ac8d6bfe7ae389d Mon Sep 17 00:00:00 2001 From: refcell Date: Wed, 4 Dec 2024 16:47:26 -0500 Subject: [PATCH 057/111] fix: bump kona-client version (#13242) --- docker-bake.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index 3f59c433640..64e22327629 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -7,7 +7,7 @@ variable "REPOSITORY" { } variable "KONA_VERSION" { - default = "kona-client-v0.1.0-beta.4" + default = "kona-client-v0.1.0-beta.5" } variable "GIT_COMMIT" { From 260f36e2b645c7e9120e6ce95a0ce10e0db12cba Mon Sep 17 00:00:00 2001 From: clabby Date: Wed, 4 Dec 2024 17:47:17 -0500 Subject: [PATCH 058/111] chore(op-deployer): Fork in asterisc + dispute game deployment jobs (#13229) --- .../pkg/deployer/bootstrap/asterisc.go | 12 ++++ .../pkg/deployer/bootstrap/delayed_weth.go | 27 +++++++- .../pkg/deployer/bootstrap/dispute_game.go | 69 ++++++++----------- 3 files changed, 63 insertions(+), 45 deletions(-) diff --git a/op-deployer/pkg/deployer/bootstrap/asterisc.go b/op-deployer/pkg/deployer/bootstrap/asterisc.go index bad2b5ebbb1..b091f8c4d6f 100644 --- a/op-deployer/pkg/deployer/bootstrap/asterisc.go +++ b/op-deployer/pkg/deployer/bootstrap/asterisc.go @@ -162,6 +162,18 @@ func Asterisc(ctx context.Context, cfg AsteriscConfig) error { return fmt.Errorf("failed to create script host: %w", err) } + latest, err := l1Client.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := l1Host.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return fmt.Errorf("failed to select fork: %w", err) + } + dgo, err := opcm.DeployAsterisc( l1Host, opcm.DeployAsteriscInput{ diff --git a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go 
b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go index 67b1b66760a..1209043d31d 100644 --- a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go +++ b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go @@ -7,6 +7,8 @@ import ( "math/big" "strings" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -26,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" "github.com/urfave/cli/v2" ) @@ -158,9 +161,9 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { return fmt.Errorf("failed to create broadcaster: %w", err) } - nonce, err := l1Client.NonceAt(ctx, chainDeployer, nil) + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to get starting nonce: %w", err) + return fmt.Errorf("failed to connect to L1 RPC: %w", err) } host, err := env.DefaultScriptHost( @@ -168,11 +171,29 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { lgr, chainDeployer, artifactsFS, + script.WithForkHook(func(cfg *script.ForkConfig) (forking.ForkSource, error) { + src, err := forking.RPCSourceByNumber(cfg.URLOrAlias, l1RPC, *cfg.BlockNumber) + if err != nil { + return nil, fmt.Errorf("failed to create RPC fork source: %w", err) + } + return forking.Cache(src), nil + }), ) if err != nil { return fmt.Errorf("failed to create script host: %w", err) } - host.SetNonce(chainDeployer, nonce) + + latest, err := l1Client.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := host.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return 
fmt.Errorf("failed to select fork: %w", err) + } var release string if cfg.ArtifactsLocator.IsTag() { diff --git a/op-deployer/pkg/deployer/bootstrap/dispute_game.go b/op-deployer/pkg/deployer/bootstrap/dispute_game.go index f33f5106053..441fd73d9ea 100644 --- a/op-deployer/pkg/deployer/bootstrap/dispute_game.go +++ b/op-deployer/pkg/deployer/bootstrap/dispute_game.go @@ -6,10 +6,10 @@ import ( "fmt" "strings" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" - "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/snapshots" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -149,6 +149,10 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { if err != nil { return fmt.Errorf("failed to connect to L1 RPC: %w", err) } + l1Rpc, err := rpc.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } chainID, err := l1Client.ChainID(ctx) if err != nil { @@ -175,21 +179,34 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { return fmt.Errorf("failed to create broadcaster: %w", err) } - nonce, err := l1Client.NonceAt(ctx, chainDeployer, nil) - if err != nil { - return fmt.Errorf("failed to get starting nonce: %w", err) - } - host, err := env.DefaultScriptHost( bcaster, lgr, chainDeployer, artifactsFS, + script.WithForkHook(func(forkCfg *script.ForkConfig) (forking.ForkSource, error) { + src, err := forking.RPCSourceByNumber(forkCfg.URLOrAlias, l1Rpc, *forkCfg.BlockNumber) + if err != nil { + return nil, fmt.Errorf("failed to create RPC fork source: %w", err) + } + return forking.Cache(src), nil + }), ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + 
return fmt.Errorf("failed to create L1 script host: %w", err) + } + + latest, err := l1Client.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := host.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return fmt.Errorf("failed to select fork: %w", err) } - host.SetNonce(chainDeployer, nonce) var release string if cfg.ArtifactsLocator.IsTag() { @@ -198,27 +215,7 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { release = "dev" } - // We need to etch the VM and PreimageOracle addresses so that they have nonzero code - // and the checks in the FaultDisputeGame constructor pass. - oracleAddr, err := loadOracleAddr(ctx, l1Client, cfg.Vm) - if err != nil { - return err - } - addresses := []common.Address{ - cfg.Vm, - oracleAddr, - } - for _, addr := range addresses { - code, err := l1Client.CodeAt(ctx, addr, nil) - if err != nil { - return fmt.Errorf("failed to get code for %v: %w", addr, err) - } - host.ImportAccount(addr, types.Account{ - Code: code, - }) - } lgr.Info("deploying dispute game", "release", release) - dgo, err := opcm.DeployDisputeGame( host, opcm.DeployDisputeGameInput{ @@ -254,15 +251,3 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { } return nil } - -func loadOracleAddr(ctx context.Context, l1Client *ethclient.Client, vmAddr common.Address) (common.Address, error) { - callData, err := snapshots.LoadMIPSABI().Pack("oracle") - if err != nil { - return common.Address{}, fmt.Errorf("failed to create vm.oracle() calldata: %w", err) - } - result, err := l1Client.CallContract(ctx, ethereum.CallMsg{Data: callData, To: &vmAddr}, nil) - if err != nil { - return common.Address{}, fmt.Errorf("failed to call vm.oracle(): %w", err) - } - return common.BytesToAddress(result), nil -} From b5bc9892fcf0e622bba184becd08cc226fd0d840 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:15:49 -0700 Subject: [PATCH 059/111] dependabot(gomod): bump golang.org/x/sync from 0.9.0 to 0.10.0 (#13233) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.9.0 to 0.10.0. - [Commits](https://github.com/golang/sync/compare/v0.9.0...v0.10.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 94f571c65b6..20f71beb536 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/urfave/cli/v2 v2.27.5 golang.org/x/crypto v0.28.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c - golang.org/x/sync v0.9.0 + golang.org/x/sync v0.10.0 golang.org/x/term v0.25.0 golang.org/x/time v0.7.0 ) diff --git a/go.sum b/go.sum index 49bc0c62eb1..839780fc6f3 100644 --- a/go.sum +++ b/go.sum @@ -930,8 +930,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From e4713361124a8f7955cd48ff313ca44eabc0a451 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:16:14 -0700 Subject: [PATCH 060/111] dependabot(gomod): bump github.com/kurtosis-tech/kurtosis/api/golang (#13234) Bumps [github.com/kurtosis-tech/kurtosis/api/golang](https://github.com/kurtosis-tech/kurtosis) from 1.4.2 to 1.4.3. - [Release notes](https://github.com/kurtosis-tech/kurtosis/releases) - [Changelog](https://github.com/kurtosis-tech/kurtosis/blob/main/CHANGELOG.md) - [Commits](https://github.com/kurtosis-tech/kurtosis/compare/1.4.2...1.4.3) --- updated-dependencies: - dependency-name: github.com/kurtosis-tech/kurtosis/api/golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 20f71beb536..e3b57ace633 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.0 github.com/klauspost/compress v1.17.11 - github.com/kurtosis-tech/kurtosis/api/golang v1.4.2 + github.com/kurtosis-tech/kurtosis/api/golang v1.4.3 github.com/libp2p/go-libp2p v0.36.2 github.com/libp2p/go-libp2p-mplex v0.9.0 github.com/libp2p/go-libp2p-pubsub v0.12.0 diff --git a/go.sum b/go.sum index 839780fc6f3..bcae4d056f0 100644 --- a/go.sum +++ b/go.sum @@ -442,8 +442,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 
h1:izciXrFyFR+ihJ7nLTOkoIX5GzBPIp8gVKlw94gIc98= github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2/go.mod h1:bWSMQK3WHVTGHX9CjxPAb/LtzcmfOxID2wdzakSWQxo= -github.com/kurtosis-tech/kurtosis/api/golang v1.4.2 h1:x9jpXBGuLTWuILVUZWZtgDYY9amhyhzRVHxDFlYEJB4= -github.com/kurtosis-tech/kurtosis/api/golang v1.4.2/go.mod h1:9T22P7Vv3j5g6sbm78DxHQ4s9C4Cj3s9JjFQ7DFyYpM= +github.com/kurtosis-tech/kurtosis/api/golang v1.4.3 h1:CkrfwpBAOQ9TOCUrVWSv5C7d3hLBNjU4kAYSbL6EHf0= +github.com/kurtosis-tech/kurtosis/api/golang v1.4.3/go.mod h1:9T22P7Vv3j5g6sbm78DxHQ4s9C4Cj3s9JjFQ7DFyYpM= github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b h1:hMoIM99QKcYQqsnK4AF7Lovi9ZD9ac6lZLZ5D/jx2x8= github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b/go.mod h1:4pFdrRwDz5R+Fov2ZuTaPhAVgjA2jhGh1Izf832sX7A= github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc h1:7IlEpSehmWcNXOFpNP24Cu5HQI3af7GCBQw//m+LnvQ= From e253b19db381db203865fb2f76aa454ab014d50c Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Wed, 4 Dec 2024 18:18:31 -0500 Subject: [PATCH 061/111] maint: rewrite spacers check to use new framework (#13232) Updates the spacers check to use the new framework for contracts checks written in Go. Adds tests for the functions that this check uses. 
--- .../scripts/checks/spacers/main.go | 184 +++++++----------- .../scripts/checks/spacers/main_test.go | 167 ++++++++++++++++ 2 files changed, 233 insertions(+), 118 deletions(-) create mode 100644 packages/contracts-bedrock/scripts/checks/spacers/main_test.go diff --git a/packages/contracts-bedrock/scripts/checks/spacers/main.go b/packages/contracts-bedrock/scripts/checks/spacers/main.go index 3360bda74d3..daf617defb9 100644 --- a/packages/contracts-bedrock/scripts/checks/spacers/main.go +++ b/packages/contracts-bedrock/scripts/checks/spacers/main.go @@ -1,175 +1,123 @@ package main import ( - "encoding/json" "fmt" "os" - "path/filepath" "regexp" "strconv" "strings" -) - -// directoryPath is the path to the artifacts directory. -// It can be configured as the first argument to the script or -// defaults to the forge-artifacts directory. -var directoryPath string - -func init() { - if len(os.Args) > 1 { - directoryPath = os.Args[1] - } else { - currentDir, _ := os.Getwd() - directoryPath = filepath.Join(currentDir, "forge-artifacts") - } -} - -// skipped returns true if the contract should be skipped when inspecting its storage layout. -func skipped(contractName string) bool { - return strings.Contains(contractName, "CrossDomainMessengerLegacySpacer") -} - -// variableInfo represents the parsed variable information. -type variableInfo struct { - name string - slot int - offset int - length int -} -// parseVariableInfo parses out variable info from the variable structure in standard compiler json output. 
-func parseVariableInfo(variable map[string]interface{}) (variableInfo, error) { - var info variableInfo - var err error + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" +) - info.name = variable["label"].(string) - info.slot, err = strconv.Atoi(variable["slot"].(string)) - if err != nil { - return info, err +func parseVariableLength(variableType string, types map[string]solc.StorageLayoutType) (int, error) { + if t, exists := types[variableType]; exists { + return int(t.NumberOfBytes), nil } - info.offset = int(variable["offset"].(float64)) - variableType := variable["type"].(string) if strings.HasPrefix(variableType, "t_mapping") { - info.length = 32 + return 32, nil } else if strings.HasPrefix(variableType, "t_uint") { re := regexp.MustCompile(`uint(\d+)`) matches := re.FindStringSubmatch(variableType) if len(matches) > 1 { bitSize, _ := strconv.Atoi(matches[1]) - info.length = bitSize / 8 + return bitSize / 8, nil } } else if strings.HasPrefix(variableType, "t_bytes_") { - info.length = 32 + return 32, nil } else if strings.HasPrefix(variableType, "t_bytes") { re := regexp.MustCompile(`bytes(\d+)`) matches := re.FindStringSubmatch(variableType) if len(matches) > 1 { - info.length, _ = strconv.Atoi(matches[1]) + return strconv.Atoi(matches[1]) } } else if strings.HasPrefix(variableType, "t_address") { - info.length = 20 + return 20, nil } else if strings.HasPrefix(variableType, "t_bool") { - info.length = 1 + return 1, nil } else if strings.HasPrefix(variableType, "t_array") { re := regexp.MustCompile(`^t_array\((\w+)\)(\d+)`) matches := re.FindStringSubmatch(variableType) if len(matches) > 2 { innerType := matches[1] size, _ := strconv.Atoi(matches[2]) - innerInfo, err := parseVariableInfo(map[string]interface{}{ - "label": variable["label"], - "offset": variable["offset"], - "slot": variable["slot"], - "type": innerType, - }) + length, err := 
parseVariableLength(innerType, types) if err != nil { - return info, err + return 0, err } - info.length = innerInfo.length * size + return length * size, nil } - } else { - return info, fmt.Errorf("%s: unsupported type %s, add it to the script", info.name, variableType) } - return info, nil + return 0, fmt.Errorf("unsupported type %s, add it to the script", variableType) } -func main() { - err := filepath.Walk(directoryPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() || strings.Contains(path, "t.sol") { - return nil - } - - raw, err := os.ReadFile(path) - if err != nil { - return err - } - - var artifact map[string]interface{} - err = json.Unmarshal(raw, &artifact) - if err != nil { - return err - } - - storageLayout, ok := artifact["storageLayout"].(map[string]interface{}) - if !ok { - return nil - } +func validateSpacer(variable solc.StorageLayoutEntry, types map[string]solc.StorageLayoutType) []error { + var errors []error - storage, ok := storageLayout["storage"].([]interface{}) - if !ok { - return nil - } + parts := strings.Split(variable.Label, "_") + if len(parts) != 4 { + return []error{fmt.Errorf("invalid spacer name format: %s", variable.Label)} + } - for _, v := range storage { - variable := v.(map[string]interface{}) - fqn := variable["contract"].(string) + expectedSlot, _ := strconv.Atoi(parts[1]) + expectedOffset, _ := strconv.Atoi(parts[2]) + expectedLength, _ := strconv.Atoi(parts[3]) - if skipped(fqn) { - continue - } + actualLength, err := parseVariableLength(variable.Type, types) + if err != nil { + return []error{err} + } - label := variable["label"].(string) - if strings.HasPrefix(label, "spacer_") { - parts := strings.Split(label, "_") - if len(parts) != 4 { - return fmt.Errorf("invalid spacer name format: %s", label) - } + if int(variable.Slot) != expectedSlot { + errors = append(errors, fmt.Errorf("%s %s is in slot %d but should be in %d", + variable.Contract, 
variable.Label, variable.Slot, expectedSlot)) + } - slot, _ := strconv.Atoi(parts[1]) - offset, _ := strconv.Atoi(parts[2]) - length, _ := strconv.Atoi(parts[3]) + if int(variable.Offset) != expectedOffset { + errors = append(errors, fmt.Errorf("%s %s is at offset %d but should be at %d", + variable.Contract, variable.Label, variable.Offset, expectedOffset)) + } - variableInfo, err := parseVariableInfo(variable) - if err != nil { - return err - } + if actualLength != expectedLength { + errors = append(errors, fmt.Errorf("%s %s is %d bytes long but should be %d", + variable.Contract, variable.Label, actualLength, expectedLength)) + } - if slot != variableInfo.slot { - return fmt.Errorf("%s %s is in slot %d but should be in %d", fqn, label, variableInfo.slot, slot) - } + return errors +} - if offset != variableInfo.offset { - return fmt.Errorf("%s %s is at offset %d but should be at %d", fqn, label, variableInfo.offset, offset) - } +func processFile(path string) []error { + artifact, err := common.ReadForgeArtifact(path) + if err != nil { + return []error{err} + } - if length != variableInfo.length { - return fmt.Errorf("%s %s is %d bytes long but should be %d", fqn, label, variableInfo.length, length) - } + if artifact.StorageLayout == nil { + return nil + } - fmt.Printf("%s.%s is valid\n", fqn, label) + var errors []error + for _, variable := range artifact.StorageLayout.Storage { + if strings.HasPrefix(variable.Label, "spacer_") { + if errs := validateSpacer(variable, artifact.StorageLayout.Types); len(errs) > 0 { + errors = append(errors, errs...) 
+ continue } } + } - return nil - }) + return errors +} - if err != nil { +func main() { + if err := common.ProcessFilesGlob( + []string{"forge-artifacts/**/*.json"}, + []string{"forge-artifacts/**/CrossDomainMessengerLegacySpacer{0,1}.json"}, + processFile, + ); err != nil { fmt.Printf("Error: %v\n", err) os.Exit(1) } diff --git a/packages/contracts-bedrock/scripts/checks/spacers/main_test.go b/packages/contracts-bedrock/scripts/checks/spacers/main_test.go new file mode 100644 index 00000000000..7548fa80d03 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/spacers/main_test.go @@ -0,0 +1,167 @@ +package main + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/stretchr/testify/require" +) + +func Test_parseVariableLength(t *testing.T) { + tests := []struct { + name string + variableType string + types map[string]solc.StorageLayoutType + expected int + expectError bool + }{ + { + name: "uses type from map", + variableType: "t_custom", + types: map[string]solc.StorageLayoutType{ + "t_custom": {NumberOfBytes: 16}, + }, + expected: 16, + }, + { + name: "mapping type", + variableType: "t_mapping(address,uint256)", + expected: 32, + }, + { + name: "uint type", + variableType: "t_uint256", + expected: 32, + }, + { + name: "bytes_ type", + variableType: "t_bytes_storage", + expected: 32, + }, + { + name: "bytes type", + variableType: "t_bytes32", + expected: 32, + }, + { + name: "address type", + variableType: "t_address", + expected: 20, + }, + { + name: "bool type", + variableType: "t_bool", + expected: 1, + }, + { + name: "array type", + variableType: "t_array(t_uint256)2", + expected: 64, // 2 * 32 + }, + { + name: "unsupported type", + variableType: "t_unknown", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + length, err := parseVariableLength(tt.variableType, tt.types) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + 
require.Equal(t, tt.expected, length) + } + }) + } +} + +func Test_validateSpacer(t *testing.T) { + tests := []struct { + name string + variable solc.StorageLayoutEntry + types map[string]solc.StorageLayoutType + expectedErrs int + errorContains string + }{ + { + name: "valid spacer", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 1, + Offset: 2, + Type: "t_uint256", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint256": {NumberOfBytes: 32}, + }, + expectedErrs: 0, + }, + { + name: "invalid name format", + variable: solc.StorageLayoutEntry{ + Label: "spacer_invalid", + }, + expectedErrs: 1, + errorContains: "invalid spacer name format", + }, + { + name: "wrong slot", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 2, + Offset: 2, + Type: "t_uint256", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint256": {NumberOfBytes: 32}, + }, + expectedErrs: 1, + errorContains: "is in slot", + }, + { + name: "wrong offset", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 1, + Offset: 3, + Type: "t_uint256", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint256": {NumberOfBytes: 32}, + }, + expectedErrs: 1, + errorContains: "is at offset", + }, + { + name: "wrong length", + variable: solc.StorageLayoutEntry{ + Contract: "TestContract", + Label: "spacer_1_2_32", + Slot: 1, + Offset: 2, + Type: "t_uint128", + }, + types: map[string]solc.StorageLayoutType{ + "t_uint128": {NumberOfBytes: 16}, + }, + expectedErrs: 1, + errorContains: "bytes long", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateSpacer(tt.variable, tt.types) + require.Len(t, errors, tt.expectedErrs) + if tt.errorContains != "" { + require.Contains(t, errors[0].Error(), tt.errorContains) + } + }) + } +} From a5556262b87b6dbee3d59e70b4d172bfc775f395 Mon Sep 17 00:00:00 2001 From: smartcontracts 
Date: Wed, 4 Dec 2024 18:19:53 -0500 Subject: [PATCH 062/111] fix: develop-forge-coverage ci job name (#13241) Fixes the name for scheduled-forge-coverage to develop-forge-coverage in line with the existing pattern. --- .circleci/config.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a646df4c88e..01b508b8ecf 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1573,6 +1573,17 @@ workflows: jobs: - publish-contract-artifacts + develop-forge-coverage: + when: + and: + - or: + - equal: ["develop", <>] + - equal: [true, <>] + - not: + equal: [scheduled_pipeline, << pipeline.trigger_source >>] + jobs: + - contracts-bedrock-coverage + develop-fault-proofs: when: and: @@ -1639,17 +1650,6 @@ workflows: parameters: mips_word_size: [32, 64] - scheduled-forge-coverage: - when: - and: - - or: - - equal: ["develop", <>] - - equal: [true, <>] - - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - - contracts-bedrock-coverage - scheduled-docker-publish: when: or: From d3fbc5761f86157dace84c3f6af48181b12fe36c Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 4 Dec 2024 16:21:28 -0700 Subject: [PATCH 063/111] op-deployer: Update OPCM bootstrap command (#13238) * op-deployer: Update OPCM bootstrap command Updates the OPCM bootstrap command to allow bootstrapping Holocene. 
* fix merge artifact --- op-deployer/justfile | 10 +- .../pkg/deployer/bootstrap/asterisc.go | 25 +- op-deployer/pkg/deployer/bootstrap/flags.go | 24 +- op-deployer/pkg/deployer/bootstrap/opcm.go | 231 ++++++++---------- .../pkg/deployer/bootstrap/opcm_test.go | 82 +++++++ op-deployer/pkg/deployer/opcm/opcm.go | 56 +++++ op-deployer/pkg/deployer/opcm/script.go | 48 ++++ .../standard/standard-versions-sepolia.toml | 21 +- op-deployer/pkg/deployer/standard/standard.go | 48 ++++ op-deployer/pkg/env/host.go | 50 ++++ op-e2e/e2eutils/retryproxy/proxy.go | 16 ++ op-service/testutils/anvil/anvil.go | 6 +- 12 files changed, 460 insertions(+), 157 deletions(-) create mode 100644 op-deployer/pkg/deployer/bootstrap/opcm_test.go create mode 100644 op-deployer/pkg/deployer/opcm/opcm.go create mode 100644 op-deployer/pkg/deployer/opcm/script.go diff --git a/op-deployer/justfile b/op-deployer/justfile index df5d2870066..740aa7c2ceb 100644 --- a/op-deployer/justfile +++ b/op-deployer/justfile @@ -1,2 +1,10 @@ build: - go build -o bin/op-deployer cmd/op-deployer/main.go \ No newline at end of file + go build -o bin/op-deployer cmd/op-deployer/main.go + +download-artifacts checksum outfile: + curl -o {{outfile}} -L https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-{{checksum}}.tar.gz + +calculate-artifacts-hash checksum: + just download-artifacts {{checksum}} /tmp/artifact.tgz + sha256sum /tmp/artifact.tgz + rm /tmp/artifact.tgz \ No newline at end of file diff --git a/op-deployer/pkg/deployer/bootstrap/asterisc.go b/op-deployer/pkg/deployer/bootstrap/asterisc.go index b091f8c4d6f..e7cd4e6036c 100644 --- a/op-deployer/pkg/deployer/bootstrap/asterisc.go +++ b/op-deployer/pkg/deployer/bootstrap/asterisc.go @@ -6,8 +6,6 @@ import ( "fmt" "strings" - "github.com/ethereum-optimism/optimism/op-chain-ops/script" - "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" 
"github.com/ethereum/go-ethereum/common" @@ -145,35 +143,18 @@ func Asterisc(ctx context.Context, cfg AsteriscConfig) error { return fmt.Errorf("failed to connect to L1 RPC: %w", err) } - l1Host, err := env.DefaultScriptHost( + l1Host, err := env.DefaultForkedScriptHost( + ctx, bcaster, lgr, chainDeployer, artifactsFS, - script.WithForkHook(func(cfg *script.ForkConfig) (forking.ForkSource, error) { - src, err := forking.RPCSourceByNumber(cfg.URLOrAlias, l1RPC, *cfg.BlockNumber) - if err != nil { - return nil, fmt.Errorf("failed to create RPC fork source: %w", err) - } - return forking.Cache(src), nil - }), + l1RPC, ) if err != nil { return fmt.Errorf("failed to create script host: %w", err) } - latest, err := l1Client.HeaderByNumber(ctx, nil) - if err != nil { - return fmt.Errorf("failed to get latest block: %w", err) - } - - if _, err := l1Host.CreateSelectFork( - script.ForkWithURLOrAlias("main"), - script.ForkWithBlockNumberU256(latest.Number), - ); err != nil { - return fmt.Errorf("failed to select fork: %w", err) - } - dgo, err := opcm.DeployAsterisc( l1Host, opcm.DeployAsteriscInput{ diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index 58fb7fc1436..1832a3122ed 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -1,6 +1,8 @@ package bootstrap import ( + "errors" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-service/cliapp" @@ -30,6 +32,7 @@ const ( ProposerFlagName = "proposer" ChallengerFlagName = "challenger" PreimageOracleFlagName = "preimage-oracle" + ReleaseFlagName = "release" ) var ( @@ -153,18 +156,26 @@ var ( EnvVars: deployer.PrefixEnvVar("PREIMAGE_ORACLE"), Value: common.Address{}.Hex(), } + ReleaseFlag = &cli.StringFlag{ + Name: ReleaseFlagName, + Usage: "Release to deploy.", + EnvVars: 
deployer.PrefixEnvVar("RELEASE"), + } ) var OPCMFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, - ArtifactsLocatorFlag, + ReleaseFlag, +} + +var ImplementationsFlags = []cli.Flag{ + MIPSVersionFlag, WithdrawalDelaySecondsFlag, MinProposalSizeBytesFlag, ChallengePeriodSecondsFlag, ProofMaturityDelaySecondsFlag, DisputeGameFinalityDelaySecondsFlag, - MIPSVersionFlag, } var DelayedWETHFlags = []cli.Flag{ @@ -212,6 +223,15 @@ var Commands = []*cli.Command{ Flags: cliapp.ProtectFlags(OPCMFlags), Action: OPCMCLI, }, + { + Name: "implementations", + Usage: "Bootstraps implementations.", + Flags: cliapp.ProtectFlags(ImplementationsFlags), + Action: func(context *cli.Context) error { + return errors.New("not implemented yet") + }, + Hidden: true, + }, { Name: "delayedweth", Usage: "Bootstrap an instance of DelayedWETH.", diff --git a/op-deployer/pkg/deployer/bootstrap/opcm.go b/op-deployer/pkg/deployer/bootstrap/opcm.go index 89a8c3df512..5d42d1bfa95 100644 --- a/op-deployer/pkg/deployer/bootstrap/opcm.go +++ b/op-deployer/pkg/deployer/bootstrap/opcm.go @@ -3,12 +3,12 @@ package bootstrap import ( "context" "crypto/ecdsa" - "crypto/rand" "fmt" - "math/big" "strings" - artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -18,7 +18,6 @@ import ( "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" @@ -26,7 +25,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/jsonutil" oplog "github.com/ethereum-optimism/optimism/op-service/log" 
"github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" @@ -34,12 +32,10 @@ import ( ) type OPCMConfig struct { - pipeline.SuperchainProofParams - - L1RPCUrl string - PrivateKey string - Logger log.Logger - ArtifactsLocator *artifacts2.Locator + L1RPCUrl string + PrivateKey string + Release string + Logger log.Logger privateKeyECDSA *ecdsa.PrivateKey } @@ -53,6 +49,10 @@ func (c *OPCMConfig) Check() error { return fmt.Errorf("private key must be specified") } + if c.Release == "" { + return fmt.Errorf("release must be specified") + } + privECDSA, err := crypto.HexToECDSA(strings.TrimPrefix(c.PrivateKey, "0x")) if err != nil { return fmt.Errorf("failed to parse private key: %w", err) @@ -63,34 +63,6 @@ func (c *OPCMConfig) Check() error { return fmt.Errorf("logger must be specified") } - if c.ArtifactsLocator == nil { - return fmt.Errorf("artifacts locator must be specified") - } - - if c.WithdrawalDelaySeconds == 0 { - c.WithdrawalDelaySeconds = standard.WithdrawalDelaySeconds - } - - if c.MinProposalSizeBytes == 0 { - c.MinProposalSizeBytes = standard.MinProposalSizeBytes - } - - if c.ChallengePeriodSeconds == 0 { - c.ChallengePeriodSeconds = standard.ChallengePeriodSeconds - } - - if c.ProofMaturityDelaySeconds == 0 { - c.ProofMaturityDelaySeconds = standard.ProofMaturityDelaySeconds - } - - if c.DisputeGameFinalityDelaySeconds == 0 { - c.DisputeGameFinalityDelaySeconds = standard.DisputeGameFinalityDelaySeconds - } - - if c.MIPSVersion == 0 { - c.MIPSVersion = standard.MIPSVersion - } - return nil } @@ -101,33 +73,30 @@ func OPCMCLI(cliCtx *cli.Context) error { l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) - artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) - artifactsLocator := new(artifacts2.Locator) - if err := 
artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { - return fmt.Errorf("failed to parse artifacts URL: %w", err) - } + release := cliCtx.String(ReleaseFlagName) ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - return OPCM(ctx, OPCMConfig{ - L1RPCUrl: l1RPCUrl, - PrivateKey: privateKey, - Logger: l, - ArtifactsLocator: artifactsLocator, - SuperchainProofParams: pipeline.SuperchainProofParams{ - WithdrawalDelaySeconds: cliCtx.Uint64(WithdrawalDelaySecondsFlagName), - MinProposalSizeBytes: cliCtx.Uint64(MinProposalSizeBytesFlagName), - ChallengePeriodSeconds: cliCtx.Uint64(ChallengePeriodSecondsFlagName), - ProofMaturityDelaySeconds: cliCtx.Uint64(ProofMaturityDelaySecondsFlagName), - DisputeGameFinalityDelaySeconds: cliCtx.Uint64(DisputeGameFinalityDelaySecondsFlagName), - MIPSVersion: cliCtx.Uint64(MIPSVersionFlagName), - }, + out, err := OPCM(ctx, OPCMConfig{ + L1RPCUrl: l1RPCUrl, + PrivateKey: privateKey, + Release: release, + Logger: l, }) + if err != nil { + return fmt.Errorf("failed to deploy OPCM: %w", err) + } + + if err := jsonutil.WriteJSON(out, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil } -func OPCM(ctx context.Context, cfg OPCMConfig) error { +func OPCM(ctx context.Context, cfg OPCMConfig) (opcm.DeployOPCMOutput, error) { + var out opcm.DeployOPCMOutput if err := cfg.Check(); err != nil { - return fmt.Errorf("invalid config for OPCM: %w", err) + return out, fmt.Errorf("invalid config for OPCM: %w", err) } lgr := cfg.Logger @@ -135,35 +104,33 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { lgr.Info("artifacts download progress", "current", curr, "total", total) } - artifactsFS, cleanup, err := artifacts2.Download(ctx, cfg.ArtifactsLocator, progressor) + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to download artifacts: %w", err) + return out, fmt.Errorf("failed to connect to L1 RPC: %w", err) } - defer func() { - if err 
:= cleanup(); err != nil { - lgr.Warn("failed to clean up artifacts", "err", err) - } - }() - l1Client, err := ethclient.Dial(cfg.L1RPCUrl) - if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) - } + l1Client := ethclient.NewClient(l1RPC) chainID, err := l1Client.ChainID(ctx) if err != nil { - return fmt.Errorf("failed to get chain ID: %w", err) + return out, fmt.Errorf("failed to get chain ID: %w", err) } chainIDU64 := chainID.Uint64() - superCfg, err := standard.SuperchainFor(chainIDU64) + loc, err := artifacts.NewLocatorFromTag(cfg.Release) if err != nil { - return fmt.Errorf("error getting superchain config: %w", err) + return out, fmt.Errorf("failed to create artifacts locator: %w", err) } - standardVersionsTOML, err := standard.L1VersionsDataFor(chainIDU64) + + artifactsFS, cleanup, err := artifacts.Download(ctx, loc, progressor) if err != nil { - return fmt.Errorf("error getting standard versions TOML: %w", err) + return out, fmt.Errorf("failed to download artifacts: %w", err) } + defer func() { + if err := cleanup(); err != nil { + lgr.Warn("failed to clean up artifacts", "err", err) + } + }() signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) @@ -176,83 +143,87 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { From: chainDeployer, }) if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) + return out, fmt.Errorf("failed to create broadcaster: %w", err) } - nonce, err := l1Client.NonceAt(ctx, chainDeployer, nil) - if err != nil { - return fmt.Errorf("failed to get starting nonce: %w", err) - } - - host, err := env.DefaultScriptHost( + host, err := env.DefaultForkedScriptHost( + ctx, bcaster, lgr, chainDeployer, artifactsFS, + l1RPC, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) - } - host.SetNonce(chainDeployer, nonce) - - var l1ContractsRelease string - if 
cfg.ArtifactsLocator.IsTag() { - l1ContractsRelease = cfg.ArtifactsLocator.Tag - } else { - l1ContractsRelease = "dev" + return out, fmt.Errorf("failed to create script host: %w", err) } - lgr.Info("deploying OPCM", "l1ContractsRelease", l1ContractsRelease) + lgr.Info("deploying OPCM", "l1ContractsRelease", cfg.Release) - // We need to etch the Superchain addresses so that they have nonzero code - // and the checks in the OPCM constructor pass. - superchainConfigAddr := common.Address(*superCfg.Config.SuperchainConfigAddr) - protocolVersionsAddr := common.Address(*superCfg.Config.ProtocolVersionsAddr) - addresses := []common.Address{ - superchainConfigAddr, - protocolVersionsAddr, - } - for _, addr := range addresses { - host.ImportAccount(addr, types.Account{ - Code: []byte{0x00}, - }) - } - - var salt common.Hash - _, err = rand.Read(salt[:]) + input, err := DeployOPCMInputForChain(cfg.Release, chainIDU64) if err != nil { - return fmt.Errorf("failed to generate CREATE2 salt: %w", err) + return out, fmt.Errorf("error creating OPCM input: %w", err) } - dio, err := opcm.DeployImplementations( + out, err = opcm.DeployOPCM( host, - opcm.DeployImplementationsInput{ - Salt: salt, - WithdrawalDelaySeconds: new(big.Int).SetUint64(cfg.WithdrawalDelaySeconds), - MinProposalSizeBytes: new(big.Int).SetUint64(cfg.MinProposalSizeBytes), - ChallengePeriodSeconds: new(big.Int).SetUint64(cfg.ChallengePeriodSeconds), - ProofMaturityDelaySeconds: new(big.Int).SetUint64(cfg.ProofMaturityDelaySeconds), - DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(cfg.DisputeGameFinalityDelaySeconds), - MipsVersion: new(big.Int).SetUint64(cfg.MIPSVersion), - L1ContractsRelease: l1ContractsRelease, - SuperchainConfigProxy: superchainConfigAddr, - ProtocolVersionsProxy: protocolVersionsAddr, - StandardVersionsToml: standardVersionsTOML, - UseInterop: false, - }, + input, ) if err != nil { - return fmt.Errorf("error deploying implementations: %w", err) + return out, fmt.Errorf("error 
deploying implementations: %w", err) } if _, err := bcaster.Broadcast(ctx); err != nil { - return fmt.Errorf("failed to broadcast: %w", err) + return out, fmt.Errorf("failed to broadcast: %w", err) } - lgr.Info("deployed implementations") + lgr.Info("deployed OPCM") - if err := jsonutil.WriteJSON(dio, ioutil.ToStdOut()); err != nil { - return fmt.Errorf("failed to write output: %w", err) + return out, nil +} + +func DeployOPCMInputForChain(release string, chainID uint64) (opcm.DeployOPCMInput, error) { + superchain, err := standard.SuperchainFor(chainID) + if err != nil { + return opcm.DeployOPCMInput{}, fmt.Errorf("error getting superchain config: %w", err) } - return nil + + l1VersionsData, err := standard.L1VersionsFor(chainID) + if err != nil { + return opcm.DeployOPCMInput{}, fmt.Errorf("error getting L1 versions: %w", err) + } + releases, ok := l1VersionsData.Releases[release] + if !ok { + return opcm.DeployOPCMInput{}, fmt.Errorf("release not found: %s", release) + } + + blueprints, err := standard.OPCMBlueprintsFor(chainID) + if err != nil { + return opcm.DeployOPCMInput{}, fmt.Errorf("error getting OPCM blueprints: %w", err) + } + + return opcm.DeployOPCMInput{ + SuperchainConfig: common.Address(*superchain.Config.SuperchainConfigAddr), + ProtocolVersions: common.Address(*superchain.Config.ProtocolVersionsAddr), + L1ContractsRelease: strings.TrimPrefix(release, "op-contracts/"), + + AddressManagerBlueprint: blueprints.AddressManager, + ProxyBlueprint: blueprints.Proxy, + ProxyAdminBlueprint: blueprints.ProxyAdmin, + L1ChugSplashProxyBlueprint: blueprints.L1ChugSplashProxy, + ResolvedDelegateProxyBlueprint: blueprints.ResolvedDelegateProxy, + AnchorStateRegistryBlueprint: blueprints.AnchorStateRegistry, + PermissionedDisputeGame1Blueprint: blueprints.PermissionedDisputeGame1, + PermissionedDisputeGame2Blueprint: blueprints.PermissionedDisputeGame2, + + L1ERC721BridgeImpl: releases.L1ERC721Bridge.ImplementationAddress, + OptimismPortalImpl: 
releases.OptimismPortal.ImplementationAddress, + SystemConfigImpl: releases.SystemConfig.ImplementationAddress, + OptimismMintableERC20FactoryImpl: releases.OptimismMintableERC20Factory.ImplementationAddress, + L1CrossDomainMessengerImpl: releases.L1CrossDomainMessenger.ImplementationAddress, + L1StandardBridgeImpl: releases.L1StandardBridge.ImplementationAddress, + DisputeGameFactoryImpl: releases.DisputeGameFactory.ImplementationAddress, + DelayedWETHImpl: releases.DelayedWETH.ImplementationAddress, + MipsImpl: releases.MIPS.Address, + }, nil } diff --git a/op-deployer/pkg/deployer/bootstrap/opcm_test.go b/op-deployer/pkg/deployer/bootstrap/opcm_test.go new file mode 100644 index 00000000000..5a4d1e8de3d --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/opcm_test.go @@ -0,0 +1,82 @@ +package bootstrap + +import ( + "context" + "log/slog" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/retryproxy" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils/anvil" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +var networks = []string{"mainnet", "sepolia"} + +var versions = []string{"v1.8.0-rc.3"} + +func TestOPCMLiveChain(t *testing.T) { + for _, network := range networks { + for _, version := range versions { + t.Run(network+"-"+version, func(t *testing.T) { + if version == "v1.8.0-rc.3" && network == "mainnet" { + t.Skip("v1.8.0-rc.3 not supported on mainnet yet") + } + + envVar := strings.ToUpper(network) + "_RPC_URL" + rpcURL := os.Getenv(envVar) + require.NotEmpty(t, rpcURL, "must specify RPC url via %s env var", envVar) + testOPCMLiveChain(t, "op-contracts/"+version, rpcURL) + }) + } + } +} + +func testOPCMLiveChain(t *testing.T, version string, forkRPCURL string) { + t.Parallel() + + if forkRPCURL == "" { + t.Skip("forkRPCURL not set") + } + + lgr := testlog.Logger(t, slog.LevelDebug) + + ctx, 
cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + retryProxy := retryproxy.New(lgr, forkRPCURL) + require.NoError(t, retryProxy.Start()) + t.Cleanup(func() { + require.NoError(t, retryProxy.Stop()) + }) + + runner, err := anvil.New( + retryProxy.Endpoint(), + lgr, + ) + require.NoError(t, err) + + require.NoError(t, runner.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, runner.Stop()) + }) + + out, err := OPCM(ctx, OPCMConfig{ + L1RPCUrl: runner.RPCUrl(), + PrivateKey: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", + Release: version, + Logger: lgr, + }) + require.NoError(t, err) + require.NotEmpty(t, out.Opcm) + + client, err := ethclient.Dial(runner.RPCUrl()) + require.NoError(t, err) + code, err := client.CodeAt(ctx, out.Opcm, nil) + require.NoError(t, err) + require.NotEmpty(t, code) +} diff --git a/op-deployer/pkg/deployer/opcm/opcm.go b/op-deployer/pkg/deployer/opcm/opcm.go new file mode 100644 index 00000000000..9de3348be28 --- /dev/null +++ b/op-deployer/pkg/deployer/opcm/opcm.go @@ -0,0 +1,56 @@ +package opcm + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum/go-ethereum/common" +) + +type DeployOPCMInput struct { + SuperchainConfig common.Address + ProtocolVersions common.Address + L1ContractsRelease string + + AddressManagerBlueprint common.Address + ProxyBlueprint common.Address + ProxyAdminBlueprint common.Address + L1ChugSplashProxyBlueprint common.Address + ResolvedDelegateProxyBlueprint common.Address + AnchorStateRegistryBlueprint common.Address + PermissionedDisputeGame1Blueprint common.Address + PermissionedDisputeGame2Blueprint common.Address + + L1ERC721BridgeImpl common.Address + OptimismPortalImpl common.Address + SystemConfigImpl common.Address + OptimismMintableERC20FactoryImpl common.Address + L1CrossDomainMessengerImpl common.Address + L1StandardBridgeImpl common.Address + DisputeGameFactoryImpl common.Address + 
DelayedWETHImpl common.Address + MipsImpl common.Address +} + +type DeployOPCMOutput struct { + Opcm common.Address +} + +func DeployOPCM( + host *script.Host, + input DeployOPCMInput, +) (DeployOPCMOutput, error) { + scriptFile := "DeployOPCM.s.sol" + contractName := "DeployOPCM" + + out, err := RunBasicScript[DeployOPCMInput, DeployOPCMOutput](host, input, scriptFile, contractName) + if err != nil { + return DeployOPCMOutput{}, fmt.Errorf("failed to deploy OPCM: %w", err) + } + + if err := host.RememberOnLabel("OPContractsManager", "OPContractsManager.sol", "OPContractsManager"); err != nil { + return DeployOPCMOutput{}, fmt.Errorf("failed to link OPContractsManager label: %w", err) + } + + return out, nil +} diff --git a/op-deployer/pkg/deployer/opcm/script.go b/op-deployer/pkg/deployer/opcm/script.go new file mode 100644 index 00000000000..6a138f67cb8 --- /dev/null +++ b/op-deployer/pkg/deployer/opcm/script.go @@ -0,0 +1,48 @@ +package opcm + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum/go-ethereum/common" +) + +type BasicScriptIO struct { + Run func(input, output common.Address) error +} + +func RunBasicScript[I any, O any]( + host *script.Host, + input I, + scriptFile string, + contractName string, +) (O, error) { + var output O + inputAddr := host.NewScriptAddress() + outputAddr := host.NewScriptAddress() + + cleanupInput, err := script.WithPrecompileAtAddress[*I](host, inputAddr, &input) + if err != nil { + return output, fmt.Errorf("failed to insert input precompile: %w", err) + } + defer cleanupInput() + + cleanupOutput, err := script.WithPrecompileAtAddress[*O](host, outputAddr, &output, + script.WithFieldSetter[*O]) + if err != nil { + return output, fmt.Errorf("failed to insert output precompile: %w", err) + } + defer cleanupOutput() + + deployScript, cleanupDeploy, err := script.WithScript[BasicScriptIO](host, scriptFile, contractName) + if err != nil { + return output, fmt.Errorf("failed to 
load %s script: %w", scriptFile, err) + } + defer cleanupDeploy() + + if err := deployScript.Run(inputAddr, outputAddr); err != nil { + return output, fmt.Errorf("failed to run %s script: %w", scriptFile, err) + } + + return output, nil +} diff --git a/op-deployer/pkg/deployer/standard/standard-versions-sepolia.toml b/op-deployer/pkg/deployer/standard/standard-versions-sepolia.toml index 277f9d09630..25b63b4ac65 100644 --- a/op-deployer/pkg/deployer/standard/standard-versions-sepolia.toml +++ b/op-deployer/pkg/deployer/standard/standard-versions-sepolia.toml @@ -5,6 +5,25 @@ # * proxied : specify a standard "implementation_address" # * neither : specify neither a standard "address" nor "implementation_address" +# Holocene https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.8.0-rc.3 +[releases."op-contracts/v1.8.0-rc.3"] +# Updated in this release +system_config = { version = "2.3.0", implementation_address = "0x33b83E4C305c908B2Fc181dDa36e230213058d7d" } # UPDATED IN THIS RELEASE +fault_dispute_game = { version = "1.3.1" } # UPDATED IN THIS RELEASE +permissioned_dispute_game = { version = "1.3.1" } # UPDATED IN THIS RELEASE +mips = { version = "1.2.1", address = "0x69470D6970Cd2A006b84B1d4d70179c892cFCE01" } # UPDATED IN THIS RELEASE +# Unchanged in this release +optimism_portal = { version = "3.10.0", implementation_address = "0x35028bae87d71cbc192d545d38f960ba30b4b233" } +anchor_state_registry = { version = "2.0.0" } +delayed_weth = { version = "1.1.0", implementation_address = "0x07f69b19532476c6cd03056d6bc3f1b110ab7538" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xa51bea7e4d34206c0bcb04a776292f2f19f0beec" } +preimage_oracle = { version = "1.1.2", address = "0x92240135b46fc1142dA181f550aE8f595B858854" } +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = 
"0xae2af01232a6c4a4d3012c5ec5b1b35059caf10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64b5a5ed26dcb17370ff4d33a8d503f0fbd06cff" } +# l2_output_oracle -- This contract not used in fault proofs +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xe01efbeb1089d1d1db9c6c8b135c934c0734c846" } + # Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 [releases."op-contracts/v1.6.0"] optimism_portal = { version = "3.10.0", implementation_address = "0x35028bae87d71cbc192d545d38f960ba30b4b233" } @@ -20,4 +39,4 @@ l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD34 l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xae2af01232a6c4a4d3012c5ec5b1b35059caf10d" } l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64b5a5ed26dcb17370ff4d33a8d503f0fbd06cff" } # l2_output_oracle -- This contract not used in fault proofs -optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xe01efbeb1089d1d1db9c6c8b135c934c0734c846" } +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xe01efbeb1089d1d1db9c6c8b135c934c0734c846" } \ No newline at end of file diff --git a/op-deployer/pkg/deployer/standard/standard.go b/op-deployer/pkg/deployer/standard/standard.go index d2fe5e5d942..72cff3cb926 100644 --- a/op-deployer/pkg/deployer/standard/standard.go +++ b/op-deployer/pkg/deployer/standard/standard.go @@ -78,6 +78,50 @@ type VersionRelease struct { var _ embed.FS +type OPCMBlueprints struct { + AddressManager common.Address + Proxy common.Address + ProxyAdmin common.Address + L1ChugSplashProxy common.Address + ResolvedDelegateProxy common.Address + AnchorStateRegistry common.Address + PermissionedDisputeGame1 common.Address + PermissionedDisputeGame2 common.Address +} + +var sepoliaBlueprints = OPCMBlueprints{ + AddressManager: 
common.HexToAddress("0x3125a4cB2179E04203D3Eb2b5784aaef9FD64216"), + Proxy: common.HexToAddress("0xe650ADb86a0de96e2c434D0a52E7D5B70980D6f1"), + ProxyAdmin: common.HexToAddress("0x3AC6b88F6bC4A5038DB7718dE47a5ab1a9609319"), + L1ChugSplashProxy: common.HexToAddress("0x58770FC7ed304c43D2B70248914eb34A741cF411"), + ResolvedDelegateProxy: common.HexToAddress("0x0449adB72D489a137d476aB49c6b812161754fD3"), + AnchorStateRegistry: common.HexToAddress("0xB98095199437883b7661E0D58256060f3bc730a4"), + PermissionedDisputeGame1: common.HexToAddress("0xf72Ac5f164cC024DE09a2c249441715b69a16eAb"), + PermissionedDisputeGame2: common.HexToAddress("0x713dAC5A23728477547b484f9e0D751077E300a2"), +} + +var mainnetBlueprints = OPCMBlueprints{ + AddressManager: common.HexToAddress("0x29aA24714c06914d9689e933cae2293C569AfeEa"), + Proxy: common.HexToAddress("0x3626ebD458c7f34FD98789A373593fF2fc227bA0"), + ProxyAdmin: common.HexToAddress("0x7170678A5CFFb6872606d251B3CcdB27De962631"), + L1ChugSplashProxy: common.HexToAddress("0x538906C8B000D621fd11B7e8642f504dD8730837"), + ResolvedDelegateProxy: common.HexToAddress("0xF12bD34d6a1d26d230240ECEA761f77e2013926E"), + AnchorStateRegistry: common.HexToAddress("0xbA7Be2bEE016568274a4D1E6c852Bb9a99FaAB8B"), + PermissionedDisputeGame1: common.HexToAddress("0xb94bF6130Df8BD9a9eA45D8dD8C18957002d1986"), + PermissionedDisputeGame2: common.HexToAddress("0xe0a642B249CF6cbF0fF7b4dDf41443Ea7a5C8Cc8"), +} + +func OPCMBlueprintsFor(chainID uint64) (OPCMBlueprints, error) { + switch chainID { + case 1: + return mainnetBlueprints, nil + case 11155111: + return sepoliaBlueprints, nil + default: + return OPCMBlueprints{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + func L1VersionsDataFor(chainID uint64) (string, error) { switch chainID { case 1: @@ -215,6 +259,8 @@ func ArtifactsURLForTag(tag string) (*url.URL, error) { return url.Parse(standardArtifactsURL("e1f0c4020618c4a98972e7124c39686cab2e31d5d7846f9ce5e0d5eed0f5ff32")) case 
"op-contracts/v1.7.0-beta.1+l2-contracts": return url.Parse(standardArtifactsURL("b0fb1f6f674519d637cff39a22187a5993d7f81a6d7b7be6507a0b50a5e38597")) + case "op-contracts/v1.8.0-rc.3": + return url.Parse(standardArtifactsURL("3bcff2944953862596d5fd0125d166a04af2ba6426dc693983291d3cb86b2e2e")) default: return nil, fmt.Errorf("unsupported tag: %s", tag) } @@ -226,6 +272,8 @@ func ArtifactsHashForTag(tag string) (common.Hash, error) { return common.HexToHash("d20a930cc0ff204c2d93b7aa60755ec7859ba4f328b881f5090c6a6a2a86dcba"), nil case "op-contracts/v1.7.0-beta.1+l2-contracts": return common.HexToHash("9e3ad322ec9b2775d59143ce6874892f9b04781742c603ad59165159e90b00b9"), nil + case "op-contracts/v1.8.0-rc.3": + return common.HexToHash("7c133142165fbbdba28ced5d9a04af8bea68baf58b19a07cdd8ae531b01fbe9d"), nil default: return common.Hash{}, fmt.Errorf("unsupported tag: %s", tag) } diff --git a/op-deployer/pkg/env/host.go b/op-deployer/pkg/env/host.go index 61d30b4ce25..47389f49232 100644 --- a/op-deployer/pkg/env/host.go +++ b/op-deployer/pkg/env/host.go @@ -1,8 +1,13 @@ package env import ( + "context" "fmt" + "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" @@ -39,3 +44,48 @@ func DefaultScriptHost( return h, nil } + +func DefaultForkedScriptHost( + ctx context.Context, + bcaster broadcaster.Broadcaster, + lgr log.Logger, + deployer common.Address, + artifacts foundry.StatDirFs, + forkRPC *rpc.Client, + additionalOpts ...script.HostOption, +) (*script.Host, error) { + h, err := DefaultScriptHost( + bcaster, + lgr, + deployer, + artifacts, + append([]script.HostOption{ + script.WithForkHook(func(cfg *script.ForkConfig) (forking.ForkSource, error) { + src, err := forking.RPCSourceByNumber(cfg.URLOrAlias, forkRPC, *cfg.BlockNumber) + 
if err != nil { + return nil, fmt.Errorf("failed to create RPC fork source: %w", err) + } + return forking.Cache(src), nil + }), + }, additionalOpts...)..., + ) + if err != nil { + return nil, fmt.Errorf("failed to create default script host: %w", err) + } + + client := ethclient.NewClient(forkRPC) + + latest, err := client.HeaderByNumber(ctx, nil) + if err != nil { + return nil, fmt.Errorf("failed to get latest block: %w", err) + } + + if _, err := h.CreateSelectFork( + script.ForkWithURLOrAlias("main"), + script.ForkWithBlockNumberU256(latest.Number), + ); err != nil { + return nil, fmt.Errorf("failed to select fork: %w", err) + } + + return h, nil +} diff --git a/op-e2e/e2eutils/retryproxy/proxy.go b/op-e2e/e2eutils/retryproxy/proxy.go index a74d5d8c869..d36b9e29d9d 100644 --- a/op-e2e/e2eutils/retryproxy/proxy.go +++ b/op-e2e/e2eutils/retryproxy/proxy.go @@ -3,6 +3,7 @@ package retryproxy import ( "bytes" "context" + "encoding/json" "fmt" "io" "net" @@ -13,6 +14,10 @@ import ( "github.com/ethereum/go-ethereum/log" ) +type jsonRPCReq struct { + Method string `json:"method"` +} + var copyHeaders = []string{ "Content-Type", } @@ -92,6 +97,8 @@ func (p *RetryProxy) Endpoint() string { } func (p *RetryProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + start := time.Now() + if r.Method != http.MethodPost { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return @@ -138,7 +145,16 @@ func (p *RetryProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { if _, err := io.Copy(w, bytes.NewReader(resBody)); err != nil { p.lgr.Error("failed to copy response", "err", err) http.Error(w, "failed to copy response", http.StatusInternalServerError) + return } + + var jReq jsonRPCReq + if err := json.Unmarshal(reqBody, &jReq); err != nil { + p.lgr.Warn("failed to unmarshal request", "err", err) + return + } + + p.lgr.Debug("proxied request", "method", jReq.Method, "dur", time.Since(start)) } func (p *RetryProxy) doProxyReq(ctx context.Context, body 
[]byte) (*http.Response, error) { diff --git a/op-service/testutils/anvil/anvil.go b/op-service/testutils/anvil/anvil.go index 50590a096a7..406d7d07c77 100644 --- a/op-service/testutils/anvil/anvil.go +++ b/op-service/testutils/anvil/anvil.go @@ -33,6 +33,10 @@ type Runner struct { } func New(l1RPCURL string, logger log.Logger) (*Runner, error) { + if _, err := exec.LookPath("anvil"); err != nil { + return nil, fmt.Errorf("anvil not found in PATH: %w", err) + } + proc := exec.Command( "anvil", "--fork-url", l1RPCURL, @@ -106,7 +110,7 @@ func (r *Runner) outputStream(stream io.ReadCloser) { } } - r.logger.Debug("[ANVIL] " + scanner.Text()) + r.logger.Debug("[ANVIL] " + line) } } From 139dabe54ad7c9455085df67093ddc03202c9550 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 4 Dec 2024 17:01:35 -0700 Subject: [PATCH 064/111] ci: Remove Kurtosis-specific go job (#13249) * ci: Remove Kurtosis-specific go job Kurtosis landed [my fix](https://github.com/kurtosis-tech/kurtosis/pull/2567) for the race condition that prevented us from running a shared Kurtosis instance between jobs. This PR brings that back. * remove dep --- .circleci/config.yml | 65 +------------------------------------------- 1 file changed, 1 insertion(+), 64 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 01b508b8ecf..54f19de4e9b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -807,63 +807,6 @@ jobs: make lint-go working_directory: . - go-test-kurtosis: - parameters: - module: - description: Go Module Name - type: string - uses_artifacts: - description: Uses contract artifacts - type: boolean - default: false - test_directory: - description: Test directory - type: string - default: "./..." 
- machine: - image: <> - resource_class: xlarge - steps: - - run: - name: Install components - command: | - go version - go install gotest.tools/gotestsum@v1.11.0 - - run: - name: Install Kurtosis - command: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt update - sudo apt install kurtosis-cli=1.4.0 - kurtosis engine start - - checkout - - when: - condition: <> - steps: - - attach_workspace: { at: "." } - - run: - name: prep results dir - command: | - # Make sure the workspace is properly owned - mkdir -p ./tmp/test-results - mkdir -p ./tmp/testlogs - - run: - name: run tests - command: | - ENABLE_KURTOSIS=true gotestsum \ - --format=testname \ - --junitfile=../tmp/test-results/<>.xml \ - --jsonfile=../tmp/testlogs/log.json \ - -- -parallel=$(nproc) \ - -coverpkg=github.com/ethereum-optimism/optimism/... \ - -coverprofile=coverage.out <> - working_directory: <> - - store_test_results: - path: tmp/test-results - - store_artifacts: - path: tmp/testlogs - when: always - go-tests: parameters: notify: @@ -919,6 +862,7 @@ jobs: formatted_packages="$formatted_packages ./$package/..." 
done + export ENABLE_KURTOSIS=true export OP_E2E_CANNON_ENABLED="false" export OP_E2E_SKIP_SLOW_TEST=true export OP_E2E_USE_HTTP=true @@ -1351,12 +1295,6 @@ workflows: on_changes: op-e2e,packages/contracts-bedrock/src uses_artifacts: true requires: ["contracts-bedrock-build"] - - go-test-kurtosis: - name: op-deployer-integration - module: op-deployer - test_directory: ./pkg/deployer/integration_test - uses_artifacts: true - requires: ["contracts-bedrock-build"] - go-tests: packages: | op-batcher @@ -1391,7 +1329,6 @@ workflows: - check-generated-mocks-op-node - check-generated-mocks-op-service - go-mod-download - - op-deployer-integration - op-program-compat # Not needed for the devnet but we want to make sure they build successfully - cannon-docker-build From 37d7bda6a89aa7d7822debd8c104ae1680c24054 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 5 Dec 2024 10:19:28 +1000 Subject: [PATCH 065/111] op-challenger: Use target platform for kona docker image. (#13251) --- ops/docker/op-stack-go/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 9384aef5e24..b6fb3f45c90 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -144,7 +144,7 @@ COPY --from=op-node-builder /app/op-node/bin/op-node /usr/local/bin/ CMD ["op-node"] # Make the kona docker image published by upstream available as a source to copy kona and asterisc from. 
-FROM --platform=$BUILDPLATFORM ghcr.io/anton-rs/kona/kona-fpp-asterisc:$KONA_VERSION AS kona +FROM --platform=$TARGETPLATFORM ghcr.io/anton-rs/kona/kona-fpp-asterisc:$KONA_VERSION AS kona # Also produce an op-challenger loaded with kona and asterisc using ubuntu FROM --platform=$TARGETPLATFORM $UBUNTU_TARGET_BASE_IMAGE AS op-challenger-target From a88f63981a97972efe3a24bb72d38b8709410f15 Mon Sep 17 00:00:00 2001 From: clabby Date: Wed, 4 Dec 2024 19:53:14 -0500 Subject: [PATCH 066/111] chore(op-deployer): Accept existing impl in `DelayedWETH` bootstrap task (#13250) * chore(op-deployer): Accept existing impl in `DelayedWETH` bootstrap task * remove check --- .../pkg/deployer/bootstrap/delayed_weth.go | 9 ++- op-deployer/pkg/deployer/bootstrap/flags.go | 10 ++- op-deployer/pkg/deployer/opcm/delayed_weth.go | 2 +- .../pkg/deployer/opcm/delayed_weth_test.go | 62 ++++++++++++------- .../scripts/deploy/DeployDelayedWETH.s.sol | 45 +++----------- 5 files changed, 61 insertions(+), 67 deletions(-) diff --git a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go index 1209043d31d..aa9eea361ac 100644 --- a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go +++ b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go @@ -37,6 +37,7 @@ type DelayedWETHConfig struct { PrivateKey string Logger log.Logger ArtifactsLocator *artifacts2.Locator + DelayedWethImpl common.Address privateKeyECDSA *ecdsa.PrivateKey } @@ -90,11 +91,13 @@ func NewDelayedWETHConfigFromClI(cliCtx *cli.Context, l log.Logger) (DelayedWETH if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { return DelayedWETHConfig{}, fmt.Errorf("failed to parse artifacts URL: %w", err) } + delayedWethImpl := common.HexToAddress(cliCtx.String(DelayedWethImplFlagName)) config := DelayedWETHConfig{ L1RPCUrl: l1RPCUrl, PrivateKey: privateKey, Logger: l, ArtifactsLocator: artifactsLocator, + DelayedWethImpl: delayedWethImpl, } return config, nil } @@ 
-134,10 +137,6 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { if err != nil { return fmt.Errorf("error getting superchain config: %w", err) } - standardVersionsTOML, err := standard.L1VersionsDataFor(chainIDU64) - if err != nil { - return fmt.Errorf("error getting standard versions TOML: %w", err) - } proxyAdmin, err := standard.ManagerOwnerAddrFor(chainIDU64) if err != nil { return fmt.Errorf("error getting superchain proxy admin: %w", err) @@ -210,9 +209,9 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { host, opcm.DeployDelayedWETHInput{ Release: release, - StandardVersionsToml: standardVersionsTOML, ProxyAdmin: proxyAdmin, SuperchainConfigProxy: superchainConfigAddr, + DelayedWethImpl: cfg.DelayedWethImpl, DelayedWethOwner: delayedWethOwner, DelayedWethDelay: big.NewInt(604800), }, diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index 1832a3122ed..f6a130f075a 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -26,13 +26,14 @@ const ( SplitDepthFlagName = "split-depth" ClockExtensionFlagName = "clock-extension" MaxClockDurationFlagName = "max-clock-duration" - DelayedWethProxyFlagName = "delayed-weth-proxy" AnchorStateRegistryProxyFlagName = "anchor-state-registry-proxy" L2ChainIdFlagName = "l2-chain-id" ProposerFlagName = "proposer" ChallengerFlagName = "challenger" PreimageOracleFlagName = "preimage-oracle" ReleaseFlagName = "release" + DelayedWethProxyFlagName = "delayed-weth-proxy" + DelayedWethImplFlagName = "delayed-weth-impl" ) var ( @@ -128,6 +129,12 @@ var ( Usage: "Delayed WETH proxy.", EnvVars: deployer.PrefixEnvVar("DELAYED_WETH_PROXY"), } + DelayedWethImplFlag = &cli.StringFlag{ + Name: DelayedWethImplFlagName, + Usage: "Delayed WETH implementation.", + EnvVars: deployer.PrefixEnvVar("DELAYED_WETH_IMPL"), + Value: common.Address{}.Hex(), + } AnchorStateRegistryProxyFlag = &cli.StringFlag{ 
Name: AnchorStateRegistryProxyFlagName, Usage: "Anchor state registry proxy.", @@ -182,6 +189,7 @@ var DelayedWETHFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, ArtifactsLocatorFlag, + DelayedWethImplFlag, } var DisputeGameFlags = []cli.Flag{ diff --git a/op-deployer/pkg/deployer/opcm/delayed_weth.go b/op-deployer/pkg/deployer/opcm/delayed_weth.go index d94e25cb909..8a0623dd468 100644 --- a/op-deployer/pkg/deployer/opcm/delayed_weth.go +++ b/op-deployer/pkg/deployer/opcm/delayed_weth.go @@ -11,9 +11,9 @@ import ( type DeployDelayedWETHInput struct { Release string - StandardVersionsToml string ProxyAdmin common.Address SuperchainConfigProxy common.Address + DelayedWethImpl common.Address DelayedWethOwner common.Address DelayedWethDelay *big.Int } diff --git a/op-deployer/pkg/deployer/opcm/delayed_weth_test.go b/op-deployer/pkg/deployer/opcm/delayed_weth_test.go index 071afe89fdb..3a8be1b3e0d 100644 --- a/op-deployer/pkg/deployer/opcm/delayed_weth_test.go +++ b/op-deployer/pkg/deployer/opcm/delayed_weth_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/testutil" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" "github.com/ethereum-optimism/optimism/op-service/testlog" @@ -17,29 +16,44 @@ import ( func TestDeployDelayedWETH(t *testing.T) { _, artifacts := testutil.LocalArtifacts(t) - host, err := env.DefaultScriptHost( - broadcaster.NoopBroadcaster(), - testlog.Logger(t, log.LevelInfo), - common.Address{'D'}, - artifacts, - ) - require.NoError(t, err) - - standardVersionsTOML, err := standard.L1VersionsDataFor(11155111) - require.NoError(t, err) - - input := DeployDelayedWETHInput{ - Release: "dev", - StandardVersionsToml: standardVersionsTOML, - ProxyAdmin: common.Address{'P'}, - SuperchainConfigProxy: common.Address{'S'}, - 
DelayedWethOwner: common.Address{'O'}, - DelayedWethDelay: big.NewInt(100), + testCases := []struct { + TestName string + Impl common.Address + }{ + { + TestName: "ExistingImpl", + Impl: common.Address{'I'}, + }, + { + TestName: "NoExistingImpl", + Impl: common.Address{}, + }, } - output, err := DeployDelayedWETH(host, input) - require.NoError(t, err) - - require.NotEmpty(t, output.DelayedWethImpl) - require.NotEmpty(t, output.DelayedWethProxy) + for _, testCase := range testCases { + t.Run(testCase.TestName, func(t *testing.T) { + host, err := env.DefaultScriptHost( + broadcaster.NoopBroadcaster(), + testlog.Logger(t, log.LevelInfo), + common.Address{'D'}, + artifacts, + ) + require.NoError(t, err) + + input := DeployDelayedWETHInput{ + Release: "dev", + ProxyAdmin: common.Address{'P'}, + SuperchainConfigProxy: common.Address{'S'}, + DelayedWethImpl: testCase.Impl, + DelayedWethOwner: common.Address{'O'}, + DelayedWethDelay: big.NewInt(100), + } + + output, err := DeployDelayedWETH(host, input) + require.NoError(t, err) + + require.NotEmpty(t, output.DelayedWethImpl) + require.NotEmpty(t, output.DelayedWethProxy) + }) + } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol index c450b1f155e..eb386f2ff40 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployDelayedWETH.s.sol @@ -20,9 +20,9 @@ import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; contract DeployDelayedWETHInput is BaseDeployIO { /// Required inputs. 
string internal _release; - string internal _standardVersionsToml; address public _proxyAdmin; ISuperchainConfig public _superchainConfigProxy; + address public _delayedWethImpl; address public _delayedWethOwner; uint256 public _delayedWethDelay; @@ -45,6 +45,8 @@ contract DeployDelayedWETHInput is BaseDeployIO { } else if (_sel == this.delayedWethOwner.selector) { require(_value != address(0), "DeployDelayedWETH: delayedWethOwner cannot be zero address"); _delayedWethOwner = _value; + } else if (_sel == this.delayedWethImpl.selector) { + _delayedWethImpl = _value; } else { revert("DeployDelayedWETH: unknown selector"); } @@ -54,9 +56,6 @@ contract DeployDelayedWETHInput is BaseDeployIO { if (_sel == this.release.selector) { require(!LibString.eq(_value, ""), "DeployDelayedWETH: release cannot be empty"); _release = _value; - } else if (_sel == this.standardVersionsToml.selector) { - require(!LibString.eq(_value, ""), "DeployDelayedWETH: standardVersionsToml cannot be empty"); - _standardVersionsToml = _value; } else { revert("DeployDelayedWETH: unknown selector"); } @@ -67,11 +66,6 @@ contract DeployDelayedWETHInput is BaseDeployIO { return _release; } - function standardVersionsToml() public view returns (string memory) { - require(!LibString.eq(_standardVersionsToml, ""), "DeployDelayedWETH: standardVersionsToml not set"); - return _standardVersionsToml; - } - function proxyAdmin() public view returns (address) { require(_proxyAdmin != address(0), "DeployDelayedWETH: proxyAdmin not set"); return _proxyAdmin; @@ -82,6 +76,11 @@ contract DeployDelayedWETHInput is BaseDeployIO { return _superchainConfigProxy; } + function delayedWethImpl() public view returns (address) { + require(_delayedWethImpl != address(0), "DeployDelayedWETH: delayedWethImpl not set"); + return _delayedWethImpl; + } + function delayedWethOwner() public view returns (address) { require(_delayedWethOwner != address(0), "DeployDelayedWETH: delayedWethOwner not set"); return _delayedWethOwner; @@ 
-166,11 +165,9 @@ contract DeployDelayedWETH is Script { function deployDelayedWethImpl(DeployDelayedWETHInput _dwi, DeployDelayedWETHOutput _dwo) internal { string memory release = _dwi.release(); - string memory stdVerToml = _dwi.standardVersionsToml(); - string memory contractName = "delayed_weth"; IDelayedWETH impl; - address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + address existingImplementation = _dwi.delayedWethImpl(); if (existingImplementation != address(0)) { impl = IDelayedWETH(payable(existingImplementation)); } else if (isDevelopRelease(release)) { @@ -214,30 +211,6 @@ contract DeployDelayedWETH is Script { _dwo.set(_dwo.delayedWethProxy.selector, address(proxy)); } - // Zero address is returned if the address is not found in '_standardVersionsToml'. - function getReleaseAddress( - string memory _version, - string memory _contractName, - string memory _standardVersionsToml - ) - internal - pure - returns (address addr_) - { - string memory baseKey = string.concat('.releases["', _version, '"].', _contractName); - string memory implAddressKey = string.concat(baseKey, ".implementation_address"); - string memory addressKey = string.concat(baseKey, ".address"); - try vm.parseTomlAddress(_standardVersionsToml, implAddressKey) returns (address parsedAddr_) { - addr_ = parsedAddr_; - } catch { - try vm.parseTomlAddress(_standardVersionsToml, addressKey) returns (address parsedAddr_) { - addr_ = parsedAddr_; - } catch { - addr_ = address(0); - } - } - } - // A release is considered a 'develop' release if it does not start with 'op-contracts'. 
function isDevelopRelease(string memory _release) internal pure returns (bool) { return !LibString.startsWith(_release, "op-contracts"); From 94056b990146b90f70035f74a35ce5526090afaf Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 5 Dec 2024 14:01:04 +1000 Subject: [PATCH 067/111] op-challenger, op-program: Require specific opt-in to use the custom config chain ID indicator (#13217) * op-challenger: Support --cannon-l2-chain-id to pass chain ID through to op-program. * op-program: Default to assuming configs are available in client. Provide a --l2.custom flag to set the chain ID to the custom chain indicator so the client will load configs via the preimage oracle. Since loading configs via the preimage oracle doesn't work on-chain, this is a safer and simpler default now that op-program can be built with custom configs embedded. * op-challenger: Switch to custom L2 flag instead of specifying chain ID * op-challenger: Fix boolean option in op-program execution. * op-e2e: Set custom L2 flag in precompiles test --- op-challenger/cmd/main_test.go | 13 ++- op-challenger/flags/flags.go | 25 ++++-- op-challenger/game/fault/trace/vm/executor.go | 1 + .../trace/vm/op_program_server_executor.go | 3 + .../vm/op_program_server_executor_test.go | 13 +++ op-e2e/e2eutils/challenger/helper.go | 1 + op-e2e/faultproofs/precompile_test.go | 1 + op-program/host/cmd/main_test.go | 25 ++++++ op-program/host/config/config.go | 80 ++++++++++--------- op-program/host/config/config_test.go | 7 +- op-program/host/flags/flags.go | 12 +++ op-program/host/kvstore/local.go | 14 +--- op-program/host/kvstore/local_test.go | 18 +++-- 13 files changed, 147 insertions(+), 66 deletions(-) diff --git a/op-challenger/cmd/main_test.go b/op-challenger/cmd/main_test.go index 52c9c6c4d9f..018b8b423c3 100644 --- a/op-challenger/cmd/main_test.go +++ b/op-challenger/cmd/main_test.go @@ -765,7 +765,7 @@ func TestCannonRequiredArgs(t *testing.T) { 
t.Run(fmt.Sprintf("TestMustNotSpecifyCannonNetworkAndRollup-%v", traceType), func(t *testing.T) { verifyArgsInvalid( t, - "flag cannon-network can not be used with cannon-rollup-config and cannon-l2-genesis", + "flag cannon-network can not be used with cannon-rollup-config, cannon-l2-genesis or cannon-l2-custom", addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-network", cannonNetwork, "--cannon-rollup-config=rollup.json")) }) @@ -777,9 +777,10 @@ func TestCannonRequiredArgs(t *testing.T) { args["--network"] = cannonNetwork args["--cannon-rollup-config"] = "rollup.json" args["--cannon-l2-genesis"] = "gensis.json" + args["--cannon-l2-custom"] = "true" verifyArgsInvalid( t, - "flag network can not be used with cannon-rollup-config and cannon-l2-genesis", + "flag network can not be used with cannon-rollup-config, cannon-l2-genesis or cannon-l2-custom", toArgList(args)) }) @@ -813,6 +814,14 @@ func TestCannonRequiredArgs(t *testing.T) { }) }) + t.Run(fmt.Sprintf("TestSetCannonL2ChainId-%v", traceType), func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", + "--cannon-rollup-config=rollup.json", + "--cannon-l2-genesis=genesis.json", + "--cannon-l2-custom")) + require.True(t, cfg.Cannon.L2Custom) + }) + t.Run(fmt.Sprintf("TestCannonRollupConfig-%v", traceType), func(t *testing.T) { t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { configForArgs(t, addRequiredArgsExcept(types.TraceTypeAlphabet, "--cannon-rollup-config")) diff --git a/op-challenger/flags/flags.go b/op-challenger/flags/flags.go index 19d6bc79042..3face06bdc8 100644 --- a/op-challenger/flags/flags.go +++ b/op-challenger/flags/flags.go @@ -115,6 +115,14 @@ var ( Usage: fmt.Sprintf("Deprecated: Use %v instead", flags.NetworkFlagName), EnvVars: prefixEnvVars("CANNON_NETWORK"), } + CannonL2CustomFlag = &cli.BoolFlag{ + Name: "cannon-l2-custom", + Usage: "Notify the op-program host that the L2 chain uses custom config to be loaded via the 
preimage oracle. " + + "WARNING: This is incompatible with on-chain testing and must only be used for testing purposes.", + EnvVars: prefixEnvVars("CANNON_L2_CUSTOM"), + Value: false, + Hidden: true, + } CannonRollupConfigFlag = &cli.StringFlag{ Name: "cannon-rollup-config", Usage: "Rollup chain parameters (cannon trace type only)", @@ -249,6 +257,7 @@ var optionalFlags = []cli.Flag{ AdditionalBondClaimants, GameAllowlistFlag, CannonNetworkFlag, + CannonL2CustomFlag, CannonRollupConfigFlag, CannonL2GenesisFlag, CannonBinFlag, @@ -296,14 +305,17 @@ func CheckCannonFlags(ctx *cli.Context) error { CannonNetworkFlag.Name, flags.NetworkFlagName, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name) } if ctx.IsSet(flags.NetworkFlagName) && - (ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name)) { - return fmt.Errorf("flag %v can not be used with %v and %v", - flags.NetworkFlagName, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name) + (ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name) || ctx.Bool(CannonL2CustomFlag.Name)) { + return fmt.Errorf("flag %v can not be used with %v, %v or %v", + flags.NetworkFlagName, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name, CannonL2CustomFlag.Name) } if ctx.IsSet(CannonNetworkFlag.Name) && - (ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name)) { - return fmt.Errorf("flag %v can not be used with %v and %v", - CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name) + (ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name) || ctx.Bool(CannonL2CustomFlag.Name)) { + return fmt.Errorf("flag %v can not be used with %v, %v or %v", + CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name, CannonL2CustomFlag.Name) + } + if ctx.Bool(CannonL2CustomFlag.Name) && !(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) { + return fmt.Errorf("flag %v and %v must be set 
when %v is true", CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name, CannonL2CustomFlag.Name) } if !ctx.IsSet(CannonBinFlag.Name) { return fmt.Errorf("flag %s is required", CannonBinFlag.Name) @@ -563,6 +575,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro VmBin: ctx.String(CannonBinFlag.Name), Server: ctx.String(CannonServerFlag.Name), Network: cannonNetwork, + L2Custom: ctx.Bool(CannonL2CustomFlag.Name), RollupConfigPath: ctx.String(CannonRollupConfigFlag.Name), L2GenesisPath: ctx.String(CannonL2GenesisFlag.Name), SnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name), diff --git a/op-challenger/game/fault/trace/vm/executor.go b/op-challenger/game/fault/trace/vm/executor.go index 9e2cd0d29e9..524124a6eab 100644 --- a/op-challenger/game/fault/trace/vm/executor.go +++ b/op-challenger/game/fault/trace/vm/executor.go @@ -41,6 +41,7 @@ type Config struct { L2 string Server string // Path to the executable that provides the pre-image oracle server Network string + L2Custom bool RollupConfigPath string L2GenesisPath string } diff --git a/op-challenger/game/fault/trace/vm/op_program_server_executor.go b/op-challenger/game/fault/trace/vm/op_program_server_executor.go index 1b62e42938e..c353b1a4a16 100644 --- a/op-challenger/game/fault/trace/vm/op_program_server_executor.go +++ b/op-challenger/game/fault/trace/vm/op_program_server_executor.go @@ -54,5 +54,8 @@ func (s *OpProgramServerExecutor) OracleCommand(cfg Config, dataDir string, inpu logLevel = "CRIT" } args = append(args, "--log.level", logLevel) + if cfg.L2Custom { + args = append(args, "--l2.custom") + } return args, nil } diff --git a/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go b/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go index 90fad9507b9..8cf69534c7b 100644 --- a/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go +++ b/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go @@ -19,6 +19,12 @@ 
func TestOpProgramFillHostCommand(t *testing.T) { toPairs := func(args []string) map[string]string { pairs := make(map[string]string, len(args)/2) for i := 0; i < len(args); i += 2 { + // l2.custom is a boolean flag so can't accept a value after a space + if args[i] == "--l2.custom" { + pairs[args[i]] = "true" + i-- + continue + } pairs[args[i]] = args[i+1] } return pairs @@ -72,6 +78,13 @@ func TestOpProgramFillHostCommand(t *testing.T) { require.Equal(t, "op-test", pairs["--network"]) }) + t.Run("WithL2ChainID", func(t *testing.T) { + pairs := oracleCommand(t, log.LvlInfo, func(c *Config) { + c.L2Custom = true + }) + require.Equal(t, "true", pairs["--l2.custom"]) + }) + t.Run("WithRollupConfigPath", func(t *testing.T) { pairs := oracleCommand(t, log.LvlInfo, func(c *Config) { c.RollupConfigPath = "rollup.config.json" diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index 8e31f1311e8..925e38856ed 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -190,6 +190,7 @@ func NewChallengerConfig(t *testing.T, sys EndpointProvider, l2NodeName string, l1Endpoint := sys.NodeEndpoint("l1").RPC() l1Beacon := sys.L1BeaconEndpoint().RestHTTP() cfg := config.NewConfig(common.Address{}, l1Endpoint, l1Beacon, sys.RollupEndpoint(l2NodeName).RPC(), sys.NodeEndpoint(l2NodeName).RPC(), t.TempDir()) + cfg.Cannon.L2Custom = true // The devnet can't set the absolute prestate output root because the contracts are deployed in L1 genesis // before the L2 genesis is known. 
cfg.AllowInvalidPrestate = true diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index 2beabfba54d..89b2ff92d32 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -269,6 +269,7 @@ func runCannon(t *testing.T, ctx context.Context, sys *e2esys.System, inputs uti dir := t.TempDir() proofsDir := filepath.Join(dir, "cannon-proofs") cfg := config.NewConfig(common.Address{}, l1Endpoint, l1Beacon, rollupEndpoint, l2Endpoint, dir) + cfg.Cannon.L2Custom = true cannonOpts(&cfg) logger := testlog.Logger(t, log.LevelInfo).New("role", "cannon") diff --git a/op-program/host/cmd/main_test.go b/op-program/host/cmd/main_test.go index 35ca46d2472..9994e87100b 100644 --- a/op-program/host/cmd/main_test.go +++ b/op-program/host/cmd/main_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-program/chainconfig" + "github.com/ethereum-optimism/optimism/op-program/client" "github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/types" oplog "github.com/ethereum-optimism/optimism/op-service/log" @@ -160,6 +161,30 @@ func TestL2Genesis(t *testing.T) { }) } +func TestL2ChainID(t *testing.T) { + t.Run("DefaultToNetworkChainID", func(t *testing.T) { + cfg := configForArgs(t, replaceRequiredArg("--network", "op-mainnet")) + require.Equal(t, uint64(10), cfg.L2ChainID) + }) + + t.Run("DefaultToGenesisChainID", func(t *testing.T) { + rollupCfgFile := writeValidRollupConfig(t) + genesisFile := writeValidGenesis(t) + cfg := configForArgs(t, addRequiredArgsExcept("--network", "--rollup.config", rollupCfgFile, "--l2.genesis", genesisFile)) + require.Equal(t, l2GenesisConfig.ChainID.Uint64(), cfg.L2ChainID) + }) + + t.Run("OverrideToCustomIndicator", func(t *testing.T) { + rollupCfgFile := writeValidRollupConfig(t) + genesisFile := writeValidGenesis(t) + cfg := configForArgs(t, 
addRequiredArgsExcept("--network", + "--rollup.config", rollupCfgFile, + "--l2.genesis", genesisFile, + "--l2.custom")) + require.Equal(t, client.CustomChainIDIndicator, cfg.L2ChainID) + }) +} + func TestL2Head(t *testing.T) { t.Run("Required", func(t *testing.T) { verifyArgsInvalid(t, "flag l2.head is required", addRequiredArgsExcept("--l2.head")) diff --git a/op-program/host/config/config.go b/op-program/host/config/config.go index 46442e07f43..7daf2d17918 100644 --- a/op-program/host/config/config.go +++ b/op-program/host/config/config.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-program/chainconfig" + "github.com/ethereum-optimism/optimism/op-program/client" "github.com/ethereum-optimism/optimism/op-program/host/types" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -37,7 +38,8 @@ var ( ) type Config struct { - Rollup *rollup.Config + L2ChainID uint64 + Rollup *rollup.Config // DataDir is the directory to read/write pre-image data from/to. // If not set, an in-memory key-value store is used and fetching data must be enabled DataDir string @@ -75,9 +77,6 @@ type Config struct { // ServerMode indicates that the program should run in pre-image server mode and wait for requests. // No client program is run. 
ServerMode bool - - // IsCustomChainConfig indicates that the program uses a custom chain configuration - IsCustomChainConfig bool } func (c *Config) Check() error { @@ -131,19 +130,23 @@ func NewConfig( l2Claim common.Hash, l2ClaimBlockNum uint64, ) *Config { - _, err := params.LoadOPStackChainConfig(l2Genesis.ChainID.Uint64()) - isCustomConfig := err != nil + l2ChainID := l2Genesis.ChainID.Uint64() + _, err := params.LoadOPStackChainConfig(l2ChainID) + if err != nil { + // Unknown chain ID so assume it is custom + l2ChainID = client.CustomChainIDIndicator + } return &Config{ - Rollup: rollupCfg, - L2ChainConfig: l2Genesis, - L1Head: l1Head, - L2Head: l2Head, - L2OutputRoot: l2OutputRoot, - L2Claim: l2Claim, - L2ClaimBlockNumber: l2ClaimBlockNum, - L1RPCKind: sources.RPCKindStandard, - IsCustomChainConfig: isCustomConfig, - DataFormat: types.DataFormatDirectory, + L2ChainID: l2ChainID, + Rollup: rollupCfg, + L2ChainConfig: l2Genesis, + L1Head: l1Head, + L2Head: l2Head, + L2OutputRoot: l2OutputRoot, + L2Claim: l2Claim, + L2ClaimBlockNumber: l2ClaimBlockNum, + L1RPCKind: sources.RPCKindStandard, + DataFormat: types.DataFormatDirectory, } } @@ -177,7 +180,7 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { var err error var rollupCfg *rollup.Config var l2ChainConfig *params.ChainConfig - var isCustomConfig bool + var l2ChainID uint64 networkName := ctx.String(flags.Network.Name) if networkName != "" { var chainID uint64 @@ -197,6 +200,7 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { if err != nil { return nil, fmt.Errorf("failed to load rollup config for chain %d: %w", chainID, err) } + l2ChainID = chainID } else { l2GenesisPath := ctx.String(flags.L2GenesisPath.Name) l2ChainConfig, err = loadChainConfigFromGenesis(l2GenesisPath) @@ -210,7 +214,11 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { return nil, fmt.Errorf("invalid rollup config: %w", err) } - isCustomConfig = true + 
l2ChainID = l2ChainConfig.ChainID.Uint64() + if ctx.Bool(flags.L2Custom.Name) { + log.Warn("Using custom chain configuration via preimage oracle. This is not compatible with on-chain execution.") + l2ChainID = client.CustomChainIDIndicator + } } dbFormat := types.DataFormat(ctx.String(flags.DataFormat.Name)) @@ -218,24 +226,24 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { return nil, fmt.Errorf("invalid %w: %v", ErrInvalidDataFormat, dbFormat) } return &Config{ - Rollup: rollupCfg, - DataDir: ctx.String(flags.DataDir.Name), - DataFormat: dbFormat, - L2URL: ctx.String(flags.L2NodeAddr.Name), - L2ExperimentalURL: ctx.String(flags.L2NodeExperimentalAddr.Name), - L2ChainConfig: l2ChainConfig, - L2Head: l2Head, - L2OutputRoot: l2OutputRoot, - L2Claim: l2Claim, - L2ClaimBlockNumber: l2ClaimBlockNum, - L1Head: l1Head, - L1URL: ctx.String(flags.L1NodeAddr.Name), - L1BeaconURL: ctx.String(flags.L1BeaconAddr.Name), - L1TrustRPC: ctx.Bool(flags.L1TrustRPC.Name), - L1RPCKind: sources.RPCProviderKind(ctx.String(flags.L1RPCProviderKind.Name)), - ExecCmd: ctx.String(flags.Exec.Name), - ServerMode: ctx.Bool(flags.Server.Name), - IsCustomChainConfig: isCustomConfig, + L2ChainID: l2ChainID, + Rollup: rollupCfg, + DataDir: ctx.String(flags.DataDir.Name), + DataFormat: dbFormat, + L2URL: ctx.String(flags.L2NodeAddr.Name), + L2ExperimentalURL: ctx.String(flags.L2NodeExperimentalAddr.Name), + L2ChainConfig: l2ChainConfig, + L2Head: l2Head, + L2OutputRoot: l2OutputRoot, + L2Claim: l2Claim, + L2ClaimBlockNumber: l2ClaimBlockNum, + L1Head: l1Head, + L1URL: ctx.String(flags.L1NodeAddr.Name), + L1BeaconURL: ctx.String(flags.L1BeaconAddr.Name), + L1TrustRPC: ctx.Bool(flags.L1TrustRPC.Name), + L1RPCKind: sources.RPCProviderKind(ctx.String(flags.L1RPCProviderKind.Name)), + ExecCmd: ctx.String(flags.Exec.Name), + ServerMode: ctx.Bool(flags.Server.Name), }, nil } diff --git a/op-program/host/config/config_test.go b/op-program/host/config/config_test.go index 
971807d8d6b..5c812f88fda 100644 --- a/op-program/host/config/config_test.go +++ b/op-program/host/config/config_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-program/chainconfig" + "github.com/ethereum-optimism/optimism/op-program/client" "github.com/ethereum-optimism/optimism/op-program/host/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" @@ -163,15 +164,15 @@ func TestRejectExecAndServerMode(t *testing.T) { require.ErrorIs(t, err, ErrNoExecInServerMode) } -func TestIsCustomChainConfig(t *testing.T) { +func TestCustomL2ChainID(t *testing.T) { t.Run("nonCustom", func(t *testing.T) { cfg := validConfig() - require.Equal(t, cfg.IsCustomChainConfig, false) + require.Equal(t, cfg.L2ChainID, validL2Genesis.ChainID.Uint64()) }) t.Run("custom", func(t *testing.T) { customChainConfig := &params.ChainConfig{ChainID: big.NewInt(0x1212121212)} cfg := NewConfig(validRollupConfig, customChainConfig, validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum) - require.Equal(t, cfg.IsCustomChainConfig, true) + require.Equal(t, cfg.L2ChainID, client.CustomChainIDIndicator) }) } diff --git a/op-program/host/flags/flags.go b/op-program/host/flags/flags.go index b7c59845153..a8d5798c06c 100644 --- a/op-program/host/flags/flags.go +++ b/op-program/host/flags/flags.go @@ -21,6 +21,14 @@ func prefixEnvVars(name string) []string { } var ( + L2Custom = &cli.BoolFlag{ + Name: "l2.custom", + Usage: "Override the L2 chain ID to the custom chain indicator for custom chain configuration not present in the client program. 
" + + "WARNING: This is not compatible with on-chain execution and must only be used for testing.", + EnvVars: prefixEnvVars("L2_CHAINID"), + Value: false, + Hidden: true, + } RollupConfig = &cli.StringFlag{ Name: "rollup.config", Usage: "Rollup chain parameters", @@ -131,6 +139,7 @@ var requiredFlags = []cli.Flag{ } var programFlags = []cli.Flag{ + L2Custom, RollupConfig, Network, DataDir, @@ -167,6 +176,9 @@ func CheckRequired(ctx *cli.Context) error { if ctx.String(L2GenesisPath.Name) != "" && network != "" { return fmt.Errorf("cannot specify both %s and %s", L2GenesisPath.Name, Network.Name) } + if ctx.Bool(L2Custom.Name) && rollupConfig == "" { + return fmt.Errorf("flag %s cannot be used with named networks", L2Custom.Name) + } for _, flag := range requiredFlags { if !ctx.IsSet(flag.Names()[0]) { return fmt.Errorf("flag %s is required", flag.Names()[0]) diff --git a/op-program/host/kvstore/local.go b/op-program/host/kvstore/local.go index b81b427e415..1c1bfa80327 100644 --- a/op-program/host/kvstore/local.go +++ b/op-program/host/kvstore/local.go @@ -38,22 +38,14 @@ func (s *LocalPreimageSource) Get(key common.Hash) ([]byte, error) { case l2ClaimBlockNumberKey: return binary.BigEndian.AppendUint64(nil, s.config.L2ClaimBlockNumber), nil case l2ChainIDKey: - // The CustomChainIDIndicator informs the client to rely on the L2ChainConfigKey to - // read the chain config. 
Otherwise, it'll attempt to read a non-existent hardcoded chain config - var chainID uint64 - if s.config.IsCustomChainConfig { - chainID = client.CustomChainIDIndicator - } else { - chainID = s.config.L2ChainConfig.ChainID.Uint64() - } - return binary.BigEndian.AppendUint64(nil, chainID), nil + return binary.BigEndian.AppendUint64(nil, s.config.L2ChainID), nil case l2ChainConfigKey: - if !s.config.IsCustomChainConfig { + if s.config.L2ChainID != client.CustomChainIDIndicator { return nil, ErrNotFound } return json.Marshal(s.config.L2ChainConfig) case rollupKey: - if !s.config.IsCustomChainConfig { + if s.config.L2ChainID != client.CustomChainIDIndicator { return nil, ErrNotFound } return json.Marshal(s.config.Rollup) diff --git a/op-program/host/kvstore/local_test.go b/op-program/host/kvstore/local_test.go index 7d379024aa2..6402f731cac 100644 --- a/op-program/host/kvstore/local_test.go +++ b/op-program/host/kvstore/local_test.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/chaincfg" preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-program/client" "github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" @@ -15,6 +16,7 @@ import ( func TestLocalPreimageSource(t *testing.T) { cfg := &config.Config{ + L2ChainID: 86, Rollup: chaincfg.OPSepolia(), L1Head: common.HexToHash("0x1111"), L2OutputRoot: common.HexToHash("0x2222"), @@ -32,7 +34,7 @@ func TestLocalPreimageSource(t *testing.T) { {"L2OutputRoot", l2OutputRootKey, cfg.L2OutputRoot.Bytes()}, {"L2Claim", l2ClaimKey, cfg.L2Claim.Bytes()}, {"L2ClaimBlockNumber", l2ClaimBlockNumberKey, binary.BigEndian.AppendUint64(nil, cfg.L2ClaimBlockNumber)}, - {"L2ChainID", l2ChainIDKey, binary.BigEndian.AppendUint64(nil, cfg.L2ChainConfig.ChainID.Uint64())}, + {"L2ChainID", l2ChainIDKey, binary.BigEndian.AppendUint64(nil, 86)}, {"Rollup", rollupKey, nil}, // Only 
available for custom chain configs {"ChainConfig", l2ChainConfigKey, nil}, // Only available for custom chain configs {"Unknown", preimage.LocalIndexKey(1000).PreimageKey(), nil}, @@ -52,13 +54,13 @@ func TestLocalPreimageSource(t *testing.T) { func TestGetCustomChainConfigPreimages(t *testing.T) { cfg := &config.Config{ - Rollup: chaincfg.OPSepolia(), - IsCustomChainConfig: true, - L1Head: common.HexToHash("0x1111"), - L2OutputRoot: common.HexToHash("0x2222"), - L2Claim: common.HexToHash("0x3333"), - L2ClaimBlockNumber: 1234, - L2ChainConfig: params.SepoliaChainConfig, + Rollup: chaincfg.OPSepolia(), + L2ChainID: client.CustomChainIDIndicator, + L1Head: common.HexToHash("0x1111"), + L2OutputRoot: common.HexToHash("0x2222"), + L2Claim: common.HexToHash("0x3333"), + L2ClaimBlockNumber: 1234, + L2ChainConfig: params.SepoliaChainConfig, } source := NewLocalPreimageSource(cfg) actualRollup, err := source.Get(rollupKey) From 25972bd87766339f865991b05711c05f7e61bcaf Mon Sep 17 00:00:00 2001 From: mountcount <166301065+mountcount@users.noreply.github.com> Date: Thu, 5 Dec 2024 23:15:57 +0800 Subject: [PATCH 068/111] chore: fix some problematic function names in comment (#13142) Signed-off-by: mountcount --- op-service/txmgr/send_state_test.go | 2 +- op-supervisor/supervisor/frontend/frontend.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/op-service/txmgr/send_state_test.go b/op-service/txmgr/send_state_test.go index d48db5fd151..0da3d8ba19f 100644 --- a/op-service/txmgr/send_state_test.go +++ b/op-service/txmgr/send_state_test.go @@ -58,7 +58,7 @@ func TestSendStateNoAbortAfterProcessOtherError(t *testing.T) { require.Nil(t, sendState.CriticalError()) } -// TestSendStateAbortSafelyAfterNonceTooLowButNoTxMined asserts that we will abort after the very +// TestSendStateAbortSafelyAfterNonceTooLowNoTxPublished asserts that we will abort after the very // first none-too-low error if a tx hasn't yet been published. 
func TestSendStateAbortSafelyAfterNonceTooLowNoTxPublished(t *testing.T) { sendState := newSendState() diff --git a/op-supervisor/supervisor/frontend/frontend.go b/op-supervisor/supervisor/frontend/frontend.go index 0a5b70a4799..6a43e1fedb0 100644 --- a/op-supervisor/supervisor/frontend/frontend.go +++ b/op-supervisor/supervisor/frontend/frontend.go @@ -47,7 +47,7 @@ func (q *QueryFrontend) CheckMessage(identifier types.Identifier, payloadHash co return q.Supervisor.CheckMessage(identifier, payloadHash) } -// CheckMessage checks the safety-level of a collection of messages, +// CheckMessages checks the safety-level of a collection of messages, // and returns if the minimum safety-level is met for all messages. func (q *QueryFrontend) CheckMessages( messages []types.Message, From 564b0c17198bff7c3eebc7c328e1bf57b9a46188 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 5 Dec 2024 23:19:29 +0800 Subject: [PATCH 069/111] NewChannelBuilder => newChannelBuilder, and move it into channel_builder_test.go (#13148) --- op-batcher/batcher/channel_builder.go | 12 ---- op-batcher/batcher/channel_builder_test.go | 67 +++++++++++++--------- 2 files changed, 40 insertions(+), 39 deletions(-) diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index 597b5ed3e14..56069e5bf16 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -88,18 +88,6 @@ type ChannelBuilder struct { outputBytes int } -// NewChannelBuilder creates a new channel builder or returns an error if the -// channel out could not be created. 
-// it acts as a factory for either a span or singular channel out -func NewChannelBuilder(cfg ChannelConfig, rollupCfg *rollup.Config, latestL1OriginBlockNum uint64) (*ChannelBuilder, error) { - co, err := NewChannelOut(cfg, rollupCfg) - if err != nil { - return nil, fmt.Errorf("creating channel out: %w", err) - } - - return NewChannelBuilderWithChannelOut(cfg, rollupCfg, latestL1OriginBlockNum, co), nil -} - func NewChannelBuilderWithChannelOut(cfg ChannelConfig, rollupCfg *rollup.Config, latestL1OriginBlockNum uint64, channelOut derive.ChannelOut) *ChannelBuilder { cb := &ChannelBuilder{ cfg: cfg, diff --git a/op-batcher/batcher/channel_builder_test.go b/op-batcher/batcher/channel_builder_test.go index 6994186b7f0..a6cb9371f9c 100644 --- a/op-batcher/batcher/channel_builder_test.go +++ b/op-batcher/batcher/channel_builder_test.go @@ -3,6 +3,7 @@ package batcher import ( "bytes" "errors" + "fmt" "math" "math/big" "math/rand" @@ -27,6 +28,18 @@ var defaultTestRollupConfig = &rollup.Config{ L2ChainID: big.NewInt(1234), } +// newChannelBuilder creates a new channel builder or returns an error if the +// channel out could not be created. +// it acts as a factory for either a span or singular channel out +func newChannelBuilder(cfg ChannelConfig, rollupCfg *rollup.Config, latestL1OriginBlockNum uint64) (*ChannelBuilder, error) { + co, err := NewChannelOut(cfg, rollupCfg) + if err != nil { + return nil, fmt.Errorf("creating channel out: %w", err) + } + + return NewChannelBuilderWithChannelOut(cfg, rollupCfg, latestL1OriginBlockNum, co), nil +} + // addMiniBlock adds a minimal valid L2 block to the channel builder using the // ChannelBuilder.AddBlock method. 
func addMiniBlock(cb *ChannelBuilder) error { @@ -106,7 +119,7 @@ func FuzzDurationTimeoutZeroMaxChannelDuration(f *testing.F) { f.Fuzz(func(t *testing.T, l1BlockNum uint64) { channelConfig := defaultTestChannelConfig() channelConfig.MaxChannelDuration = 0 - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) cb.timeout = 0 cb.updateDurationTimeout(l1BlockNum) @@ -129,7 +142,7 @@ func FuzzChannelBuilder_DurationZero(f *testing.F) { // Create the channel builder channelConfig := defaultTestChannelConfig() channelConfig.MaxChannelDuration = maxChannelDuration - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Whenever the timeout is set to 0, the channel builder should have a duration timeout @@ -156,7 +169,7 @@ func FuzzDurationTimeoutMaxChannelDuration(f *testing.F) { // Create the channel builder channelConfig := defaultTestChannelConfig() channelConfig.MaxChannelDuration = maxChannelDuration - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Whenever the timeout is greater than the l1BlockNum, @@ -190,7 +203,7 @@ func FuzzChannelCloseTimeout(f *testing.F) { channelConfig := defaultTestChannelConfig() channelConfig.ChannelTimeout = channelTimeout channelConfig.SubSafetyMargin = subSafetyMargin - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Check the timeout @@ -218,7 +231,7 @@ func FuzzChannelZeroCloseTimeout(f *testing.F) { channelConfig 
:= defaultTestChannelConfig() channelConfig.ChannelTimeout = channelTimeout channelConfig.SubSafetyMargin = subSafetyMargin - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Check the timeout @@ -245,7 +258,7 @@ func FuzzSeqWindowClose(f *testing.F) { channelConfig := defaultTestChannelConfig() channelConfig.SeqWindowSize = seqWindowSize channelConfig.SubSafetyMargin = subSafetyMargin - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Check the timeout @@ -273,7 +286,7 @@ func FuzzSeqWindowZeroTimeoutClose(f *testing.F) { channelConfig := defaultTestChannelConfig() channelConfig.SeqWindowSize = seqWindowSize channelConfig.SubSafetyMargin = subSafetyMargin - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Check the timeout @@ -321,7 +334,7 @@ func TestChannelBuilder_NextFrame(t *testing.T) { channelConfig := defaultTestChannelConfig() // Create a new channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Mock the internals of `ChannelBuilder.outputFrame` @@ -362,7 +375,7 @@ func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { channelConfig.BatchType = batchType // Construct a channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Mock 
the internals of `ChannelBuilder.outputFrame` @@ -398,7 +411,7 @@ func TestChannelBuilder_OutputFrames(t *testing.T) { channelConfig.InitNoneCompressor() // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.False(t, cb.IsFull()) require.Equal(t, 0, cb.PendingFrames()) @@ -452,7 +465,7 @@ func ChannelBuilder_OutputFrames_SpanBatch(t *testing.T, algo derive.Compression channelConfig.InitRatioCompressor(1, algo) // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.False(t, cb.IsFull()) require.Equal(t, 0, cb.PendingFrames()) @@ -510,7 +523,7 @@ func ChannelBuilder_MaxRLPBytesPerChannel(t *testing.T, batchType uint) { channelConfig.BatchType = batchType // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Add a block that overflows the [ChannelOut] @@ -532,7 +545,7 @@ func ChannelBuilder_MaxRLPBytesPerChannelFjord(t *testing.T, batchType uint) { channelConfig.BatchType = batchType // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Count how many a block that overflows the [ChannelOut] @@ -553,7 +566,7 @@ func ChannelBuilder_MaxRLPBytesPerChannelFjord(t *testing.T, batchType uint) { channelConfig.InitNoneCompressor() channelConfig.BatchType = batchType - cb, err = NewChannelBuilder(channelConfig, 
rollupConfig, latestL1BlockOrigin) + cb, err = newChannelBuilder(channelConfig, rollupConfig, latestL1BlockOrigin) require.NoError(t, err) // try add double the amount of block, it should not error @@ -576,7 +589,7 @@ func ChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T, batchType uint) { // Continuously add blocks until the max frame index is reached // This should cause the [ChannelBuilder.OutputFrames] function // to error - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.False(t, cb.IsFull()) require.Equal(t, 0, cb.PendingFrames()) @@ -612,7 +625,7 @@ func TestChannelBuilder_FullShadowCompressor(t *testing.T) { } cfg.InitShadowCompressor(derive.Zlib) - cb, err := NewChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(err) rng := rand.New(rand.NewSource(420)) @@ -644,7 +657,7 @@ func ChannelBuilder_AddBlock(t *testing.T, batchType uint) { channelConfig.InitRatioCompressor(1, derive.Zlib) // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Add a nonsense block to the channel builder @@ -670,7 +683,7 @@ func TestChannelBuilder_CheckTimeout(t *testing.T) { channelConfig := defaultTestChannelConfig() // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Assert timeout is setup correctly @@ -695,7 +708,7 @@ func TestChannelBuilder_CheckTimeoutZeroMaxChannelDuration(t *testing.T) { channelConfig.MaxChannelDuration 
= 0 // Construct the channel builder - cb, err := NewChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) // Without a max channel duration, timeout should not be set @@ -718,7 +731,7 @@ func TestChannelBuilder_FramePublished(t *testing.T) { cfg.SubSafetyMargin = 100 // Construct the channel builder - cb, err := NewChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.Equal(t, latestL1BlockOrigin+cfg.MaxChannelDuration, cb.timeout) @@ -735,7 +748,7 @@ func TestChannelBuilder_FramePublished(t *testing.T) { } func TestChannelBuilder_LatestL1Origin(t *testing.T) { - cb, err := NewChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.LatestL1Origin()) @@ -757,7 +770,7 @@ func TestChannelBuilder_LatestL1Origin(t *testing.T) { } func TestChannelBuilder_OldestL1Origin(t *testing.T) { - cb, err := NewChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.OldestL1Origin()) @@ -779,7 +792,7 @@ func TestChannelBuilder_OldestL1Origin(t *testing.T) { } func TestChannelBuilder_LatestL2(t *testing.T) { - cb, err := NewChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.LatestL2()) @@ -801,7 +814,7 @@ func 
TestChannelBuilder_LatestL2(t *testing.T) { } func TestChannelBuilder_OldestL2(t *testing.T) { - cb, err := NewChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(defaultTestChannelConfig(), defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.OldestL2()) @@ -831,7 +844,7 @@ func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) { cfg.TargetNumFrames = tnf cfg.BatchType = batchType cfg.InitShadowCompressor(derive.Zlib) - cb, err := NewChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(err) // initial builder should be empty @@ -876,7 +889,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) { chainId := big.NewInt(1234) spanBatch = derive.NewSpanBatch(uint64(0), chainId) } - cb, err := NewChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(err) require.Zero(cb.InputBytes()) @@ -913,7 +926,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) { cfg.TargetNumFrames = 16 cfg.BatchType = batchType cfg.InitRatioCompressor(1.0, derive.Zlib) - cb, err := NewChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) + cb, err := newChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin) require.NoError(err, "NewChannelBuilder") require.Zero(cb.OutputBytes()) From 0648499c5b166b8ae51d4a23d42981be0c44c583 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Thu, 5 Dec 2024 12:09:48 -0500 Subject: [PATCH 070/111] fix: codecov config (#13244) --- codecov.yml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/codecov.yml b/codecov.yml index 96b195edabf..8fd1bc51773 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,5 +1,6 @@ codecov: 
require_ci_to_pass: false + comment: layout: "diff, flags, files" behavior: default @@ -12,8 +13,14 @@ ignore: - "**/*.t.sol" - "packages/contracts-bedrock/test/**/*.sol" - "packages/contracts-bedrock/scripts/**/*.sol" - - "packages/contracts-bedrock/contracts/vendor/WETH9.sol" - - 'packages/contracts-bedrock/contracts/EAS/**/*.sol' + - "packages/contracts-bedrock/src/vendor/**/*.sol" + - "packages/contracts-bedrock/src/interfaces/**/*.sol" + # TODO: add coverage for MIPS64 back once tests are merged in + - "packages/contracts-bedrock/src/cannon/MIPS64.sol" + - "packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol" + - "packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol" + - "packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol" + coverage: status: patch: @@ -26,16 +33,14 @@ coverage: project: default: informational: true + flag_management: # Note: flags should have the same name as the circleci job in which they # are uploaded. individual_flags: - - name: contracts-bedrock-tests + - name: contracts-bedrock-coverage paths: - packages/contracts-bedrock/src statuses: - type: patch target: 100% - - name: bedrock-go-tests - - name: contracts-tests - - name: sdk-tests From ee5c794dda5500ba452a45d88e3665fa5257efd0 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Thu, 5 Dec 2024 10:13:18 -0700 Subject: [PATCH 071/111] op-deployer: Clean up to use latest libs (#13257) --- .../pkg/deployer/bootstrap/delayed_weth.go | 25 ++----------- .../pkg/deployer/bootstrap/dispute_game.go | 25 ++----------- op-deployer/pkg/deployer/bootstrap/mips.go | 16 ++++----- op-deployer/pkg/deployer/opcm/asterisc.go | 36 +------------------ op-deployer/pkg/deployer/opcm/delayed_weth.go | 35 +----------------- op-deployer/pkg/deployer/opcm/dispute_game.go | 32 +---------------- op-deployer/pkg/deployer/opcm/mips.go | 32 +---------------- op-deployer/pkg/deployer/opcm/opchain.go | 31 +--------------- op-deployer/pkg/deployer/opcm/opcm.go | 5 +-- 
op-deployer/pkg/deployer/opcm/superchain.go | 35 +----------------- 10 files changed, 21 insertions(+), 251 deletions(-) diff --git a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go index aa9eea361ac..9c3e0bc07fa 100644 --- a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go +++ b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go @@ -7,8 +7,6 @@ import ( "math/big" "strings" - "github.com/ethereum-optimism/optimism/op-chain-ops/script" - "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -165,35 +163,18 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { return fmt.Errorf("failed to connect to L1 RPC: %w", err) } - host, err := env.DefaultScriptHost( + host, err := env.DefaultForkedScriptHost( + ctx, bcaster, lgr, chainDeployer, artifactsFS, - script.WithForkHook(func(cfg *script.ForkConfig) (forking.ForkSource, error) { - src, err := forking.RPCSourceByNumber(cfg.URLOrAlias, l1RPC, *cfg.BlockNumber) - if err != nil { - return nil, fmt.Errorf("failed to create RPC fork source: %w", err) - } - return forking.Cache(src), nil - }), + l1RPC, ) if err != nil { return fmt.Errorf("failed to create script host: %w", err) } - latest, err := l1Client.HeaderByNumber(ctx, nil) - if err != nil { - return fmt.Errorf("failed to get latest block: %w", err) - } - - if _, err := host.CreateSelectFork( - script.ForkWithURLOrAlias("main"), - script.ForkWithBlockNumberU256(latest.Number), - ); err != nil { - return fmt.Errorf("failed to select fork: %w", err) - } - var release string if cfg.ArtifactsLocator.IsTag() { release = cfg.ArtifactsLocator.Tag diff --git a/op-deployer/pkg/deployer/bootstrap/dispute_game.go b/op-deployer/pkg/deployer/bootstrap/dispute_game.go index 441fd73d9ea..3f1d354f28f 100644 --- 
a/op-deployer/pkg/deployer/bootstrap/dispute_game.go +++ b/op-deployer/pkg/deployer/bootstrap/dispute_game.go @@ -6,8 +6,6 @@ import ( "fmt" "strings" - "github.com/ethereum-optimism/optimism/op-chain-ops/script" - "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum/go-ethereum/rpc" @@ -179,35 +177,18 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { return fmt.Errorf("failed to create broadcaster: %w", err) } - host, err := env.DefaultScriptHost( + host, err := env.DefaultForkedScriptHost( + ctx, bcaster, lgr, chainDeployer, artifactsFS, - script.WithForkHook(func(forkCfg *script.ForkConfig) (forking.ForkSource, error) { - src, err := forking.RPCSourceByNumber(forkCfg.URLOrAlias, l1Rpc, *forkCfg.BlockNumber) - if err != nil { - return nil, fmt.Errorf("failed to create RPC fork source: %w", err) - } - return forking.Cache(src), nil - }), + l1Rpc, ) if err != nil { return fmt.Errorf("failed to create L1 script host: %w", err) } - latest, err := l1Client.HeaderByNumber(ctx, nil) - if err != nil { - return fmt.Errorf("failed to get latest block: %w", err) - } - - if _, err := host.CreateSelectFork( - script.ForkWithURLOrAlias("main"), - script.ForkWithBlockNumberU256(latest.Number), - ); err != nil { - return fmt.Errorf("failed to select fork: %w", err) - } - var release string if cfg.ArtifactsLocator.IsTag() { release = cfg.ArtifactsLocator.Tag diff --git a/op-deployer/pkg/deployer/bootstrap/mips.go b/op-deployer/pkg/deployer/bootstrap/mips.go index efc1fd1c6e0..9c8fd555fb4 100644 --- a/op-deployer/pkg/deployer/bootstrap/mips.go +++ b/op-deployer/pkg/deployer/bootstrap/mips.go @@ -6,6 +6,8 @@ import ( "fmt" "strings" + "github.com/ethereum/go-ethereum/rpc" + artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum/go-ethereum/common" @@ -123,11 +125,13 @@ func MIPS(ctx 
context.Context, cfg MIPSConfig) error { } }() - l1Client, err := ethclient.Dial(cfg.L1RPCUrl) + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { return fmt.Errorf("failed to connect to L1 RPC: %w", err) } + l1Client := ethclient.NewClient(l1RPC) + chainID, err := l1Client.ChainID(ctx) if err != nil { return fmt.Errorf("failed to get chain ID: %w", err) @@ -147,21 +151,17 @@ func MIPS(ctx context.Context, cfg MIPSConfig) error { return fmt.Errorf("failed to create broadcaster: %w", err) } - nonce, err := l1Client.NonceAt(ctx, chainDeployer, nil) - if err != nil { - return fmt.Errorf("failed to get starting nonce: %w", err) - } - - host, err := env.DefaultScriptHost( + host, err := env.DefaultForkedScriptHost( + ctx, bcaster, lgr, chainDeployer, artifactsFS, + l1RPC, ) if err != nil { return fmt.Errorf("failed to create script host: %w", err) } - host.SetNonce(chainDeployer, nonce) var release string if cfg.ArtifactsLocator.IsTag() { diff --git a/op-deployer/pkg/deployer/opcm/asterisc.go b/op-deployer/pkg/deployer/opcm/asterisc.go index 9ba8959e354..053da43b425 100644 --- a/op-deployer/pkg/deployer/opcm/asterisc.go +++ b/op-deployer/pkg/deployer/opcm/asterisc.go @@ -1,8 +1,6 @@ package opcm import ( - "fmt" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -24,41 +22,9 @@ func (output *DeployAsteriscOutput) CheckOutput(input common.Address) error { return nil } -type DeployAsteriscScript struct { - Run func(input, output common.Address) error -} - func DeployAsterisc( host *script.Host, input DeployAsteriscInput, ) (DeployAsteriscOutput, error) { - var output DeployAsteriscOutput - inputAddr := host.NewScriptAddress() - outputAddr := host.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*DeployAsteriscInput](host, inputAddr, &input) - if err != nil { - return output, fmt.Errorf("failed to insert DeployAsteriscInput precompile: %w", err) - } - defer cleanupInput() - - cleanupOutput, 
err := script.WithPrecompileAtAddress[*DeployAsteriscOutput](host, outputAddr, &output, - script.WithFieldSetter[*DeployAsteriscOutput]) - if err != nil { - return output, fmt.Errorf("failed to insert DeployAsteriscOutput precompile: %w", err) - } - defer cleanupOutput() - - implContract := "DeployAsterisc" - deployScript, cleanupDeploy, err := script.WithScript[DeployAsteriscScript](host, "DeployAsterisc.s.sol", implContract) - if err != nil { - return output, fmt.Errorf("failed to load %s script: %w", implContract, err) - } - defer cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); err != nil { - return output, fmt.Errorf("failed to run %s script: %w", implContract, err) - } - - return output, nil + return RunBasicScript[DeployAsteriscInput, DeployAsteriscOutput](host, input, "DeployAsterisc.s.sol", "DeployAsterisc") } diff --git a/op-deployer/pkg/deployer/opcm/delayed_weth.go b/op-deployer/pkg/deployer/opcm/delayed_weth.go index 8a0623dd468..06f533956f8 100644 --- a/op-deployer/pkg/deployer/opcm/delayed_weth.go +++ b/op-deployer/pkg/deployer/opcm/delayed_weth.go @@ -1,7 +1,6 @@ package opcm import ( - "fmt" "math/big" "github.com/ethereum/go-ethereum/common" @@ -31,41 +30,9 @@ func (output *DeployDelayedWETHOutput) CheckOutput(input common.Address) error { return nil } -type DeployDelayedWETHScript struct { - Run func(input, output common.Address) error -} - func DeployDelayedWETH( host *script.Host, input DeployDelayedWETHInput, ) (DeployDelayedWETHOutput, error) { - var output DeployDelayedWETHOutput - inputAddr := host.NewScriptAddress() - outputAddr := host.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*DeployDelayedWETHInput](host, inputAddr, &input) - if err != nil { - return output, fmt.Errorf("failed to insert DeployDelayedWETHInput precompile: %w", err) - } - defer cleanupInput() - - cleanupOutput, err := script.WithPrecompileAtAddress[*DeployDelayedWETHOutput](host, outputAddr, &output, - 
script.WithFieldSetter[*DeployDelayedWETHOutput]) - if err != nil { - return output, fmt.Errorf("failed to insert DeployDelayedWETHOutput precompile: %w", err) - } - defer cleanupOutput() - - implContract := "DeployDelayedWETH" - deployScript, cleanupDeploy, err := script.WithScript[DeployDelayedWETHScript](host, "DeployDelayedWETH.s.sol", implContract) - if err != nil { - return output, fmt.Errorf("failed to load %s script: %w", implContract, err) - } - defer cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); err != nil { - return output, fmt.Errorf("failed to run %s script: %w", implContract, err) - } - - return output, nil + return RunBasicScript[DeployDelayedWETHInput, DeployDelayedWETHOutput](host, input, "DeployDelayedWETH.s.sol", "DeployDelayedWETH") } diff --git a/op-deployer/pkg/deployer/opcm/dispute_game.go b/op-deployer/pkg/deployer/opcm/dispute_game.go index 481a401d07d..7128a08e8b0 100644 --- a/op-deployer/pkg/deployer/opcm/dispute_game.go +++ b/op-deployer/pkg/deployer/opcm/dispute_game.go @@ -1,8 +1,6 @@ package opcm import ( - "fmt" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -46,33 +44,5 @@ func DeployDisputeGame( host *script.Host, input DeployDisputeGameInput, ) (DeployDisputeGameOutput, error) { - var output DeployDisputeGameOutput - inputAddr := host.NewScriptAddress() - outputAddr := host.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*DeployDisputeGameInput](host, inputAddr, &input) - if err != nil { - return output, fmt.Errorf("failed to insert DeployDisputeGameInput precompile: %w", err) - } - defer cleanupInput() - - cleanupOutput, err := script.WithPrecompileAtAddress[*DeployDisputeGameOutput](host, outputAddr, &output, - script.WithFieldSetter[*DeployDisputeGameOutput]) - if err != nil { - return output, fmt.Errorf("failed to insert DeployDisputeGameOutput precompile: %w", err) - } - defer cleanupOutput() - - implContract := 
"DeployDisputeGame" - deployScript, cleanupDeploy, err := script.WithScript[DeployDisputeGameScript](host, "DeployDisputeGame.s.sol", implContract) - if err != nil { - return output, fmt.Errorf("failed to load %s script: %w", implContract, err) - } - defer cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); err != nil { - return output, fmt.Errorf("failed to run %s script: %w", implContract, err) - } - - return output, nil + return RunBasicScript[DeployDisputeGameInput, DeployDisputeGameOutput](host, input, "DeployDisputeGame.s.sol", "DeployDisputeGame") } diff --git a/op-deployer/pkg/deployer/opcm/mips.go b/op-deployer/pkg/deployer/opcm/mips.go index 5d1a7798ced..6150b308835 100644 --- a/op-deployer/pkg/deployer/opcm/mips.go +++ b/op-deployer/pkg/deployer/opcm/mips.go @@ -1,8 +1,6 @@ package opcm import ( - "fmt" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -33,33 +31,5 @@ func DeployMIPS( host *script.Host, input DeployMIPSInput, ) (DeployMIPSOutput, error) { - var output DeployMIPSOutput - inputAddr := host.NewScriptAddress() - outputAddr := host.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*DeployMIPSInput](host, inputAddr, &input) - if err != nil { - return output, fmt.Errorf("failed to insert DeployMIPSInput precompile: %w", err) - } - defer cleanupInput() - - cleanupOutput, err := script.WithPrecompileAtAddress[*DeployMIPSOutput](host, outputAddr, &output, - script.WithFieldSetter[*DeployMIPSOutput]) - if err != nil { - return output, fmt.Errorf("failed to insert DeployMIPSOutput precompile: %w", err) - } - defer cleanupOutput() - - implContract := "DeployMIPS" - deployScript, cleanupDeploy, err := script.WithScript[DeployMIPSScript](host, "DeployMIPS.s.sol", implContract) - if err != nil { - return output, fmt.Errorf("failed to load %s script: %w", implContract, err) - } - defer cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); 
err != nil { - return output, fmt.Errorf("failed to run %s script: %w", implContract, err) - } - - return output, nil + return RunBasicScript[DeployMIPSInput, DeployMIPSOutput](host, input, "DeployMIPS.s.sol", "DeployMIPS") } diff --git a/op-deployer/pkg/deployer/opcm/opchain.go b/op-deployer/pkg/deployer/opcm/opchain.go index 8c7e60fec4d..2a866450133 100644 --- a/op-deployer/pkg/deployer/opcm/opchain.go +++ b/op-deployer/pkg/deployer/opcm/opchain.go @@ -88,36 +88,7 @@ func DeployOPChainIsthmus(host *script.Host, input DeployOPChainInputIsthmus) (D } func deployOPChain[T any](host *script.Host, input T) (DeployOPChainOutput, error) { - var dco DeployOPChainOutput - inputAddr := host.NewScriptAddress() - outputAddr := host.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*T](host, inputAddr, &input) - if err != nil { - return dco, fmt.Errorf("failed to insert DeployOPChainInput precompile: %w", err) - } - defer cleanupInput() - host.Label(inputAddr, "DeployOPChainInput") - - cleanupOutput, err := script.WithPrecompileAtAddress[*DeployOPChainOutput](host, outputAddr, &dco, - script.WithFieldSetter[*DeployOPChainOutput]) - if err != nil { - return dco, fmt.Errorf("failed to insert DeployOPChainOutput precompile: %w", err) - } - defer cleanupOutput() - host.Label(outputAddr, "DeployOPChainOutput") - - deployScript, cleanupDeploy, err := script.WithScript[DeployOPChainScript](host, "DeployOPChain.s.sol", "DeployOPChain") - if err != nil { - return dco, fmt.Errorf("failed to load DeployOPChain script: %w", err) - } - defer cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); err != nil { - return dco, fmt.Errorf("failed to run DeployOPChain script: %w", err) - } - - return dco, nil + return RunBasicScript[T, DeployOPChainOutput](host, input, "DeployOPChain.s.sol", "DeployOPChain") } type ReadImplementationAddressesInput struct { diff --git a/op-deployer/pkg/deployer/opcm/opcm.go b/op-deployer/pkg/deployer/opcm/opcm.go index 
9de3348be28..26179271b69 100644 --- a/op-deployer/pkg/deployer/opcm/opcm.go +++ b/op-deployer/pkg/deployer/opcm/opcm.go @@ -40,10 +40,7 @@ func DeployOPCM( host *script.Host, input DeployOPCMInput, ) (DeployOPCMOutput, error) { - scriptFile := "DeployOPCM.s.sol" - contractName := "DeployOPCM" - - out, err := RunBasicScript[DeployOPCMInput, DeployOPCMOutput](host, input, scriptFile, contractName) + out, err := RunBasicScript[DeployOPCMInput, DeployOPCMOutput](host, input, "DeployOPCM.s.sol", "DeployOPCM") if err != nil { return DeployOPCMOutput{}, fmt.Errorf("failed to deploy OPCM: %w", err) } diff --git a/op-deployer/pkg/deployer/opcm/superchain.go b/op-deployer/pkg/deployer/opcm/superchain.go index 4f648bbfa8a..fcbccc3cea4 100644 --- a/op-deployer/pkg/deployer/opcm/superchain.go +++ b/op-deployer/pkg/deployer/opcm/superchain.go @@ -1,7 +1,6 @@ package opcm import ( - "fmt" "math/big" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" @@ -53,37 +52,5 @@ type DeploySuperchainOpts struct { } func DeploySuperchain(h *script.Host, input DeploySuperchainInput) (DeploySuperchainOutput, error) { - var dso DeploySuperchainOutput - - inputAddr := h.NewScriptAddress() - outputAddr := h.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*DeploySuperchainInput](h, inputAddr, &input) - if err != nil { - return dso, fmt.Errorf("failed to insert DeploySuperchainInput precompile: %w", err) - } - defer cleanupInput() - - cleanupOutput, err := script.WithPrecompileAtAddress[*DeploySuperchainOutput]( - h, - outputAddr, - &dso, - script.WithFieldSetter[*DeploySuperchainOutput], - ) - if err != nil { - return dso, fmt.Errorf("failed to insert DeploySuperchainOutput precompile: %w", err) - } - defer cleanupOutput() - - deployScript, cleanupDeploy, err := script.WithScript[DeploySuperchainScript](h, "DeploySuperchain.s.sol", "DeploySuperchain") - if err != nil { - return dso, fmt.Errorf("failed to load DeploySuperchain script: %w", err) - } - defer 
cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); err != nil { - return dso, fmt.Errorf("failed to run DeploySuperchain script: %w", err) - } - - return dso, nil + return RunBasicScript[DeploySuperchainInput, DeploySuperchainOutput](h, input, "DeploySuperchain.s.sol", "DeploySuperchain") } From 1eb223d8d878237969c95bf3616b3e074b861d61 Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Thu, 5 Dec 2024 14:11:05 -0600 Subject: [PATCH 072/111] interop: parallelized receipt fetching (#13044) * interop: parallelized receipt fetching * fix test * remove elastic thread count * Add Debug Message for Range Fetching * rename end to last * Remove Println --------- Co-authored-by: Matthew Slipper --- .../backend/processors/chain_processor.go | 148 ++++++++++++++---- 1 file changed, 117 insertions(+), 31 deletions(-) diff --git a/op-supervisor/supervisor/backend/processors/chain_processor.go b/op-supervisor/supervisor/backend/processors/chain_processor.go index 688f68ac611..1b4ffcc822e 100644 --- a/op-supervisor/supervisor/backend/processors/chain_processor.go +++ b/op-supervisor/supervisor/backend/processors/chain_processor.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "sync" "sync/atomic" "time" @@ -62,20 +63,23 @@ type ChainProcessor struct { ctx context.Context cancel context.CancelFunc wg sync.WaitGroup + + maxFetcherThreads int } func NewChainProcessor(log log.Logger, chain types.ChainID, processor LogProcessor, rewinder DatabaseRewinder, onIndexed func()) *ChainProcessor { ctx, cancel := context.WithCancel(context.Background()) out := &ChainProcessor{ - log: log.New("chain", chain), - client: nil, - chain: chain, - processor: processor, - rewinder: rewinder, - newHead: make(chan struct{}, 1), - onIndexed: onIndexed, - ctx: ctx, - cancel: cancel, + log: log.New("chain", chain), + client: nil, + chain: chain, + processor: processor, + rewinder: rewinder, + newHead: make(chan struct{}, 1), + onIndexed: onIndexed, + ctx: ctx, + cancel: 
cancel, + maxFetcherThreads: 10, } return out } @@ -131,15 +135,15 @@ func (s *ChainProcessor) work() { if s.ctx.Err() != nil { // check if we are closing down return } + _, err := s.rangeUpdate() target := s.nextNum() - if err := s.update(target); err != nil { + if err != nil { if errors.Is(err, ethereum.NotFound) { s.log.Debug("Event-indexer cannot find next block yet", "target", target, "err", err) } else if errors.Is(err, types.ErrNoRPCSource) { s.log.Warn("No RPC source configured, cannot process new blocks") } else { s.log.Error("Failed to process new block", "err", err) - // idle until next update trigger } } else if x := s.lastHead.Load(); target+1 <= x { s.log.Debug("Continuing with next block", "newTarget", target+1, "lastHead", x) @@ -151,34 +155,115 @@ func (s *ChainProcessor) work() { } } -func (s *ChainProcessor) update(nextNum uint64) error { +func (s *ChainProcessor) rangeUpdate() (int, error) { s.clientLock.Lock() defer s.clientLock.Unlock() - if s.client == nil { - return types.ErrNoRPCSource + return 0, types.ErrNoRPCSource + } + + // define the range of blocks to fetch + // [next, last] inclusive with a max of s.fetcherThreads blocks + next := s.nextNum() + last := s.lastHead.Load() + // next is already beyond the end, nothing to do + if next > last { + return 0, nil + } + nums := make([]uint64, 0) + for i := next; i <= last; i++ { + nums = append(nums, i) + // only collect as many blocks as we can fetch in parallel + if len(nums) >= s.maxFetcherThreads { + break + } + } + + s.log.Debug("Fetching blocks", "chain", s.chain.String(), "next", next, "last", last, "count", len(nums)) + + // make a structure to receive parallel results + type keyedResult struct { + num uint64 + blockRef *eth.BlockRef + receipts gethtypes.Receipts + err error + } + parallelResults := make(chan keyedResult, len(nums)) + + // each thread will fetch a block and its receipts and send the result to the channel + fetch := func(wg *sync.WaitGroup, num uint64) { + defer 
wg.Done() + // ensure we emit the result at the end + result := keyedResult{num, nil, nil, nil} + defer func() { parallelResults <- result }() + + // fetch the block ref + ctx, cancel := context.WithTimeout(s.ctx, time.Second*10) + nextL1, err := s.client.L1BlockRefByNumber(ctx, num) + cancel() + if err != nil { + result.err = err + return + } + next := eth.BlockRef{ + Hash: nextL1.Hash, + ParentHash: nextL1.ParentHash, + Number: nextL1.Number, + Time: nextL1.Time, + } + result.blockRef = &next + + // fetch receipts + ctx, cancel = context.WithTimeout(s.ctx, time.Second*10) + _, receipts, err := s.client.FetchReceipts(ctx, next.Hash) + cancel() + if err != nil { + result.err = err + return + } + result.receipts = receipts } - ctx, cancel := context.WithTimeout(s.ctx, time.Second*10) - nextL1, err := s.client.L1BlockRefByNumber(ctx, nextNum) - next := eth.BlockRef{ - Hash: nextL1.Hash, - ParentHash: nextL1.ParentHash, - Number: nextL1.Number, - Time: nextL1.Time, + // kick off the fetches and wait for them to complete + var wg sync.WaitGroup + for _, num := range nums { + wg.Add(1) + go fetch(&wg, num) } - cancel() - if err != nil { - return fmt.Errorf("failed to fetch next block: %w", err) + wg.Wait() + + // collect and sort the results + results := make([]keyedResult, len(nums)) + for i := range nums { + result := <-parallelResults + results[i] = result } + slices.SortFunc(results, func(a, b keyedResult) int { + if a.num < b.num { + return -1 + } + if a.num > b.num { + return 1 + } + return 0 + }) - // Try and fetch the receipts - ctx, cancel = context.WithTimeout(s.ctx, time.Second*10) - _, receipts, err := s.client.FetchReceipts(ctx, next.Hash) - cancel() - if err != nil { - return fmt.Errorf("failed to fetch receipts of block: %w", err) + // process the results in order and return the first error encountered, + // and the number of blocks processed successfully by this call + for i := range results { + if results[i].err != nil { + return i, fmt.Errorf("failed 
to fetch block %d: %w", results[i].num, results[i].err) + } + // process the receipts + err := s.process(s.ctx, *results[i].blockRef, results[i].receipts) + if err != nil { + return i, fmt.Errorf("failed to process block %d: %w", results[i].num, err) + } } + return len(results), nil +} + +func (s *ChainProcessor) process(ctx context.Context, next eth.BlockRef, receipts gethtypes.Receipts) error { if err := s.processor.ProcessLogs(ctx, next, receipts); err != nil { s.log.Error("Failed to process block", "block", next, "err", err) @@ -187,7 +272,7 @@ func (s *ChainProcessor) update(nextNum uint64) error { } // Try to rewind the database to the previous block to remove any logs from this block that were written - if err := s.rewinder.Rewind(s.chain, nextNum-1); err != nil { + if err := s.rewinder.Rewind(s.chain, next.Number-1); err != nil { // If any logs were written, our next attempt to write will fail and we'll retry this rewind. // If no logs were written successfully then the rewind wouldn't have done anything anyway. s.log.Error("Failed to rewind after error processing block", "block", next, "err", err) @@ -197,6 +282,7 @@ func (s *ChainProcessor) update(nextNum uint64) error { s.log.Info("Indexed block events", "block", next, "txs", len(receipts)) s.onIndexed() return nil + } func (s *ChainProcessor) OnNewHead(head eth.BlockRef) error { From 9548d53a64f7ef738c2173620c90b28dc821950a Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Thu, 5 Dec 2024 14:02:25 -0700 Subject: [PATCH 073/111] op-conductor: Fix hang in testing (#13266) I've found a [deadlock](https://app.circleci.com/pipelines/github/ethereum-optimism/optimism/73846/workflows/19369ca9-9eaa-4021-9eb8-589a06e7bd34/jobs/3018041) in the op-conductor tests. The inner conductor loop is stuck waiting on a queue signal that never arrives, which in turn causes the test to hang since the coordinating waitgroup is never decremented. 
I believe this happens because the conductor's `queueAction` method is non-blocking, so nothing ever triggers the conductor's inner loop when the conductor starts up. I've updated the code to use a blocking channel write when the conductor starts to avoid this issue. The traces of the deadlock look like this, for reference: ``` goroutine 227 [semacquire, 9 minutes]: sync.runtime_Semacquire(0xc0004df577?) /usr/local/go/src/runtime/sema.go:62 +0x25 sync.(*WaitGroup).Wait(0x0?) /usr/local/go/src/sync/waitgroup.go:116 +0x48 github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductorTestSuite).execute(0xc0003e4008, 0x0) /var/opt/circleci/data/workdir/op-conductor/conductor/service_test.go:177 +0x65 github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductorTestSuite).executeAction(...) /var/opt/circleci/data/workdir/op-conductor/conductor/service_test.go:197 github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductorTestSuite).enableSynchronization(0xc0003e4008) /var/opt/circleci/data/workdir/op-conductor/conductor/service_test.go:163 +0x93 github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductorTestSuite).TestScenario4(0xc0003e4008) /var/opt/circleci/data/workdir/op-conductor/conductor/service_test.go:420 +0x27 reflect.Value.call({0xc000131900?, 0xc0001b3740?, 0x1fed5b8?}, {0x191d63c, 0x4}, {0xc0004dff28, 0x1, 0x16f12e0?}) /usr/local/go/src/reflect/value.go:596 +0xca6 reflect.Value.Call({0xc000131900?, 0xc0001b3740?, 0x28307b8?}, {0xc0004dff28?, 0xf?, 0x0?}) /usr/local/go/src/reflect/value.go:380 +0xb9 github.com/stretchr/testify/suite.Run.func1(0xc0001f9ba0) /home/circleci/go/pkg/mod/github.com/stretchr/testify@v1.10.0/suite/suite.go:202 +0x4a5 testing.tRunner(0xc0001f9ba0, 0xc0001f4e10) /usr/local/go/src/testing/testing.go:1689 +0xfb created by testing.(*T).Run in goroutine 166 /usr/local/go/src/testing/testing.go:1742 +0x390 goroutine 229 [select, 9 minutes]:
github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductor).loopAction(0xc00056ab40) /var/opt/circleci/data/workdir/op-conductor/conductor/service.go:577 +0x14f github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductorTestSuite).enableSynchronization.func1() /var/opt/circleci/data/workdir/op-conductor/conductor/service_test.go:159 +0x33 github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductor).loop(0xc00056ab40) /var/opt/circleci/data/workdir/op-conductor/conductor/service.go:570 +0x99 created by github.com/ethereum-optimism/optimism/op-conductor/conductor.(*OpConductor).Start in goroutine 227 /var/opt/circleci/data/workdir/op-conductor/conductor/service.go:376 +0x1f6 FAIL github.com/ethereum-optimism/optimism/op-conductor/conductor 600.103s ``` --- op-conductor/conductor/service.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/op-conductor/conductor/service.go b/op-conductor/conductor/service.go index cccba2c76ac..89948b614f0 100644 --- a/op-conductor/conductor/service.go +++ b/op-conductor/conductor/service.go @@ -381,7 +381,9 @@ func (oc *OpConductor) Start(ctx context.Context) error { oc.log.Info("OpConductor started") // queue an action in case sequencer is not in the desired state. oc.prevState = NewState(oc.leader.Load(), oc.healthy.Load(), oc.seqActive.Load()) - oc.queueAction() + // Immediately queue an action. This is made blocking to ensure that start is not + // considered complete until the first action is executed. + oc.actionCh <- struct{}{} return nil } From 750ed2025f4ef00c6c5a9f1ffee90c98fdeb39bf Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 6 Dec 2024 07:05:23 +1000 Subject: [PATCH 074/111] op-dispute-mon: Support asterisc kona game types. 
(#13270) --- op-dispute-mon/mon/extract/caller.go | 3 ++- op-dispute-mon/mon/extract/caller_test.go | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/op-dispute-mon/mon/extract/caller.go b/op-dispute-mon/mon/extract/caller.go index 03c130cd3dc..0b88360b69a 100644 --- a/op-dispute-mon/mon/extract/caller.go +++ b/op-dispute-mon/mon/extract/caller.go @@ -55,7 +55,8 @@ func (g *GameCallerCreator) CreateContract(ctx context.Context, game gameTypes.G faultTypes.PermissionedGameType, faultTypes.AsteriscGameType, faultTypes.AlphabetGameType, - faultTypes.FastGameType: + faultTypes.FastGameType, + faultTypes.AsteriscKonaGameType: fdg, err := contracts.NewFaultDisputeGameContract(ctx, g.m, game.Proxy, g.caller) if err != nil { return nil, fmt.Errorf("failed to create fault dispute game contract: %w", err) diff --git a/op-dispute-mon/mon/extract/caller_test.go b/op-dispute-mon/mon/extract/caller_test.go index 065f06c051a..61c7f349c82 100644 --- a/op-dispute-mon/mon/extract/caller_test.go +++ b/op-dispute-mon/mon/extract/caller_test.go @@ -47,10 +47,14 @@ func TestMetadataCreator_CreateContract(t *testing.T) { name: "validFastGameType", game: types.GameMetadata{GameType: uint32(faultTypes.FastGameType), Proxy: fdgAddr}, }, + { + name: "validAsteriscKonaGameType", + game: types.GameMetadata{GameType: uint32(faultTypes.AsteriscKonaGameType), Proxy: fdgAddr}, + }, { name: "InvalidGameType", - game: types.GameMetadata{GameType: 3, Proxy: fdgAddr}, - expectedErr: fmt.Errorf("unsupported game type: 3"), + game: types.GameMetadata{GameType: 4, Proxy: fdgAddr}, + expectedErr: fmt.Errorf("unsupported game type: 4"), }, } From a8c8851e62cc1f2a053fa1d6500f5686aa54709c Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Thu, 5 Dec 2024 17:18:36 -0500 Subject: [PATCH 075/111] maint: remove unnecessary snapshots check script (#13248) We can do this with a justfile recipe. 
Co-authored-by: Matthew Slipper --- packages/contracts-bedrock/justfile | 5 ++--- .../scripts/checks/check-snapshots.sh | 17 ----------------- 2 files changed, 2 insertions(+), 20 deletions(-) delete mode 100755 packages/contracts-bedrock/scripts/checks/check-snapshots.sh diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 6a34cc6998d..423ee453d6b 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -130,11 +130,10 @@ gas-snapshot-check: build-go-ffi gas-snapshot-check-no-build # Checks if the snapshots are up to date without building. snapshots-check-no-build: - ./scripts/checks/check-snapshots.sh --no-build + just snapshots-no-build && git diff --exit-code snapshots # Checks if the snapshots are up to date. -snapshots-check: - ./scripts/checks/check-snapshots.sh +snapshots-check: build snapshots-check-no-build # Checks interface correctness without building. interfaces-check-no-build: diff --git a/packages/contracts-bedrock/scripts/checks/check-snapshots.sh b/packages/contracts-bedrock/scripts/checks/check-snapshots.sh deleted file mode 100755 index 18557eba52a..00000000000 --- a/packages/contracts-bedrock/scripts/checks/check-snapshots.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Check for the --no-build flag -# Generate snapshots -if [ "${1:-}" == "--no-build" ]; then - just snapshots-no-build -else - just snapshots -fi - -# Check if the generated `snapshots` files are different from the committed versions -if git diff --exit-code snapshots > /dev/null; then - [ -z "$(git ls-files --others --exclude-standard snapshots)" ] || exit 1 -else - exit 1 -fi From ce72380799a777a15fd8fda75c36ce39aa8114f5 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Thu, 5 Dec 2024 15:42:55 -0700 Subject: [PATCH 076/111] op-conductor: Temporarily skip flaky test (#13276) --- op-conductor/conductor/service_test.go | 1 + 1 file changed, 1 insertion(+) diff --git 
a/op-conductor/conductor/service_test.go b/op-conductor/conductor/service_test.go index 87df417a468..196c7e3456c 100644 --- a/op-conductor/conductor/service_test.go +++ b/op-conductor/conductor/service_test.go @@ -876,5 +876,6 @@ func (s *OpConductorTestSuite) TestHandleInitError() { } func TestControlLoop(t *testing.T) { + t.Skipf("Skipping test, it's flaky and needs to be fixed") suite.Run(t, new(OpConductorTestSuite)) } From 53b3af705c1100b3ee61d2c3eabf49847cf33c4a Mon Sep 17 00:00:00 2001 From: clabby Date: Thu, 5 Dec 2024 18:00:14 -0500 Subject: [PATCH 077/111] fix(op-challenger): Clarify `op-challenger create-game` flags (#13269) --- op-challenger/cmd/create_game.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/op-challenger/cmd/create_game.go b/op-challenger/cmd/create_game.go index fdf3e9a4176..1103df58c20 100644 --- a/op-challenger/cmd/create_game.go +++ b/op-challenger/cmd/create_game.go @@ -18,11 +18,11 @@ import ( ) var ( - TraceTypeFlag = &cli.StringFlag{ - Name: "trace-type", - Usage: "Trace types to support.", + GameTypeFlag = &cli.StringFlag{ + Name: "game-type", + Usage: "Game type to create (numeric values).", EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "TRACE_TYPE"), - Value: types.TraceTypeCannon.String(), + Value: types.CannonGameType.String(), } OutputRootFlag = &cli.StringFlag{ Name: "output-root", @@ -38,7 +38,7 @@ var ( func CreateGame(ctx *cli.Context) error { outputRoot := common.HexToHash(ctx.String(OutputRootFlag.Name)) - traceType := ctx.Uint64(TraceTypeFlag.Name) + gameType := ctx.Uint64(GameTypeFlag.Name) l2BlockNum := ctx.Uint64(L2BlockNumFlag.Name) contract, txMgr, err := NewContractWithTxMgr[*contracts.DisputeGameFactoryContract](ctx, flags.FactoryAddress, @@ -50,7 +50,7 @@ func CreateGame(ctx *cli.Context) error { } creator := tools.NewGameCreator(contract, txMgr) - gameAddr, err := creator.CreateGame(ctx.Context, outputRoot, traceType, l2BlockNum) + gameAddr, err := 
creator.CreateGame(ctx.Context, outputRoot, gameType, l2BlockNum) if err != nil { return fmt.Errorf("failed to create game: %w", err) } @@ -63,7 +63,7 @@ func createGameFlags() []cli.Flag { flags.L1EthRpcFlag, flags.NetworkFlag, flags.FactoryAddressFlag, - TraceTypeFlag, + GameTypeFlag, OutputRootFlag, L2BlockNumFlag, } From 9dda729047bbe06160fbadf9f79a11f744e485ba Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Fri, 6 Dec 2024 05:54:57 +0300 Subject: [PATCH 078/111] Typo fix Update flags.mk (#13226) --- just/flags.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/just/flags.mk b/just/flags.mk index 121a3eb70e9..684562546a7 100644 --- a/just/flags.mk +++ b/just/flags.mk @@ -7,7 +7,7 @@ # # MAKEFLAGS is a string of the form: # "abc --foo --bar=baz -- VAR1=val1 VAR2=val2", namely: -# - abc is the concatnation of all short flags +# - abc is the concatenation of all short flags # - --foo and --bar=baz are long options, # - -- is the separator between flags and variable assignments, # - VAR1=val1 and VAR2=val2 are variable assignments @@ -21,4 +21,4 @@ tmp-flags := $(wordlist 2,$(words $(MAKEFLAGS)),$(MAKEFLAGS)) # Then remove all long options, including the -- separator, if needed. That # leaves only variable assignments. 
-JUSTFLAGS := $(patsubst --%,,$(tmp-flags)) \ No newline at end of file +JUSTFLAGS := $(patsubst --%,,$(tmp-flags)) From d6fa448a2dffe67e571021b3c584a458c5ca2e9f Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Thu, 5 Dec 2024 18:57:40 -0800 Subject: [PATCH 079/111] increase the transaction throttling limit to allow >99% of transactions (#13047) Co-authored-by: Matthew Slipper --- op-batcher/flags/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-batcher/flags/flags.go b/op-batcher/flags/flags.go index d5681ea8723..df8eca4a612 100644 --- a/op-batcher/flags/flags.go +++ b/op-batcher/flags/flags.go @@ -171,7 +171,7 @@ var ( ThrottleTxSizeFlag = &cli.IntFlag{ Name: "throttle-tx-size", Usage: "The DA size of transactions to start throttling when we are over the throttle threshold", - Value: 300, // most transactions compress to under 300 bytes. TODO: compute exact distribution + Value: 5000, // less than 1% of all transactions should be affected by this limit EnvVars: prefixEnvVars("THROTTLE_TX_SIZE"), } ThrottleBlockSizeFlag = &cli.IntFlag{ From f17da354edf105c86652784d016a3e11b3c8585f Mon Sep 17 00:00:00 2001 From: clabby Date: Thu, 5 Dec 2024 22:38:09 -0500 Subject: [PATCH 080/111] feat(op-deployer): `Proxy` bootstrap command (#13213) * feat(op-deployer): `Proxy` bootstrap command * code review updates * linter --------- Co-authored-by: Matthew Slipper --- op-deployer/pkg/deployer/bootstrap/flags.go | 21 +++ op-deployer/pkg/deployer/bootstrap/proxy.go | 178 ++++++++++++++++++ op-deployer/pkg/deployer/opcm/proxy.go | 30 +++ op-deployer/pkg/deployer/opcm/proxy_test.go | 34 ++++ .../scripts/deploy/DeployProxy.s.sol | 84 +++++++++ 5 files changed, 347 insertions(+) create mode 100644 op-deployer/pkg/deployer/bootstrap/proxy.go create mode 100644 op-deployer/pkg/deployer/opcm/proxy.go create mode 100644 op-deployer/pkg/deployer/opcm/proxy_test.go create mode 100644 packages/contracts-bedrock/scripts/deploy/DeployProxy.s.sol diff --git 
a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index f6a130f075a..20cf02b9337 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -34,6 +34,7 @@ const ( ReleaseFlagName = "release" DelayedWethProxyFlagName = "delayed-weth-proxy" DelayedWethImplFlagName = "delayed-weth-impl" + ProxyOwnerFlagName = "proxy-owner" ) var ( @@ -167,6 +168,13 @@ var ( Name: ReleaseFlagName, Usage: "Release to deploy.", EnvVars: deployer.PrefixEnvVar("RELEASE"), + Value: common.Address{}.Hex(), + } + ProxyOwnerFlag = &cli.StringFlag{ + Name: ProxyOwnerFlagName, + Usage: "Proxy owner address.", + EnvVars: deployer.PrefixEnvVar("PROXY_OWNER"), + Value: common.Address{}.Hex(), } ) @@ -224,6 +232,13 @@ var MIPSFlags = append(BaseFPVMFlags, MIPSVersionFlag) var AsteriscFlags = BaseFPVMFlags +var ProxyFlags = []cli.Flag{ + deployer.L1RPCURLFlag, + deployer.PrivateKeyFlag, + ArtifactsLocatorFlag, + ProxyOwnerFlag, +} + var Commands = []*cli.Command{ { Name: "opcm", @@ -264,4 +279,10 @@ var Commands = []*cli.Command{ Flags: cliapp.ProtectFlags(AsteriscFlags), Action: AsteriscCLI, }, + { + Name: "proxy", + Usage: "Bootstrap a ERC-1967 Proxy without an implementation set.", + Flags: cliapp.ProtectFlags(ProxyFlags), + Action: ProxyCLI, + }, } diff --git a/op-deployer/pkg/deployer/bootstrap/proxy.go b/op-deployer/pkg/deployer/bootstrap/proxy.go new file mode 100644 index 00000000000..c96e497c8c7 --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/proxy.go @@ -0,0 +1,178 @@ +package bootstrap + +import ( + "context" + "crypto/ecdsa" + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + 
"github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" + opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" +) + +type ProxyConfig struct { + L1RPCUrl string + PrivateKey string + Logger log.Logger + ArtifactsLocator *artifacts.Locator + + privateKeyECDSA *ecdsa.PrivateKey + + Owner common.Address +} + +func (c *ProxyConfig) Check() error { + if c.L1RPCUrl == "" { + return fmt.Errorf("l1RPCUrl must be specified") + } + + if c.PrivateKey == "" { + return fmt.Errorf("private key must be specified") + } + + privECDSA, err := crypto.HexToECDSA(strings.TrimPrefix(c.PrivateKey, "0x")) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + c.privateKeyECDSA = privECDSA + + if c.Logger == nil { + return fmt.Errorf("logger must be specified") + } + + if c.ArtifactsLocator == nil { + return fmt.Errorf("artifacts locator must be specified") + } + + if c.Owner == (common.Address{}) { + return fmt.Errorf("proxy owner must be specified") + } + + return nil +} + +func ProxyCLI(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) + oplog.SetGlobalLogHandler(l.Handler()) + + l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) + privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) + artifactsLocator := new(artifacts.Locator) + if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { + return fmt.Errorf("failed to parse artifacts 
URL: %w", err) + } + + owner := common.HexToAddress(cliCtx.String(ProxyOwnerFlagName)) + + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + + return Proxy(ctx, ProxyConfig{ + L1RPCUrl: l1RPCUrl, + PrivateKey: privateKey, + Logger: l, + ArtifactsLocator: artifactsLocator, + Owner: owner, + }) +} + +func Proxy(ctx context.Context, cfg ProxyConfig) error { + if err := cfg.Check(); err != nil { + return fmt.Errorf("invalid config for Proxy: %w", err) + } + + lgr := cfg.Logger + progressor := func(curr, total int64) { + lgr.Info("artifacts download progress", "current", curr, "total", total) + } + + artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + if err != nil { + return fmt.Errorf("failed to download artifacts: %w", err) + } + defer func() { + if err := cleanup(); err != nil { + lgr.Warn("failed to clean up artifacts", "err", err) + } + }() + + l1Client, err := ethclient.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + chainID, err := l1Client.ChainID(ctx) + if err != nil { + return fmt.Errorf("failed to get chain ID: %w", err) + } + + signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) + chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) + + bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: chainID, + Client: l1Client, + Signer: signer, + From: chainDeployer, + }) + if err != nil { + return fmt.Errorf("failed to create broadcaster: %w", err) + } + + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + l1Host, err := env.DefaultForkedScriptHost( + ctx, + bcaster, + lgr, + chainDeployer, + artifactsFS, + l1RPC, + ) + if err != nil { + return fmt.Errorf("failed to create script host: %w", err) + } + + dgo, err := opcm.DeployProxy( + l1Host, + opcm.DeployProxyInput{ + Owner: 
cfg.Owner, + }, + ) + if err != nil { + return fmt.Errorf("error deploying proxy: %w", err) + } + + if _, err := bcaster.Broadcast(ctx); err != nil { + return fmt.Errorf("failed to broadcast: %w", err) + } + + lgr.Info("deployed new ERC-1967 proxy") + + if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil +} diff --git a/op-deployer/pkg/deployer/opcm/proxy.go b/op-deployer/pkg/deployer/opcm/proxy.go new file mode 100644 index 00000000000..850c337f83f --- /dev/null +++ b/op-deployer/pkg/deployer/opcm/proxy.go @@ -0,0 +1,30 @@ +package opcm + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" +) + +type DeployProxyInput struct { + Owner common.Address +} + +func (input *DeployProxyInput) InputSet() bool { + return true +} + +type DeployProxyOutput struct { + Proxy common.Address +} + +type DeployProxyScript struct { + Run func(input, output common.Address) error +} + +func DeployProxy( + host *script.Host, + input DeployProxyInput, +) (DeployProxyOutput, error) { + return RunBasicScript[DeployProxyInput, DeployProxyOutput](host, input, "DeployProxy.s.sol", "DeployProxy") +} diff --git a/op-deployer/pkg/deployer/opcm/proxy_test.go b/op-deployer/pkg/deployer/opcm/proxy_test.go new file mode 100644 index 00000000000..bb9cb350a36 --- /dev/null +++ b/op-deployer/pkg/deployer/opcm/proxy_test.go @@ -0,0 +1,34 @@ +package opcm + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/testutil" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestDeployProxy(t *testing.T) { + _, artifacts := 
testutil.LocalArtifacts(t) + + host, err := env.DefaultScriptHost( + broadcaster.NoopBroadcaster(), + testlog.Logger(t, log.LevelInfo), + common.Address{'D'}, + artifacts, + ) + require.NoError(t, err) + + input := DeployProxyInput{ + Owner: common.Address{0xab}, + } + + output, err := DeployProxy(host, input) + require.NoError(t, err) + + require.NotEmpty(t, output.Proxy) +} diff --git a/packages/contracts-bedrock/scripts/deploy/DeployProxy.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployProxy.s.sol new file mode 100644 index 00000000000..08eadc671bc --- /dev/null +++ b/packages/contracts-bedrock/scripts/deploy/DeployProxy.s.sol @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +// Forge +import { Script } from "forge-std/Script.sol"; + +// Scripts +import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Interfaces +import { IProxy } from "interfaces/universal/IProxy.sol"; + +/// @title DeployProxyInput +contract DeployProxyInput is BaseDeployIO { + // Specify the owner of the proxy that is being deployed + address internal _owner; + + function set(bytes4 _sel, address _value) public { + if (_sel == this.owner.selector) { + require(_value != address(0), "DeployProxy: owner cannot be empty"); + _owner = _value; + } else { + revert("DeployProxy: unknown selector"); + } + } + + function owner() public view returns (address) { + require(_owner != address(0), "DeployProxy: owner not set"); + return _owner; + } +} + +/// @title DeployProxyOutput +contract DeployProxyOutput is BaseDeployIO { + IProxy internal _proxy; + + function set(bytes4 _sel, address _value) public { + if (_sel == this.proxy.selector) { + require(_value != address(0), "DeployProxy: proxy cannot be zero address"); + _proxy = IProxy(payable(_value)); + } else { + revert("DeployProxy: unknown selector"); + } + } + + function proxy() public view returns (IProxy) { + 
DeployUtils.assertValidContractAddress(address(_proxy)); + return _proxy; + } +} + +/// @title DeployProxy +contract DeployProxy is Script { + function run(DeployProxyInput _mi, DeployProxyOutput _mo) public { + deployProxySingleton(_mi, _mo); + checkOutput(_mi, _mo); + } + + function deployProxySingleton(DeployProxyInput _mi, DeployProxyOutput _mo) internal { + address owner = _mi.owner(); + vm.broadcast(msg.sender); + IProxy proxy = IProxy( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (owner))) + }) + ); + + vm.label(address(proxy), "Proxy"); + _mo.set(_mo.proxy.selector, address(proxy)); + } + + function checkOutput(DeployProxyInput _mi, DeployProxyOutput _mo) public { + DeployUtils.assertValidContractAddress(address(_mo.proxy())); + IProxy prox = _mo.proxy(); + vm.prank(_mi.owner()); + address proxyOwner = prox.admin(); + + require( + proxyOwner == _mi.owner(), "DeployProxy: owner of proxy does not match the owner specified in the input" + ); + } +} From ad868c53f74a21a70a412971d472c14e9a0012a7 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Thu, 5 Dec 2024 22:51:48 -0700 Subject: [PATCH 081/111] ctb: Fix concurrent map writes error (#13278) `ProcessFilesGlob` calls the callback concurrently, so this test needs to lock the `processedFiles` map to prevent panics. See [here](https://app.circleci.com/pipelines/github/ethereum-optimism/optimism/73932/workflows/951eb7de-0611-4bea-b4de-5d3a56c9bf37/jobs/3021176) for an example of this happening. 
--- packages/contracts-bedrock/scripts/checks/common/util_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/contracts-bedrock/scripts/checks/common/util_test.go b/packages/contracts-bedrock/scripts/checks/common/util_test.go index 4defc1c7045..d523ad9aa5d 100644 --- a/packages/contracts-bedrock/scripts/checks/common/util_test.go +++ b/packages/contracts-bedrock/scripts/checks/common/util_test.go @@ -3,6 +3,7 @@ package common import ( "os" "path/filepath" + "sync" "testing" ) @@ -79,8 +80,11 @@ func TestProcessFilesGlob(t *testing.T) { excludes := []string{"skip.txt"} processedFiles := make(map[string]bool) + var mtx sync.Mutex err := ProcessFilesGlob(includes, excludes, func(path string) []error { + mtx.Lock() processedFiles[filepath.Base(path)] = true + mtx.Unlock() return nil }) From f9eaf1fc2a633f12b817ff2d9782612ff8175eb6 Mon Sep 17 00:00:00 2001 From: George Knee Date: Fri, 6 Dec 2024 10:51:04 +0000 Subject: [PATCH 082/111] op-batcher: extract state pruning, block fetching and progress checking into a single pure function (#13060) * remove lastStoredBlock and lastL1Tip from BatchSubmitter state We can use the channelManager's state to infer lastStoredBlock. And lastL1Tip is actually unused. 
* change log line wording * fix typo * remove unecessary method * WIP first pass at computeSyncActions * computeSyncAction takes a ChannelStatuser interface also report fully inclusive range of blocks to load * add happy path test case * clearState is a pointer we can use nil value to signal no state clearing should be performed * add more test cases * add another test case * computeSyncActions only takes prevCurrentL1, not prevSyncStatus * add batcher restart case * safe chain reorg case * failed to make progress case * simplify log messages, print entire struct * add godoc * wire up computeSyncActions * cache prevCurrentL1 on BatchSubmitter * document stages * fix loadBlocksIntoState range interpretation * pass syncStatus, not pointer to syncStatus and add test case for no progress * check unsafe status before trying to get more blocks * do not panic on invalid block ranges return an error instead. This error is ultimated swallowed, matching existing behaviour. * test: add assetions and mock data about blockID passed to clearState * add readme section on max channel duration * add back unit tests for pruning methods * fix pruneBlocks behaviour when blockCursor pointed at block which is now pruned * rename waitForNodeSync to sequencerOutOfSync * Introduce SeqOutOfSyncError * move SyncActions code to a separate file * ChannelStatuser -> channelStatuser * SeqOutOfSyncError -> ErrSeqOutOfSync * move ctx to first position in fn signature * do not update cached prevCurrentL1 value if there is an ErrSeqOutOfSync * Always warn log when computeSyncActions returns an error * move sync actions test to separate file * computeSyncActions returns a bool, not an error There is only ever one kind of error returned * SyncActions -> syncActions * define local variables to aid readability * organise computeSyncActions and introduce startAfresh syncAction Add comments explaining logical flow: the checks get increasingly deep and we return early where possible. 
* undo changes to submodule * move test utils to sync_actions_test.go file * ensure pruneChannels clears currentChannel when appropriate * fix submodule" * don't try to get number of block if none exists * improve log * Update op-batcher/batcher/driver.go Co-authored-by: Sebastian Stammler * use struct for block range, not array * use startAfresh in one more place * add test case for multiple channels also set HeadL1 to more realistic values (generally ahead of currentL1 due to nonzero confirmation depth) * print value of *struct in Stringer * add test case when there are no blocks in state * Update op-batcher/batcher/sync_actions.go Co-authored-by: Sebastian Stammler * tighten up log messages and test descriptions --------- Co-authored-by: Sebastian Stammler --- op-batcher/batcher/channel.go | 4 + op-batcher/batcher/channel_manager.go | 80 ++----- op-batcher/batcher/channel_manager_test.go | 259 +++++++++++---------- op-batcher/batcher/driver.go | 90 +++---- op-batcher/batcher/sync_actions.go | 143 ++++++++++++ op-batcher/batcher/sync_actions_test.go | 248 ++++++++++++++++++++ op-batcher/readme.md | 9 +- 7 files changed, 591 insertions(+), 242 deletions(-) create mode 100644 op-batcher/batcher/sync_actions.go create mode 100644 op-batcher/batcher/sync_actions_test.go diff --git a/op-batcher/batcher/channel.go b/op-batcher/batcher/channel.go index 95abcb46a7f..6b936c112d3 100644 --- a/op-batcher/batcher/channel.go +++ b/op-batcher/batcher/channel.go @@ -217,3 +217,7 @@ func (c *channel) OldestL2() eth.BlockID { func (c *channel) Close() { c.channelBuilder.Close() } + +func (c *channel) MaxInclusionBlock() uint64 { + return c.maxInclusionBlock +} diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 1da2def78da..b5cfc6ebc1b 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -464,78 +464,30 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo var 
ErrPendingAfterClose = errors.New("pending channels remain after closing channel-manager") -// pruneSafeBlocks dequeues blocks from the internal blocks queue -// if they have now become safe. -func (s *channelManager) pruneSafeBlocks(newSafeHead eth.L2BlockRef) { - oldestBlock, ok := s.blocks.Peek() +// pruneSafeBlocks dequeues the provided number of blocks from the internal blocks queue +func (s *channelManager) pruneSafeBlocks(num int) { + _, ok := s.blocks.DequeueN(int(num)) if !ok { - // no blocks to prune - return + panic("tried to prune more blocks than available") } - - if newSafeHead.Number+1 == oldestBlock.NumberU64() { - // no blocks to prune - return - } - - if newSafeHead.Number+1 < oldestBlock.NumberU64() { - // This could happen if there was an L1 reorg. - // Or if the sequencer restarted. - s.log.Warn("safe head reversed, clearing channel manager state", - "oldestBlock", eth.ToBlockID(oldestBlock), - "newSafeBlock", newSafeHead) - // We should restart work from the new safe head, - // and therefore prune all the blocks. - s.Clear(newSafeHead.L1Origin) - return - } - - numBlocksToDequeue := newSafeHead.Number + 1 - oldestBlock.NumberU64() - - if numBlocksToDequeue > uint64(s.blocks.Len()) { - // This could happen if the batcher restarted. - // The sequencer may have derived the safe chain - // from channels sent by a previous batcher instance. - s.log.Warn("safe head above unsafe head, clearing channel manager state", - "unsafeBlock", eth.ToBlockID(s.blocks[s.blocks.Len()-1]), - "newSafeBlock", newSafeHead) - // We should restart work from the new safe head, - // and therefore prune all the blocks. 
- s.Clear(newSafeHead.L1Origin) - return - } - - if s.blocks[numBlocksToDequeue-1].Hash() != newSafeHead.Hash { - s.log.Warn("safe chain reorg, clearing channel manager state", - "existingBlock", eth.ToBlockID(s.blocks[numBlocksToDequeue-1]), - "newSafeBlock", newSafeHead) - // We should restart work from the new safe head, - // and therefore prune all the blocks. - s.Clear(newSafeHead.L1Origin) - return - } - - // This shouldn't return an error because - // We already checked numBlocksToDequeue <= s.blocks.Len() - _, _ = s.blocks.DequeueN(int(numBlocksToDequeue)) - s.blockCursor -= int(numBlocksToDequeue) - + s.blockCursor -= int(num) if s.blockCursor < 0 { - panic("negative blockCursor") + s.blockCursor = 0 } } -// pruneChannels dequeues channels from the internal channels queue -// if they were built using blocks which are now safe -func (s *channelManager) pruneChannels(newSafeHead eth.L2BlockRef) { - i := 0 - for _, ch := range s.channelQueue { - if ch.LatestL2().Number > newSafeHead.Number { - break +// pruneChannels dequeues the provided number of channels from the internal channels queue +func (s *channelManager) pruneChannels(num int) { + clearCurrentChannel := false + for i := 0; i < num; i++ { + if s.channelQueue[i] == s.currentChannel { + clearCurrentChannel = true } - i++ } - s.channelQueue = s.channelQueue[i:] + s.channelQueue = s.channelQueue[num:] + if clearCurrentChannel { + s.currentChannel = nil + } } // PendingDABytes returns the current number of bytes pending to be written to the DA layer (from blocks fetched from L2 diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index 32aae1b06dd..d1a0037a5d0 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -463,14 +463,12 @@ func TestChannelManager_handleChannelInvalidated(t *testing.T) { } func TestChannelManager_PruneBlocks(t *testing.T) { - l := testlog.Logger(t, log.LevelDebug) cfg := 
channelManagerTestConfig(100, derive.SingularBatchType) - m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - + cfg.InitNoneCompressor() a := types.NewBlock(&types.Header{ Number: big.NewInt(0), }, nil, nil, nil) - b := types.NewBlock(&types.Header{ // This will shortly become the safe head + b := types.NewBlock(&types.Header{ Number: big.NewInt(1), ParentHash: a.Hash(), }, nil, nil, nil) @@ -479,132 +477,157 @@ func TestChannelManager_PruneBlocks(t *testing.T) { ParentHash: b.Hash(), }, nil, nil, nil) - require.NoError(t, m.AddL2Block(a)) - m.blockCursor += 1 - require.NoError(t, m.AddL2Block(b)) - m.blockCursor += 1 - require.NoError(t, m.AddL2Block(c)) - m.blockCursor += 1 - - // Normal path - m.pruneSafeBlocks(eth.L2BlockRef{ - Hash: b.Hash(), - Number: b.NumberU64(), - }) - require.Equal(t, queue.Queue[*types.Block]{c}, m.blocks) - - // Safe chain didn't move, nothing to prune - m.pruneSafeBlocks(eth.L2BlockRef{ - Hash: b.Hash(), - Number: b.NumberU64(), - }) - require.Equal(t, queue.Queue[*types.Block]{c}, m.blocks) - - // Safe chain moved beyond the blocks we had - // state should be cleared - m.pruneSafeBlocks(eth.L2BlockRef{ - Hash: c.Hash(), - Number: uint64(99), - }) - require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) - - // No blocks to prune, NOOP - m.pruneSafeBlocks(eth.L2BlockRef{ - Hash: c.Hash(), - Number: c.NumberU64(), - }) - require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) - - // Put another block in - d := types.NewBlock(&types.Header{ - Number: big.NewInt(3), - ParentHash: c.Hash(), - }, nil, nil, nil) - require.NoError(t, m.AddL2Block(d)) - m.blockCursor += 1 - - // Safe chain reorg - // state should be cleared - m.pruneSafeBlocks(eth.L2BlockRef{ - Hash: a.Hash(), - Number: uint64(3), - }) - require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) - - // Put another block in - require.NoError(t, m.AddL2Block(d)) - m.blockCursor += 1 + type testCase struct { + name string + initialQ 
queue.Queue[*types.Block] + initialBlockCursor int + numChannelsToPrune int + expectedQ queue.Queue[*types.Block] + expectedBlockCursor int + } - // Safe chain reversed - // state should be cleared - m.pruneSafeBlocks(eth.L2BlockRef{ - Hash: a.Hash(), // unused - Number: uint64(1), - }) - require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + for _, tc := range []testCase{ + { + name: "[A,B,C]*+1->[B,C]*", // * denotes the cursor + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 3, + numChannelsToPrune: 1, + expectedQ: queue.Queue[*types.Block]{b, c}, + expectedBlockCursor: 2, + }, + { + name: "[A,B,C*]+1->[B,C*]", + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 2, + numChannelsToPrune: 1, + expectedQ: queue.Queue[*types.Block]{b, c}, + expectedBlockCursor: 1, + }, + { + name: "[A,B,C]*+2->[C]*", + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 3, + numChannelsToPrune: 2, + expectedQ: queue.Queue[*types.Block]{c}, + expectedBlockCursor: 1, + }, + { + name: "[A,B,C*]+2->[C*]", + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 2, + numChannelsToPrune: 2, + expectedQ: queue.Queue[*types.Block]{c}, + expectedBlockCursor: 0, + }, + { + name: "[A*,B,C]+1->[B*,C]", + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 0, + numChannelsToPrune: 1, + expectedQ: queue.Queue[*types.Block]{b, c}, + expectedBlockCursor: 0, + }, + { + name: "[A,B,C]+3->[]", + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 3, + numChannelsToPrune: 3, + expectedQ: queue.Queue[*types.Block]{}, + expectedBlockCursor: 0, + }, + { + name: "[A,B,C]*+4->panic", + initialQ: queue.Queue[*types.Block]{a, b, c}, + initialBlockCursor: 3, + numChannelsToPrune: 4, + expectedQ: nil, // declare that the prune method should panic + expectedBlockCursor: 0, + }, + } { + t.Run(tc.name, func(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + m := NewChannelManager(l, metrics.NoopMetrics, 
cfg, defaultTestRollupConfig) + m.blocks = tc.initialQ + m.blockCursor = tc.initialBlockCursor + if tc.expectedQ != nil { + m.pruneSafeBlocks(tc.numChannelsToPrune) + require.Equal(t, tc.expectedQ, m.blocks) + } else { + require.Panics(t, func() { m.pruneSafeBlocks(tc.numChannelsToPrune) }) + } + }) + } } func TestChannelManager_PruneChannels(t *testing.T) { - l := testlog.Logger(t, log.LevelCrit) cfg := channelManagerTestConfig(100, derive.SingularBatchType) - cfg.InitNoneCompressor() - m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - - A, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) - require.NoError(t, err) - B, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) - require.NoError(t, err) - C, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) - require.NoError(t, err) - - m.channelQueue = []*channel{A, B, C} - - numTx := 1 - rng := rand.New(rand.NewSource(123)) - a0 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - a0 = a0.WithSeal(&types.Header{Number: big.NewInt(0)}) - a1 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - a1 = a1.WithSeal(&types.Header{Number: big.NewInt(1)}) - b2 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - b2 = b2.WithSeal(&types.Header{Number: big.NewInt(2)}) - b3 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - b3 = b3.WithSeal(&types.Header{Number: big.NewInt(3)}) - c4 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - c4 = c4.WithSeal(&types.Header{Number: big.NewInt(4)}) - - _, err = A.AddBlock(a0) - require.NoError(t, err) - _, err = A.AddBlock(a1) - require.NoError(t, err) - - _, err = B.AddBlock(b2) + A, err := newChannelWithChannelOut(nil, metrics.NoopMetrics, cfg, defaultTestRollupConfig, 0) require.NoError(t, err) - _, err = 
B.AddBlock(b3) + B, err := newChannelWithChannelOut(nil, metrics.NoopMetrics, cfg, defaultTestRollupConfig, 0) require.NoError(t, err) - - _, err = C.AddBlock(c4) + C, err := newChannelWithChannelOut(nil, metrics.NoopMetrics, cfg, defaultTestRollupConfig, 0) require.NoError(t, err) - m.pruneChannels(eth.L2BlockRef{ - Number: uint64(3), - }) - - require.Equal(t, []*channel{C}, m.channelQueue) - - m.pruneChannels(eth.L2BlockRef{ - Number: uint64(4), - }) - - require.Equal(t, []*channel{}, m.channelQueue) - - m.pruneChannels(eth.L2BlockRef{ - Number: uint64(4), - }) - - require.Equal(t, []*channel{}, m.channelQueue) + type testCase struct { + name string + initialQ []*channel + initialCurrentChannel *channel + numChannelsToPrune int + expectedQ []*channel + expectedCurrentChannel *channel + } + for _, tc := range []testCase{ + { + name: "[A,B,C]+1->[B,C]", + initialQ: []*channel{A, B, C}, + numChannelsToPrune: 1, + expectedQ: []*channel{B, C}, + }, + { + name: "[A,B,C]+3->[] + currentChannel=C", + initialQ: []*channel{A, B, C}, + initialCurrentChannel: C, + numChannelsToPrune: 3, + expectedQ: []*channel{}, + expectedCurrentChannel: nil, + }, + { + name: "[A,B,C]+2->[C]", + initialQ: []*channel{A, B, C}, + numChannelsToPrune: 2, + expectedQ: []*channel{C}, + }, + { + name: "[A,B,C]+3->[]", + initialQ: []*channel{A, B, C}, + numChannelsToPrune: 3, + expectedQ: []*channel{}, + }, + { + name: "[A,B,C]+4->panic", + initialQ: []*channel{A, B, C}, + numChannelsToPrune: 4, + expectedQ: nil, // declare that the prune method should panic + }, + } { + t.Run(tc.name, func(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + m.channelQueue = tc.initialQ + m.currentChannel = tc.initialCurrentChannel + if tc.expectedQ != nil { + m.pruneChannels(tc.numChannelsToPrune) + require.Equal(t, tc.expectedQ, m.channelQueue) + require.Equal(t, tc.expectedCurrentChannel, m.currentChannel) + } else { + 
require.Panics(t, func() { m.pruneChannels(tc.numChannelsToPrune) }) + } + }) + } } + func TestChannelManager_ChannelOutFactory(t *testing.T) { type ChannelOutWrapper struct { derive.ChannelOut diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 729626cd946..1483c4dc7cc 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -114,7 +114,8 @@ type BatchSubmitter struct { txpoolState TxPoolState txpoolBlockedBlob bool - state *channelManager + state *channelManager + prevCurrentL1 eth.L1BlockRef // cached CurrentL1 from the last syncStatus } // NewBatchSubmitter initializes the BatchSubmitter driver from a preconfigured DriverSetup @@ -241,28 +242,15 @@ func (l *BatchSubmitter) StopBatchSubmitting(ctx context.Context) error { return nil } -// loadBlocksIntoState loads all blocks since the previous stored block -// It does the following: -// 1. Fetch the sync status of the sequencer -// 2. Check if the sync status is valid or if we are all the way up to date -// 3. Check if it needs to initialize state OR it is lagging (todo: lagging just means race condition?) -// 4. Load all new blocks into the local state. -// 5. Dequeue blocks from local state which are now safe. -// -// If there is a reorg, it will reset the last stored block but not clear the internal state so -// the state can be flushed to L1. -func (l *BatchSubmitter) loadBlocksIntoState(syncStatus eth.SyncStatus, ctx context.Context) error { - start, end, err := l.calculateL2BlockRangeToStore(syncStatus) - if err != nil { - l.Log.Warn("Error calculating L2 block range", "err", err) - return err - } else if start.Number >= end.Number { - return errors.New("start number is >= end number") +// loadBlocksIntoState loads the blocks between start and end (inclusive). +// If there is a reorg, it will return an error. 
+func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context, start, end uint64) error { + if end <= start { + return fmt.Errorf("start number is >= end number %d,%d", start, end) } - var latestBlock *types.Block // Add all blocks to "state" - for i := start.Number + 1; i < end.Number+1; i++ { + for i := start; i <= end; i++ { block, err := l.loadBlockIntoState(ctx, i) if errors.Is(err, ErrReorg) { l.Log.Warn("Found L2 reorg", "block_number", i) @@ -358,34 +346,6 @@ func (l *BatchSubmitter) getSyncStatus(ctx context.Context) (*eth.SyncStatus, er return syncStatus, nil } -// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state. -func (l *BatchSubmitter) calculateL2BlockRangeToStore(syncStatus eth.SyncStatus) (eth.BlockID, eth.BlockID, error) { - if syncStatus.HeadL1 == (eth.L1BlockRef{}) { - return eth.BlockID{}, eth.BlockID{}, errors.New("empty sync status") - } - // Check if we should even attempt to load any blocks. TODO: May not need this check - if syncStatus.SafeL2.Number >= syncStatus.UnsafeL2.Number { - return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("L2 safe head(%d) >= L2 unsafe head(%d)", syncStatus.SafeL2.Number, syncStatus.UnsafeL2.Number) - } - - lastStoredBlock := l.state.LastStoredBlock() - start := lastStoredBlock - end := syncStatus.UnsafeL2.ID() - - // Check last stored block to see if it is empty or has lagged behind. - // It lagging implies that the op-node processed some batches that - // were submitted prior to the current instance of the batcher being alive. 
- if lastStoredBlock == (eth.BlockID{}) { - l.Log.Info("Resuming batch-submitter work at safe-head", "safe", syncStatus.SafeL2) - start = syncStatus.SafeL2.ID() - } else if lastStoredBlock.Number < syncStatus.SafeL2.Number { - l.Log.Warn("Last stored block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", lastStoredBlock, "safe", syncStatus.SafeL2) - start = syncStatus.SafeL2.ID() - } - - return start, end, nil -} - // The following things occur: // New L2 block (reorg or not) // L1 transaction is confirmed @@ -464,20 +424,34 @@ func (l *BatchSubmitter) mainLoop(ctx context.Context, receiptsCh chan txmgr.TxR continue } - l.state.pruneSafeBlocks(syncStatus.SafeL2) - l.state.pruneChannels(syncStatus.SafeL2) + // Decide appropriate actions + syncActions, outOfSync := computeSyncActions(*syncStatus, l.prevCurrentL1, l.state.blocks, l.state.channelQueue, l.Log) - err = l.state.CheckExpectedProgress(*syncStatus) - if err != nil { - l.Log.Warn("error checking expected progress, clearing state and waiting for node sync", "err", err) - l.waitNodeSyncAndClearState() + if outOfSync { + // If the sequencer is out of sync + // do nothing and wait to see if it has + // got in sync on the next tick. 
+ l.Log.Warn("Sequencer is out of sync, retrying next tick.") continue } - if err := l.loadBlocksIntoState(*syncStatus, l.shutdownCtx); errors.Is(err, ErrReorg) { - l.Log.Warn("error loading blocks, clearing state and waiting for node sync", "err", err) - l.waitNodeSyncAndClearState() - continue + l.prevCurrentL1 = syncStatus.CurrentL1 + + // Manage existing state / garbage collection + if syncActions.clearState != nil { + l.state.Clear(*syncActions.clearState) + } else { + l.state.pruneSafeBlocks(syncActions.blocksToPrune) + l.state.pruneChannels(syncActions.channelsToPrune) + } + + if syncActions.blocksToLoad != nil { + // Get fresh unsafe blocks + if err := l.loadBlocksIntoState(l.shutdownCtx, syncActions.blocksToLoad.start, syncActions.blocksToLoad.end); errors.Is(err, ErrReorg) { + l.Log.Warn("error loading blocks, clearing state and waiting for node sync", "err", err) + l.waitNodeSyncAndClearState() + continue + } } l.publishStateToL1(queue, receiptsCh, daGroup, l.Config.PollInterval) diff --git a/op-batcher/batcher/sync_actions.go b/op-batcher/batcher/sync_actions.go new file mode 100644 index 00000000000..8f4121d7d5f --- /dev/null +++ b/op-batcher/batcher/sync_actions.go @@ -0,0 +1,143 @@ +package batcher + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/queue" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +type channelStatuser interface { + isFullySubmitted() bool + isTimedOut() bool + LatestL2() eth.BlockID + MaxInclusionBlock() uint64 +} + +type inclusiveBlockRange struct{ start, end uint64 } +type syncActions struct { + clearState *eth.BlockID + blocksToPrune int + channelsToPrune int + blocksToLoad *inclusiveBlockRange // the blocks that should be loaded into the local state. + // NOTE this range is inclusive on both ends, which is a change to previous behaviour. 
+} + +func (s syncActions) String() string { + return fmt.Sprintf( + "SyncActions{blocksToPrune: %d, channelsToPrune: %d, clearState: %v, blocksToLoad: %v}", s.blocksToPrune, s.channelsToPrune, s.clearState, s.blocksToLoad) +} + +// computeSyncActions determines the actions that should be taken based on the inputs provided. The inputs are the current +// state of the batcher (blocks and channels), the new sync status, and the previous current L1 block. The actions are returned +// in a struct specifying the number of blocks to prune, the number of channels to prune, whether to wait for node sync, the block +// range to load into the local state, and whether to clear the state entirely. Returns an boolean indicating if the sequencer is out of sync. +func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCurrentL1 eth.L1BlockRef, blocks queue.Queue[*types.Block], channels []T, l log.Logger) (syncActions, bool) { + + // PART 1: Initial checks on the sync status + if newSyncStatus.HeadL1 == (eth.L1BlockRef{}) { + l.Warn("empty sync status") + return syncActions{}, true + } + + if newSyncStatus.CurrentL1.Number < prevCurrentL1.Number { + // This can happen when the sequencer restarts + l.Warn("sequencer currentL1 reversed") + return syncActions{}, true + } + + // PART 2: checks involving only the oldest block in the state + oldestBlockInState, hasBlocks := blocks.Peek() + oldestUnsafeBlockNum := newSyncStatus.SafeL2.Number + 1 + youngestUnsafeBlockNum := newSyncStatus.UnsafeL2.Number + + if !hasBlocks { + s := syncActions{ + blocksToLoad: &inclusiveBlockRange{oldestUnsafeBlockNum, youngestUnsafeBlockNum}, + } + l.Info("no blocks in state", "syncActions", s) + return s, false + } + + // These actions apply in multiple unhappy scenarios below, where + // we detect that the existing state is invalidated + // and we need to start over from the sequencer's oldest + // unsafe (and not safe) block. 
+ startAfresh := syncActions{ + clearState: &newSyncStatus.SafeL2.L1Origin, + blocksToLoad: &inclusiveBlockRange{oldestUnsafeBlockNum, youngestUnsafeBlockNum}, + } + + oldestBlockInStateNum := oldestBlockInState.NumberU64() + + if oldestUnsafeBlockNum < oldestBlockInStateNum { + l.Warn("oldest unsafe block is below oldest block in state", "syncActions", startAfresh, "oldestBlockInState", oldestBlockInState, "newSafeBlock", newSyncStatus.SafeL2) + return startAfresh, false + } + + // PART 3: checks involving all blocks in state + newestBlockInState := blocks[blocks.Len()-1] + newestBlockInStateNum := newestBlockInState.NumberU64() + + numBlocksToDequeue := oldestUnsafeBlockNum - oldestBlockInStateNum + + if numBlocksToDequeue > uint64(blocks.Len()) { + // This could happen if the batcher restarted. + // The sequencer may have derived the safe chain + // from channels sent by a previous batcher instance. + l.Warn("oldest unsafe block above newest block in state, clearing channel manager state", + "oldestUnsafeBlockNum", oldestUnsafeBlockNum, + "newestBlockInState", eth.ToBlockID(newestBlockInState), + "syncActions", + startAfresh) + return startAfresh, false + } + + if numBlocksToDequeue > 0 && blocks[numBlocksToDequeue-1].Hash() != newSyncStatus.SafeL2.Hash { + l.Warn("safe chain reorg, clearing channel manager state", + "existingBlock", eth.ToBlockID(blocks[numBlocksToDequeue-1]), + "newSafeBlock", newSyncStatus.SafeL2, + "syncActions", startAfresh) + return startAfresh, false + } + + // PART 4: checks involving channels + for _, ch := range channels { + if ch.isFullySubmitted() && + !ch.isTimedOut() && + newSyncStatus.CurrentL1.Number > ch.MaxInclusionBlock() && + newSyncStatus.SafeL2.Number < ch.LatestL2().Number { + // Safe head did not make the expected progress + // for a fully submitted channel. This indicates + // that the derivation pipeline may have stalled + // e.g. because of Holocene strict ordering rules. 
+ l.Warn("sequencer did not make expected progress", + "existingBlock", eth.ToBlockID(blocks[numBlocksToDequeue-1]), + "newSafeBlock", newSyncStatus.SafeL2, + "syncActions", startAfresh) + return startAfresh, false + } + } + + // PART 5: happy path + numChannelsToPrune := 0 + for _, ch := range channels { + if ch.LatestL2().Number > newSyncStatus.SafeL2.Number { + // If the channel has blocks which are not yet safe + // we do not want to prune it. + break + } + numChannelsToPrune++ + } + + start := newestBlockInStateNum + 1 + end := youngestUnsafeBlockNum + + return syncActions{ + blocksToPrune: int(numBlocksToDequeue), + channelsToPrune: numChannelsToPrune, + blocksToLoad: &inclusiveBlockRange{start, end}, + }, false +} diff --git a/op-batcher/batcher/sync_actions_test.go b/op-batcher/batcher/sync_actions_test.go new file mode 100644 index 00000000000..f48ed9dabfb --- /dev/null +++ b/op-batcher/batcher/sync_actions_test.go @@ -0,0 +1,248 @@ +package batcher + +import ( + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/queue" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +type testChannelStatuser struct { + latestL2 eth.BlockID + inclusionBlock uint64 + fullySubmitted, timedOut bool +} + +func (tcs testChannelStatuser) LatestL2() eth.BlockID { + return tcs.latestL2 +} + +func (tcs testChannelStatuser) MaxInclusionBlock() uint64 { + return tcs.inclusionBlock +} +func (tcs testChannelStatuser) isFullySubmitted() bool { + return tcs.fullySubmitted +} + +func (tcs testChannelStatuser) isTimedOut() bool { + return tcs.timedOut +} + +func TestBatchSubmitter_computeSyncActions(t *testing.T) { + + block101 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(101)}) + block102 := types.NewBlockWithHeader(&types.Header{Number: 
big.NewInt(102)}) + block103 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(103)}) + + channel103 := testChannelStatuser{ + latestL2: eth.ToBlockID(block103), + inclusionBlock: 1, + fullySubmitted: true, + timedOut: false, + } + + block104 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(104)}) + + channel104 := testChannelStatuser{ + latestL2: eth.ToBlockID(block104), + inclusionBlock: 1, + fullySubmitted: false, + timedOut: false, + } + + type TestCase struct { + name string + // inputs + newSyncStatus eth.SyncStatus + prevCurrentL1 eth.L1BlockRef + blocks queue.Queue[*types.Block] + channels []channelStatuser + // expectations + expected syncActions + expectedSeqOutOfSync bool + expectedLogs []string + } + + testCases := []TestCase{ + {name: "empty sync status", + // This can happen when the sequencer recovers from a reorg + newSyncStatus: eth.SyncStatus{}, + expected: syncActions{}, + expectedSeqOutOfSync: true, + expectedLogs: []string{"empty sync status"}, + }, + {name: "current l1 reversed", + // This can happen when the sequencer restarts or is switched + // to a backup sequencer: + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 2}, + CurrentL1: eth.BlockRef{Number: 1}, + }, + prevCurrentL1: eth.BlockRef{Number: 2}, + expected: syncActions{}, + expectedSeqOutOfSync: true, + expectedLogs: []string{"sequencer currentL1 reversed"}, + }, + {name: "gap between safe chain and state", + // This can happen if there is an L1 reorg: + // although the sequencer has derived up the same + // L1 block height, it derived fewer safe L2 blocks. 
+ newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 6}, + CurrentL1: eth.BlockRef{Number: 1}, + SafeL2: eth.L2BlockRef{Number: 100, L1Origin: eth.BlockID{Number: 1}}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block102, block103}, // note absence of block101 + channels: []channelStatuser{channel103}, + expected: syncActions{ + clearState: ð.BlockID{Number: 1}, + blocksToLoad: &inclusiveBlockRange{101, 109}, + }, + expectedLogs: []string{"oldest unsafe block is below oldest block in state"}, + }, + {name: "unexpectedly good progress", + // This can happen if another batcher instance got some blocks + // included in the safe chain: + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 6}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 104, L1Origin: eth.BlockID{Number: 1}}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101, block102, block103}, + channels: []channelStatuser{channel103}, + expected: syncActions{ + clearState: ð.BlockID{Number: 1}, + blocksToLoad: &inclusiveBlockRange{105, 109}, + }, + expectedLogs: []string{"oldest unsafe block above newest block in state"}, + }, + {name: "safe chain reorg", + // This can happen if there is an L1 reorg, the safe chain is at an acceptable + // height but it does not descend from the blocks in state: + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 5}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 103, Hash: block101.Hash(), L1Origin: eth.BlockID{Number: 1}}, // note hash mismatch + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101, block102, block103}, + channels: []channelStatuser{channel103}, + expected: syncActions{ + clearState: ð.BlockID{Number: 1}, + blocksToLoad: 
&inclusiveBlockRange{104, 109}, + }, + expectedLogs: []string{"safe chain reorg"}, + }, + {name: "failed to make expected progress", + // This could happen if the batcher unexpectedly violates the + // Holocene derivation rules: + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 3}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 101, Hash: block101.Hash(), L1Origin: eth.BlockID{Number: 1}}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101, block102, block103}, + channels: []channelStatuser{channel103}, + expected: syncActions{ + clearState: ð.BlockID{Number: 1}, + blocksToLoad: &inclusiveBlockRange{102, 109}, + }, + expectedLogs: []string{"sequencer did not make expected progress"}, + }, + {name: "no progress", + // This can happen if we have a long channel duration + // and we didn't submit or have any txs confirmed since + // the last sync. + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 4}, + CurrentL1: eth.BlockRef{Number: 1}, + SafeL2: eth.L2BlockRef{Number: 100}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101, block102, block103}, + channels: []channelStatuser{channel103}, + expected: syncActions{ + blocksToLoad: &inclusiveBlockRange{104, 109}, + }, + }, + {name: "no blocks", + // This happens when the batcher is starting up for the first time + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 5}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 103, Hash: block103.Hash()}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{}, + channels: []channelStatuser{}, + expected: syncActions{ + blocksToLoad: &inclusiveBlockRange{104, 109}, + }, + }, + {name: "happy path", + // This happens when the safe chain is being progressed as expected: + 
newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 5}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 103, Hash: block103.Hash()}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101, block102, block103}, + channels: []channelStatuser{channel103}, + expected: syncActions{ + blocksToPrune: 3, + channelsToPrune: 1, + blocksToLoad: &inclusiveBlockRange{104, 109}, + }, + }, + {name: "happy path + multiple channels", + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 5}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 103, Hash: block103.Hash()}, + UnsafeL2: eth.L2BlockRef{Number: 109}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101, block102, block103, block104}, + channels: []channelStatuser{channel103, channel104}, + expected: syncActions{ + blocksToPrune: 3, + channelsToPrune: 1, + blocksToLoad: &inclusiveBlockRange{105, 109}, + }, + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(t *testing.T) { + l, h := testlog.CaptureLogger(t, log.LevelDebug) + + result, outOfSync := computeSyncActions( + tc.newSyncStatus, tc.prevCurrentL1, tc.blocks, tc.channels, l, + ) + + require.Equal(t, tc.expected, result) + require.Equal(t, tc.expectedSeqOutOfSync, outOfSync) + for _, e := range tc.expectedLogs { + r := h.FindLog(testlog.NewMessageContainsFilter(e)) + require.NotNil(t, r, "could not find log message containing '%s'", e) + } + }) + } +} diff --git a/op-batcher/readme.md b/op-batcher/readme.md index ba547845f09..9e81ff978fe 100644 --- a/op-batcher/readme.md +++ b/op-batcher/readme.md @@ -32,11 +32,14 @@ The philosophy behind the current architecture is: ### Happy path In the happy path, the batcher periodically: +0. Queries the sequencer's syncStatus and + a. (optionally) waits for it to ingest more L1 data before taking action + b. 
prunes blocks and channels from its internal state which are no longer required 1. Enqueues unsafe blocks and dequeues safe blocks from the sequencer to its internal state. 2. Enqueues a new channel, if necessary. 3. Processes some unprocessed blocks into the current channel, triggers the compression of the block data and the creation of frames. 4. Sends frames from the channel queue to the DA layer as (e.g. to Ethereum L1 as calldata or blob transactions). -5. If there is more transaction data to send, go to 2. Else wait for a tick and go to 1. +5. If there is more transaction data to send, go to 2. Else wait for a tick and go to 0. The `blockCursor` state variable tracks the next unprocessed block. @@ -57,7 +60,6 @@ At the current time, the batcher should be optimized for correctness, simplicity The batcher can almost always recover from unforeseen situations by being restarted. - Some complexity is permitted, however, for handling data availability switching, so that the batcher is not wasting money for longer periods of time. ### Data Availability Backlog @@ -79,6 +81,9 @@ transaction. But in the case of a DA backlog (as defined by OP_BATCHER_THROTTLE_ block builder to instead impose a (tighter) block level limit of OP_BATCHER_THROTTLE_BLOCK_SIZE, and a single transaction limit of OP_BATCHER_THROTTLE_TRANSACTION_SIZE. +### Max Channel Duration +The batcher tries to ensure that batches are posted at a minimum frequency specified by `MAX_CHANNEL_DURATION`. To achiveve this, it caches the l1 origin of the last submitted channel, and will force close a channel if the timestamp of the l1 head moves beyond the timestamp of that l1 origin plus `MAX_CHANNEL_DURATION`. When clearing its state, e.g. following the detection of a reorg, the batcher will not clear the cached l1 origin: this way, the regular posting of batches will not be disturbed by events like reorgs. 
+ ## Known issues and future work Link to [open issues with the `op-batcher` tag](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aopen+is%3Aissue+label%3AA-op-batcher). From e5498be3bc3fcd1d5af6f54352266f310cc277bc Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Fri, 6 Dec 2024 21:08:37 +0800 Subject: [PATCH 083/111] fix loadBlocksIntoState (#13282) --- op-batcher/batcher/driver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 1483c4dc7cc..a2b2bd88122 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -245,8 +245,8 @@ func (l *BatchSubmitter) StopBatchSubmitting(ctx context.Context) error { // loadBlocksIntoState loads the blocks between start and end (inclusive). // If there is a reorg, it will return an error. func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context, start, end uint64) error { - if end <= start { - return fmt.Errorf("start number is >= end number %d,%d", start, end) + if end < start { + return fmt.Errorf("start number is > end number %d,%d", start, end) } var latestBlock *types.Block // Add all blocks to "state" From cde5fd7a9771f24e5d3b73b829c1770d94f27abe Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 09:05:33 -0700 Subject: [PATCH 084/111] ci: Pin ethereum package version (#13286) --- op-deployer/pkg/deployer/integration_test/apply_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index a6ae4dab3ad..1605219a8d9 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -91,7 +91,7 @@ func TestEndToEndApply(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - enclaveCtx := kurtosisutil.StartEnclave(t, ctx, lgr, 
"github.com/ethpandaops/ethereum-package", TestParams) + enclaveCtx := kurtosisutil.StartEnclave(t, ctx, lgr, "github.com/ethpandaops/ethereum-package@4.4.0", TestParams) service, err := enclaveCtx.GetServiceContext("el-1-geth-lighthouse") require.NoError(t, err) From 7c8d28dd3d42b24d8be55c98fe4372bde4d93e0f Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 09:05:45 -0700 Subject: [PATCH 085/111] ci: Remove go-mod-download (#13277) --- .circleci/config.yml | 56 ++------------------------------------------ 1 file changed, 2 insertions(+), 54 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 54f19de4e9b..6fd1da6335c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -947,30 +947,16 @@ jobs: mentions: "@proofs-team" cannon-stf-verify: - docker: - - image: <> + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - checkout - - setup_remote_docker - - restore_cache: - name: Restore Go modules cache - key: gomod-{{ checksum "go.sum" }} - - restore_cache: - name: Restore Go build cache - keys: - - golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} - - golang-build-cache-cannon-stf-verify- - run: name: Build cannon command: make cannon - run: name: Verify the Cannon STF command: make -C ./cannon cannon-stf-verify - - save_cache: - name: Save Go build cache - key: golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} - paths: - - "/root/.cache/go-build" - notify-failures-on-develop: mentions: "@proofs-team" @@ -1018,38 +1004,6 @@ jobs: no_output_timeout: 20m - notify-failures-on-develop - go-mod-download: - docker: - - image: <> - parameters: - file: - default: go.sum - description: The file name of checksum for restore_cache and save_cache. - type: string - key: - default: gomod - description: The key of restore_cache and save_cache. 
- type: string - steps: - - checkout - - restore_cache: - key: << parameters.key >>-{{ checksum "<< parameters.file >>" }} - name: Restore Go modules cache - - run: - name: Sanity check go mod cache path - command: test "$(go env GOMODCACHE)" == "/go/pkg/mod" # yes, it's an odd path - - run: - command: go mod download - name: Download Go module dependencies - - run: - name: "Go mod tidy" - command: make mod-tidy && git diff --exit-code - - save_cache: - key: << parameters.key >>-{{ checksum "<< parameters.file >>" }} - name: Save Go modules cache - paths: - - "/go/pkg/mod" - bedrock-go-tests: # just a helper, that depends on all the actual test jobs docker: # Use a smaller base image to avoid pulling the huge ci-builder @@ -1237,7 +1191,6 @@ workflows: - not: equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - - go-mod-download - contracts-bedrock-build: name: contracts-bedrock-build # Build with just core + script contracts. @@ -1321,14 +1274,12 @@ workflows: - op-program-compat - bedrock-go-tests: requires: - - go-mod-download - go-lint - cannon-build-test-vectors - cannon-go-lint-and-test-32-bit - cannon-go-lint-and-test-64-bit - check-generated-mocks-op-node - check-generated-mocks-op-service - - go-mod-download - op-program-compat # Not needed for the devnet but we want to make sure they build successfully - cannon-docker-build @@ -1530,11 +1481,8 @@ workflows: - not: equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - - go-mod-download - cannon-prestate - cannon-stf-verify: - requires: - - go-mod-download context: - slack - contracts-bedrock-build: From 0c9b1d5268827044c5be409645ed52dad56d47f6 Mon Sep 17 00:00:00 2001 From: George Knee Date: Fri, 6 Dec 2024 16:36:44 +0000 Subject: [PATCH 086/111] fix possible panic in computeSyncActions (#13287) --- op-batcher/batcher/sync_actions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-batcher/batcher/sync_actions.go b/op-batcher/batcher/sync_actions.go index 
8f4121d7d5f..6031d2ce258 100644 --- a/op-batcher/batcher/sync_actions.go +++ b/op-batcher/batcher/sync_actions.go @@ -114,7 +114,7 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur // that the derivation pipeline may have stalled // e.g. because of Holocene strict ordering rules. l.Warn("sequencer did not make expected progress", - "existingBlock", eth.ToBlockID(blocks[numBlocksToDequeue-1]), + "existingBlock", ch.LatestL2(), "newSafeBlock", newSyncStatus.SafeL2, "syncActions", startAfresh) return startAfresh, false From 1ac5a35f66dba762e26275d1bfe0a64121f4da97 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 09:38:05 -0700 Subject: [PATCH 087/111] ci: revert cannon-stf-verify (#13289) --- .circleci/config.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6fd1da6335c..1cdb7c4fe9d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -947,10 +947,11 @@ jobs: mentions: "@proofs-team" cannon-stf-verify: - machine: true - resource_class: ethereum-optimism/latitude-1 + docker: + - image: <> steps: - checkout + - setup_remote_docker - run: name: Build cannon command: make cannon From bb6f5001e9a668c51c54c3e07645373dd06b6300 Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Fri, 6 Dec 2024 19:52:29 +0300 Subject: [PATCH 088/111] docs: Fix formatting issue in "Production Releases" Update README.md (#13281) Fix formatting issue in "Production Releases" section --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bda740f63c2..498b1a29b71 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ For contract releases, refer to the GitHub release notes for a given release whi Tags of the form `v`, such as `v1.1.4`, indicate releases of all Go code only, and **DO NOT** include smart contracts. This naming scheme is required by Golang. 
-In the above list, this means these `v` releases contain all `op-*` components and exclude all `contracts-*` components. `op-geth` embeds upstream geth’s version inside its own version as follows: `vMAJOR.GETH_MAJOR GETH_MINOR GETH_PATCH.PATCH`. Basically, geth’s version is our minor version. From d949564d754451b694484ff563007a130472e97d Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Sat, 7 Dec 2024 01:09:58 +0800 Subject: [PATCH 089/111] add `CheckAndDial` to avoid duplicate code (#13146) * Don't repeat yourself * add a log for loadBlocksIntoState * op-batcher: fix log in batcher/driver.go * modify log --------- Co-authored-by: protolambda Co-authored-by: Matthew Slipper --- op-batcher/batcher/driver.go | 6 ++++++ op-service/client/rpc.go | 22 +++++++++++++--------- op-service/dial/dial.go | 11 +---------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index a2b2bd88122..5021f1ff37c 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -248,6 +248,12 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context, start, end uin if end < start { return fmt.Errorf("start number is > end number %d,%d", start, end) } + + // we don't want to print it in the 1-block case as `loadBlockIntoState` already does + if end > start { + l.Log.Info("Loading range of multiple blocks into state", "start", start, "end", end) + } + var latestBlock *types.Block // Add all blocks to "state" for i := start; i <= end; i++ { diff --git a/op-service/client/rpc.go b/op-service/client/rpc.go index c37a4a53dd0..a1413d0d9d9 100644 --- a/op-service/client/rpc.go +++ b/op-service/client/rpc.go @@ -152,18 +152,22 @@ func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, bOff = retry.Fixed(cfg.fixedDialBackoff) } return retry.Do(ctx, cfg.backoffAttempts, bOff, func() (*rpc.Client, error) { - if !IsURLAvailable(ctx, addr) { - log.Warn("failed to 
dial address, but may connect later", "addr", addr) - return nil, fmt.Errorf("address unavailable (%s)", addr) - } - client, err := rpc.DialOptions(ctx, addr, cfg.gethRPCOptions...) - if err != nil { - return nil, fmt.Errorf("failed to dial address (%s): %w", addr, err) - } - return client, nil + return CheckAndDial(ctx, log, addr, cfg.gethRPCOptions...) }) } +func CheckAndDial(ctx context.Context, log log.Logger, addr string, options ...rpc.ClientOption) (*rpc.Client, error) { + if !IsURLAvailable(ctx, addr) { + log.Warn("failed to dial address, but may connect later", "addr", addr) + return nil, fmt.Errorf("address unavailable (%s)", addr) + } + client, err := rpc.DialOptions(ctx, addr, options...) + if err != nil { + return nil, fmt.Errorf("failed to dial address (%s): %w", addr, err) + } + return client, nil +} + func IsURLAvailable(ctx context.Context, address string) bool { u, err := url.Parse(address) if err != nil { diff --git a/op-service/dial/dial.go b/op-service/dial/dial.go index ee7ca35e588..4bd38333189 100644 --- a/op-service/dial/dial.go +++ b/op-service/dial/dial.go @@ -2,7 +2,6 @@ package dial import ( "context" - "fmt" "time" "github.com/ethereum-optimism/optimism/op-service/client" @@ -72,13 +71,5 @@ func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string) // Dials a JSON-RPC endpoint once. 
func dialRPCClient(ctx context.Context, log log.Logger, addr string) (*rpc.Client, error) { - if !client.IsURLAvailable(ctx, addr) { - log.Warn("failed to dial address, but may connect later", "addr", addr) - return nil, fmt.Errorf("address unavailable (%s)", addr) - } - client, err := rpc.DialOptions(ctx, addr) - if err != nil { - return nil, fmt.Errorf("failed to dial address (%s): %w", addr, err) - } - return client, nil + return client.CheckAndDial(ctx, log, addr) } From 354337cfd8f97f2d009c0a6667ecc4b67217bee5 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Fri, 6 Dec 2024 18:14:12 +0100 Subject: [PATCH 090/111] txmgr: Disable default batcher tx send timeout (#13284) With Holocene, batcher transaction ordering has to be strictly preserved, so trying to send a transaction candidate should just never timeout. Note that the txmgr will still bump fees to get a transaction for the same nonce submitted. --- op-service/txmgr/cli.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-service/txmgr/cli.go b/op-service/txmgr/cli.go index fc1aa2cceb0..b345f43bd2d 100644 --- a/op-service/txmgr/cli.go +++ b/op-service/txmgr/cli.go @@ -79,7 +79,7 @@ var ( MinBaseFeeGwei: 1.0, ResubmissionTimeout: 48 * time.Second, NetworkTimeout: 10 * time.Second, - TxSendTimeout: 10 * time.Minute, + TxSendTimeout: 0, // Try sending txs indefinitely, to preserve tx ordering for Holocene TxNotInMempoolTimeout: 2 * time.Minute, ReceiptQueryInterval: 12 * time.Second, } From f21f95e231327838e713791635d3e44d2551c461 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Fri, 6 Dec 2024 12:37:57 -0500 Subject: [PATCH 091/111] fix: have semver-lock build contracts by default (#13223) semver-lock justfile task didn't build by default which was confusing. Now it does. 
--- packages/contracts-bedrock/justfile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 423ee453d6b..c4669f63130 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -104,10 +104,13 @@ kontrol-summary-full: kontrol-summary kontrol-summary-fp snapshots-abi-storage: go run ./scripts/autogen/generate-snapshots . -# Updates the snapshots/semver-lock.json file. -semver-lock: +# Updates the snapshots/semver-lock.json file without building contracts. +semver-lock-no-build: go run scripts/autogen/generate-semver-lock/main.go +# Updates the snapshots/semver-lock.json file. +semver-lock: build semver-lock-no-build + # Generates core snapshots without building contracts. Currently just an alias for # snapshots-abi-storage because we no longer run Kontrol snapshots here. Run # kontrol-summary-full to build the Kontrol summaries if necessary. From f0461a141ae456c00df8ecb9e099e1310e501d92 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Fri, 6 Dec 2024 19:47:46 +0100 Subject: [PATCH 092/111] ctb: remove unnecessary vm.assumes (#13264) * test utils * test utils * rm unncesessary vm.assume * rm unncesessary vm.assume * rm unncesessary vm.assume * fixes... 
* rm testutils --- .../test/L1/DataAvailabilityChallenge.t.sol | 47 ++++++++++--------- .../test/L1/OptimismPortal.t.sol | 42 ++++++++++------- .../test/L1/OptimismPortal2.t.sol | 33 ++++++++----- .../test/L1/SystemConfig.t.sol | 4 +- .../test/L2/CrossL2Inbox.t.sol | 8 ++-- .../test/L2/L1BlockInterop.t.sol | 2 - .../test/L2/L2ToL2CrossDomainMessenger.t.sol | 2 +- .../contracts-bedrock/test/cannon/MIPS2.t.sol | 7 +-- .../test/libraries/Bytes.t.sol | 17 +++++-- .../test/libraries/GasPayingToken.t.sol | 12 +++-- .../test/libraries/SafeCall.t.sol | 2 +- .../test/libraries/TransientContext.t.sol | 8 ++-- .../drippie/dripchecks/CheckBalanceLow.t.sol | 9 ++-- .../test/universal/CrossDomainMessenger.t.sol | 7 ++- 14 files changed, 120 insertions(+), 80 deletions(-) diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index ab0ca82c61f..05daec205ad 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -36,7 +36,6 @@ contract DataAvailabilityChallengeTest is CommonTest { // EntryPoint will revert if using amount > type(uint112).max. 
vm.assume(sender != Preinstalls.EntryPoint_v060); vm.assume(sender != address(dataAvailabilityChallenge)); - vm.assume(sender.balance == 0); vm.deal(sender, amount); vm.prank(sender); @@ -59,7 +58,6 @@ contract DataAvailabilityChallengeTest is CommonTest { vm.assume(sender != Preinstalls.EntryPoint_v060); vm.assume(sender != address(dataAvailabilityChallenge)); vm.assume(sender != deploy.mustGetAddress("DataAvailabilityChallenge")); - vm.assume(sender.balance == 0); vm.deal(sender, amount); vm.prank(sender); @@ -86,10 +84,11 @@ contract DataAvailabilityChallengeTest is CommonTest { vm.assume(challenger != address(0)); // Assume the block number is not close to the max uint256 value - vm.assume( - challengedBlockNumber - < type(uint256).max - dataAvailabilityChallenge.challengeWindow() - - dataAvailabilityChallenge.resolveWindow() + challengedBlockNumber = bound( + challengedBlockNumber, + 0, + type(uint256).max - dataAvailabilityChallenge.challengeWindow() - dataAvailabilityChallenge.resolveWindow() + - 1 ); uint256 requiredBond = dataAvailabilityChallenge.bondSize(); @@ -139,10 +138,11 @@ contract DataAvailabilityChallengeTest is CommonTest { vm.assume(challenger != address(0)); // Assume the block number is not close to the max uint256 value - vm.assume( - challengedBlockNumber - < type(uint256).max - dataAvailabilityChallenge.challengeWindow() - - dataAvailabilityChallenge.resolveWindow() + challengedBlockNumber = bound( + challengedBlockNumber, + 0, + type(uint256).max - dataAvailabilityChallenge.challengeWindow() - dataAvailabilityChallenge.resolveWindow() + - 1 ); uint256 requiredBond = dataAvailabilityChallenge.bondSize(); @@ -265,10 +265,11 @@ contract DataAvailabilityChallengeTest is CommonTest { dataAvailabilityChallenge.setResolverRefundPercentage(resolverRefundPercentage); // Assume the block number is not close to the max uint256 value - vm.assume( - challengedBlockNumber - < type(uint256).max - dataAvailabilityChallenge.challengeWindow() - - 
dataAvailabilityChallenge.resolveWindow() + challengedBlockNumber = bound( + challengedBlockNumber, + 0, + type(uint256).max - dataAvailabilityChallenge.challengeWindow() - dataAvailabilityChallenge.resolveWindow() + - 1 ); bytes memory challengedCommitment = computeCommitmentKeccak256(preImage); @@ -356,10 +357,11 @@ contract DataAvailabilityChallengeTest is CommonTest { dataAvailabilityChallenge.setResolverRefundPercentage(resolverRefundPercentage); // Assume the block number is not close to the max uint256 value - vm.assume( - challengedBlockNumber - < type(uint256).max - dataAvailabilityChallenge.challengeWindow() - - dataAvailabilityChallenge.resolveWindow() + challengedBlockNumber = bound( + challengedBlockNumber, + 0, + type(uint256).max - dataAvailabilityChallenge.challengeWindow() - dataAvailabilityChallenge.resolveWindow() + - 1 ); bytes memory challengedCommitment = computeCommitmentKeccak256(wrongPreImage); @@ -458,10 +460,11 @@ contract DataAvailabilityChallengeTest is CommonTest { function test_unlockBond_succeeds(bytes memory preImage, uint256 challengedBlockNumber) public { // Assume the block number is not close to the max uint256 value - vm.assume( - challengedBlockNumber - < type(uint256).max - dataAvailabilityChallenge.challengeWindow() - - dataAvailabilityChallenge.resolveWindow() + challengedBlockNumber = bound( + challengedBlockNumber, + 0, + type(uint256).max - dataAvailabilityChallenge.challengeWindow() - dataAvailabilityChallenge.resolveWindow() + - 1 ); bytes memory challengedCommitment = computeCommitmentKeccak256(preImage); diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index b575fdacff4..77b0d9833ea 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -162,14 +162,16 @@ contract OptimismPortal_Test is CommonTest { uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes 
memory _data + bytes calldata _data ) internal { if (_isCreation) { _to = address(0); } - vm.assume(_data.length <= 120_000); + if (_data.length > 120_000) { + _data = _data[0:120_000]; + } IResourceMetering.ResourceConfig memory rcfg = systemConfig.resourceConfig(); _gasLimit = uint64(bound(_gasLimit, optimismPortal.minimumGasLimit(uint64(_data.length)), rcfg.maxResourceLimit)); @@ -207,7 +209,7 @@ contract OptimismPortal_Test is CommonTest { uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -232,7 +234,7 @@ contract OptimismPortal_Test is CommonTest { uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -476,13 +478,17 @@ contract OptimismPortal_Test is CommonTest { /// `depositTransaction` function. This is a simple differential test. function test_setGasPayingToken_correctEvent_succeeds( address _token, - string memory _name, - string memory _symbol + string calldata _name, + string calldata _symbol ) external { - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); + if (bytes(_name).length > 32) { + _name = _name[0:32]; + } + if (bytes(_symbol).length > 32) { + _symbol = _symbol[0:32]; + } bytes32 name = GasPayingToken.sanitize(_name); bytes32 symbol = GasPayingToken.sanitize(_symbol); @@ -1436,14 +1442,16 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) internal { if (_isCreation) { _to = address(0); } - vm.assume(_data.length <= 120_000); + if (_data.length > 120_000) { + _data = _data[0:120_000]; + } IResourceMetering.ResourceConfig memory rcfg = systemConfig.resourceConfig(); _gasLimit = uint64(bound(_gasLimit, optimismPortal.minimumGasLimit(uint64(_data.length)), rcfg.maxResourceLimit)); @@ -1482,7 +1490,7 @@ contract OptimismPortalWithMockERC20_Test is 
OptimismPortal_FinalizeWithdrawal_T uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -1507,7 +1515,7 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -1663,14 +1671,16 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) internal { if (_isCreation) { _to = address(0); } - vm.assume(_data.length <= 120_000); + if (_data.length > 120_000) { + _data = _data[0:120_000]; + } IResourceMetering.ResourceConfig memory rcfg = systemConfig.resourceConfig(); _gasLimit = uint64(bound(_gasLimit, optimismPortal.minimumGasLimit(uint64(_data.length)), rcfg.maxResourceLimit)); @@ -1704,7 +1714,7 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -1727,7 +1737,7 @@ contract OptimismPortalWithMockERC20_Test is OptimismPortal_FinalizeWithdrawal_T uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index 830323936a6..309abd39089 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -329,13 +329,17 @@ contract OptimismPortal2_Test is CommonTest { /// `depositTransaction` function. This is a simple differential test. 
function test_setGasPayingToken_correctEvent_succeeds( address _token, - string memory _name, - string memory _symbol + string calldata _name, + string calldata _symbol ) external { - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); + if (bytes(_name).length > 32) { + _name = _name[0:32]; + } + if (bytes(_symbol).length > 32) { + _symbol = _symbol[0:32]; + } bytes32 name = GasPayingToken.sanitize(_name); bytes32 symbol = GasPayingToken.sanitize(_symbol); @@ -1711,14 +1715,16 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) internal { if (_isCreation) { _to = address(0); } - vm.assume(_data.length <= 120_000); + if (_data.length > 120_000) { + _data = _data[0:120_000]; + } IResourceMetering.ResourceConfig memory rcfg = systemConfig.resourceConfig(); _gasLimit = uint64(bound(_gasLimit, optimismPortal2.minimumGasLimit(uint64(_data.length)), rcfg.maxResourceLimit)); @@ -1757,7 +1763,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -1782,7 +1788,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -1947,14 +1953,17 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) internal { if (_isCreation) { _to = address(0); } - vm.assume(_data.length <= 120_000); + if (_data.length > 120_000) { + _data = _data[0:120_000]; + } + IResourceMetering.ResourceConfig memory rcfg = systemConfig.resourceConfig(); _gasLimit = uint64(bound(_gasLimit, optimismPortal2.minimumGasLimit(uint64(_data.length)), 
rcfg.maxResourceLimit)); @@ -1988,7 +1997,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { @@ -2011,7 +2020,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal uint256 _value, uint64 _gasLimit, bool _isCreation, - bytes memory _data + bytes calldata _data ) external { diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index f7cea088bcf..1e9e565fb32 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -639,8 +639,8 @@ contract SystemConfig_Setters_Test is SystemConfig_Init { /// @dev Tests that `setEIP1559Params` updates the EIP1559 parameters successfully. function testFuzz_setEIP1559Params_succeeds(uint32 _denominator, uint32 _elasticity) external { - vm.assume(_denominator > 1); - vm.assume(_elasticity > 1); + _denominator = uint32(bound(_denominator, 2, type(uint32).max)); + _elasticity = uint32(bound(_elasticity, 2, type(uint32).max)); vm.expectEmit(address(systemConfig)); emit ConfigUpdate( diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 100019034df..918b0f4b8da 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -298,7 +298,7 @@ contract CrossL2InboxTest is Test { /// @dev Tests that the `executeMessage` function reverts when called with an identifier with an invalid timestamp. 
function testFuzz_executeMessage_invalidTimestamp_reverts( - Identifier calldata _id, + Identifier memory _id, address _target, bytes calldata _message, uint256 _value @@ -307,7 +307,7 @@ contract CrossL2InboxTest is Test { setInteropStart { // Ensure that the id's timestamp is invalid (greater than the current block timestamp) - vm.assume(_id.timestamp > block.timestamp); + _id.timestamp = bound(_id.timestamp, block.timestamp + 1, type(uint256).max); // Ensure is not a deposit transaction vm.mockCall({ @@ -488,7 +488,7 @@ contract CrossL2InboxTest is Test { /// @dev Tests that the `validateMessage` function reverts when called with an identifier with a timestamp later /// than current block.timestamp. function testFuzz_validateMessage_invalidTimestamp_reverts( - Identifier calldata _id, + Identifier memory _id, bytes32 _messageHash ) external @@ -502,7 +502,7 @@ contract CrossL2InboxTest is Test { }); // Ensure that the id's timestamp is invalid (greater than the current block timestamp) - vm.assume(_id.timestamp > block.timestamp); + _id.timestamp = bound(_id.timestamp, block.timestamp + 1, type(uint256).max); // Expect a revert with the InvalidTimestamp selector vm.expectRevert(InvalidTimestamp.selector); diff --git a/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol index a5e086c86d5..40dfa459e16 100644 --- a/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol @@ -64,8 +64,6 @@ contract L1BlockInteropTest is CommonTest { /// @dev Tests that the dependency set size is correct when adding an arbitrary number of chain IDs. 
function testFuzz_dependencySetSize_succeeds(uint8 _dependencySetSize) public prankDepositor { - vm.assume(_dependencySetSize <= type(uint8).max); - uint256 uniqueCount = 0; for (uint256 i = 0; i < _dependencySetSize; i++) { diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index 3b431485369..0c064f68605 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -144,7 +144,7 @@ contract L2ToL2CrossDomainMessengerTest is Test { vm.assume(_target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); // Ensure that _value is greater than 0 - vm.assume(_value > 0); + _value = bound(_value, 1, type(uint256).max); // Add sufficient value to the contract to send the message with vm.deal(address(this), _value); diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index 07c5883c17b..bd2522f83fa 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -1130,7 +1130,7 @@ contract MIPS2_Test is CommonTest { ) public { - vm.assume(_wakeup != sys.FUTEX_EMPTY_ADDR); + _wakeup = uint32(_bound(_wakeup, 0, sys.FUTEX_EMPTY_ADDR - 1)); threading.createThread(); threading.createThread(); @@ -1178,8 +1178,9 @@ contract MIPS2_Test is CommonTest { ) public { - vm.assume(_wakeup != sys.FUTEX_EMPTY_ADDR); - vm.assume(_wakeup != _futexAddr); + // -2 incase _wakeup == _futexAddr and _wakeup needs to be incremented + _wakeup = uint32(_bound(_wakeup, 0, sys.FUTEX_EMPTY_ADDR - 2)); + if (_wakeup == _futexAddr) _wakeup++; threading.createThread(); threading.createThread(); diff --git a/packages/contracts-bedrock/test/libraries/Bytes.t.sol b/packages/contracts-bedrock/test/libraries/Bytes.t.sol index e38d369ab55..57c5950058d 100644 --- 
a/packages/contracts-bedrock/test/libraries/Bytes.t.sol +++ b/packages/contracts-bedrock/test/libraries/Bytes.t.sol @@ -127,10 +127,19 @@ contract Bytes_slice_TestFail is Test { /// @notice Tests that, when given an input bytes array of length `n`, the `slice` function will /// always revert if `_start + _length > n`. function testFuzz_slice_outOfBounds_reverts(bytes memory _input, uint256 _start, uint256 _length) public { - // We want a valid start index and a length that will not overflow. - vm.assume(_start < _input.length && _length < type(uint256).max - 31); + // We want a valid start index that will not overflow. + if (_input.length == 0) { + _start = 0; + } else { + _start = bound(_start, 0, _input.length - 1); + } + // And a length that will not overflow. // But, we want an invalid slice length. - vm.assume(_start + _length > _input.length); + if (_start > 31) { + _length = bound(_length, (_input.length - _start) + 1, type(uint256).max - _start); + } else { + _length = bound(_length, (_input.length - _start) + 1, type(uint256).max - 31); + } vm.expectRevert("slice_outOfBounds"); Bytes.slice(_input, _start, _length); @@ -140,7 +149,7 @@ contract Bytes_slice_TestFail is Test { /// the `slice` function reverts. function testFuzz_slice_lengthOverflows_reverts(bytes memory _input, uint256 _start, uint256 _length) public { // Ensure that the `_length` will overflow if a number >= 31 is added to it. 
- vm.assume(_length > type(uint256).max - 31); + _length = uint256(bound(_length, type(uint256).max - 30, type(uint256).max)); vm.expectRevert("slice_overflow"); Bytes.slice(_input, _start, _length); diff --git a/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol b/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol index f6ad014db3a..b2b87eeee41 100644 --- a/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol +++ b/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol @@ -57,15 +57,19 @@ contract GasPayingToken_Roundtrip_Test is Test { function testFuzz_setGetWithSanitize_succeeds( address _token, uint8 _decimals, - string memory _name, - string memory _symbol + string calldata _name, + string calldata _symbol ) external { vm.assume(_token != address(0)); - vm.assume(bytes(_name).length <= 32); - vm.assume(bytes(_symbol).length <= 32); vm.assume(_token != Constants.ETHER); + if (bytes(_name).length > 32) { + _name = string(bytes(_name)[0:32]); + } + if (bytes(_symbol).length > 32) { + _symbol = string(bytes(_symbol)[0:32]); + } GasPayingToken.set(_token, _decimals, GasPayingToken.sanitize(_name), GasPayingToken.sanitize(_symbol)); diff --git a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol index d05e952e44d..b87970dbb23 100644 --- a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol +++ b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol @@ -12,7 +12,7 @@ import { SafeCall } from "src/libraries/SafeCall.sol"; contract SafeCall_Test is Test { /// @notice Helper function to deduplicate code. Makes all assumptions required for these tests. 
function assumeNot(address _addr) internal { - vm.assume(_addr.balance == 0); + vm.deal(_addr, 0); vm.assume(_addr != address(this)); assumeAddressIsNot(_addr, StdCheatsSafe.AddressType.ForgeAddress, StdCheatsSafe.AddressType.Precompile); } diff --git a/packages/contracts-bedrock/test/libraries/TransientContext.t.sol b/packages/contracts-bedrock/test/libraries/TransientContext.t.sol index 20e8434b9c1..e075a61b9f1 100644 --- a/packages/contracts-bedrock/test/libraries/TransientContext.t.sol +++ b/packages/contracts-bedrock/test/libraries/TransientContext.t.sol @@ -26,7 +26,7 @@ contract TransientContextTest is Test { /// @notice Tests that `increment()` increments the call depth. /// @param _startingCallDepth Starting call depth. function testFuzz_increment_succeeds(uint256 _startingCallDepth) public { - vm.assume(_startingCallDepth < type(uint256).max); + _startingCallDepth = bound(_startingCallDepth, 0, type(uint256).max - 1); assembly ("memory-safe") { tstore(sload(callDepthSlot.slot), _startingCallDepth) } @@ -39,7 +39,7 @@ contract TransientContextTest is Test { /// @notice Tests that `decrement()` decrements the call depth. /// @param _startingCallDepth Starting call depth. function testFuzz_decrement_succeeds(uint256 _startingCallDepth) public { - vm.assume(_startingCallDepth > 0); + _startingCallDepth = bound(_startingCallDepth, 1, type(uint256).max); assembly ("memory-safe") { tstore(sload(callDepthSlot.slot), _startingCallDepth) } @@ -144,7 +144,7 @@ contract TransientReentrancyAwareTest is TransientContextTest, TransientReentran /// @param _slot Slot to test. /// @param _value Value to test. 
function testFuzz_reentrantAware_succeeds(uint256 _callDepth, bytes32 _slot, uint256 _value) public { - vm.assume(_callDepth < type(uint256).max); + _callDepth = bound(_callDepth, 0, type(uint256).max - 1); assembly ("memory-safe") { tstore(sload(callDepthSlot.slot), _callDepth) } @@ -172,7 +172,7 @@ contract TransientReentrancyAwareTest is TransientContextTest, TransientReentran ) public { - vm.assume(_callDepth < type(uint256).max - 1); + _callDepth = bound(_callDepth, 0, type(uint256).max - 2); assembly ("memory-safe") { tstore(sload(callDepthSlot.slot), _callDepth) } diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol index bd4d22de828..b06f11dfb62 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol @@ -23,10 +23,13 @@ contract CheckBalanceLowTest is Test { /// @notice Fuzz the `check` function and assert that it always returns true /// when the target's balance is smaller than the threshold. 
- function testFuzz_check_succeeds(address _target, uint256 _threshold) external view { + function testFuzz_check_succeeds(address _target, uint256 _threshold) external { CheckBalanceLow.Params memory p = CheckBalanceLow.Params({ target: _target, threshold: _threshold }); - vm.assume(_target.balance < _threshold); + if (_target.balance >= p.threshold) { + if (_threshold == 0) p.threshold = 1; + vm.deal(_target, p.threshold - 1); + } assertEq(c.check(abi.encode(p)), true); } @@ -37,7 +40,7 @@ contract CheckBalanceLowTest is Test { CheckBalanceLow.Params memory p = CheckBalanceLow.Params({ target: _target, threshold: _threshold }); // prevent overflows - vm.assume(_threshold != type(uint256).max); + _threshold = bound(_threshold, 0, type(uint256).max - 1); vm.deal(_target, _threshold + 1); assertEq(c.check(abi.encode(p)), false); diff --git a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol index 12cc2a8c53b..d4b1bc3c54d 100644 --- a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol @@ -30,8 +30,11 @@ contract CrossDomainMessenger_BaseGas_Test is CommonTest { /// or equal to the minimum gas limit value on the OptimismPortal. /// This guarantees that the messengers will always pass sufficient /// gas to the OptimismPortal. 
- function testFuzz_baseGas_portalMinGasLimit_succeeds(bytes memory _data, uint32 _minGasLimit) external view { - vm.assume(_data.length <= type(uint64).max); + function testFuzz_baseGas_portalMinGasLimit_succeeds(bytes calldata _data, uint32 _minGasLimit) external view { + if (_data.length > type(uint64).max) { + _data = _data[0:type(uint64).max]; + } + uint64 baseGas = l1CrossDomainMessenger.baseGas(_data, _minGasLimit); uint64 minGasLimit = optimismPortal.minimumGasLimit(uint64(_data.length)); assertTrue(baseGas >= minGasLimit); From 17511a21dc9714049350f4e7a57228ba8b751273 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 6 Dec 2024 20:25:03 +0100 Subject: [PATCH 093/111] op-supervisor: readme (#12819) * op-supervisor: readme draft * op-supervisor: fix review suggestions * op-supervisor: readme extension * op-supervisor: fix mermaid diagram syntax --- op-supervisor/README.md | 299 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 299 insertions(+) create mode 100644 op-supervisor/README.md diff --git a/op-supervisor/README.md b/op-supervisor/README.md new file mode 100644 index 00000000000..20159aa4db1 --- /dev/null +++ b/op-supervisor/README.md @@ -0,0 +1,299 @@ +# `op-supervisor` + +Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-supervisor) + +Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-supervisor) + +User docs: +- [op-supervisor](https://docs.optimism.io/stack/interop/op-supervisor) + +Specs: +- [interop specs] + +`op-supervisor` is a service to monitor chains, and quickly determine +cross-chain message safety, for native interoperability. +The `op-supervisor` functions as a [superchain backend], implementing the [interop specs]. 
+ +[superchain backend]: https://github.com/ethereum-optimism/design-docs/blob/main/protocol/superchain-backend.md +[interop specs]: https://github.com/ethereum-optimism/specs/tree/main/specs/interop + +*Warning: this implementation is a work in progress, in active development.* + + +## Quickstart + +```bash +make op-supervisor + +# Key configurables: +# datadir: where to store indexed interop data +# dependency-set: where to find chain dependencies (this format is changing, and may be fully onchain in a later iteration) +# l2-rpcs: L2 RPC endpoints to fetch data from (optional, can also be added using the `admin_addL2RPC in the admin-RPC) +./bin/op-supervisor \ + --datadir="./op-supervisor-data" \ + --dependency-set="./my-network-configs/dependency-set.json" \ + --l2-rpcs="ws://example1:8545,ws://example2:8545" \ + --rpc.enable-admin \ + --rpc.port=8545 +``` + +## Usage + +### Build from source + +```bash +# from op-supervisor dir: +make op-supervisor +./bin/op-supervisor --help +``` + +### Run from source + +```bash +# from op-supervisor dir: +go run ./cmd --help +``` + +### Build docker image + +See `op-supervisor` docker-bake target. + +## Overview + +### About safety + +There are 3 stages of block safety: + +- `unsafe`: optimistically processed blocks +- `safe`: blocks reproducible from valid dependencies +- `finalized`: blocks reproducible from irreversibly valid dependencies + +**Pre-interop**, the only dependency is DA (data availability), i.e. the batch data to derive the chain from. +**Post-interop**, other L2s may be a dependency also. +The op-supervisor tracks these dependencies, to maintain a global view of cross-chain message safety. + +New blocks are considered `local unsafe`: sufficient to process the block locally, without guarantees. +Once the L2 dependencies are met we consider it `cross unsafe`: still missing DA, but forming a valid messaging graph. 
+ +Once the DA dependency is met, we consider it `local safe`: +enough to reproduce the local L2 chain content, but not to reason about cross-L2 interactions. + +Once both L2 and DA dependencies are met, we consider it `cross safe`. +A `cross-safe` block may be "derived from" a L1 block that confirms all L2 data to reproduce +the local chain as well as the cross-L2 dependencies. +Hence this may take additional L1 data, beyond what a `local safe` block is derived from. + +And once the dependencies become irreversibly valid, we consider it `finalized`. +We can thus look at what `cross-safe` has been derived from, and verify against the + +```mermaid +flowchart TD + LocalUnsafe[Local Unsafe]--Pass cross-L2 checks-->CrossUnsafe[Cross Unsafe] + LocalUnsafe--Individually reproducible from L1-->LocalSafe + LocalSafe[Local Safe]--All cross-L2 dependencies checked
and reproducible from L1-->CrossSafe[Cross Safe] + CrossSafe--Dependencies are irreversible-->Finalized +``` + +### Verification flow + +Warning: the data flow design is actively changing, see [design-doc 171]. + +[design-doc 171]: https://github.com/ethereum-optimism/design-docs/pull/171 + +Op-nodes, or any compatible consensus-layer L2 node, interact with the op-supervisor, to: + +- share the "local" data with the supervisor +- view the "cross" safety once the supervisor has sufficient information + +```mermaid +sequenceDiagram +autonumber + +participant opgethA as op-geth A +participant opnodeA as op-node A +participant opsup as op-supervisor +participant opnodeB as op-node B + +Note over opnodeA: on new block + +opnodeA ->> opgethA: engine process unsafe block +opgethA -->> opnodeA: engine proccessed unsafe block +opnodeA ->> opsup: update Local unsafe +opnodeB ->> opsup: update Local unsafe (maybe) +opsup ->> opgethA: Fetch receipts +opgethA -->> opsup: receipts + +opsup ->> opsup: cross-unsafe worker + +Note left of opnodeA: (changing - delay unsafeView call) + +opnodeA ->> opsup: unsafeView +opsup -->> opnodeA: cross unsafe +opnodeA ->> opnodeA: reorg if we need to +opnodeA ->> opnodeA: backtrack unsafe if we need to + +Note over opnodeA: on derived block + +opnodeA ->> opsup: update Local safe +opnodeB ->> opsup: update Local safe (maybe) +opsup ->> opsup: cross-safe worker + +Note left of opnodeA: (changing - delay safeView call) + +opnodeA ->> opsup: safeView +opsup -->> opnodeA: cross safe + +opnodeA ->> opnodeA: reorg if we need to +opnodeA ->> opnodeA: backtrack safe if we need to + +opnodeA->>opgethA: engine forkchoice-update of safe block + +Note over opnodeA: on finalized L1 + +opnodeA->>opsup: finalized L1 +opsup-->>opnodeA: finalized L2 + +opnodeA->>opgethA: engine forkchoice-update of finalized block +``` + +Implementers note: the op-supervisor needs "local" data +from the chains before being able to provide "cross" verified updated views. 
+The op-node is not currently notified when the "cross" verified view changes, +and thus relies on a revisit of the op-supervisor to determine change. + +### Databases + +The op-supervisor maintains a few databases: +- Log database (`events` kind): per chain, we maintain a running list of log-events, + separated by block-seals. + I.e. this persists the cross-L2 dependency information. +- `local safe` (`fromda` kind): per chain, we store which L2 block + was locally derived from which L1 block. + I.e. this persists the DA dependency information. +- `cross safe` (`fromda` kind): per chain, we store which L2 block + became cross-safe, given all the L2 data available, at which L1 block. + I.e. this persists the merged results of verifying both DA and cross-L2 dependencies. + +Additionally, the op-supervisor tracks `cross unsafe` in memory, not persisting it to a database: +it can quickly reproduce this after data-loss by verifying if cross-L2 dependencies +are met by `unsafe` data, starting from the latest known `cross safe` block. + +The latest `L1 finalized` block is tracked ephemerally as well: +the `L2 finalized` block is determined dynamically, +given what was `cross safe` at this finalized point in L1. + +For both the `events` and `fromda` DB kinds an append-only format was chosen +to make the database efficient and robust: +data can be read in parallel, does not require compaction (a known problem with execution-layer databases), +and data can always be rewound to a previous consistent state by truncating to a checkpoint. +The database can be searched with binary lookups, and written with O(1) appends. 
+ +### Internal Architecture + +```mermaid +flowchart TD + user-->opnode + user-->opgeth + opnode[op-node]==block checks==>frontend[frontend RPC] + opgeth[op-geth]==tx-pool checks==>frontend + + frontend<==>backend + + backend--local unsafe updates-->chainprocessor + backend--local safe updates-->localFromDA + chainsDB--query results-->backend + + crossunsafeworker[Cross unsafe worker
per chain] + crosssafeworker[Cross safe worker
per chain] + + subgraph chainsDB[Chains DB] + logDB[Event Log DB
per chain] + localFromDA[Local-safe DB
per chain] + crossFromDA[Cross-safe DB
per chain] + + crossunsafe[Cross-unsafe
per chain] + + finalizedL1[Finalized L1] + end + + chainprocessor[Chain processor
per chain] + + opgeth--blocks/receipts-->chainprocessor + chainprocessor--block-seal and log entries-->logDB + + logDB--candidate
unsafe blocks-->crossunsafeworker + logDB--msg reads-->crossunsafeworker + crossunsafeworker -- cross-unsafe
updates --> crossunsafe + + localFromDA--candidate
safe blocks-->crosssafeworker + + logDB--msg reads-->crosssafeworker + crosssafeworker--cross-safe
updates-->crossFromDA + crossFromDA--known
cross-safe-->crosssafeworker +``` + +Main components: +- `frontend`: public API surface +- `backend`: implements the API (updates, queries, reorgs) +- `ChainsDB`: hosts the databases, one of each kind, per chain +- `Chain processor`: indexes blocks/events, including unsafe blocks +- `Cross-unsafe worker`: updates cross-unsafe, by cross-verifying unsafe data +- `Cross-safe worker`: updates cross-safe, by cross-verifying safe data within an L1 view + +Note that the `cross-unsafe` worker operates on any available L2 dependency data, +whereas the `cross-safe` worker incrementally expands the L1 scope, +to capture the `cross-safe` state relative to each L1 block. + +Most supervisor branching logic deals with the edge-cases that come with +syncing dependency data, and updating the safety views as the dependencies change. +This is where the service differs most from interop development simulations: +*dependency verification is critical to safety*, +and requires dependencies on DA to be consolidated with the dependencies on cross-chain messaging. + + +## Product + +### Optimization target + +The `op-supervisor` implementation optimizes safe determination of cross-chain message safety, +with fast feedback to readers. + +Data is indexed fast and optimistically to have a minimum level of feedback about a message or block. +Indexing changes are then propagated, allowing the safety-checks to quickly +follow up with asynchronous full verification of the safety. + +### Vision + +The `op-supervisor` is actively changing. +The most immediate changes are those to the architecture and data flow, as outlined in [design-doc 171]. + +Full support for chain reorgs (detecting them, and resolving them) is the +next priority after the above architecture and data changes. + +Further background on the design-choices of op-supervisor can be found in the +[superchain backend design-doc](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/superchain-backend.md). 
+ +## Design principles + +- Each indexing or safety kind of change is encapsulated in its own asynchronous job. +- Increments in indexing and safety are propagated, such that other follow-up work can be triggered without delay. +- A read-only subset of the API is served, sufficient for nodes to stay in sync, assuming a healthy op-supervisor. +- Databases are rewound trivially by dropping trailing information. +- Databases can be copied at any time, for convenient snapshots. + +## Failure modes + +See [design-doc 171] for discussion of missing data and syncing related failure modes. + +Generally the supervisor aims to provide existing static data in the case of disruption of cross-chain verification, +such that a chain which does not take on new interop dependencies, can continue to be extended with safe blocks. + +I.e. safety must be guaranteed at all times, +but a minimal level of liveness can be maintained by holding off on cross-chain message acceptance +while allowing regular single-chain functionality to proceed. + +## Testing + +- `op-e2e/interop`: Go interop system-tests, focused on offchain aspects of services to run end to end. +- `op-e2e/actions/interop`: Go interop action-tests, focused on onchain aspects such as safety and state-transition. +- `interop-devnet`: docker-compose to run interoperable chains locally. From 403ae8103cf08010651889e45e5afe17ad2a0d25 Mon Sep 17 00:00:00 2001 From: blaine Date: Fri, 6 Dec 2024 14:30:23 -0500 Subject: [PATCH 094/111] fix: Adding more global overrides to apply test. 
(#13297) --- .../deployer/integration_test/apply_test.go | 35 +++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index 1605219a8d9..1512da44713 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -13,6 +13,7 @@ import ( "maps" "math/big" "os" + "strings" "testing" "time" @@ -415,7 +416,7 @@ func testApplyExistingOPCM(t *testing.T, l1ChainID uint64, forkRPCUrl string, ve } } -func TestL2BlockTimeOverride(t *testing.T) { +func TestGlobalOverrides(t *testing.T) { op_e2e.InitParallel(t) kurtosisutil.Test(t) @@ -423,8 +424,28 @@ func TestL2BlockTimeOverride(t *testing.T) { defer cancel() opts, intent, st := setupGenesisChain(t, defaultL1ChainID) + expectedGasLimit := strings.ToLower("0x1C9C380") + expectedBaseFeeVaultRecipient := common.HexToAddress("0x0000000000000000000000000000000000000001") + expectedL1FeeVaultRecipient := common.HexToAddress("0x0000000000000000000000000000000000000002") + expectedSequencerFeeVaultRecipient := common.HexToAddress("0x0000000000000000000000000000000000000003") + expectedBaseFeeVaultMinimumWithdrawalAmount := strings.ToLower("0x1BC16D674EC80000") + expectedBaseFeeVaultWithdrawalNetwork := genesis.FromUint8(0) + expectedEnableGovernance := false + expectedGasPriceOracleBaseFeeScalar := uint32(1300) + expectedEIP1559Denominator := uint64(500) + expectedUseFaultProofs := false intent.GlobalDeployOverrides = map[string]interface{}{ - "l2BlockTime": float64(3), + "l2BlockTime": float64(3), + "l2GenesisBlockGasLimit": expectedGasLimit, + "baseFeeVaultRecipient": expectedBaseFeeVaultRecipient, + "l1FeeVaultRecipient": expectedL1FeeVaultRecipient, + "sequencerFeeVaultRecipient": expectedSequencerFeeVaultRecipient, + "baseFeeVaultMinimumWithdrawalAmount": expectedBaseFeeVaultMinimumWithdrawalAmount, + 
"baseFeeVaultWithdrawalNetwork": expectedBaseFeeVaultWithdrawalNetwork, + "enableGovernance": expectedEnableGovernance, + "gasPriceOracleBaseFeeScalar": expectedGasPriceOracleBaseFeeScalar, + "eip1559Denominator": expectedEIP1559Denominator, + "useFaultProofs": expectedUseFaultProofs, } require.NoError(t, deployer.ApplyPipeline(ctx, opts)) @@ -432,6 +453,16 @@ func TestL2BlockTimeOverride(t *testing.T) { cfg, err := state.CombineDeployConfig(intent, intent.Chains[0], st, st.Chains[0]) require.NoError(t, err) require.Equal(t, uint64(3), cfg.L2InitializationConfig.L2CoreDeployConfig.L2BlockTime, "L2 block time should be 3 seconds") + require.Equal(t, expectedGasLimit, strings.ToLower(cfg.L2InitializationConfig.L2GenesisBlockDeployConfig.L2GenesisBlockGasLimit.String()), "L2 Genesis Block Gas Limit should be 30_000_000") + require.Equal(t, expectedBaseFeeVaultRecipient, cfg.L2InitializationConfig.L2VaultsDeployConfig.BaseFeeVaultRecipient, "Base Fee Vault Recipient should be the expected address") + require.Equal(t, expectedL1FeeVaultRecipient, cfg.L2InitializationConfig.L2VaultsDeployConfig.L1FeeVaultRecipient, "L1 Fee Vault Recipient should be the expected address") + require.Equal(t, expectedSequencerFeeVaultRecipient, cfg.L2InitializationConfig.L2VaultsDeployConfig.SequencerFeeVaultRecipient, "Sequencer Fee Vault Recipient should be the expected address") + require.Equal(t, expectedBaseFeeVaultMinimumWithdrawalAmount, strings.ToLower(cfg.L2InitializationConfig.L2VaultsDeployConfig.BaseFeeVaultMinimumWithdrawalAmount.String()), "Base Fee Vault Minimum Withdrawal Amount should be the expected value") + require.Equal(t, expectedBaseFeeVaultWithdrawalNetwork, cfg.L2InitializationConfig.L2VaultsDeployConfig.BaseFeeVaultWithdrawalNetwork, "Base Fee Vault Withdrawal Network should be the expected value") + require.Equal(t, expectedEnableGovernance, cfg.L2InitializationConfig.GovernanceDeployConfig.EnableGovernance, "Governance should be disabled") + require.Equal(t, 
expectedGasPriceOracleBaseFeeScalar, cfg.L2InitializationConfig.GasPriceOracleDeployConfig.GasPriceOracleBaseFeeScalar, "Gas Price Oracle Base Fee Scalar should be the expected value") + require.Equal(t, expectedEIP1559Denominator, cfg.L2InitializationConfig.EIP1559DeployConfig.EIP1559Denominator, "EIP-1559 Denominator should be the expected value") + require.Equal(t, expectedUseFaultProofs, cfg.L2InitializationConfig.UseInterop, "Fault proofs should be enabled") } func TestApplyGenesisStrategy(t *testing.T) { From 63140ba2bd4fb407ce4155ffc0f81a7e07b00f39 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 12:42:58 -0700 Subject: [PATCH 095/111] op-deployer: Bootstrap superchain command (#13294) * op-deployer: Bootstrap superchain command Fixes https://github.com/ethereum-optimism/optimism/issues/13265. * fix test --- op-deployer/pkg/deployer/bootstrap/flags.go | 57 +++++ .../pkg/deployer/bootstrap/opcm_test.go | 6 +- .../pkg/deployer/bootstrap/superchain.go | 218 ++++++++++++++++++ .../pkg/deployer/bootstrap/superchain_test.go | 95 ++++++++ 4 files changed, 375 insertions(+), 1 deletion(-) create mode 100644 op-deployer/pkg/deployer/bootstrap/superchain.go create mode 100644 op-deployer/pkg/deployer/bootstrap/superchain_test.go diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index 20cf02b9337..3991829bd46 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -35,6 +35,12 @@ const ( DelayedWethProxyFlagName = "delayed-weth-proxy" DelayedWethImplFlagName = "delayed-weth-impl" ProxyOwnerFlagName = "proxy-owner" + SuperchainProxyAdminOwnerFlagName = "superchain-proxy-admin-owner" + ProtocolVersionsOwnerFlagName = "protocol-versions-owner" + GuardianFlagName = "guardian" + PausedFlagName = "paused" + RequiredProtocolVersionFlagName = "required-protocol-version" + RecommendedProtocolVersionFlagName = "recommended-protocol-version" ) var ( @@ 
-176,6 +182,39 @@ var ( EnvVars: deployer.PrefixEnvVar("PROXY_OWNER"), Value: common.Address{}.Hex(), } + SuperchainProxyAdminOwnerFlag = &cli.StringFlag{ + Name: SuperchainProxyAdminOwnerFlagName, + Usage: "Owner address for the superchain proxy admin", + EnvVars: deployer.PrefixEnvVar("SUPERCHAIN_PROXY_ADMIN_OWNER"), + Value: common.Address{}.Hex(), + } + ProtocolVersionsOwnerFlag = &cli.StringFlag{ + Name: ProtocolVersionsOwnerFlagName, + Usage: "Owner address for protocol versions", + EnvVars: deployer.PrefixEnvVar("PROTOCOL_VERSIONS_OWNER"), + Value: common.Address{}.Hex(), + } + GuardianFlag = &cli.StringFlag{ + Name: GuardianFlagName, + Usage: "Guardian address", + EnvVars: deployer.PrefixEnvVar("GUARDIAN"), + Value: common.Address{}.Hex(), + } + PausedFlag = &cli.BoolFlag{ + Name: PausedFlagName, + Usage: "Initial paused state", + EnvVars: deployer.PrefixEnvVar("PAUSED"), + } + RequiredProtocolVersionFlag = &cli.StringFlag{ + Name: RequiredProtocolVersionFlagName, + Usage: "Required protocol version (semver)", + EnvVars: deployer.PrefixEnvVar("REQUIRED_PROTOCOL_VERSION"), + } + RecommendedProtocolVersionFlag = &cli.StringFlag{ + Name: RecommendedProtocolVersionFlagName, + Usage: "Recommended protocol version (semver)", + EnvVars: deployer.PrefixEnvVar("RECOMMENDED_PROTOCOL_VERSION"), + } ) var OPCMFlags = []cli.Flag{ @@ -239,6 +278,18 @@ var ProxyFlags = []cli.Flag{ ProxyOwnerFlag, } +var SuperchainFlags = []cli.Flag{ + deployer.L1RPCURLFlag, + deployer.PrivateKeyFlag, + ArtifactsLocatorFlag, + SuperchainProxyAdminOwnerFlag, + ProtocolVersionsOwnerFlag, + GuardianFlag, + PausedFlag, + RequiredProtocolVersionFlag, + RecommendedProtocolVersionFlag, +} + var Commands = []*cli.Command{ { Name: "opcm", @@ -285,4 +336,10 @@ var Commands = []*cli.Command{ Flags: cliapp.ProtectFlags(ProxyFlags), Action: ProxyCLI, }, + { + Name: "superchain", + Usage: "Bootstrap the Superchain configuration", + Flags: cliapp.ProtectFlags(SuperchainFlags), + Action: SuperchainCLI, + 
}, } diff --git a/op-deployer/pkg/deployer/bootstrap/opcm_test.go b/op-deployer/pkg/deployer/bootstrap/opcm_test.go index 5a4d1e8de3d..4ee089229df 100644 --- a/op-deployer/pkg/deployer/bootstrap/opcm_test.go +++ b/op-deployer/pkg/deployer/bootstrap/opcm_test.go @@ -17,7 +17,7 @@ import ( var networks = []string{"mainnet", "sepolia"} -var versions = []string{"v1.8.0-rc.3"} +var versions = []string{"v1.8.0-rc.3", "v1.6.0"} func TestOPCMLiveChain(t *testing.T) { for _, network := range networks { @@ -27,6 +27,10 @@ func TestOPCMLiveChain(t *testing.T) { t.Skip("v1.8.0-rc.3 not supported on mainnet yet") } + if version == "v1.6.0" { + t.Skip("v1.6.0 not supported") + } + envVar := strings.ToUpper(network) + "_RPC_URL" rpcURL := os.Getenv(envVar) require.NotEmpty(t, rpcURL, "must specify RPC url via %s env var", envVar) diff --git a/op-deployer/pkg/deployer/bootstrap/superchain.go b/op-deployer/pkg/deployer/bootstrap/superchain.go new file mode 100644 index 00000000000..837f406c091 --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/superchain.go @@ -0,0 +1,218 @@ +package bootstrap + +import ( + "context" + "crypto/ecdsa" + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + 
"github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" +) + +type SuperchainConfig struct { + L1RPCUrl string + PrivateKey string + Logger log.Logger + ArtifactsLocator *artifacts.Locator + + privateKeyECDSA *ecdsa.PrivateKey + + SuperchainProxyAdminOwner common.Address + ProtocolVersionsOwner common.Address + Guardian common.Address + Paused bool + RequiredProtocolVersion params.ProtocolVersion + RecommendedProtocolVersion params.ProtocolVersion +} + +func (c *SuperchainConfig) Check() error { + if c.L1RPCUrl == "" { + return fmt.Errorf("l1RPCUrl must be specified") + } + + if c.PrivateKey == "" { + return fmt.Errorf("private key must be specified") + } + + privECDSA, err := crypto.HexToECDSA(strings.TrimPrefix(c.PrivateKey, "0x")) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + c.privateKeyECDSA = privECDSA + + if c.Logger == nil { + return fmt.Errorf("logger must be specified") + } + + if c.ArtifactsLocator == nil { + return fmt.Errorf("artifacts locator must be specified") + } + + if c.SuperchainProxyAdminOwner == (common.Address{}) { + return fmt.Errorf("superchain proxy admin owner must be specified") + } + + if c.ProtocolVersionsOwner == (common.Address{}) { + return fmt.Errorf("protocol versions owner must be specified") + } + + if c.Guardian == (common.Address{}) { + return fmt.Errorf("guardian must be specified") + } + + return nil +} + +func SuperchainCLI(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) + oplog.SetGlobalLogHandler(l.Handler()) + + l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) + privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) + artifactsLocator := new(artifacts.Locator) + if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { + return 
fmt.Errorf("failed to parse artifacts URL: %w", err) + } + + superchainProxyAdminOwner := common.HexToAddress(cliCtx.String(SuperchainProxyAdminOwnerFlagName)) + protocolVersionsOwner := common.HexToAddress(cliCtx.String(ProtocolVersionsOwnerFlagName)) + guardian := common.HexToAddress(cliCtx.String(GuardianFlagName)) + paused := cliCtx.Bool(PausedFlagName) + requiredVersionStr := cliCtx.String(RequiredProtocolVersionFlagName) + recommendedVersionStr := cliCtx.String(RecommendedProtocolVersionFlagName) + + cfg := SuperchainConfig{ + L1RPCUrl: l1RPCUrl, + PrivateKey: privateKey, + Logger: l, + ArtifactsLocator: artifactsLocator, + SuperchainProxyAdminOwner: superchainProxyAdminOwner, + ProtocolVersionsOwner: protocolVersionsOwner, + Guardian: guardian, + Paused: paused, + } + + if err := cfg.RequiredProtocolVersion.UnmarshalText([]byte(requiredVersionStr)); err != nil { + return fmt.Errorf("failed to parse required protocol version: %w", err) + } + if err := cfg.RecommendedProtocolVersion.UnmarshalText([]byte(recommendedVersionStr)); err != nil { + return fmt.Errorf("failed to parse required protocol version: %w", err) + } + + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + + dso, err := Superchain(ctx, cfg) + if err != nil { + return fmt.Errorf("failed to deploy superchain: %w", err) + } + + if err := jsonutil.WriteJSON(dso, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil +} + +func Superchain(ctx context.Context, cfg SuperchainConfig) (opcm.DeploySuperchainOutput, error) { + var dso opcm.DeploySuperchainOutput + + if err := cfg.Check(); err != nil { + return dso, fmt.Errorf("invalid config for Superchain: %w", err) + } + + lgr := cfg.Logger + progressor := func(curr, total int64) { + lgr.Info("artifacts download progress", "current", curr, "total", total) + } + + artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + if err != nil { + return dso, 
fmt.Errorf("failed to download artifacts: %w", err) + } + defer func() { + if err := cleanup(); err != nil { + lgr.Warn("failed to clean up artifacts", "err", err) + } + }() + + l1Client, err := ethclient.Dial(cfg.L1RPCUrl) + if err != nil { + return dso, fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + chainID, err := l1Client.ChainID(ctx) + if err != nil { + return dso, fmt.Errorf("failed to get chain ID: %w", err) + } + + signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) + chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) + + bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: chainID, + Client: l1Client, + Signer: signer, + From: chainDeployer, + }) + if err != nil { + return dso, fmt.Errorf("failed to create broadcaster: %w", err) + } + + l1RPC, err := rpc.Dial(cfg.L1RPCUrl) + if err != nil { + return dso, fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + l1Host, err := env.DefaultForkedScriptHost( + ctx, + bcaster, + lgr, + chainDeployer, + artifactsFS, + l1RPC, + ) + if err != nil { + return dso, fmt.Errorf("failed to create script host: %w", err) + } + + dso, err = opcm.DeploySuperchain( + l1Host, + opcm.DeploySuperchainInput{ + SuperchainProxyAdminOwner: cfg.SuperchainProxyAdminOwner, + ProtocolVersionsOwner: cfg.ProtocolVersionsOwner, + Guardian: cfg.Guardian, + Paused: cfg.Paused, + RequiredProtocolVersion: cfg.RequiredProtocolVersion, + RecommendedProtocolVersion: cfg.RecommendedProtocolVersion, + }, + ) + if err != nil { + return dso, fmt.Errorf("error deploying superchain: %w", err) + } + + if _, err := bcaster.Broadcast(ctx); err != nil { + return dso, fmt.Errorf("failed to broadcast: %w", err) + } + + lgr.Info("deployed superchain configuration") + + return dso, nil +} diff --git a/op-deployer/pkg/deployer/bootstrap/superchain_test.go b/op-deployer/pkg/deployer/bootstrap/superchain_test.go new file mode 100644 
index 00000000000..f2198102acb --- /dev/null +++ b/op-deployer/pkg/deployer/bootstrap/superchain_test.go @@ -0,0 +1,95 @@ +package bootstrap + +import ( + "context" + "log/slog" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/retryproxy" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils/anvil" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func TestSuperchain(t *testing.T) { + for _, network := range networks { + for _, version := range versions { + t.Run(network+"-"+version, func(t *testing.T) { + envVar := strings.ToUpper(network) + "_RPC_URL" + rpcURL := os.Getenv(envVar) + require.NotEmpty(t, rpcURL, "must specify RPC url via %s env var", envVar) + testSuperchain(t, rpcURL, version) + }) + } + } +} + +func testSuperchain(t *testing.T, forkRPCURL string, version string) { + t.Parallel() + + if forkRPCURL == "" { + t.Skip("forkRPCURL not set") + } + + lgr := testlog.Logger(t, slog.LevelDebug) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + retryProxy := retryproxy.New(lgr, forkRPCURL) + require.NoError(t, retryProxy.Start()) + t.Cleanup(func() { + require.NoError(t, retryProxy.Stop()) + }) + + runner, err := anvil.New( + retryProxy.Endpoint(), + lgr, + ) + require.NoError(t, err) + + require.NoError(t, runner.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, runner.Stop()) + }) + + out, err := Superchain(ctx, SuperchainConfig{ + L1RPCUrl: runner.RPCUrl(), + PrivateKey: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", + ArtifactsLocator: artifacts.MustNewLocatorFromTag("op-contracts/" + version), + Logger: lgr, + + SuperchainProxyAdminOwner: common.Address{'S'}, + 
ProtocolVersionsOwner: common.Address{'P'}, + Guardian: common.Address{'G'}, + Paused: false, + RequiredProtocolVersion: params.ProtocolVersionV0{Major: 1}.Encode(), + RecommendedProtocolVersion: params.ProtocolVersionV0{Major: 2}.Encode(), + }) + require.NoError(t, err) + + client, err := ethclient.Dial(runner.RPCUrl()) + require.NoError(t, err) + + addresses := []common.Address{ + out.SuperchainConfigProxy, + out.SuperchainConfigImpl, + out.SuperchainProxyAdmin, + out.ProtocolVersionsImpl, + out.ProtocolVersionsProxy, + } + for _, addr := range addresses { + require.NotEmpty(t, addr) + + code, err := client.CodeAt(ctx, addr, nil) + require.NoError(t, err) + require.NotEmpty(t, code) + } +} From e1dbb88060d20553ccaa3a45ee86ea3ad6d3541d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Dec 2024 12:45:25 -0700 Subject: [PATCH 096/111] dependabot(gomod): bump github.com/holiman/uint256 from 1.3.1 to 1.3.2 (#13298) Bumps [github.com/holiman/uint256](https://github.com/holiman/uint256) from 1.3.1 to 1.3.2. - [Release notes](https://github.com/holiman/uint256/releases) - [Commits](https://github.com/holiman/uint256/compare/v1.3.1...v1.3.2) --- updated-dependencies: - dependency-name: github.com/holiman/uint256 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e3b57ace633..b28cb70089b 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/raft v1.7.1 github.com/hashicorp/raft-boltdb/v2 v2.3.0 - github.com/holiman/uint256 v1.3.1 + github.com/holiman/uint256 v1.3.2 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.0 github.com/klauspost/compress v1.17.11 diff --git a/go.sum b/go.sum index bcae4d056f0..5ee5e903e71 100644 --- a/go.sum +++ b/go.sum @@ -368,8 +368,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= -github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= From b4b90adf20b37bd1ed46fec25cd13507dfb617a9 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 12:46:17 -0700 Subject: [PATCH 097/111] ctb: Reduce heavy fuzz runs in CI (#13296) --- packages/contracts-bedrock/foundry.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index b77e7bca431..167826343ca 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -98,12 +98,12 @@ depth = 32 [profile.ciheavy.fuzz] runs = 20000 -timeout = 600 +timeout = 300 [profile.ciheavy.invariant] runs = 128 depth = 512 -timeout = 600 +timeout = 300 ################################################################ # PROFILE: LITE # From f6d5ff4bb0b385c1cd2bbe67e8fe231c5a01226b Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Fri, 6 Dec 2024 22:52:22 +0300 Subject: [PATCH 098/111] docs: Clarify directory reference Update README.md (#13293) --- bedrock-devnet/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bedrock-devnet/README.md b/bedrock-devnet/README.md index dccb8cee9d3..f3ed9e2ae85 100644 --- a/bedrock-devnet/README.md +++ b/bedrock-devnet/README.md @@ -2,4 +2,4 @@ This is a utility for running a local Bedrock devnet. It is designed to replace the legacy Bash-based devnet runner as part of a progressive migration away from Bash automation. -The easiest way to invoke this script is to run `make devnet-up` from the root of this repository. Otherwise, to use this script run `python3 main.py --monorepo-dir=`. You may need to set `PYTHONPATH` to this directory if you are invoking the script from somewhere other than `bedrock-devnet`. +The easiest way to invoke this script is to run `make devnet-up` from the root of this repository. Otherwise, to use this script run `python3 main.py --monorepo-dir=`. You may need to set `PYTHONPATH` to this directory if you invoke the script from somewhere other than the `bedrock-devnet` directory. 
From cf1ce689d66334d353bd295b1502573143c45c08 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Fri, 6 Dec 2024 15:18:46 -0500 Subject: [PATCH 099/111] feat: remove unused systemConfigs mapping from OPCM (#13295) * feat: remove unused systemConfigs mapping from OPCM * fix semver lock --- .../snapshots/abi/OPContractsManager.json | 19 ------------------- .../abi/OPContractsManagerInterop.json | 19 ------------------- .../snapshots/semver-lock.json | 4 ++-- .../storageLayout/OPContractsManager.json | 11 ++--------- .../OPContractsManagerInterop.json | 11 ++--------- .../src/L1/OPContractsManager.sol | 11 ++++------- .../test/universal/Specs.t.sol | 2 -- 7 files changed, 10 insertions(+), 67 deletions(-) diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index b5758eca610..a541ae2e101 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -500,25 +500,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "name": "systemConfigs", - "outputs": [ - { - "internalType": "contract ISystemConfig", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "version", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index b5758eca610..a541ae2e101 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -500,25 +500,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "name": "systemConfigs", - "outputs": [ - { - 
"internalType": "contract ISystemConfig", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "version", diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index b31b3f287e7..dcfdaeb982f 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -20,8 +20,8 @@ "sourceCodeHash": "0x8aafeffb41332fddf2fb1ef4fc033bd1f323cdc5b199c6951da73e3cb86276e6" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x320a6d4417eb0d2597b4c6f4caa37bbf9e35f38d3ad27ddb57f149c680e6afff", - "sourceCodeHash": "0x3a6ac40939df1d9f4c88caa4e6139454de5e0ad4a241b27d1bab65a3ae44610d" + "initCodeHash": "0x1eb781ca3f3609dbf13ecb9fe34063155871510b148ac63348a4947858c196ba", + "sourceCodeHash": "0xf1e9fe3f37414e1f1824ce18cd6164c33ca64527b419d6fa52690b6351534822" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xa8b2f8a6d1092c5e64529736462ebb35daa9ea9e67585f7de8e3e5394682ee64", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index aa8148b34cb..5eae73490b1 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -6,25 +6,18 @@ "slot": "0", "type": "string" }, - { - "bytes": "32", - "label": "systemConfigs", - "offset": 0, - "slot": "1", - "type": "mapping(uint256 => contract ISystemConfig)" - }, { "bytes": "256", "label": "blueprint", "offset": 0, - "slot": "2", + "slot": "1", "type": "struct OPContractsManager.Blueprints" }, { "bytes": "288", "label": "implementation", "offset": 0, - "slot": "10", + "slot": "9", "type": "struct OPContractsManager.Implementations" } ] \ No newline at end of file diff --git 
a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json index aa8148b34cb..5eae73490b1 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json @@ -6,25 +6,18 @@ "slot": "0", "type": "string" }, - { - "bytes": "32", - "label": "systemConfigs", - "offset": 0, - "slot": "1", - "type": "mapping(uint256 => contract ISystemConfig)" - }, { "bytes": "256", "label": "blueprint", "offset": 0, - "slot": "2", + "slot": "1", "type": "struct OPContractsManager.Blueprints" }, { "bytes": "288", "label": "implementation", "offset": 0, - "slot": "10", + "slot": "9", "type": "struct OPContractsManager.Implementations" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index aad48374704..fb7da0aa6ed 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -114,8 +114,8 @@ contract OPContractsManager is ISemver { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.23 - string public constant version = "1.0.0-beta.23"; + /// @custom:semver 1.0.0-beta.24 + string public constant version = "1.0.0-beta.24"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -127,13 +127,10 @@ contract OPContractsManager is ISemver { /// @notice Address of the ProtocolVersions contract shared by all chains. IProtocolVersions public immutable protocolVersions; - // @notice L1 smart contracts release deployed by this version of OPCM. 
This is used in opcm to signal which version - // of the L1 smart contracts is deployed. It takes the format of `op-contracts/vX.Y.Z`. + /// @notice L1 smart contracts release deployed by this version of OPCM. This is used in opcm to signal which + /// version of the L1 smart contracts is deployed. It takes the format of `op-contracts/vX.Y.Z`. string public l1ContractsRelease; - /// @notice Maps an L2 Chain ID to the SystemConfig for that chain. - mapping(uint256 => ISystemConfig) public systemConfigs; - /// @notice Addresses of the Blueprint contracts. /// This is internal because if public the autogenerated getter method would return a tuple of /// addresses, but we want it to return a struct. diff --git a/packages/contracts-bedrock/test/universal/Specs.t.sol b/packages/contracts-bedrock/test/universal/Specs.t.sol index d8c48849875..cbd00c0569e 100644 --- a/packages/contracts-bedrock/test/universal/Specs.t.sol +++ b/packages/contracts-bedrock/test/universal/Specs.t.sol @@ -838,7 +838,6 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "OPContractsManager", _sel: _getSel("superchainConfig()") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("protocolVersions()") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("l1ContractsRelease()") }); - _addSpec({ _name: "OPContractsManager", _sel: _getSel("systemConfigs(uint256)") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("OUTPUT_VERSION()") }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.deploy.selector }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.blueprints.selector }); @@ -850,7 +849,6 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("superchainConfig()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("protocolVersions()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("l1ContractsRelease()") }); - _addSpec({ _name: 
"OPContractsManagerInterop", _sel: _getSel("systemConfigs(uint256)") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("OUTPUT_VERSION()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.deploy.selector }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.blueprints.selector }); From 4317c093fbe951c57c0e36037a9aa281e8e0795c Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Sat, 7 Dec 2024 04:46:12 +0800 Subject: [PATCH 100/111] refactor: single source of truth for `DerivationVersion` (#11901) * single source of truth for DerivationVersion * op-node/rollup: fix imports --------- Co-authored-by: protolambda --- op-alt-da/commitment.go | 5 +++-- op-alt-da/commitment_test.go | 3 ++- op-alt-da/params.go | 5 ----- op-batcher/batcher/tx_data.go | 5 +++-- op-e2e/actions/helpers/l2_batcher.go | 5 +++-- op-node/rollup/derive/altda_data_source.go | 3 ++- op-node/rollup/derive/channel_out.go | 3 ++- op-node/rollup/derive/frame.go | 4 +++- op-node/rollup/derive/frame_queue_test.go | 8 +++++--- op-node/rollup/derive/frame_test.go | 8 +++++--- op-node/rollup/derive/params.go | 7 ------- op-node/rollup/derive/params/versions.go | 6 ++++++ 12 files changed, 34 insertions(+), 28 deletions(-) create mode 100644 op-node/rollup/derive/params/versions.go diff --git a/op-alt-da/commitment.go b/op-alt-da/commitment.go index a6fa5424665..157eb8b1854 100644 --- a/op-alt-da/commitment.go +++ b/op-alt-da/commitment.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum/go-ethereum/crypto" ) @@ -115,7 +116,7 @@ func (c Keccak256Commitment) Encode() []byte { // TxData adds an extra version byte to signal it's a commitment. func (c Keccak256Commitment) TxData() []byte { - return append([]byte{TxDataVersion1}, c.Encode()...) + return append([]byte{params.DerivationVersion1}, c.Encode()...) 
} // Verify checks if the commitment matches the given input. @@ -155,7 +156,7 @@ func (c GenericCommitment) Encode() []byte { // TxData adds an extra version byte to signal it's a commitment. func (c GenericCommitment) TxData() []byte { - return append([]byte{TxDataVersion1}, c.Encode()...) + return append([]byte{params.DerivationVersion1}, c.Encode()...) } // Verify always returns true for GenericCommitment because the DA Server must validate the data before returning it to the op-node. diff --git a/op-alt-da/commitment_test.go b/op-alt-da/commitment_test.go index 52abc5d8865..e4656133d69 100644 --- a/op-alt-da/commitment_test.go +++ b/op-alt-da/commitment_test.go @@ -3,6 +3,7 @@ package altda import ( "testing" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/stretchr/testify/require" ) @@ -54,7 +55,7 @@ func TestCommitmentData(t *testing.T) { // Test that reencoding the commitment returns the same data require.Equal(t, tc.commData, comm.Encode()) // Test that TxData() returns the same data as the original, prepended with a version byte - require.Equal(t, append([]byte{TxDataVersion1}, tc.commData...), comm.TxData()) + require.Equal(t, append([]byte{params.DerivationVersion1}, tc.commData...), comm.TxData()) // Test that Verify() returns no error for the correct data require.NoError(t, comm.Verify(tc.commData)) diff --git a/op-alt-da/params.go b/op-alt-da/params.go index 86339200f7f..bc0762ba131 100644 --- a/op-alt-da/params.go +++ b/op-alt-da/params.go @@ -4,8 +4,3 @@ package altda // challenge in the Data Availability Challenge contract. Value in number of bytes. // This value can only be changed in a hard fork. const MaxInputSize = 130672 - -// TxDataVersion1 is the version number for batcher transactions containing -// altDA commitments. It should not collide with DerivationVersion which is still -// used downstream when parsing the frames. 
-const TxDataVersion1 = 1 diff --git a/op-batcher/batcher/tx_data.go b/op-batcher/batcher/tx_data.go index d0f5474fd5f..0165f85f079 100644 --- a/op-batcher/batcher/tx_data.go +++ b/op-batcher/batcher/tx_data.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -35,7 +36,7 @@ func (td *txData) ID() txID { // It's a version byte (0) followed by the concatenated frames for this transaction. func (td *txData) CallData() []byte { data := make([]byte, 1, 1+td.Len()) - data[0] = derive.DerivationVersion0 + data[0] = params.DerivationVersion0 for _, f := range td.frames { data = append(data, f.data...) } @@ -46,7 +47,7 @@ func (td *txData) Blobs() ([]*eth.Blob, error) { blobs := make([]*eth.Blob, 0, len(td.frames)) for _, f := range td.frames { var blob eth.Blob - if err := blob.FromData(append([]byte{derive.DerivationVersion0}, f.data...)); err != nil { + if err := blob.FromData(append([]byte{params.DerivationVersion0}, f.data...)); err != nil { return nil, err } blobs = append(blobs, &blob) diff --git a/op-e2e/actions/helpers/l2_batcher.go b/op-e2e/actions/helpers/l2_batcher.go index 9fc9971a26e..88fde9bd9a9 100644 --- a/op-e2e/actions/helpers/l2_batcher.go +++ b/op-e2e/actions/helpers/l2_batcher.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + derive_params "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txmgr" ) @@ -295,7 +296,7 @@ func (s *L2Batcher) ReadNextOutputFrame(t Testing) []byte { } // Collect the output frame data := new(bytes.Buffer) - data.WriteByte(derive.DerivationVersion0) + 
data.WriteByte(derive_params.DerivationVersion0) // subtract one, to account for the version byte if _, err := s.L2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); err == io.EOF { s.L2ChannelOut = nil @@ -400,7 +401,7 @@ func (s *L2Batcher) ActL2BatchSubmitMultiBlob(t Testing, numBlobs int) { blobs := make([]*eth.Blob, numBlobs) for i := 0; i < numBlobs; i++ { data := new(bytes.Buffer) - data.WriteByte(derive.DerivationVersion0) + data.WriteByte(derive_params.DerivationVersion0) // write only a few bytes to all but the last blob l := uint64(derive.FrameV0OverHeadSize + 4) // 4 bytes content if i == numBlobs-1 { diff --git a/op-node/rollup/derive/altda_data_source.go b/op-node/rollup/derive/altda_data_source.go index 829d9399a4d..2945a2a9e57 100644 --- a/op-node/rollup/derive/altda_data_source.go +++ b/op-node/rollup/derive/altda_data_source.go @@ -6,6 +6,7 @@ import ( "fmt" altda "github.com/ethereum-optimism/optimism/op-alt-da" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/log" ) @@ -56,7 +57,7 @@ func (s *AltDADataSource) Next(ctx context.Context) (eth.Data, error) { } // If the tx data type is not altDA, we forward it downstream to let the next // steps validate and potentially parse it as L1 DA inputs. 
- if data[0] != altda.TxDataVersion1 { + if data[0] != params.DerivationVersion1 { return data, nil } diff --git a/op-node/rollup/derive/channel_out.go b/op-node/rollup/derive/channel_out.go index 87f39d2eb09..7a97d7708ef 100644 --- a/op-node/rollup/derive/channel_out.go +++ b/op-node/rollup/derive/channel_out.go @@ -8,6 +8,7 @@ import ( "io" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" @@ -275,7 +276,7 @@ func ForceCloseTxData(frames []Frame) ([]byte, error) { } var out bytes.Buffer - out.WriteByte(DerivationVersion0) + out.WriteByte(params.DerivationVersion0) if !closed { f := Frame{ diff --git a/op-node/rollup/derive/frame.go b/op-node/rollup/derive/frame.go index 0baa1e120a1..e18562560e7 100644 --- a/op-node/rollup/derive/frame.go +++ b/op-node/rollup/derive/frame.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" "io" + + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" ) // Frames cannot be larger than 1 MB. 
@@ -130,7 +132,7 @@ func ParseFrames(data []byte) ([]Frame, error) { if len(data) == 0 { return nil, errors.New("data array must not be empty") } - if data[0] != DerivationVersion0 { + if data[0] != params.DerivationVersion0 { return nil, fmt.Errorf("invalid derivation format byte: got %d", data[0]) } buf := bytes.NewBuffer(data[1:]) diff --git a/op-node/rollup/derive/frame_queue_test.go b/op-node/rollup/derive/frame_queue_test.go index a0a57f4f387..14ca377f6a6 100644 --- a/op-node/rollup/derive/frame_queue_test.go +++ b/op-node/rollup/derive/frame_queue_test.go @@ -7,12 +7,14 @@ import ( "log/slog" "testing" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive/mocks" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" ) func TestPruneFrameQueue(t *testing.T) { @@ -126,7 +128,7 @@ func testFrameQueue_NextFrame(t *testing.T, holocene bool) { } var inBuf bytes.Buffer - inBuf.WriteByte(DerivationVersion0) + inBuf.WriteByte(params.DerivationVersion0) for _, f := range inFrames { require.NoError(t, f.MarshalBinary(&inBuf)) } diff --git a/op-node/rollup/derive/frame_test.go b/op-node/rollup/derive/frame_test.go index 240cc0a58d8..277e976fcca 100644 --- a/op-node/rollup/derive/frame_test.go +++ b/op-node/rollup/derive/frame_test.go @@ -9,8 +9,10 @@ import ( "testing" "time" - "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum-optimism/optimism/op-service/testutils" ) func FuzzFrameUnmarshalBinary(f *testing.F) { @@ -164,7 +166,7 @@ func TestParseFramesInvalidVer(t *testing.T) { } func 
TestParseFramesOnlyVersion(t *testing.T) { - frames, err := ParseFrames([]byte{DerivationVersion0}) + frames, err := ParseFrames([]byte{params.DerivationVersion0}) require.Empty(t, frames) require.Error(t, err) } @@ -206,7 +208,7 @@ func TestParseFramesTruncated(t *testing.T) { // frames. func txMarshalFrames(frames []Frame) ([]byte, error) { var data bytes.Buffer - if err := data.WriteByte(DerivationVersion0); err != nil { + if err := data.WriteByte(params.DerivationVersion0); err != nil { return nil, err } for _, frame := range frames { diff --git a/op-node/rollup/derive/params.go b/op-node/rollup/derive/params.go index 385c4087930..c4511d95088 100644 --- a/op-node/rollup/derive/params.go +++ b/op-node/rollup/derive/params.go @@ -4,8 +4,6 @@ import ( "encoding/hex" "errors" "fmt" - - altda "github.com/ethereum-optimism/optimism/op-alt-da" ) // count the tagging info as 200 in terms of buffer size. @@ -19,11 +17,6 @@ func frameSize(frame Frame) uint64 { return uint64(len(frame.Data)) + frameOverhead } -const DerivationVersion0 = 0 - -// DerivationVersion1 is reserved for batcher transactions containing altDA commitments. -const DerivationVersion1 = altda.TxDataVersion1 - // MaxSpanBatchElementCount is the maximum number of blocks, transactions in total, // or transaction per block allowed in a span batch. const MaxSpanBatchElementCount = 10_000_000 diff --git a/op-node/rollup/derive/params/versions.go b/op-node/rollup/derive/params/versions.go new file mode 100644 index 00000000000..0c723e8a9c2 --- /dev/null +++ b/op-node/rollup/derive/params/versions.go @@ -0,0 +1,6 @@ +package params + +const DerivationVersion0 = 0 + +// DerivationVersion1 is reserved for batcher transactions containing altDA commitments. 
+const DerivationVersion1 = 1 From 6de3799c7f13e4f5699a29be43505ac05786769d Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 14:39:53 -0700 Subject: [PATCH 101/111] ci: Reduce fuzz runs in basic CI (#13299) * ci: Specify number of jobs in contract tests * reduce runs * decrease runs * don't rebuild on semver lock * remove -j --- .circleci/config.yml | 2 +- packages/contracts-bedrock/foundry.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1cdb7c4fe9d..3ae5b5f7848 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -733,7 +733,7 @@ jobs: - run-contracts-check: command: semgrep - run-contracts-check: - command: semver-lock + command: semver-lock-no-build - run-contracts-check: command: semver-diff-check-no-build - run-contracts-check: diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 167826343ca..181391e7c49 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -72,10 +72,10 @@ wrap_comments=true ################################################################ [profile.ci.fuzz] -runs = 512 +runs = 128 [profile.ci.invariant] -runs = 256 +runs = 64 depth = 32 ################################################################ From a2e7d852a8a16e526564401c41abad56173790d3 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Fri, 6 Dec 2024 17:10:28 -0500 Subject: [PATCH 102/111] maint: bump forge version (#13301) Bumps versions being used for forge, cast, and anvil. 
--- mise.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mise.toml b/mise.toml index 5fb473de889..0dba715900c 100644 --- a/mise.toml +++ b/mise.toml @@ -31,9 +31,9 @@ just = "1.37.0" # Foundry dependencies # Foundry is a special case because it supplies multiple binaries at the same # GitHub release, so we need to use the aliasing trick to get mise to not error -forge = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" -cast = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" -anvil = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +forge = "nightly-e52076714ace23c7a68e14f0048a40be3c6c8f0b" +cast = "nightly-e52076714ace23c7a68e14f0048a40be3c6c8f0b" +anvil = "nightly-e52076714ace23c7a68e14f0048a40be3c6c8f0b" # Fake dependencies # Put things here if you need to track versions of tools or projects that can't From d6106dde5551252b96f9fd24b4fe46e01313e316 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 15:25:31 -0700 Subject: [PATCH 103/111] op-deployer: Support output files in bootstrap (#13302) * op-deployer: Support output files in bootstrap * lint --- .../pkg/deployer/bootstrap/asterisc.go | 40 +++++++++------- .../pkg/deployer/bootstrap/delayed_weth.go | 46 +++++++++++-------- .../pkg/deployer/bootstrap/dispute_game.go | 44 ++++++++++-------- op-deployer/pkg/deployer/bootstrap/flags.go | 13 ++++++ op-deployer/pkg/deployer/bootstrap/mips.go | 37 +++++++++------ op-deployer/pkg/deployer/bootstrap/opcm.go | 3 +- op-deployer/pkg/deployer/bootstrap/proxy.go | 40 +++++++++------- .../pkg/deployer/bootstrap/superchain.go | 3 +- 8 files changed, 137 insertions(+), 89 deletions(-) diff --git a/op-deployer/pkg/deployer/bootstrap/asterisc.go b/op-deployer/pkg/deployer/bootstrap/asterisc.go index e7cd4e6036c..22039f65faf 100644 --- a/op-deployer/pkg/deployer/bootstrap/asterisc.go +++ b/op-deployer/pkg/deployer/bootstrap/asterisc.go @@ -73,6 +73,7 @@ func AsteriscCLI(cliCtx *cli.Context) error { l := 
oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) oplog.SetGlobalLogHandler(l.Handler()) + outfile := cliCtx.String(OutfileFlagName) l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) @@ -85,18 +86,27 @@ func AsteriscCLI(cliCtx *cli.Context) error { ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - return Asterisc(ctx, AsteriscConfig{ + dao, err := Asterisc(ctx, AsteriscConfig{ L1RPCUrl: l1RPCUrl, PrivateKey: privateKey, Logger: l, ArtifactsLocator: artifactsLocator, PreimageOracle: preimageOracle, }) + if err != nil { + return fmt.Errorf("failed to deploy Asterisc: %w", err) + } + + if err := jsonutil.WriteJSON(dao, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil } -func Asterisc(ctx context.Context, cfg AsteriscConfig) error { +func Asterisc(ctx context.Context, cfg AsteriscConfig) (opcm.DeployAsteriscOutput, error) { + var dao opcm.DeployAsteriscOutput if err := cfg.Check(); err != nil { - return fmt.Errorf("invalid config for Asterisc: %w", err) + return dao, fmt.Errorf("invalid config for Asterisc: %w", err) } lgr := cfg.Logger @@ -106,7 +116,7 @@ func Asterisc(ctx context.Context, cfg AsteriscConfig) error { artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) if err != nil { - return fmt.Errorf("failed to download artifacts: %w", err) + return dao, fmt.Errorf("failed to download artifacts: %w", err) } defer func() { if err := cleanup(); err != nil { @@ -116,12 +126,12 @@ func Asterisc(ctx context.Context, cfg AsteriscConfig) error { l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dao, fmt.Errorf("failed to connect to L1 RPC: %w", err) } chainID, err := l1Client.ChainID(ctx) if err != nil { - return fmt.Errorf("failed to get chain ID: %w", 
err) + return dao, fmt.Errorf("failed to get chain ID: %w", err) } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) @@ -135,12 +145,12 @@ func Asterisc(ctx context.Context, cfg AsteriscConfig) error { From: chainDeployer, }) if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) + return dao, fmt.Errorf("failed to create broadcaster: %w", err) } l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dao, fmt.Errorf("failed to connect to L1 RPC: %w", err) } l1Host, err := env.DefaultForkedScriptHost( @@ -152,27 +162,23 @@ func Asterisc(ctx context.Context, cfg AsteriscConfig) error { l1RPC, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return dao, fmt.Errorf("failed to create script host: %w", err) } - dgo, err := opcm.DeployAsterisc( + dao, err = opcm.DeployAsterisc( l1Host, opcm.DeployAsteriscInput{ PreimageOracle: cfg.PreimageOracle, }, ) if err != nil { - return fmt.Errorf("error deploying asterisc VM: %w", err) + return dao, fmt.Errorf("error deploying asterisc VM: %w", err) } if _, err := bcaster.Broadcast(ctx); err != nil { - return fmt.Errorf("failed to broadcast: %w", err) + return dao, fmt.Errorf("failed to broadcast: %w", err) } lgr.Info("deployed asterisc VM") - - if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOut()); err != nil { - return fmt.Errorf("failed to write output: %w", err) - } - return nil + return dao, nil } diff --git a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go index 9c3e0bc07fa..71fea24cc73 100644 --- a/op-deployer/pkg/deployer/bootstrap/delayed_weth.go +++ b/op-deployer/pkg/deployer/bootstrap/delayed_weth.go @@ -71,6 +71,7 @@ func DelayedWETHCLI(cliCtx *cli.Context) error { l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) oplog.SetGlobalLogHandler(l.Handler()) + outfile := cliCtx.String(OutfileFlagName) 
config, err := NewDelayedWETHConfigFromClI(cliCtx, l) if err != nil { return err @@ -78,7 +79,15 @@ func DelayedWETHCLI(cliCtx *cli.Context) error { ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - return DelayedWETH(ctx, config) + dwo, err := DelayedWETH(ctx, config) + if err != nil { + return fmt.Errorf("failed to deploy DelayedWETH: %w", err) + } + + if err := jsonutil.WriteJSON(dwo, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil } func NewDelayedWETHConfigFromClI(cliCtx *cli.Context, l log.Logger) (DelayedWETHConfig, error) { @@ -100,9 +109,10 @@ func NewDelayedWETHConfigFromClI(cliCtx *cli.Context, l log.Logger) (DelayedWETH return config, nil } -func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { +func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) (opcm.DeployDelayedWETHOutput, error) { + var dwo opcm.DeployDelayedWETHOutput if err := cfg.Check(); err != nil { - return fmt.Errorf("invalid config for DelayedWETH: %w", err) + return dwo, fmt.Errorf("invalid config for DelayedWETH: %w", err) } lgr := cfg.Logger @@ -112,7 +122,7 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { artifactsFS, cleanup, err := artifacts2.Download(ctx, cfg.ArtifactsLocator, progressor) if err != nil { - return fmt.Errorf("failed to download artifacts: %w", err) + return dwo, fmt.Errorf("failed to download artifacts: %w", err) } defer func() { if err := cleanup(); err != nil { @@ -122,26 +132,26 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dwo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } chainID, err := l1Client.ChainID(ctx) if err != nil { - return fmt.Errorf("failed to get chain ID: %w", err) + return dwo, fmt.Errorf("failed to get chain ID: %w", err) } chainIDU64 := 
chainID.Uint64() superCfg, err := standard.SuperchainFor(chainIDU64) if err != nil { - return fmt.Errorf("error getting superchain config: %w", err) + return dwo, fmt.Errorf("error getting superchain config: %w", err) } proxyAdmin, err := standard.ManagerOwnerAddrFor(chainIDU64) if err != nil { - return fmt.Errorf("error getting superchain proxy admin: %w", err) + return dwo, fmt.Errorf("error getting superchain proxy admin: %w", err) } delayedWethOwner, err := standard.SystemOwnerAddrFor(chainIDU64) if err != nil { - return fmt.Errorf("error getting superchain system owner: %w", err) + return dwo, fmt.Errorf("error getting superchain system owner: %w", err) } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) @@ -155,12 +165,12 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { From: chainDeployer, }) if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) + return dwo, fmt.Errorf("failed to create broadcaster: %w", err) } l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dwo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } host, err := env.DefaultForkedScriptHost( @@ -172,7 +182,7 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { l1RPC, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return dwo, fmt.Errorf("failed to create script host: %w", err) } var release string @@ -186,7 +196,7 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { superchainConfigAddr := common.Address(*superCfg.Config.SuperchainConfigAddr) - dwo, err := opcm.DeployDelayedWETH( + dwo, err = opcm.DeployDelayedWETH( host, opcm.DeployDelayedWETHInput{ Release: release, @@ -198,17 +208,13 @@ func DelayedWETH(ctx context.Context, cfg DelayedWETHConfig) error { }, ) if err != nil { - return fmt.Errorf("error deploying DelayedWETH: %w", err) + return dwo, 
fmt.Errorf("error deploying DelayedWETH: %w", err) } if _, err := bcaster.Broadcast(ctx); err != nil { - return fmt.Errorf("failed to broadcast: %w", err) + return dwo, fmt.Errorf("failed to broadcast: %w", err) } lgr.Info("deployed DelayedWETH") - - if err := jsonutil.WriteJSON(dwo, ioutil.ToStdOut()); err != nil { - return fmt.Errorf("failed to write output: %w", err) - } - return nil + return dwo, nil } diff --git a/op-deployer/pkg/deployer/bootstrap/dispute_game.go b/op-deployer/pkg/deployer/bootstrap/dispute_game.go index 3f1d354f28f..acadd1ab9f9 100644 --- a/op-deployer/pkg/deployer/bootstrap/dispute_game.go +++ b/op-deployer/pkg/deployer/bootstrap/dispute_game.go @@ -83,12 +83,22 @@ func DisputeGameCLI(cliCtx *cli.Context) error { l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) oplog.SetGlobalLogHandler(l.Handler()) + outfile := cliCtx.String(OutfileFlagName) cfg, err := NewDisputeGameConfigFromCLI(cliCtx, l) if err != nil { return err } ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - return DisputeGame(ctx, cfg) + dgo, err := DisputeGame(ctx, cfg) + if err != nil { + return fmt.Errorf("failed to deploy dispute game: %w", err) + } + + if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + + return nil } func NewDisputeGameConfigFromCLI(cliCtx *cli.Context, l log.Logger) (DisputeGameConfig, error) { @@ -123,9 +133,10 @@ func NewDisputeGameConfigFromCLI(cliCtx *cli.Context, l log.Logger) (DisputeGame return cfg, nil } -func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { +func DisputeGame(ctx context.Context, cfg DisputeGameConfig) (opcm.DeployDisputeGameOutput, error) { + var dgo opcm.DeployDisputeGameOutput if err := cfg.Check(); err != nil { - return fmt.Errorf("invalid config for DisputeGame: %w", err) + return dgo, fmt.Errorf("invalid config for DisputeGame: %w", err) } lgr := cfg.Logger @@ -135,7 +146,7 @@ func DisputeGame(ctx 
context.Context, cfg DisputeGameConfig) error { artifactsFS, cleanup, err := artifacts2.Download(ctx, cfg.ArtifactsLocator, progressor) if err != nil { - return fmt.Errorf("failed to download artifacts: %w", err) + return dgo, fmt.Errorf("failed to download artifacts: %w", err) } defer func() { if err := cleanup(); err != nil { @@ -145,22 +156,22 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dgo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } l1Rpc, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dgo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } chainID, err := l1Client.ChainID(ctx) if err != nil { - return fmt.Errorf("failed to get chain ID: %w", err) + return dgo, fmt.Errorf("failed to get chain ID: %w", err) } chainIDU64 := chainID.Uint64() standardVersionsTOML, err := standard.L1VersionsDataFor(chainIDU64) if err != nil { - return fmt.Errorf("error getting standard versions TOML: %w", err) + return dgo, fmt.Errorf("error getting standard versions TOML: %w", err) } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) @@ -174,7 +185,7 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { From: chainDeployer, }) if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) + return dgo, fmt.Errorf("failed to create broadcaster: %w", err) } host, err := env.DefaultForkedScriptHost( @@ -186,7 +197,7 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { l1Rpc, ) if err != nil { - return fmt.Errorf("failed to create L1 script host: %w", err) + return dgo, fmt.Errorf("failed to create L1 script host: %w", err) } var release string @@ -197,7 +208,8 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { } lgr.Info("deploying 
dispute game", "release", release) - dgo, err := opcm.DeployDisputeGame( + + dgo, err = opcm.DeployDisputeGame( host, opcm.DeployDisputeGameInput{ Release: release, @@ -218,17 +230,13 @@ func DisputeGame(ctx context.Context, cfg DisputeGameConfig) error { }, ) if err != nil { - return fmt.Errorf("error deploying dispute game: %w", err) + return dgo, fmt.Errorf("error deploying dispute game: %w", err) } if _, err := bcaster.Broadcast(ctx); err != nil { - return fmt.Errorf("failed to broadcast: %w", err) + return dgo, fmt.Errorf("failed to broadcast: %w", err) } lgr.Info("deployed dispute game") - - if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOut()); err != nil { - return fmt.Errorf("failed to write output: %w", err) - } - return nil + return dgo, nil } diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index 3991829bd46..c7c720d310d 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -11,6 +11,7 @@ import ( ) const ( + OutfileFlagName = "outfile" ArtifactsLocatorFlagName = "artifacts-locator" WithdrawalDelaySecondsFlagName = "withdrawal-delay-seconds" MinProposalSizeBytesFlagName = "min-proposal-size-bytes" @@ -44,6 +45,12 @@ const ( ) var ( + OutfileFlag = &cli.StringFlag{ + Name: OutfileFlagName, + Usage: "Output file. 
Use - for stdout.", + EnvVars: deployer.PrefixEnvVar("OUTFILE"), + Value: "-", + } ArtifactsLocatorFlag = &cli.StringFlag{ Name: ArtifactsLocatorFlagName, Usage: "Locator for artifacts.", @@ -221,6 +228,7 @@ var OPCMFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, ReleaseFlag, + OutfileFlag, } var ImplementationsFlags = []cli.Flag{ @@ -235,6 +243,7 @@ var ImplementationsFlags = []cli.Flag{ var DelayedWETHFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, + OutfileFlag, ArtifactsLocatorFlag, DelayedWethImplFlag, } @@ -242,6 +251,7 @@ var DelayedWETHFlags = []cli.Flag{ var DisputeGameFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, + OutfileFlag, ArtifactsLocatorFlag, MinProposalSizeBytesFlag, ChallengePeriodSecondsFlag, @@ -263,6 +273,7 @@ var DisputeGameFlags = []cli.Flag{ var BaseFPVMFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, + OutfileFlag, ArtifactsLocatorFlag, PreimageOracleFlag, } @@ -274,6 +285,7 @@ var AsteriscFlags = BaseFPVMFlags var ProxyFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, + OutfileFlag, ArtifactsLocatorFlag, ProxyOwnerFlag, } @@ -281,6 +293,7 @@ var ProxyFlags = []cli.Flag{ var SuperchainFlags = []cli.Flag{ deployer.L1RPCURLFlag, deployer.PrivateKeyFlag, + OutfileFlag, ArtifactsLocatorFlag, SuperchainProxyAdminOwnerFlag, ProtocolVersionsOwnerFlag, diff --git a/op-deployer/pkg/deployer/bootstrap/mips.go b/op-deployer/pkg/deployer/bootstrap/mips.go index 9c8fd555fb4..d5bd95752f9 100644 --- a/op-deployer/pkg/deployer/bootstrap/mips.go +++ b/op-deployer/pkg/deployer/bootstrap/mips.go @@ -84,6 +84,7 @@ func MIPSCLI(cliCtx *cli.Context) error { l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + outfile := cliCtx.String(OutfileFlagName) artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) artifactsLocator := new(artifacts2.Locator) if err := 
artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { @@ -95,7 +96,7 @@ func MIPSCLI(cliCtx *cli.Context) error { ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - return MIPS(ctx, MIPSConfig{ + dmo, err := MIPS(ctx, MIPSConfig{ L1RPCUrl: l1RPCUrl, PrivateKey: privateKey, Logger: l, @@ -103,11 +104,20 @@ func MIPSCLI(cliCtx *cli.Context) error { MipsVersion: mipsVersion, PreimageOracle: preimageOracle, }) + if err != nil { + return fmt.Errorf("failed to deploy MIPS: %w", err) + } + + if err := jsonutil.WriteJSON(dmo, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil } -func MIPS(ctx context.Context, cfg MIPSConfig) error { +func MIPS(ctx context.Context, cfg MIPSConfig) (opcm.DeployMIPSOutput, error) { + var dmo opcm.DeployMIPSOutput if err := cfg.Check(); err != nil { - return fmt.Errorf("invalid config for MIPS: %w", err) + return dmo, fmt.Errorf("invalid config for MIPS: %w", err) } lgr := cfg.Logger @@ -117,7 +127,7 @@ func MIPS(ctx context.Context, cfg MIPSConfig) error { artifactsFS, cleanup, err := artifacts2.Download(ctx, cfg.ArtifactsLocator, progressor) if err != nil { - return fmt.Errorf("failed to download artifacts: %w", err) + return dmo, fmt.Errorf("failed to download artifacts: %w", err) } defer func() { if err := cleanup(); err != nil { @@ -127,14 +137,14 @@ func MIPS(ctx context.Context, cfg MIPSConfig) error { l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dmo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } l1Client := ethclient.NewClient(l1RPC) chainID, err := l1Client.ChainID(ctx) if err != nil { - return fmt.Errorf("failed to get chain ID: %w", err) + return dmo, fmt.Errorf("failed to get chain ID: %w", err) } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) @@ -148,7 +158,7 @@ func MIPS(ctx context.Context, cfg 
MIPSConfig) error { From: chainDeployer, }) if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) + return dmo, fmt.Errorf("failed to create broadcaster: %w", err) } host, err := env.DefaultForkedScriptHost( @@ -160,7 +170,7 @@ func MIPS(ctx context.Context, cfg MIPSConfig) error { l1RPC, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return dmo, fmt.Errorf("failed to create script host: %w", err) } var release string @@ -172,7 +182,7 @@ func MIPS(ctx context.Context, cfg MIPSConfig) error { lgr.Info("deploying dispute game", "release", release) - dgo, err := opcm.DeployMIPS( + dmo, err = opcm.DeployMIPS( host, opcm.DeployMIPSInput{ MipsVersion: cfg.MipsVersion, @@ -180,17 +190,14 @@ func MIPS(ctx context.Context, cfg MIPSConfig) error { }, ) if err != nil { - return fmt.Errorf("error deploying dispute game: %w", err) + return dmo, fmt.Errorf("error deploying dispute game: %w", err) } if _, err := bcaster.Broadcast(ctx); err != nil { - return fmt.Errorf("failed to broadcast: %w", err) + return dmo, fmt.Errorf("failed to broadcast: %w", err) } lgr.Info("deployed dispute game") - if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOut()); err != nil { - return fmt.Errorf("failed to write output: %w", err) - } - return nil + return dmo, nil } diff --git a/op-deployer/pkg/deployer/bootstrap/opcm.go b/op-deployer/pkg/deployer/bootstrap/opcm.go index 5d42d1bfa95..0aaadd5c1d1 100644 --- a/op-deployer/pkg/deployer/bootstrap/opcm.go +++ b/op-deployer/pkg/deployer/bootstrap/opcm.go @@ -73,6 +73,7 @@ func OPCMCLI(cliCtx *cli.Context) error { l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + outfile := cliCtx.String(OutfileFlagName) release := cliCtx.String(ReleaseFlagName) ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) @@ -87,7 +88,7 @@ func OPCMCLI(cliCtx *cli.Context) error { return fmt.Errorf("failed to deploy OPCM: %w", err) } - if err := 
jsonutil.WriteJSON(out, ioutil.ToStdOut()); err != nil { + if err := jsonutil.WriteJSON(out, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { return fmt.Errorf("failed to write output: %w", err) } return nil diff --git a/op-deployer/pkg/deployer/bootstrap/proxy.go b/op-deployer/pkg/deployer/bootstrap/proxy.go index c96e497c8c7..ee8399b4bb5 100644 --- a/op-deployer/pkg/deployer/bootstrap/proxy.go +++ b/op-deployer/pkg/deployer/bootstrap/proxy.go @@ -75,6 +75,7 @@ func ProxyCLI(cliCtx *cli.Context) error { l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + outfile := cliCtx.String(OutfileFlagName) artifactsURLStr := cliCtx.String(ArtifactsLocatorFlagName) artifactsLocator := new(artifacts.Locator) if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { @@ -85,18 +86,27 @@ func ProxyCLI(cliCtx *cli.Context) error { ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) - return Proxy(ctx, ProxyConfig{ + dpo, err := Proxy(ctx, ProxyConfig{ L1RPCUrl: l1RPCUrl, PrivateKey: privateKey, Logger: l, ArtifactsLocator: artifactsLocator, Owner: owner, }) + if err != nil { + return fmt.Errorf("failed to deploy Proxy: %w", err) + } + + if err := jsonutil.WriteJSON(dpo, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil } -func Proxy(ctx context.Context, cfg ProxyConfig) error { +func Proxy(ctx context.Context, cfg ProxyConfig) (opcm.DeployProxyOutput, error) { + var dpo opcm.DeployProxyOutput if err := cfg.Check(); err != nil { - return fmt.Errorf("invalid config for Proxy: %w", err) + return dpo, fmt.Errorf("invalid config for Proxy: %w", err) } lgr := cfg.Logger @@ -106,7 +116,7 @@ func Proxy(ctx context.Context, cfg ProxyConfig) error { artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) if err != nil { - return fmt.Errorf("failed to download artifacts: %w", err) + 
return dpo, fmt.Errorf("failed to download artifacts: %w", err) } defer func() { if err := cleanup(); err != nil { @@ -116,12 +126,12 @@ func Proxy(ctx context.Context, cfg ProxyConfig) error { l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dpo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } chainID, err := l1Client.ChainID(ctx) if err != nil { - return fmt.Errorf("failed to get chain ID: %w", err) + return dpo, fmt.Errorf("failed to get chain ID: %w", err) } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) @@ -135,12 +145,12 @@ func Proxy(ctx context.Context, cfg ProxyConfig) error { From: chainDeployer, }) if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) + return dpo, fmt.Errorf("failed to create broadcaster: %w", err) } l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { - return fmt.Errorf("failed to connect to L1 RPC: %w", err) + return dpo, fmt.Errorf("failed to connect to L1 RPC: %w", err) } l1Host, err := env.DefaultForkedScriptHost( @@ -152,27 +162,23 @@ func Proxy(ctx context.Context, cfg ProxyConfig) error { l1RPC, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return dpo, fmt.Errorf("failed to create script host: %w", err) } - dgo, err := opcm.DeployProxy( + dpo, err = opcm.DeployProxy( l1Host, opcm.DeployProxyInput{ Owner: cfg.Owner, }, ) if err != nil { - return fmt.Errorf("error deploying proxy: %w", err) + return dpo, fmt.Errorf("error deploying proxy: %w", err) } if _, err := bcaster.Broadcast(ctx); err != nil { - return fmt.Errorf("failed to broadcast: %w", err) + return dpo, fmt.Errorf("failed to broadcast: %w", err) } lgr.Info("deployed new ERC-1967 proxy") - - if err := jsonutil.WriteJSON(dgo, ioutil.ToStdOut()); err != nil { - return fmt.Errorf("failed to write output: %w", err) - } - return nil + return dpo, nil } diff --git 
a/op-deployer/pkg/deployer/bootstrap/superchain.go b/op-deployer/pkg/deployer/bootstrap/superchain.go index 837f406c091..d9c5ed3d523 100644 --- a/op-deployer/pkg/deployer/bootstrap/superchain.go +++ b/op-deployer/pkg/deployer/bootstrap/superchain.go @@ -98,6 +98,7 @@ func SuperchainCLI(cliCtx *cli.Context) error { paused := cliCtx.Bool(PausedFlagName) requiredVersionStr := cliCtx.String(RequiredProtocolVersionFlagName) recommendedVersionStr := cliCtx.String(RecommendedProtocolVersionFlagName) + outfile := cliCtx.String(OutfileFlagName) cfg := SuperchainConfig{ L1RPCUrl: l1RPCUrl, @@ -124,7 +125,7 @@ func SuperchainCLI(cliCtx *cli.Context) error { return fmt.Errorf("failed to deploy superchain: %w", err) } - if err := jsonutil.WriteJSON(dso, ioutil.ToStdOut()); err != nil { + if err := jsonutil.WriteJSON(dso, ioutil.ToStdOutOrFileOrNoop(outfile, 0o755)); err != nil { return fmt.Errorf("failed to write output: %w", err) } return nil From 830fa2a7b327eb9738ebe51869d66ae5db2acfed Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 6 Dec 2024 15:53:34 -0700 Subject: [PATCH 104/111] Revert "maint: bump forge version (#13301)" (#13304) This reverts commit a2e7d852a8a16e526564401c41abad56173790d3. 
--- mise.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mise.toml b/mise.toml index 0dba715900c..5fb473de889 100644 --- a/mise.toml +++ b/mise.toml @@ -31,9 +31,9 @@ just = "1.37.0" # Foundry dependencies # Foundry is a special case because it supplies multiple binaries at the same # GitHub release, so we need to use the aliasing trick to get mise to not error -forge = "nightly-e52076714ace23c7a68e14f0048a40be3c6c8f0b" -cast = "nightly-e52076714ace23c7a68e14f0048a40be3c6c8f0b" -anvil = "nightly-e52076714ace23c7a68e14f0048a40be3c6c8f0b" +forge = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +cast = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" +anvil = "nightly-e5dbb7a320c2b871c4a4a1006ad3c15a08fcf17b" # Fake dependencies # Put things here if you need to track versions of tools or projects that can't From ad9e554945a76e379ee78580348ee49325a7374d Mon Sep 17 00:00:00 2001 From: Inphi Date: Fri, 6 Dec 2024 15:01:47 -0800 Subject: [PATCH 105/111] cannon: Forward evm options in testutil.ValidateEVM (#13221) --- cannon/mipsevm/testutil/mips.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cannon/mipsevm/testutil/mips.go b/cannon/mipsevm/testutil/mips.go index 0d5322c6fb6..43de91f0962 100644 --- a/cannon/mipsevm/testutil/mips.go +++ b/cannon/mipsevm/testutil/mips.go @@ -223,7 +223,7 @@ func (v *EvmValidator) ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitnes // ValidateEVM runs a single evm step and validates against an FPVM poststate func ValidateEVM(t *testing.T, stepWitness *mipsevm.StepWitness, step uint64, goVm mipsevm.FPVM, hashFn mipsevm.HashFn, contracts *ContractMetadata, opts ...evmOption) { - validator := NewEvmValidator(t, hashFn, contracts) + validator := NewEvmValidator(t, hashFn, contracts, opts...) 
validator.ValidateEVM(t, stepWitness, step, goVm) } From 4c88138e9f0d2922337d825fb3a34f435eee67c0 Mon Sep 17 00:00:00 2001 From: I love OP Date: Fri, 6 Dec 2024 20:39:42 -0300 Subject: [PATCH 106/111] Update op-conductor rpc api path (#12760) Co-authored-by: Matthew Slipper --- op-conductor/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-conductor/README.md b/op-conductor/README.md index c436d0248ac..aacc0ab65cb 100644 --- a/op-conductor/README.md +++ b/op-conductor/README.md @@ -48,7 +48,7 @@ This way you could understand how we handle the state transitions. ### RPC design -conductor provides rpc APIs for ease of cluster management, see [api](./op-conductor/rpc/api.go) for rpc definitions and how they can be used. +conductor provides rpc APIs for ease of cluster management, see [api](./rpc/api.go) for rpc definitions and how they can be used. ### Failure Scenario walkthrough From 2824b3b2c001f07c582c37213e6f0152b12ac3e2 Mon Sep 17 00:00:00 2001 From: Francis Li Date: Sun, 8 Dec 2024 12:19:25 -0800 Subject: [PATCH 107/111] [conductor] Fix test race condition (#13314) * Fix conductor test race condition * update * Address shell ci check * rerun CI for non-related issue --- op-conductor/conductor/service.go | 4 +--- op-conductor/conductor/service_test.go | 6 +++--- .../{run_test_100times.sh => run_test_1000times.sh} | 5 +++-- 3 files changed, 7 insertions(+), 8 deletions(-) rename op-conductor/{run_test_100times.sh => run_test_1000times.sh} (54%) diff --git a/op-conductor/conductor/service.go b/op-conductor/conductor/service.go index 89948b614f0..cccba2c76ac 100644 --- a/op-conductor/conductor/service.go +++ b/op-conductor/conductor/service.go @@ -381,9 +381,7 @@ func (oc *OpConductor) Start(ctx context.Context) error { oc.log.Info("OpConductor started") // queue an action in case sequencer is not in the desired state. oc.prevState = NewState(oc.leader.Load(), oc.healthy.Load(), oc.seqActive.Load()) - // Immediately queue an action. 
This is made blocking to ensure that start is not - // considered complete until the first action is executed. - oc.actionCh <- struct{}{} + oc.queueAction() return nil } diff --git a/op-conductor/conductor/service_test.go b/op-conductor/conductor/service_test.go index 196c7e3456c..ae7332af2b9 100644 --- a/op-conductor/conductor/service_test.go +++ b/op-conductor/conductor/service_test.go @@ -106,7 +106,7 @@ func (s *OpConductorTestSuite) SetupSuite() { s.metrics = &metrics.NoopMetricsImpl{} s.cfg = mockConfig(s.T()) s.version = "v0.0.1" - s.next = make(chan struct{}, 1) + s.next = make(chan struct{}) } func (s *OpConductorTestSuite) SetupTest() { @@ -129,7 +129,8 @@ func (s *OpConductorTestSuite) SetupTest() { s.conductor.leaderUpdateCh = s.leaderUpdateCh s.err = errors.New("error") - s.syncEnabled = false // default to no sync, turn it on by calling s.enableSynchronization() + s.syncEnabled = false // default to no sync, turn it on by calling s.enableSynchronization() + s.wg = sync.WaitGroup{} // create new wg for every test in case last test didn't finish the action loop during shutdown. } func (s *OpConductorTestSuite) TearDownTest() { @@ -876,6 +877,5 @@ func (s *OpConductorTestSuite) TestHandleInitError() { } func TestControlLoop(t *testing.T) { - t.Skipf("Skipping test, it's flaky and needs to be fixed") suite.Run(t, new(OpConductorTestSuite)) } diff --git a/op-conductor/run_test_100times.sh b/op-conductor/run_test_1000times.sh similarity index 54% rename from op-conductor/run_test_100times.sh rename to op-conductor/run_test_1000times.sh index 5d7500c44e2..f7fde44a00a 100755 --- a/op-conductor/run_test_100times.sh +++ b/op-conductor/run_test_1000times.sh @@ -2,10 +2,11 @@ set -e -for i in {1..100}; do +for i in {1..1000}; do echo "=======================" echo "Running iteration $i" - if ! gotestsum -- -run 'TestControlLoop' ./... --count=1 --timeout=5s -race; then + + if ! go test -v ./conductor/... 
-race -count=1; then echo "Test failed" exit 1 fi From 53034d287fb0026a3efe9bb3cbf094b8c8153f99 Mon Sep 17 00:00:00 2001 From: George Knee Date: Mon, 9 Dec 2024 14:40:57 +0000 Subject: [PATCH 108/111] op-batcher: syncActions is aware of safe=unsafe edge case (#13292) * add additional test case for computeSyncActions * fixup test * check for unsafe = safe edge case & replace oldestUnsafeBlock with nextSafeBlock * add test case for no progress and safe=unsafe * refine log * rename variable * harmonize log ordering and labels * tighten up test behaviour for expectedLogs * add test case: no progress + unsafe=safe + blocks in state and fix behaviour so we don't try to fetch unsafe blocks if there aren't any, even when there are blocks in state * typo --- op-batcher/batcher/sync_actions.go | 49 ++++++++++-------- op-batcher/batcher/sync_actions_test.go | 67 ++++++++++++++++++++++--- 2 files changed, 89 insertions(+), 27 deletions(-) diff --git a/op-batcher/batcher/sync_actions.go b/op-batcher/batcher/sync_actions.go index 6031d2ce258..76cdf846ad9 100644 --- a/op-batcher/batcher/sync_actions.go +++ b/op-batcher/batcher/sync_actions.go @@ -48,14 +48,17 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur return syncActions{}, true } + var allUnsafeBlocks *inclusiveBlockRange + if newSyncStatus.UnsafeL2.Number > newSyncStatus.SafeL2.Number { + allUnsafeBlocks = &inclusiveBlockRange{newSyncStatus.SafeL2.Number + 1, newSyncStatus.UnsafeL2.Number} + } + // PART 2: checks involving only the oldest block in the state oldestBlockInState, hasBlocks := blocks.Peek() - oldestUnsafeBlockNum := newSyncStatus.SafeL2.Number + 1 - youngestUnsafeBlockNum := newSyncStatus.UnsafeL2.Number if !hasBlocks { s := syncActions{ - blocksToLoad: &inclusiveBlockRange{oldestUnsafeBlockNum, youngestUnsafeBlockNum}, + blocksToLoad: allUnsafeBlocks, } l.Info("no blocks in state", "syncActions", s) return s, false @@ -63,17 +66,21 @@ func computeSyncActions[T 
channelStatuser](newSyncStatus eth.SyncStatus, prevCur // These actions apply in multiple unhappy scenarios below, where // we detect that the existing state is invalidated - // and we need to start over from the sequencer's oldest - // unsafe (and not safe) block. + // and we need to start over, loading all unsafe blocks + // from the sequencer. startAfresh := syncActions{ clearState: &newSyncStatus.SafeL2.L1Origin, - blocksToLoad: &inclusiveBlockRange{oldestUnsafeBlockNum, youngestUnsafeBlockNum}, + blocksToLoad: allUnsafeBlocks, } oldestBlockInStateNum := oldestBlockInState.NumberU64() + nextSafeBlockNum := newSyncStatus.SafeL2.Number + 1 - if oldestUnsafeBlockNum < oldestBlockInStateNum { - l.Warn("oldest unsafe block is below oldest block in state", "syncActions", startAfresh, "oldestBlockInState", oldestBlockInState, "newSafeBlock", newSyncStatus.SafeL2) + if nextSafeBlockNum < oldestBlockInStateNum { + l.Warn("next safe block is below oldest block in state", + "syncActions", startAfresh, + "oldestBlockInState", oldestBlockInState, + "safeL2", newSyncStatus.SafeL2) return startAfresh, false } @@ -81,25 +88,25 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur newestBlockInState := blocks[blocks.Len()-1] newestBlockInStateNum := newestBlockInState.NumberU64() - numBlocksToDequeue := oldestUnsafeBlockNum - oldestBlockInStateNum + numBlocksToDequeue := nextSafeBlockNum - oldestBlockInStateNum if numBlocksToDequeue > uint64(blocks.Len()) { // This could happen if the batcher restarted. // The sequencer may have derived the safe chain // from channels sent by a previous batcher instance. 
- l.Warn("oldest unsafe block above newest block in state, clearing channel manager state", - "oldestUnsafeBlockNum", oldestUnsafeBlockNum, + l.Warn("safe head above newest block in state, clearing channel manager state", + "syncActions", startAfresh, + "safeL2", newSyncStatus.SafeL2, "newestBlockInState", eth.ToBlockID(newestBlockInState), - "syncActions", - startAfresh) + ) return startAfresh, false } if numBlocksToDequeue > 0 && blocks[numBlocksToDequeue-1].Hash() != newSyncStatus.SafeL2.Hash { l.Warn("safe chain reorg, clearing channel manager state", + "syncActions", startAfresh, "existingBlock", eth.ToBlockID(blocks[numBlocksToDequeue-1]), - "newSafeBlock", newSyncStatus.SafeL2, - "syncActions", startAfresh) + "safeL2", newSyncStatus.SafeL2) return startAfresh, false } @@ -114,9 +121,9 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur // that the derivation pipeline may have stalled // e.g. because of Holocene strict ordering rules. l.Warn("sequencer did not make expected progress", + "syncActions", startAfresh, "existingBlock", ch.LatestL2(), - "newSafeBlock", newSyncStatus.SafeL2, - "syncActions", startAfresh) + "safeL2", newSyncStatus.SafeL2) return startAfresh, false } } @@ -132,12 +139,14 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur numChannelsToPrune++ } - start := newestBlockInStateNum + 1 - end := youngestUnsafeBlockNum + var allUnsafeBlocksAboveState *inclusiveBlockRange + if newSyncStatus.UnsafeL2.Number > newestBlockInStateNum { + allUnsafeBlocksAboveState = &inclusiveBlockRange{newestBlockInStateNum + 1, newSyncStatus.UnsafeL2.Number} + } return syncActions{ blocksToPrune: int(numBlocksToDequeue), channelsToPrune: numChannelsToPrune, - blocksToLoad: &inclusiveBlockRange{start, end}, + blocksToLoad: allUnsafeBlocksAboveState, }, false } diff --git a/op-batcher/batcher/sync_actions_test.go b/op-batcher/batcher/sync_actions_test.go index f48ed9dabfb..7c4e9b8b96c 100644 --- 
a/op-batcher/batcher/sync_actions_test.go +++ b/op-batcher/batcher/sync_actions_test.go @@ -55,6 +55,8 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { timedOut: false, } + happyCaseLogs := []string{} // in the happy case we expect no logs + type TestCase struct { name string // inputs @@ -105,7 +107,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { clearState: ð.BlockID{Number: 1}, blocksToLoad: &inclusiveBlockRange{101, 109}, }, - expectedLogs: []string{"oldest unsafe block is below oldest block in state"}, + expectedLogs: []string{"next safe block is below oldest block in state"}, }, {name: "unexpectedly good progress", // This can happen if another batcher instance got some blocks @@ -123,7 +125,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { clearState: ð.BlockID{Number: 1}, blocksToLoad: &inclusiveBlockRange{105, 109}, }, - expectedLogs: []string{"oldest unsafe block above newest block in state"}, + expectedLogs: []string{"safe head above newest block in state"}, }, {name: "safe chain reorg", // This can happen if there is an L1 reorg, the safe chain is at an acceptable @@ -161,6 +163,23 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { }, expectedLogs: []string{"sequencer did not make expected progress"}, }, + {name: "failed to make expected progress (unsafe=safe)", + // Edge case where unsafe = safe + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 3}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 101, Hash: block101.Hash(), L1Origin: eth.BlockID{Number: 1}}, + UnsafeL2: eth.L2BlockRef{Number: 101}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block102, block103}, + channels: []channelStatuser{channel103}, + expected: syncActions{ + clearState: ð.BlockID{Number: 1}, + // no blocks to load since there are no unsafe blocks + }, + expectedLogs: []string{"sequencer did not make expected progress"}, + }, {name: "no progress", // 
This can happen if we have a long channel duration // and we didn't submit or have any txs confirmed since @@ -192,6 +211,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { expected: syncActions{ blocksToLoad: &inclusiveBlockRange{104, 109}, }, + expectedLogs: []string{"no blocks in state"}, }, {name: "happy path", // This happens when the safe chain is being progressed as expected: @@ -225,10 +245,39 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { channelsToPrune: 1, blocksToLoad: &inclusiveBlockRange{105, 109}, }, + expectedLogs: happyCaseLogs, + }, + {name: "no progress + unsafe=safe", + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 5}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 100}, + UnsafeL2: eth.L2BlockRef{Number: 100}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{}, + channels: []channelStatuser{}, + expected: syncActions{}, + expectedLogs: []string{"no blocks in state"}, + }, + {name: "no progress + unsafe=safe + blocks in state", + newSyncStatus: eth.SyncStatus{ + HeadL1: eth.BlockRef{Number: 5}, + CurrentL1: eth.BlockRef{Number: 2}, + SafeL2: eth.L2BlockRef{Number: 101, Hash: block101.Hash()}, + UnsafeL2: eth.L2BlockRef{Number: 101}, + }, + prevCurrentL1: eth.BlockRef{Number: 1}, + blocks: queue.Queue[*types.Block]{block101}, + channels: []channelStatuser{}, + expected: syncActions{ + blocksToPrune: 1, + }, + expectedLogs: happyCaseLogs, }, } - for _, tc := range testCases { + for _, tc := range testCases[len(testCases)-1:] { t.Run(tc.name, func(t *testing.T) { l, h := testlog.CaptureLogger(t, log.LevelDebug) @@ -237,11 +286,15 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { tc.newSyncStatus, tc.prevCurrentL1, tc.blocks, tc.channels, l, ) - require.Equal(t, tc.expected, result) + require.Equal(t, tc.expected, result, "unexpected actions") require.Equal(t, tc.expectedSeqOutOfSync, outOfSync) - for _, e := range tc.expectedLogs { - r 
:= h.FindLog(testlog.NewMessageContainsFilter(e)) - require.NotNil(t, r, "could not find log message containing '%s'", e) + if tc.expectedLogs == nil { + require.Empty(t, h.Logs, "expected no logs but found some", "logs", h.Logs) + } else { + for _, e := range tc.expectedLogs { + r := h.FindLog(testlog.NewMessageContainsFilter(e)) + require.NotNil(t, r, "could not find log message containing '%s'", e) + } } }) } From b6131611bc29bfdb7a3984938e14e7ce8a1b80ef Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Mon, 9 Dec 2024 09:20:38 -0600 Subject: [PATCH 109/111] supervisor: L1 Processor (#13206) * supervisor: L1 Processor * comments ; test fixes * Make L1 source separate from RPC Addr * fix test * Add atomic bool for singleton processor routine --- op-e2e/interop/supersystem.go | 6 +- op-supervisor/cmd/main_test.go | 4 +- op-supervisor/config/config.go | 5 +- op-supervisor/config/config_test.go | 2 +- op-supervisor/flags/flags.go | 7 + op-supervisor/supervisor/backend/backend.go | 54 ++++++++ .../supervisor/backend/backend_test.go | 2 + op-supervisor/supervisor/backend/db/query.go | 26 ++++ op-supervisor/supervisor/backend/db/update.go | 38 ++++++ .../backend/processors/l1_processor.go | 127 ++++++++++++++++++ .../backend/processors/l1_processor_test.go | 107 +++++++++++++++ 11 files changed, 373 insertions(+), 5 deletions(-) create mode 100644 op-supervisor/supervisor/backend/processors/l1_processor.go create mode 100644 op-supervisor/supervisor/backend/processors/l1_processor_test.go diff --git a/op-e2e/interop/supersystem.go b/op-e2e/interop/supersystem.go index 8fc663f377a..158f5d61ec7 100644 --- a/op-e2e/interop/supersystem.go +++ b/op-e2e/interop/supersystem.go @@ -480,6 +480,7 @@ func (s *interopE2ESystem) prepareSupervisor() *supervisor.SupervisorService { EnableAdmin: true, }, L2RPCs: []string{}, + L1RPC: s.l1.UserRPC().RPC(), Datadir: path.Join(s.t.TempDir(), "supervisor"), } depSet := make(map[supervisortypes.ChainID]*depset.StaticConfigDependency) @@ 
-536,10 +537,11 @@ func (s *interopE2ESystem) prepare(t *testing.T, w worldResourcePaths) { s.hdWallet = s.prepareHDWallet() s.worldDeployment, s.worldOutput = s.prepareWorld(w) - // the supervisor and client are created first so that the L2s can use the supervisor + // L1 first so that the Supervisor and L2s can connect to it + s.beacon, s.l1 = s.prepareL1() + s.supervisor = s.prepareSupervisor() - s.beacon, s.l1 = s.prepareL1() s.l2s = s.prepareL2s() s.prepareContracts() diff --git a/op-supervisor/cmd/main_test.go b/op-supervisor/cmd/main_test.go index c61b6f3cb1c..06fcf78440d 100644 --- a/op-supervisor/cmd/main_test.go +++ b/op-supervisor/cmd/main_test.go @@ -16,6 +16,7 @@ import ( ) var ( + ValidL1RPC = "http://localhost:8545" ValidL2RPCs = []string{"http;//localhost:8545"} ValidDatadir = "./supervisor_test_datadir" ) @@ -38,7 +39,7 @@ func TestLogLevel(t *testing.T) { func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) { cfg := configForArgs(t, addRequiredArgs()) depSet := &depset.JsonDependencySetLoader{Path: "test"} - defaultCfgTempl := config.NewConfig(ValidL2RPCs, depSet, ValidDatadir) + defaultCfgTempl := config.NewConfig(ValidL1RPC, ValidL2RPCs, depSet, ValidDatadir) defaultCfg := *defaultCfgTempl defaultCfg.Version = Version require.Equal(t, defaultCfg, *cfg) @@ -125,6 +126,7 @@ func toArgList(req map[string]string) []string { func requiredArgs() map[string]string { args := map[string]string{ + "--l1-rpc": ValidL1RPC, "--l2-rpcs": ValidL2RPCs[0], "--dependency-set": "test", "--datadir": ValidDatadir, diff --git a/op-supervisor/config/config.go b/op-supervisor/config/config.go index b06d0592593..9d733207eaf 100644 --- a/op-supervisor/config/config.go +++ b/op-supervisor/config/config.go @@ -33,6 +33,8 @@ type Config struct { // requiring manual triggers for the backend to process anything. 
SynchronousProcessors bool + L1RPC string + L2RPCs []string Datadir string } @@ -56,7 +58,7 @@ func (c *Config) Check() error { // NewConfig creates a new config using default values whenever possible. // Required options with no suitable default are passed as parameters. -func NewConfig(l2RPCs []string, depSet depset.DependencySetSource, datadir string) *Config { +func NewConfig(l1RPC string, l2RPCs []string, depSet depset.DependencySetSource, datadir string) *Config { return &Config{ LogConfig: oplog.DefaultCLIConfig(), MetricsConfig: opmetrics.DefaultCLIConfig(), @@ -64,6 +66,7 @@ func NewConfig(l2RPCs []string, depSet depset.DependencySetSource, datadir strin RPC: oprpc.DefaultCLIConfig(), DependencySetSource: depSet, MockRun: false, + L1RPC: l1RPC, L2RPCs: l2RPCs, Datadir: datadir, } diff --git a/op-supervisor/config/config_test.go b/op-supervisor/config/config_test.go index 0d354ca134a..1e2f6f40c00 100644 --- a/op-supervisor/config/config_test.go +++ b/op-supervisor/config/config_test.go @@ -67,5 +67,5 @@ func validConfig() *Config { panic(err) } // Should be valid using only the required arguments passed in via the constructor. 
- return NewConfig([]string{"http://localhost:8545"}, depSet, "./supervisor_testdir") + return NewConfig("http://localhost:8545", []string{"http://localhost:8545"}, depSet, "./supervisor_testdir") } diff --git a/op-supervisor/flags/flags.go b/op-supervisor/flags/flags.go index 9b49823f304..d12695010a3 100644 --- a/op-supervisor/flags/flags.go +++ b/op-supervisor/flags/flags.go @@ -21,6 +21,11 @@ func prefixEnvVars(name string) []string { } var ( + L1RPCFlag = &cli.StringFlag{ + Name: "l1-rpc", + Usage: "L1 RPC source.", + EnvVars: prefixEnvVars("L1_RPC"), + } L2RPCsFlag = &cli.StringSliceFlag{ Name: "l2-rpcs", Usage: "L2 RPC sources.", @@ -46,6 +51,7 @@ var ( ) var requiredFlags = []cli.Flag{ + L1RPCFlag, L2RPCsFlag, DataDirFlag, DependencySetFlag, @@ -86,6 +92,7 @@ func ConfigFromCLI(ctx *cli.Context, version string) *config.Config { RPC: oprpc.ReadCLIConfig(ctx), DependencySetSource: &depset.JsonDependencySetLoader{Path: ctx.Path(DependencySetFlag.Name)}, MockRun: ctx.Bool(MockRunFlag.Name), + L1RPC: ctx.String(L1RPCFlag.Name), L2RPCs: ctx.StringSlice(L2RPCsFlag.Name), Datadir: ctx.Path(DataDirFlag.Name), } diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index 7d368cb40f1..722cabcf3eb 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -36,6 +36,9 @@ type SupervisorBackend struct { // chainDBs holds on to the DB indices for each chain chainDBs *db.ChainsDB + // l1Processor watches for new L1 blocks, updates the local-safe DB, and kicks off derivation orchestration + l1Processor *processors.L1Processor + // chainProcessors are notified of new unsafe blocks, and add the unsafe log events data into the events DB chainProcessors locks.RWMap[types.ChainID, *processors.ChainProcessor] @@ -125,6 +128,14 @@ func (su *SupervisorBackend) initResources(ctx context.Context, cfg *config.Conf su.chainProcessors.Set(chainID, chainProcessor) } + if cfg.L1RPC != "" { + 
if err := su.attachL1RPC(ctx, cfg.L1RPC); err != nil { + return fmt.Errorf("failed to create L1 processor: %w", err) + } + } else { + su.logger.Warn("No L1 RPC configured, L1 processor will not be started") + } + // the config has some RPC connections to attach to the chain-processors for _, rpc := range cfg.L2RPCs { err := su.attachRPC(ctx, rpc) @@ -230,6 +241,38 @@ func (su *SupervisorBackend) AttachProcessorSource(chainID types.ChainID, src pr return nil } +func (su *SupervisorBackend) attachL1RPC(ctx context.Context, l1RPCAddr string) error { + su.logger.Info("attaching L1 RPC to L1 processor", "rpc", l1RPCAddr) + + logger := su.logger.New("l1-rpc", l1RPCAddr) + l1RPC, err := client.NewRPC(ctx, logger, l1RPCAddr) + if err != nil { + return fmt.Errorf("failed to setup L1 RPC: %w", err) + } + l1Client, err := sources.NewL1Client( + l1RPC, + su.logger, + nil, + // placeholder config for the L1 + sources.L1ClientSimpleConfig(true, sources.RPCKindBasic, 100)) + if err != nil { + return fmt.Errorf("failed to setup L1 Client: %w", err) + } + su.AttachL1Source(l1Client) + return nil +} + +// attachL1Source attaches an L1 source to the L1 processor. +// If the L1 processor does not exist, it is created and started. 
+func (su *SupervisorBackend) AttachL1Source(source processors.L1Source) { + if su.l1Processor == nil { + su.l1Processor = processors.NewL1Processor(su.logger, su.chainDBs, source) + su.l1Processor.Start() + } else { + su.l1Processor.AttachClient(source) + } +} + func clientForL2(ctx context.Context, logger log.Logger, rpc string) (client.RPC, types.ChainID, error) { ethClient, err := dial.DialEthClientWithTimeout(ctx, 10*time.Second, logger, rpc) if err != nil { @@ -254,6 +297,11 @@ func (su *SupervisorBackend) Start(ctx context.Context) error { return fmt.Errorf("failed to resume chains db: %w", err) } + // start the L1 processor if it exists + if su.l1Processor != nil { + su.l1Processor.Start() + } + if !su.synchronousProcessors { // Make all the chain-processors run automatic background processing su.chainProcessors.Range(func(_ types.ChainID, processor *processors.ChainProcessor) bool { @@ -278,6 +326,12 @@ func (su *SupervisorBackend) Stop(ctx context.Context) error { return errAlreadyStopped } su.logger.Info("Closing supervisor backend") + + // stop the L1 processor + if su.l1Processor != nil { + su.l1Processor.Stop() + } + // close all processors su.chainProcessors.Range(func(id types.ChainID, processor *processors.ChainProcessor) bool { su.logger.Info("stopping chain processor", "chainID", id) diff --git a/op-supervisor/supervisor/backend/backend_test.go b/op-supervisor/supervisor/backend/backend_test.go index c104bf0bae5..a8fc4de438e 100644 --- a/op-supervisor/supervisor/backend/backend_test.go +++ b/op-supervisor/supervisor/backend/backend_test.go @@ -62,6 +62,7 @@ func TestBackendLifetime(t *testing.T) { require.NoError(t, err) t.Log("initialized!") + l1Src := &testutils.MockL1Source{} src := &testutils.MockL1Source{} blockX := eth.BlockRef{ @@ -77,6 +78,7 @@ func TestBackendLifetime(t *testing.T) { Time: blockX.Time + 2, } + b.AttachL1Source(l1Src) require.NoError(t, b.AttachProcessorSource(chainA, src)) require.FileExists(t, filepath.Join(cfg.Datadir, 
"900", "log.db"), "must have logs DB 900") diff --git a/op-supervisor/supervisor/backend/db/query.go b/op-supervisor/supervisor/backend/db/query.go index bbee01d71b4..5897ff3c924 100644 --- a/op-supervisor/supervisor/backend/db/query.go +++ b/op-supervisor/supervisor/backend/db/query.go @@ -30,6 +30,32 @@ func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { return logDB.LatestSealedBlockNum() } +// LastCommonL1 returns the latest common L1 block between all chains in the database. +// it only considers block numbers, not hash. That's because the L1 source is the same for all chains +// this data can be used to determine the starting point for L1 processing +func (db *ChainsDB) LastCommonL1() (types.BlockSeal, error) { + common := types.BlockSeal{} + for _, chain := range db.depSet.Chains() { + ldb, ok := db.localDBs.Get(chain) + if !ok { + return types.BlockSeal{}, types.ErrUnknownChain + } + _, derivedFrom, err := ldb.Latest() + if err != nil { + return types.BlockSeal{}, fmt.Errorf("failed to determine Last Common L1: %w", err) + } + common = derivedFrom + // if the common block isn't yet set, + // or if the new common block is older than the current common block + // set the common block + if common == (types.BlockSeal{}) || + derivedFrom.Number < common.Number { + common = derivedFrom + } + } + return common, nil +} + func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) error { v, ok := db.crossUnsafe.Get(chainID) if !ok { diff --git a/op-supervisor/supervisor/backend/db/update.go b/op-supervisor/supervisor/backend/db/update.go index 7ae7fde58a8..92c78b408a3 100644 --- a/op-supervisor/supervisor/backend/db/update.go +++ b/op-supervisor/supervisor/backend/db/update.go @@ -1,6 +1,7 @@ package db import ( + "errors" "fmt" "github.com/ethereum/go-ethereum/common" @@ -86,3 +87,40 @@ func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error { db.logger.Info("Updated finalized L1", "finalizedL1", finalized) 
return nil } + +// RecordNewL1 records a new L1 block in the database. +// it uses the latest derived L2 block as the derived block for the new L1 block. +func (db *ChainsDB) RecordNewL1(ref eth.BlockRef) error { + for _, chain := range db.depSet.Chains() { + // get local derivation database + ldb, ok := db.localDBs.Get(chain) + if !ok { + return fmt.Errorf("cannot RecordNewL1 to chain %s: %w", chain, types.ErrUnknownChain) + } + // get the latest derived and derivedFrom blocks + derivedFrom, derived, err := ldb.Latest() + if err != nil { + return fmt.Errorf("failed to get latest derivedFrom for chain %s: %w", chain, err) + } + // make a ref from the latest derived block + derivedParent, err := ldb.PreviousDerived(derived.ID()) + if errors.Is(err, types.ErrFuture) { + db.logger.Warn("Empty DB, Recording first L1 block", "chain", chain, "err", err) + } else if err != nil { + db.logger.Warn("Failed to get latest derivedfrom to insert new L1 block", "chain", chain, "err", err) + return err + } + derivedRef := derived.MustWithParent(derivedParent.ID()) + // don't push the new L1 block if it's not newer than the latest derived block + if derivedFrom.Number >= ref.Number { + db.logger.Warn("L1 block has already been processed for this height", "chain", chain, "block", ref, "latest", derivedFrom) + continue + } + // the database is extended with the new L1 and the existing L2 + if err = db.UpdateLocalSafe(chain, ref, derivedRef); err != nil { + db.logger.Error("Failed to update local safe", "chain", chain, "block", ref, "derived", derived, "err", err) + return err + } + } + return nil +} diff --git a/op-supervisor/supervisor/backend/processors/l1_processor.go b/op-supervisor/supervisor/backend/processors/l1_processor.go new file mode 100644 index 00000000000..063acd056e0 --- /dev/null +++ b/op-supervisor/supervisor/backend/processors/l1_processor.go @@ -0,0 +1,127 @@ +package processors + +import ( + "context" + "sync" + "sync/atomic" + "time" + + 
"github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/log" +) + +type chainsDB interface { + RecordNewL1(ref eth.BlockRef) error + LastCommonL1() (types.BlockSeal, error) +} + +type L1Source interface { + L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) +} + +type L1Processor struct { + log log.Logger + client L1Source + clientMu sync.Mutex + running atomic.Bool + + currentNumber uint64 + tickDuration time.Duration + + db chainsDB + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewL1Processor(log log.Logger, cdb chainsDB, client L1Source) *L1Processor { + ctx, cancel := context.WithCancel(context.Background()) + return &L1Processor{ + client: client, + db: cdb, + log: log.New("service", "l1-processor"), + tickDuration: 6 * time.Second, + ctx: ctx, + cancel: cancel, + } +} + +func (p *L1Processor) AttachClient(client L1Source) { + p.clientMu.Lock() + defer p.clientMu.Unlock() + p.client = client +} + +func (p *L1Processor) Start() { + // if already running, do nothing + if p.running.Load() { + return + } + p.running.Store(true) + p.currentNumber = 0 + // if there is an issue getting the last common L1, default to starting from 0 + // consider making this a fatal error in the future once initialization is more robust + if lastL1, err := p.db.LastCommonL1(); err == nil { + p.currentNumber = lastL1.Number + } + p.wg.Add(1) + go p.worker() +} + +func (p *L1Processor) Stop() { + // if not running, do nothing + if !p.running.Load() { + return + } + p.cancel() + p.wg.Wait() + p.running.Store(false) +} + +// worker runs a loop that checks for new L1 blocks at a regular interval +func (p *L1Processor) worker() { + defer p.wg.Done() + delay := time.NewTicker(p.tickDuration) + for { + select { + case <-p.ctx.Done(): + return + case <-delay.C: + p.log.Debug("Checking for new L1 block", "current", 
p.currentNumber) + err := p.work() + if err != nil { + p.log.Warn("Failed to process L1", "err", err) + } + } + } +} + +// work checks for a new L1 block and processes it if found +// the starting point is set when Start is called, and blocks are processed searched incrementally +// if a new block is found, it is recorded in the database and the target number is updated +// in the future it will also kick of derivation management for the sync nodes +func (p *L1Processor) work() error { + p.clientMu.Lock() + defer p.clientMu.Unlock() + nextNumber := p.currentNumber + 1 + ref, err := p.client.L1BlockRefByNumber(p.ctx, nextNumber) + if err != nil { + return err + } + // record the new L1 block + p.log.Debug("Processing new L1 block", "block", ref) + err = p.db.RecordNewL1(ref) + if err != nil { + return err + } + + // go drive derivation on this new L1 input + // only possible once bidirectional RPC and new derivers are in place + // could do this as a function callback to a more appropriate driver + + // update the target number + p.currentNumber = nextNumber + return nil +} diff --git a/op-supervisor/supervisor/backend/processors/l1_processor_test.go b/op-supervisor/supervisor/backend/processors/l1_processor_test.go new file mode 100644 index 00000000000..143ebb66b74 --- /dev/null +++ b/op-supervisor/supervisor/backend/processors/l1_processor_test.go @@ -0,0 +1,107 @@ +package processors + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +type mockChainsDB struct { + recordNewL1Fn func(ref eth.BlockRef) error + lastCommonL1Fn func() (types.BlockSeal, error) +} + +func (m *mockChainsDB) RecordNewL1(ref eth.BlockRef) error { + if m.recordNewL1Fn != nil { + return m.recordNewL1Fn(ref) + } + 
return nil +} + +func (m *mockChainsDB) LastCommonL1() (types.BlockSeal, error) { + if m.lastCommonL1Fn != nil { + return m.lastCommonL1Fn() + } + return types.BlockSeal{}, nil +} + +type mockL1BlockRefByNumberFetcher struct { + l1BlockByNumberFn func() (eth.L1BlockRef, error) +} + +func (m *mockL1BlockRefByNumberFetcher) L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error) { + if m.l1BlockByNumberFn != nil { + return m.l1BlockByNumberFn() + } + return eth.L1BlockRef{}, nil +} + +func TestL1Processor(t *testing.T) { + processorForTesting := func() *L1Processor { + ctx, cancel := context.WithCancel(context.Background()) + proc := &L1Processor{ + log: testlog.Logger(t, log.LvlInfo), + client: &mockL1BlockRefByNumberFetcher{}, + currentNumber: 0, + tickDuration: 1 * time.Second, + db: &mockChainsDB{}, + ctx: ctx, + cancel: cancel, + } + return proc + } + t.Run("Initializes LastCommonL1", func(t *testing.T) { + proc := processorForTesting() + proc.db.(*mockChainsDB).lastCommonL1Fn = func() (types.BlockSeal, error) { + return types.BlockSeal{Number: 10}, nil + } + // before starting, the current number should be 0 + require.Equal(t, uint64(0), proc.currentNumber) + proc.Start() + defer proc.Stop() + // after starting, the current number should still be 0 + require.Equal(t, uint64(10), proc.currentNumber) + }) + t.Run("Initializes LastCommonL1 at 0 if error", func(t *testing.T) { + proc := processorForTesting() + proc.db.(*mockChainsDB).lastCommonL1Fn = func() (types.BlockSeal, error) { + return types.BlockSeal{Number: 10}, fmt.Errorf("error") + } + // before starting, the current number should be 0 + require.Equal(t, uint64(0), proc.currentNumber) + proc.Start() + defer proc.Stop() + // the error means the current number should still be 0 + require.Equal(t, uint64(0), proc.currentNumber) + }) + t.Run("Records new L1", func(t *testing.T) { + proc := processorForTesting() + // return a new block number each time + num := uint64(0) + 
proc.client.(*mockL1BlockRefByNumberFetcher).l1BlockByNumberFn = func() (eth.L1BlockRef, error) { + defer func() { num++ }() + return eth.L1BlockRef{Number: num}, nil + } + // confirm that recordNewL1 is called for each block number received + called := uint64(0) + proc.db.(*mockChainsDB).recordNewL1Fn = func(ref eth.BlockRef) error { + require.Equal(t, called, ref.Number) + called++ + return nil + } + proc.Start() + defer proc.Stop() + require.Eventually(t, func() bool { + return called >= 1 && proc.currentNumber >= 1 + }, 10*time.Second, 100*time.Millisecond) + + }) + +} From a47441c8995e1b02fcddbbe558c65be3763a8a7f Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 9 Dec 2024 23:35:36 +0800 Subject: [PATCH 110/111] cannon: add `RegSP` constant (#13316) * add RegSP constant * address comments * fix natspec * update semver-lock --- cannon/mipsevm/exec/mips_syscalls.go | 15 ++++++++------- cannon/mipsevm/multithreaded/mips.go | 7 ++++--- cannon/mipsevm/program/patch.go | 3 ++- .../{exec => register}/calling_convention.go | 4 +++- cannon/mipsevm/tests/evm_multithreaded_test.go | 7 ++++--- .../mipsevm/tests/fuzz_evm_multithreaded_test.go | 7 ++++--- .../contracts-bedrock/snapshots/semver-lock.json | 4 ++-- packages/contracts-bedrock/src/cannon/MIPS2.sol | 10 +++++----- .../src/cannon/libraries/MIPSSyscalls.sol | 1 + 9 files changed, 33 insertions(+), 25 deletions(-) rename cannon/mipsevm/{exec => register}/calling_convention.go (92%) diff --git a/cannon/mipsevm/exec/mips_syscalls.go b/cannon/mipsevm/exec/mips_syscalls.go index 6387a2a2b91..8b06c3e2a2d 100644 --- a/cannon/mipsevm/exec/mips_syscalls.go +++ b/cannon/mipsevm/exec/mips_syscalls.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" ) type Word = arch.Word @@ 
-99,12 +100,12 @@ const ( ) func GetSyscallArgs(registers *[32]Word) (syscallNum, a0, a1, a2, a3 Word) { - syscallNum = registers[RegSyscallNum] // v0 + syscallNum = registers[register.RegSyscallNum] // v0 - a0 = registers[RegSyscallParam1] - a1 = registers[RegSyscallParam2] - a2 = registers[RegSyscallParam3] - a3 = registers[RegSyscallParam4] + a0 = registers[register.RegSyscallParam1] + a1 = registers[register.RegSyscallParam2] + a2 = registers[register.RegSyscallParam3] + a3 = registers[register.RegSyscallParam4] return syscallNum, a0, a1, a2, a3 } @@ -281,8 +282,8 @@ func HandleSysFcntl(a0, a1 Word) (v0, v1 Word) { } func HandleSyscallUpdates(cpu *mipsevm.CpuScalars, registers *[32]Word, v0, v1 Word) { - registers[RegSyscallRet1] = v0 - registers[RegSyscallErrno] = v1 + registers[register.RegSyscallRet1] = v0 + registers[register.RegSyscallErrno] = v1 cpu.PC = cpu.NextPC cpu.NextPC = cpu.NextPC + 4 diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index b537953a31e..8800c4f7aa6 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ b/cannon/mipsevm/multithreaded/mips.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" ) type Word = arch.Word @@ -57,10 +58,10 @@ func (m *InstrumentedState) handleSyscall() error { Registers: thread.Registers, } - newThread.Registers[29] = a1 + newThread.Registers[register.RegSP] = a1 // the child will perceive a 0 value as returned value instead, and no error - newThread.Registers[exec.RegSyscallRet1] = 0 - newThread.Registers[exec.RegSyscallErrno] = 0 + newThread.Registers[register.RegSyscallRet1] = 0 + newThread.Registers[register.RegSyscallErrno] = 0 m.state.NextThreadId++ // Preempt this thread for the new one. 
But not before updating PCs diff --git a/cannon/mipsevm/program/patch.go b/cannon/mipsevm/program/patch.go index 603bb41086a..7f7c72f4939 100644 --- a/cannon/mipsevm/program/patch.go +++ b/cannon/mipsevm/program/patch.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" ) const WordSizeBytes = arch.WordSizeBytes @@ -63,7 +64,7 @@ func PatchStack(st mipsevm.FPVMState) error { if err := st.GetMemory().SetMemoryRange(sp-4*memory.PageSize, bytes.NewReader(make([]byte, 5*memory.PageSize))); err != nil { return errors.New("failed to allocate page for stack content") } - st.GetRegistersRef()[29] = sp + st.GetRegistersRef()[register.RegSP] = sp storeMem := func(addr Word, v Word) { var dat [WordSizeBytes]byte diff --git a/cannon/mipsevm/exec/calling_convention.go b/cannon/mipsevm/register/calling_convention.go similarity index 92% rename from cannon/mipsevm/exec/calling_convention.go rename to cannon/mipsevm/register/calling_convention.go index 8cbb963a964..37a7931c4c8 100644 --- a/cannon/mipsevm/exec/calling_convention.go +++ b/cannon/mipsevm/register/calling_convention.go @@ -1,4 +1,4 @@ -package exec +package register // FYI: https://en.wikibooks.org/wiki/MIPS_Assembly/Register_File // @@ -12,6 +12,8 @@ const ( RegA2 = 6 // 4th syscall argument; set to 0/1 for success/error RegA3 = 7 + // Stack pointer + RegSP = 29 ) // FYI: https://web.archive.org/web/20231223163047/https://www.linux-mips.org/wiki/Syscall diff --git a/cannon/mipsevm/tests/evm_multithreaded_test.go b/cannon/mipsevm/tests/evm_multithreaded_test.go index 1353932dd71..02cce0addf2 100644 --- a/cannon/mipsevm/tests/evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded_test.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" 
"github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mttestutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) @@ -327,9 +328,9 @@ func TestEVM_SysClone_Successful(t *testing.T) { expectedNewThread.PC = state.GetCpu().NextPC expectedNewThread.NextPC = state.GetCpu().NextPC + 4 expectedNewThread.ThreadId = 1 - expectedNewThread.Registers[2] = 0 - expectedNewThread.Registers[7] = 0 - expectedNewThread.Registers[29] = stackPtr + expectedNewThread.Registers[register.RegSyscallRet1] = 0 + expectedNewThread.Registers[register.RegSyscallErrno] = 0 + expectedNewThread.Registers[register.RegSP] = stackPtr var err error var stepWitness *mipsevm.StepWitness diff --git a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go index 49f45a4a5f2..20a628f4e34 100644 --- a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mttestutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) @@ -46,9 +47,9 @@ func FuzzStateSyscallCloneMT(f *testing.F) { epxectedNewThread := expected.ExpectNewThread() epxectedNewThread.PC = state.GetCpu().NextPC epxectedNewThread.NextPC = state.GetCpu().NextPC + 4 - epxectedNewThread.Registers[2] = 0 - epxectedNewThread.Registers[7] = 0 - epxectedNewThread.Registers[29] = stackPtr + epxectedNewThread.Registers[register.RegSyscallNum] = 0 + epxectedNewThread.Registers[register.RegSyscallErrno] = 0 + epxectedNewThread.Registers[register.RegSP] = stackPtr 
expected.NextThreadId = nextThreadId + 1 expected.StepsSinceLastContextSwitch = 0 if state.TraverseRight { diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index dcfdaeb982f..332d30d23b4 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -140,8 +140,8 @@ "sourceCodeHash": "0x6c45dd23cb0d6f9bf4f84855ad0caf70e53dee3fe6c41454f7bf8df52ec3a9af" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0x7476695bb101cb45213793291124e3ec41e13a02d291837b76d8a35bfc8ec2c1", - "sourceCodeHash": "0xeaceb5d28bd58fca6a234d9291ca01424bf83576d191ee3046272bc4987d0b29" + "initCodeHash": "0x4971f62a6aecf91bd795fa44b5ce3cb77a987719af4f351d4aec5b6c3bf81387", + "sourceCodeHash": "0x8da8be0b7d60af0eb11bd58653f1854d56a8f0616f3aeaeba7ab9ec340d02ac7" }, "src/cannon/MIPS64.sol": { "initCodeHash": "0x6516160f35a85abb65d8102fa71f03cb57518787f9af85bc951f27ee60e6bb8f", diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index e2af829be6e..56024171dac 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -63,8 +63,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.25 - string public constant version = "1.0.0-beta.25"; + /// @custom:semver 1.0.0-beta.26 + string public constant version = "1.0.0-beta.26"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -428,10 +428,10 @@ contract MIPS2 is ISemver { for (uint256 i; i < 32; i++) { newThread.registers[i] = thread.registers[i]; } - newThread.registers[29] = a1; // set stack pointer + newThread.registers[sys.REG_SP] = a1; // set stack pointer // the child will perceive a 0 value as returned value instead, and no error - newThread.registers[2] = 0; - newThread.registers[7] = 0; + newThread.registers[sys.REG_SYSCALL_RET1] = 0; + newThread.registers[sys.REG_SYSCALL_ERRNO] = 0; state.nextThreadID++; // Preempt this thread for the new one. But not before updating PCs diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index 8fa62dbbad7..e273c54d654 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -156,6 +156,7 @@ library MIPSSyscalls { uint32 internal constant REG_A1 = 5; uint32 internal constant REG_A2 = 6; uint32 internal constant REG_A3 = 7; + uint32 internal constant REG_SP = 29; // FYI: https://web.archive.org/web/20231223163047/https://www.linux-mips.org/wiki/Syscall uint32 internal constant REG_SYSCALL_NUM = REG_V0; From 036b1d537a988d21faaa6fcd6f894d46e50109e0 Mon Sep 17 00:00:00 2001 From: agusduha Date: Mon, 9 Dec 2024 14:23:05 -0300 Subject: [PATCH 111/111] fix: pre pr and interfaces imports --- .../interfaces/L1/ISuperchainConfig.sol | 4 ++-- .../snapshots/semver-lock.json | 20 +++++++++---------- .../src/L1/LiquidityMigrator.sol | 2 +- .../src/L1/OptimismPortal2.sol | 4 ++-- .../src/L1/OptimismPortalInterop.sol | 4 ++-- .../src/L1/SharedLockbox.sol | 6 +++--- .../src/L1/SuperchainConfig.sol | 4 ++-- .../src/L1/SystemConfigInterop.sol | 4 ++-- .../test/L1/SharedLockbox.t.sol | 2 +- 9 files changed, 25 insertions(+), 25 deletions(-) diff --git a/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol 
b/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol index efc0c598c5b..c9fe89923c1 100644 --- a/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol +++ b/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { IDependencySet } from "src/L2/interfaces/IDependencySet.sol"; -import { ISharedLockbox } from "src/L1/interfaces/ISharedLockbox.sol"; +import { IDependencySet } from "interfaces/L2/IDependencySet.sol"; +import { ISharedLockbox } from "interfaces/L1/ISharedLockbox.sol"; interface ISuperchainConfig is IDependencySet { enum UpdateType { diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 685eff29960..f8344f2ad79 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -28,12 +28,12 @@ "sourceCodeHash": "0xb71e8bc24ea9ebb5692762005f2936ba2a00bf169e1e32f504a0f6e23a349a22" }, "src/L1/OptimismPortal2.sol": { - "initCodeHash": "0x215f439adc85bc5ebb1b234c1acc128eecb7c9a43243edbfcd9e701f128224f1", - "sourceCodeHash": "0xb7cbcb27240d0fd85a7a3009676abb93679e3b8723a967c1f310390464399869" + "initCodeHash": "0x3b7d8df1b988111d84149da579a40bc82b7fcf177cc151f5c504a143f4d342ea", + "sourceCodeHash": "0xf5fe46560360ad9bf5068c81e5a3e01b3df4dd8393e0a25f0b57a12cd51a5223" }, "src/L1/OptimismPortalInterop.sol": { - "initCodeHash": "0xbcd9d2a47e2b6076d242762cc072387d9da5532b72d9f0286de2d683cd918b13", - "sourceCodeHash": "0xd3f70c69c35d05f0eec20ac7dae4d86b20066af2b2395387cc62ad80b5a27b06" + "initCodeHash": "0x39b347c4895e627b493364ba4fce0a61659a3ddd4d2d1c5be72a430238b1c987", + "sourceCodeHash": "0x2512df44a0165e83babb347e4f66a24c668e0f37a56e653dca54d3bd08ab5fd8" }, "src/L1/ProtocolVersions.sol": { "initCodeHash": "0xb0ff1661226417342001fe9f0b64c340b7c074ff71579abf05399f4e742aaca1", @@ -41,19 +41,19 @@ }, 
"src/L1/SharedLockbox.sol": { "initCodeHash": "0x914d95090b8d5b37744a030cd9ac5a2b57c367d695f6a6ae3ad3aa894c599c3c", - "sourceCodeHash": "0x240277f19a6505220efccc90f36d5ab309a19a6fc96f6e53fe6d012b8c56e3ba" + "sourceCodeHash": "0xbc1e6321e241f18175c2c3c0cf898490a31ebe0dbaced1213c59621cdafd63c2" }, "src/L1/SuperchainConfig.sol": { - "initCodeHash": "0x56780fdb911f250dde8a5cfb2aae158405503cf295efd784ba9823667838bd99", - "sourceCodeHash": "0x7cd295c29b65facd7846ea04898e1a1f890859eba9f5ebbf1ac8c76000cdc2e1" + "initCodeHash": "0x09308bf7a01983bfcd69c61a6b4e0499126f8888f352a59ea7a26bf0c97bc158", + "sourceCodeHash": "0x21656a949166ee16854a4b0c67901c1b8b4a90c94c608f9d0e37e20e53f3a466" }, "src/L1/SystemConfig.sol": { "initCodeHash": "0x3ba55b46516de34186ff0cc92af9ca3ff916989ecb7d2fa9e82000f648607985", "sourceCodeHash": "0x4085b02ea01cd16172a1809ddd9be69c567f7b204cefc93f7c4d9071da812daa" }, "src/L1/SystemConfigInterop.sol": { - "initCodeHash": "0x1d122143f63b5c00af41a246b1099ee968af04df7af394db874613d80bc20898", - "sourceCodeHash": "0x2e928783bce90409817c223e6746b74f4c164d614bcf0b0f26a1b82811882974" + "initCodeHash": "0x78b8aea3f431fa5be3c003970a057b7fe1f7fa0aeb2551a10e7557575da5696b", + "sourceCodeHash": "0xf440ef535e53554f744895b89a4c985fd1dfd3b0de4d1cd4bc9216e64e9e16c0" }, "src/L2/BaseFeeVault.sol": { "initCodeHash": "0x6745b7be3895a5e8d373df0066d931bae29c47672ac46c2f5829bd0052cc6d9e", @@ -227,4 +227,4 @@ "initCodeHash": "0x2bfce526f82622288333d53ca3f43a0a94306ba1bab99241daa845f8f4b18bd4", "sourceCodeHash": "0xf49d7b0187912a6bb67926a3222ae51121e9239495213c975b3b4b217ee57a1b" } -} +} \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/LiquidityMigrator.sol b/packages/contracts-bedrock/src/L1/LiquidityMigrator.sol index f3b8060996a..48634a460be 100644 --- a/packages/contracts-bedrock/src/L1/LiquidityMigrator.sol +++ b/packages/contracts-bedrock/src/L1/LiquidityMigrator.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { 
ISharedLockbox } from "./interfaces/ISharedLockbox.sol"; +import { ISharedLockbox } from "interfaces/L1/ISharedLockbox.sol"; /// @custom:proxied true /// @title LiquidityMigrator diff --git a/packages/contracts-bedrock/src/L1/OptimismPortal2.sol b/packages/contracts-bedrock/src/L1/OptimismPortal2.sol index f6cc46cbe65..de9513abcfd 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortal2.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortal2.sol @@ -186,9 +186,9 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ISemver { } /// @notice Semantic version. - /// @custom:semver 3.11.0-beta.7 + /// @custom:semver 3.11.0-beta.8 function version() public pure virtual returns (string memory) { - return "3.11.0-beta.7"; + return "3.11.0-beta.8"; } /// @notice Constructs the OptimismPortal contract. diff --git a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol index 7c0fba75ee7..375678f8ec3 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol @@ -26,9 +26,9 @@ contract OptimismPortalInterop is OptimismPortal2 { OptimismPortal2(_proofMaturityDelaySeconds, _disputeGameFinalityDelaySeconds, _sharedLockbox) { } - /// @custom:semver +interop-beta.4 + /// @custom:semver +interop-beta.5 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.4"); + return string.concat(super.version(), "+interop-beta.5"); } /// @notice Sets static configuration options for the L2 system. 
diff --git a/packages/contracts-bedrock/src/L1/SharedLockbox.sol b/packages/contracts-bedrock/src/L1/SharedLockbox.sol index 119fd84c2b6..157668a33f0 100644 --- a/packages/contracts-bedrock/src/L1/SharedLockbox.sol +++ b/packages/contracts-bedrock/src/L1/SharedLockbox.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; -import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { Unauthorized, Paused } from "src/libraries/errors/CommonErrors.sol"; /// @custom:proxied true diff --git a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol index 3460a06b743..f719da63536 100644 --- a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol +++ b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol @@ -62,8 +62,8 @@ contract SuperchainConfig is Initializable, ISemver { error ChainAlreadyAdded(); /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.3 - string public constant version = "1.1.1-beta.3"; + /// @custom:semver 1.1.1-beta.4 + string public constant version = "1.1.1-beta.4"; // Mapping from chainId to SystemConfig address mapping(uint256 => address) public systemConfigs; diff --git a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol index 35413f004df..875513c1fa0 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol @@ -29,9 +29,9 @@ contract SystemConfigInterop is SystemConfig { /// @notice The address of the SuperchainConfig contract. 
address public immutable SUPERCHAIN_CONFIG; - /// @custom:semver +interop-beta.6 + /// @custom:semver +interop-beta.7 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.6"); + return string.concat(super.version(), "+interop-beta.7"); } /// @notice Constructs the SystemConfig contract. diff --git a/packages/contracts-bedrock/test/L1/SharedLockbox.t.sol b/packages/contracts-bedrock/test/L1/SharedLockbox.t.sol index 73c8369a493..183487a7103 100644 --- a/packages/contracts-bedrock/test/L1/SharedLockbox.t.sol +++ b/packages/contracts-bedrock/test/L1/SharedLockbox.t.sol @@ -9,7 +9,7 @@ import { Unauthorized, Paused as PausedError } from "src/libraries/errors/Common import { SharedLockbox } from "src/L1/SharedLockbox.sol"; // Interfaces -import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { IOptimismPortal } from "interfaces/L1/IOptimismPortal.sol"; contract SharedLockboxTest is CommonTest { event ETHLocked(address indexed portal, uint256 amount);